]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.1.8-201201062207.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.1.8-201201062207.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index d6e6724..a024ce8 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index 64a2e76..5b86280 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,46 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234 +endif
235 +ifdef CONFIG_CHECKER_PLUGIN
236 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238 +endif
239 +endif
240 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242 +ifeq ($(KBUILD_EXTMOD),)
243 +gcc-plugins:
244 + $(Q)$(MAKE) $(build)=tools/gcc
245 +else
246 +gcc-plugins: ;
247 +endif
248 +else
249 +gcc-plugins:
250 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
251 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
252 +else
253 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
254 +endif
255 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
256 +endif
257 +endif
258 +
259 include $(srctree)/arch/$(SRCARCH)/Makefile
260
261 ifneq ($(CONFIG_FRAME_WARN),0)
262 @@ -708,7 +749,7 @@ export mod_strip_cmd
263
264
265 ifeq ($(KBUILD_EXTMOD),)
266 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
267 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
268
269 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
270 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
271 @@ -932,6 +973,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
272
273 # The actual objects are generated when descending,
274 # make sure no implicit rule kicks in
275 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
276 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
277
278 # Handle descending into subdirectories listed in $(vmlinux-dirs)
279 @@ -941,7 +983,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
280 # Error messages still appears in the original language
281
282 PHONY += $(vmlinux-dirs)
283 -$(vmlinux-dirs): prepare scripts
284 +$(vmlinux-dirs): gcc-plugins prepare scripts
285 $(Q)$(MAKE) $(build)=$@
286
287 # Store (new) KERNELRELASE string in include/config/kernel.release
288 @@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
289 $(Q)$(MAKE) $(build)=. missing-syscalls
290
291 # All the preparing..
292 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
293 prepare: prepare0
294
295 # Generate some files
296 @@ -1087,6 +1130,7 @@ all: modules
297 # using awk while concatenating to the final file.
298
299 PHONY += modules
300 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
301 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
302 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
303 @$(kecho) ' Building modules, stage 2.';
304 @@ -1102,7 +1146,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
305
306 # Target to prepare building external modules
307 PHONY += modules_prepare
308 -modules_prepare: prepare scripts
309 +modules_prepare: gcc-plugins prepare scripts
310
311 # Target to install modules
312 PHONY += modules_install
313 @@ -1198,7 +1242,7 @@ distclean: mrproper
314 @find $(srctree) $(RCS_FIND_IGNORE) \
315 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
316 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
317 - -o -name '.*.rej' -o -size 0 \
318 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
319 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
320 -type f -print | xargs rm -f
321
322 @@ -1360,6 +1404,7 @@ PHONY += $(module-dirs) modules
323 $(module-dirs): crmodverdir $(objtree)/Module.symvers
324 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
325
326 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
327 modules: $(module-dirs)
328 @$(kecho) ' Building modules, stage 2.';
329 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
330 @@ -1486,17 +1531,19 @@ else
331 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
332 endif
333
334 -%.s: %.c prepare scripts FORCE
335 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
336 +%.s: %.c gcc-plugins prepare scripts FORCE
337 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
338 %.i: %.c prepare scripts FORCE
339 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
340 -%.o: %.c prepare scripts FORCE
341 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
342 +%.o: %.c gcc-plugins prepare scripts FORCE
343 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
344 %.lst: %.c prepare scripts FORCE
345 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
346 -%.s: %.S prepare scripts FORCE
347 +%.s: %.S gcc-plugins prepare scripts FORCE
348 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
349 -%.o: %.S prepare scripts FORCE
350 +%.o: %.S gcc-plugins prepare scripts FORCE
351 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
352 %.symtypes: %.c prepare scripts FORCE
353 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
354 @@ -1506,11 +1553,13 @@ endif
355 $(cmd_crmodverdir)
356 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
357 $(build)=$(build-dir)
358 -%/: prepare scripts FORCE
359 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
360 +%/: gcc-plugins prepare scripts FORCE
361 $(cmd_crmodverdir)
362 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
363 $(build)=$(build-dir)
364 -%.ko: prepare scripts FORCE
365 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
366 +%.ko: gcc-plugins prepare scripts FORCE
367 $(cmd_crmodverdir)
368 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
369 $(build)=$(build-dir) $(@:.ko=.o)
370 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
371 index da5449e..7418343 100644
372 --- a/arch/alpha/include/asm/elf.h
373 +++ b/arch/alpha/include/asm/elf.h
374 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
375
376 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
377
378 +#ifdef CONFIG_PAX_ASLR
379 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
380 +
381 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
382 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
383 +#endif
384 +
385 /* $0 is set by ld.so to a pointer to a function which might be
386 registered using atexit. This provides a mean for the dynamic
387 linker to call DT_FINI functions for shared libraries that have
388 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
389 index de98a73..bd4f1f8 100644
390 --- a/arch/alpha/include/asm/pgtable.h
391 +++ b/arch/alpha/include/asm/pgtable.h
392 @@ -101,6 +101,17 @@ struct vm_area_struct;
393 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
394 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
395 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
396 +
397 +#ifdef CONFIG_PAX_PAGEEXEC
398 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
399 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
400 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
401 +#else
402 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
403 +# define PAGE_COPY_NOEXEC PAGE_COPY
404 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
405 +#endif
406 +
407 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
408
409 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
410 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
411 index 2fd00b7..cfd5069 100644
412 --- a/arch/alpha/kernel/module.c
413 +++ b/arch/alpha/kernel/module.c
414 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
415
416 /* The small sections were sorted to the end of the segment.
417 The following should definitely cover them. */
418 - gp = (u64)me->module_core + me->core_size - 0x8000;
419 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
420 got = sechdrs[me->arch.gotsecindex].sh_addr;
421
422 for (i = 0; i < n; i++) {
423 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
424 index 01e8715..be0e80f 100644
425 --- a/arch/alpha/kernel/osf_sys.c
426 +++ b/arch/alpha/kernel/osf_sys.c
427 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
428 /* At this point: (!vma || addr < vma->vm_end). */
429 if (limit - len < addr)
430 return -ENOMEM;
431 - if (!vma || addr + len <= vma->vm_start)
432 + if (check_heap_stack_gap(vma, addr, len))
433 return addr;
434 addr = vma->vm_end;
435 vma = vma->vm_next;
436 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
437 merely specific addresses, but regions of memory -- perhaps
438 this feature should be incorporated into all ports? */
439
440 +#ifdef CONFIG_PAX_RANDMMAP
441 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
442 +#endif
443 +
444 if (addr) {
445 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
446 if (addr != (unsigned long) -ENOMEM)
447 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
448 }
449
450 /* Next, try allocating at TASK_UNMAPPED_BASE. */
451 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
452 - len, limit);
453 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
454 +
455 if (addr != (unsigned long) -ENOMEM)
456 return addr;
457
458 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
459 index fadd5f8..904e73a 100644
460 --- a/arch/alpha/mm/fault.c
461 +++ b/arch/alpha/mm/fault.c
462 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
463 __reload_thread(pcb);
464 }
465
466 +#ifdef CONFIG_PAX_PAGEEXEC
467 +/*
468 + * PaX: decide what to do with offenders (regs->pc = fault address)
469 + *
470 + * returns 1 when task should be killed
471 + * 2 when patched PLT trampoline was detected
472 + * 3 when unpatched PLT trampoline was detected
473 + */
474 +static int pax_handle_fetch_fault(struct pt_regs *regs)
475 +{
476 +
477 +#ifdef CONFIG_PAX_EMUPLT
478 + int err;
479 +
480 + do { /* PaX: patched PLT emulation #1 */
481 + unsigned int ldah, ldq, jmp;
482 +
483 + err = get_user(ldah, (unsigned int *)regs->pc);
484 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
485 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
486 +
487 + if (err)
488 + break;
489 +
490 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
491 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
492 + jmp == 0x6BFB0000U)
493 + {
494 + unsigned long r27, addr;
495 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
496 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
497 +
498 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
499 + err = get_user(r27, (unsigned long *)addr);
500 + if (err)
501 + break;
502 +
503 + regs->r27 = r27;
504 + regs->pc = r27;
505 + return 2;
506 + }
507 + } while (0);
508 +
509 + do { /* PaX: patched PLT emulation #2 */
510 + unsigned int ldah, lda, br;
511 +
512 + err = get_user(ldah, (unsigned int *)regs->pc);
513 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
514 + err |= get_user(br, (unsigned int *)(regs->pc+8));
515 +
516 + if (err)
517 + break;
518 +
519 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
520 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
521 + (br & 0xFFE00000U) == 0xC3E00000U)
522 + {
523 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
524 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
525 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
526 +
527 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
528 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
529 + return 2;
530 + }
531 + } while (0);
532 +
533 + do { /* PaX: unpatched PLT emulation */
534 + unsigned int br;
535 +
536 + err = get_user(br, (unsigned int *)regs->pc);
537 +
538 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
539 + unsigned int br2, ldq, nop, jmp;
540 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
541 +
542 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
543 + err = get_user(br2, (unsigned int *)addr);
544 + err |= get_user(ldq, (unsigned int *)(addr+4));
545 + err |= get_user(nop, (unsigned int *)(addr+8));
546 + err |= get_user(jmp, (unsigned int *)(addr+12));
547 + err |= get_user(resolver, (unsigned long *)(addr+16));
548 +
549 + if (err)
550 + break;
551 +
552 + if (br2 == 0xC3600000U &&
553 + ldq == 0xA77B000CU &&
554 + nop == 0x47FF041FU &&
555 + jmp == 0x6B7B0000U)
556 + {
557 + regs->r28 = regs->pc+4;
558 + regs->r27 = addr+16;
559 + regs->pc = resolver;
560 + return 3;
561 + }
562 + }
563 + } while (0);
564 +#endif
565 +
566 + return 1;
567 +}
568 +
569 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
570 +{
571 + unsigned long i;
572 +
573 + printk(KERN_ERR "PAX: bytes at PC: ");
574 + for (i = 0; i < 5; i++) {
575 + unsigned int c;
576 + if (get_user(c, (unsigned int *)pc+i))
577 + printk(KERN_CONT "???????? ");
578 + else
579 + printk(KERN_CONT "%08x ", c);
580 + }
581 + printk("\n");
582 +}
583 +#endif
584
585 /*
586 * This routine handles page faults. It determines the address,
587 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
588 good_area:
589 si_code = SEGV_ACCERR;
590 if (cause < 0) {
591 - if (!(vma->vm_flags & VM_EXEC))
592 + if (!(vma->vm_flags & VM_EXEC)) {
593 +
594 +#ifdef CONFIG_PAX_PAGEEXEC
595 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
596 + goto bad_area;
597 +
598 + up_read(&mm->mmap_sem);
599 + switch (pax_handle_fetch_fault(regs)) {
600 +
601 +#ifdef CONFIG_PAX_EMUPLT
602 + case 2:
603 + case 3:
604 + return;
605 +#endif
606 +
607 + }
608 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
609 + do_group_exit(SIGKILL);
610 +#else
611 goto bad_area;
612 +#endif
613 +
614 + }
615 } else if (!cause) {
616 /* Allow reads even for write-only mappings */
617 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
618 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
619 index 86976d0..8a57797 100644
620 --- a/arch/arm/include/asm/atomic.h
621 +++ b/arch/arm/include/asm/atomic.h
622 @@ -239,6 +239,14 @@ typedef struct {
623 u64 __aligned(8) counter;
624 } atomic64_t;
625
626 +#ifdef CONFIG_PAX_REFCOUNT
627 +typedef struct {
628 + u64 __aligned(8) counter;
629 +} atomic64_unchecked_t;
630 +#else
631 +typedef atomic64_t atomic64_unchecked_t;
632 +#endif
633 +
634 #define ATOMIC64_INIT(i) { (i) }
635
636 static inline u64 atomic64_read(atomic64_t *v)
637 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
638 index 0e9ce8d..6ef1e03 100644
639 --- a/arch/arm/include/asm/elf.h
640 +++ b/arch/arm/include/asm/elf.h
641 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647 +
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
652 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
653 +#endif
654
655 /* When the program starts, a1 contains a pointer to a function to be
656 registered with atexit, as per the SVR4 ABI. A value of 0 means we
657 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
658 extern void elf_set_personality(const struct elf32_hdr *);
659 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
660
661 -struct mm_struct;
662 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
663 -#define arch_randomize_brk arch_randomize_brk
664 -
665 extern int vectors_user_mapping(void);
666 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
667 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
668 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
669 index e51b1e8..32a3113 100644
670 --- a/arch/arm/include/asm/kmap_types.h
671 +++ b/arch/arm/include/asm/kmap_types.h
672 @@ -21,6 +21,7 @@ enum km_type {
673 KM_L1_CACHE,
674 KM_L2_CACHE,
675 KM_KDB,
676 + KM_CLEARPAGE,
677 KM_TYPE_NR
678 };
679
680 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
681 index b293616..96310e5 100644
682 --- a/arch/arm/include/asm/uaccess.h
683 +++ b/arch/arm/include/asm/uaccess.h
684 @@ -22,6 +22,8 @@
685 #define VERIFY_READ 0
686 #define VERIFY_WRITE 1
687
688 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
689 +
690 /*
691 * The exception table consists of pairs of addresses: the first is the
692 * address of an instruction that is allowed to fault, and the second is
693 @@ -387,8 +389,23 @@ do { \
694
695
696 #ifdef CONFIG_MMU
697 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
698 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
699 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
700 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
701 +
702 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
703 +{
704 + if (!__builtin_constant_p(n))
705 + check_object_size(to, n, false);
706 + return ___copy_from_user(to, from, n);
707 +}
708 +
709 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
710 +{
711 + if (!__builtin_constant_p(n))
712 + check_object_size(from, n, true);
713 + return ___copy_to_user(to, from, n);
714 +}
715 +
716 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
717 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
718 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
719 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
720
721 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
722 {
723 + if ((long)n < 0)
724 + return n;
725 +
726 if (access_ok(VERIFY_READ, from, n))
727 n = __copy_from_user(to, from, n);
728 else /* security hole - plug it */
729 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
730
731 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
732 {
733 + if ((long)n < 0)
734 + return n;
735 +
736 if (access_ok(VERIFY_WRITE, to, n))
737 n = __copy_to_user(to, from, n);
738 return n;
739 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
740 index aeef960..2966009 100644
741 --- a/arch/arm/kernel/armksyms.c
742 +++ b/arch/arm/kernel/armksyms.c
743 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
744 #ifdef CONFIG_MMU
745 EXPORT_SYMBOL(copy_page);
746
747 -EXPORT_SYMBOL(__copy_from_user);
748 -EXPORT_SYMBOL(__copy_to_user);
749 +EXPORT_SYMBOL(___copy_from_user);
750 +EXPORT_SYMBOL(___copy_to_user);
751 EXPORT_SYMBOL(__clear_user);
752
753 EXPORT_SYMBOL(__get_user_1);
754 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
755 index c9d11ea..5078081 100644
756 --- a/arch/arm/kernel/process.c
757 +++ b/arch/arm/kernel/process.c
758 @@ -28,7 +28,6 @@
759 #include <linux/tick.h>
760 #include <linux/utsname.h>
761 #include <linux/uaccess.h>
762 -#include <linux/random.h>
763 #include <linux/hw_breakpoint.h>
764 #include <linux/cpuidle.h>
765
766 @@ -484,12 +483,6 @@ unsigned long get_wchan(struct task_struct *p)
767 return 0;
768 }
769
770 -unsigned long arch_randomize_brk(struct mm_struct *mm)
771 -{
772 - unsigned long range_end = mm->brk + 0x02000000;
773 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
774 -}
775 -
776 #ifdef CONFIG_MMU
777 /*
778 * The vectors page is always readable from user space for the
779 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
780 index bc9f9da..c75d826 100644
781 --- a/arch/arm/kernel/traps.c
782 +++ b/arch/arm/kernel/traps.c
783 @@ -257,6 +257,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
784
785 static DEFINE_SPINLOCK(die_lock);
786
787 +extern void gr_handle_kernel_exploit(void);
788 +
789 /*
790 * This function is protected against re-entrancy.
791 */
792 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs *regs, int err)
793 panic("Fatal exception in interrupt");
794 if (panic_on_oops)
795 panic("Fatal exception");
796 +
797 + gr_handle_kernel_exploit();
798 +
799 if (ret != NOTIFY_STOP)
800 do_exit(SIGSEGV);
801 }
802 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
803 index 66a477a..bee61d3 100644
804 --- a/arch/arm/lib/copy_from_user.S
805 +++ b/arch/arm/lib/copy_from_user.S
806 @@ -16,7 +16,7 @@
807 /*
808 * Prototype:
809 *
810 - * size_t __copy_from_user(void *to, const void *from, size_t n)
811 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
812 *
813 * Purpose:
814 *
815 @@ -84,11 +84,11 @@
816
817 .text
818
819 -ENTRY(__copy_from_user)
820 +ENTRY(___copy_from_user)
821
822 #include "copy_template.S"
823
824 -ENDPROC(__copy_from_user)
825 +ENDPROC(___copy_from_user)
826
827 .pushsection .fixup,"ax"
828 .align 0
829 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
830 index d066df6..df28194 100644
831 --- a/arch/arm/lib/copy_to_user.S
832 +++ b/arch/arm/lib/copy_to_user.S
833 @@ -16,7 +16,7 @@
834 /*
835 * Prototype:
836 *
837 - * size_t __copy_to_user(void *to, const void *from, size_t n)
838 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
839 *
840 * Purpose:
841 *
842 @@ -88,11 +88,11 @@
843 .text
844
845 ENTRY(__copy_to_user_std)
846 -WEAK(__copy_to_user)
847 +WEAK(___copy_to_user)
848
849 #include "copy_template.S"
850
851 -ENDPROC(__copy_to_user)
852 +ENDPROC(___copy_to_user)
853 ENDPROC(__copy_to_user_std)
854
855 .pushsection .fixup,"ax"
856 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
857 index d0ece2a..5ae2f39 100644
858 --- a/arch/arm/lib/uaccess.S
859 +++ b/arch/arm/lib/uaccess.S
860 @@ -20,7 +20,7 @@
861
862 #define PAGE_SHIFT 12
863
864 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
865 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
866 * Purpose : copy a block to user memory from kernel memory
867 * Params : to - user memory
868 * : from - kernel memory
869 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
870 sub r2, r2, ip
871 b .Lc2u_dest_aligned
872
873 -ENTRY(__copy_to_user)
874 +ENTRY(___copy_to_user)
875 stmfd sp!, {r2, r4 - r7, lr}
876 cmp r2, #4
877 blt .Lc2u_not_enough
878 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
879 ldrgtb r3, [r1], #0
880 USER( T(strgtb) r3, [r0], #1) @ May fault
881 b .Lc2u_finished
882 -ENDPROC(__copy_to_user)
883 +ENDPROC(___copy_to_user)
884
885 .pushsection .fixup,"ax"
886 .align 0
887 9001: ldmfd sp!, {r0, r4 - r7, pc}
888 .popsection
889
890 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
891 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
892 * Purpose : copy a block from user memory to kernel memory
893 * Params : to - kernel memory
894 * : from - user memory
895 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
896 sub r2, r2, ip
897 b .Lcfu_dest_aligned
898
899 -ENTRY(__copy_from_user)
900 +ENTRY(___copy_from_user)
901 stmfd sp!, {r0, r2, r4 - r7, lr}
902 cmp r2, #4
903 blt .Lcfu_not_enough
904 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
905 USER( T(ldrgtb) r3, [r1], #1) @ May fault
906 strgtb r3, [r0], #1
907 b .Lcfu_finished
908 -ENDPROC(__copy_from_user)
909 +ENDPROC(___copy_from_user)
910
911 .pushsection .fixup,"ax"
912 .align 0
913 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
914 index 8b9b136..70d5100 100644
915 --- a/arch/arm/lib/uaccess_with_memcpy.c
916 +++ b/arch/arm/lib/uaccess_with_memcpy.c
917 @@ -103,7 +103,7 @@ out:
918 }
919
920 unsigned long
921 -__copy_to_user(void __user *to, const void *from, unsigned long n)
922 +___copy_to_user(void __user *to, const void *from, unsigned long n)
923 {
924 /*
925 * This test is stubbed out of the main function above to keep
926 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
927 index 2b2d51c..0127490 100644
928 --- a/arch/arm/mach-ux500/mbox-db5500.c
929 +++ b/arch/arm/mach-ux500/mbox-db5500.c
930 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
931 return sprintf(buf, "0x%X\n", mbox_value);
932 }
933
934 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
935 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
936
937 static int mbox_show(struct seq_file *s, void *data)
938 {
939 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
940 index 3b5ea68..42fc9af 100644
941 --- a/arch/arm/mm/fault.c
942 +++ b/arch/arm/mm/fault.c
943 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
944 }
945 #endif
946
947 +#ifdef CONFIG_PAX_PAGEEXEC
948 + if (fsr & FSR_LNX_PF) {
949 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
950 + do_group_exit(SIGKILL);
951 + }
952 +#endif
953 +
954 tsk->thread.address = addr;
955 tsk->thread.error_code = fsr;
956 tsk->thread.trap_no = 14;
957 @@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
958 }
959 #endif /* CONFIG_MMU */
960
961 +#ifdef CONFIG_PAX_PAGEEXEC
962 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
963 +{
964 + long i;
965 +
966 + printk(KERN_ERR "PAX: bytes at PC: ");
967 + for (i = 0; i < 20; i++) {
968 + unsigned char c;
969 + if (get_user(c, (__force unsigned char __user *)pc+i))
970 + printk(KERN_CONT "?? ");
971 + else
972 + printk(KERN_CONT "%02x ", c);
973 + }
974 + printk("\n");
975 +
976 + printk(KERN_ERR "PAX: bytes at SP-4: ");
977 + for (i = -1; i < 20; i++) {
978 + unsigned long c;
979 + if (get_user(c, (__force unsigned long __user *)sp+i))
980 + printk(KERN_CONT "???????? ");
981 + else
982 + printk(KERN_CONT "%08lx ", c);
983 + }
984 + printk("\n");
985 +}
986 +#endif
987 +
988 /*
989 * First Level Translation Fault Handler
990 *
991 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
992 index 74be05f..f605b8c 100644
993 --- a/arch/arm/mm/mmap.c
994 +++ b/arch/arm/mm/mmap.c
995 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
996 if (len > TASK_SIZE)
997 return -ENOMEM;
998
999 +#ifdef CONFIG_PAX_RANDMMAP
1000 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1001 +#endif
1002 +
1003 if (addr) {
1004 if (do_align)
1005 addr = COLOUR_ALIGN(addr, pgoff);
1006 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1007 addr = PAGE_ALIGN(addr);
1008
1009 vma = find_vma(mm, addr);
1010 - if (TASK_SIZE - len >= addr &&
1011 - (!vma || addr + len <= vma->vm_start))
1012 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1013 return addr;
1014 }
1015 if (len > mm->cached_hole_size) {
1016 - start_addr = addr = mm->free_area_cache;
1017 + start_addr = addr = mm->free_area_cache;
1018 } else {
1019 - start_addr = addr = TASK_UNMAPPED_BASE;
1020 - mm->cached_hole_size = 0;
1021 + start_addr = addr = mm->mmap_base;
1022 + mm->cached_hole_size = 0;
1023 }
1024 /* 8 bits of randomness in 20 address space bits */
1025 if ((current->flags & PF_RANDOMIZE) &&
1026 @@ -100,14 +103,14 @@ full_search:
1027 * Start a new search - just in case we missed
1028 * some holes.
1029 */
1030 - if (start_addr != TASK_UNMAPPED_BASE) {
1031 - start_addr = addr = TASK_UNMAPPED_BASE;
1032 + if (start_addr != mm->mmap_base) {
1033 + start_addr = addr = mm->mmap_base;
1034 mm->cached_hole_size = 0;
1035 goto full_search;
1036 }
1037 return -ENOMEM;
1038 }
1039 - if (!vma || addr + len <= vma->vm_start) {
1040 + if (check_heap_stack_gap(vma, addr, len)) {
1041 /*
1042 * Remember the place where we stopped the search:
1043 */
1044 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1045 index 3b3159b..425ea94 100644
1046 --- a/arch/avr32/include/asm/elf.h
1047 +++ b/arch/avr32/include/asm/elf.h
1048 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1049 the loader. We need to make sure that it is out of the way of the program
1050 that it will "exec", and that there is sufficient room for the brk. */
1051
1052 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1053 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1054
1055 +#ifdef CONFIG_PAX_ASLR
1056 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1057 +
1058 +#define PAX_DELTA_MMAP_LEN 15
1059 +#define PAX_DELTA_STACK_LEN 15
1060 +#endif
1061
1062 /* This yields a mask that user programs can use to figure out what
1063 instruction set this CPU supports. This could be done in user space,
1064 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1065 index b7f5c68..556135c 100644
1066 --- a/arch/avr32/include/asm/kmap_types.h
1067 +++ b/arch/avr32/include/asm/kmap_types.h
1068 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1069 D(11) KM_IRQ1,
1070 D(12) KM_SOFTIRQ0,
1071 D(13) KM_SOFTIRQ1,
1072 -D(14) KM_TYPE_NR
1073 +D(14) KM_CLEARPAGE,
1074 +D(15) KM_TYPE_NR
1075 };
1076
1077 #undef D
1078 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1079 index f7040a1..db9f300 100644
1080 --- a/arch/avr32/mm/fault.c
1081 +++ b/arch/avr32/mm/fault.c
1082 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1083
1084 int exception_trace = 1;
1085
1086 +#ifdef CONFIG_PAX_PAGEEXEC
1087 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1088 +{
1089 + unsigned long i;
1090 +
1091 + printk(KERN_ERR "PAX: bytes at PC: ");
1092 + for (i = 0; i < 20; i++) {
1093 + unsigned char c;
1094 + if (get_user(c, (unsigned char *)pc+i))
1095 + printk(KERN_CONT "???????? ");
1096 + else
1097 + printk(KERN_CONT "%02x ", c);
1098 + }
1099 + printk("\n");
1100 +}
1101 +#endif
1102 +
1103 /*
1104 * This routine handles page faults. It determines the address and the
1105 * problem, and then passes it off to one of the appropriate routines.
1106 @@ -156,6 +173,16 @@ bad_area:
1107 up_read(&mm->mmap_sem);
1108
1109 if (user_mode(regs)) {
1110 +
1111 +#ifdef CONFIG_PAX_PAGEEXEC
1112 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1113 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1114 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1115 + do_group_exit(SIGKILL);
1116 + }
1117 + }
1118 +#endif
1119 +
1120 if (exception_trace && printk_ratelimit())
1121 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1122 "sp %08lx ecr %lu\n",
1123 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1124 index f8e16b2..c73ff79 100644
1125 --- a/arch/frv/include/asm/kmap_types.h
1126 +++ b/arch/frv/include/asm/kmap_types.h
1127 @@ -23,6 +23,7 @@ enum km_type {
1128 KM_IRQ1,
1129 KM_SOFTIRQ0,
1130 KM_SOFTIRQ1,
1131 + KM_CLEARPAGE,
1132 KM_TYPE_NR
1133 };
1134
1135 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1136 index 385fd30..6c3d97e 100644
1137 --- a/arch/frv/mm/elf-fdpic.c
1138 +++ b/arch/frv/mm/elf-fdpic.c
1139 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1140 if (addr) {
1141 addr = PAGE_ALIGN(addr);
1142 vma = find_vma(current->mm, addr);
1143 - if (TASK_SIZE - len >= addr &&
1144 - (!vma || addr + len <= vma->vm_start))
1145 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1146 goto success;
1147 }
1148
1149 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1150 for (; vma; vma = vma->vm_next) {
1151 if (addr > limit)
1152 break;
1153 - if (addr + len <= vma->vm_start)
1154 + if (check_heap_stack_gap(vma, addr, len))
1155 goto success;
1156 addr = vma->vm_end;
1157 }
1158 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1159 for (; vma; vma = vma->vm_next) {
1160 if (addr > limit)
1161 break;
1162 - if (addr + len <= vma->vm_start)
1163 + if (check_heap_stack_gap(vma, addr, len))
1164 goto success;
1165 addr = vma->vm_end;
1166 }
1167 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1168 index b5298eb..67c6e62 100644
1169 --- a/arch/ia64/include/asm/elf.h
1170 +++ b/arch/ia64/include/asm/elf.h
1171 @@ -42,6 +42,13 @@
1172 */
1173 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1174
1175 +#ifdef CONFIG_PAX_ASLR
1176 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1177 +
1178 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1179 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1180 +#endif
1181 +
1182 #define PT_IA_64_UNWIND 0x70000001
1183
1184 /* IA-64 relocations: */
1185 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1186 index 1a97af3..7529d31 100644
1187 --- a/arch/ia64/include/asm/pgtable.h
1188 +++ b/arch/ia64/include/asm/pgtable.h
1189 @@ -12,7 +12,7 @@
1190 * David Mosberger-Tang <davidm@hpl.hp.com>
1191 */
1192
1193 -
1194 +#include <linux/const.h>
1195 #include <asm/mman.h>
1196 #include <asm/page.h>
1197 #include <asm/processor.h>
1198 @@ -143,6 +143,17 @@
1199 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1200 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1201 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1202 +
1203 +#ifdef CONFIG_PAX_PAGEEXEC
1204 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1205 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1206 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1207 +#else
1208 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1209 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1210 +# define PAGE_COPY_NOEXEC PAGE_COPY
1211 +#endif
1212 +
1213 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1214 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1215 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1216 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1217 index b77768d..e0795eb 100644
1218 --- a/arch/ia64/include/asm/spinlock.h
1219 +++ b/arch/ia64/include/asm/spinlock.h
1220 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1221 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1222
1223 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1224 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1225 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1226 }
1227
1228 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1229 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1230 index 449c8c0..432a3d2 100644
1231 --- a/arch/ia64/include/asm/uaccess.h
1232 +++ b/arch/ia64/include/asm/uaccess.h
1233 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1234 const void *__cu_from = (from); \
1235 long __cu_len = (n); \
1236 \
1237 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1238 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1239 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1240 __cu_len; \
1241 })
1242 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1243 long __cu_len = (n); \
1244 \
1245 __chk_user_ptr(__cu_from); \
1246 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1247 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1248 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1249 __cu_len; \
1250 })
1251 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1252 index 24603be..948052d 100644
1253 --- a/arch/ia64/kernel/module.c
1254 +++ b/arch/ia64/kernel/module.c
1255 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1256 void
1257 module_free (struct module *mod, void *module_region)
1258 {
1259 - if (mod && mod->arch.init_unw_table &&
1260 - module_region == mod->module_init) {
1261 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1262 unw_remove_unwind_table(mod->arch.init_unw_table);
1263 mod->arch.init_unw_table = NULL;
1264 }
1265 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1266 }
1267
1268 static inline int
1269 +in_init_rx (const struct module *mod, uint64_t addr)
1270 +{
1271 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1272 +}
1273 +
1274 +static inline int
1275 +in_init_rw (const struct module *mod, uint64_t addr)
1276 +{
1277 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1278 +}
1279 +
1280 +static inline int
1281 in_init (const struct module *mod, uint64_t addr)
1282 {
1283 - return addr - (uint64_t) mod->module_init < mod->init_size;
1284 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1285 +}
1286 +
1287 +static inline int
1288 +in_core_rx (const struct module *mod, uint64_t addr)
1289 +{
1290 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1291 +}
1292 +
1293 +static inline int
1294 +in_core_rw (const struct module *mod, uint64_t addr)
1295 +{
1296 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1297 }
1298
1299 static inline int
1300 in_core (const struct module *mod, uint64_t addr)
1301 {
1302 - return addr - (uint64_t) mod->module_core < mod->core_size;
1303 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1304 }
1305
1306 static inline int
1307 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1308 break;
1309
1310 case RV_BDREL:
1311 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1312 + if (in_init_rx(mod, val))
1313 + val -= (uint64_t) mod->module_init_rx;
1314 + else if (in_init_rw(mod, val))
1315 + val -= (uint64_t) mod->module_init_rw;
1316 + else if (in_core_rx(mod, val))
1317 + val -= (uint64_t) mod->module_core_rx;
1318 + else if (in_core_rw(mod, val))
1319 + val -= (uint64_t) mod->module_core_rw;
1320 break;
1321
1322 case RV_LTV:
1323 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1324 * addresses have been selected...
1325 */
1326 uint64_t gp;
1327 - if (mod->core_size > MAX_LTOFF)
1328 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1329 /*
1330 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1331 * at the end of the module.
1332 */
1333 - gp = mod->core_size - MAX_LTOFF / 2;
1334 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1335 else
1336 - gp = mod->core_size / 2;
1337 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1338 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1339 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1340 mod->arch.gp = gp;
1341 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1342 }
1343 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1344 index 609d500..7dde2a8 100644
1345 --- a/arch/ia64/kernel/sys_ia64.c
1346 +++ b/arch/ia64/kernel/sys_ia64.c
1347 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1348 if (REGION_NUMBER(addr) == RGN_HPAGE)
1349 addr = 0;
1350 #endif
1351 +
1352 +#ifdef CONFIG_PAX_RANDMMAP
1353 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1354 + addr = mm->free_area_cache;
1355 + else
1356 +#endif
1357 +
1358 if (!addr)
1359 addr = mm->free_area_cache;
1360
1361 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1362 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1363 /* At this point: (!vma || addr < vma->vm_end). */
1364 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1365 - if (start_addr != TASK_UNMAPPED_BASE) {
1366 + if (start_addr != mm->mmap_base) {
1367 /* Start a new search --- just in case we missed some holes. */
1368 - addr = TASK_UNMAPPED_BASE;
1369 + addr = mm->mmap_base;
1370 goto full_search;
1371 }
1372 return -ENOMEM;
1373 }
1374 - if (!vma || addr + len <= vma->vm_start) {
1375 + if (check_heap_stack_gap(vma, addr, len)) {
1376 /* Remember the address where we stopped this search: */
1377 mm->free_area_cache = addr + len;
1378 return addr;
1379 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1380 index 53c0ba0..2accdde 100644
1381 --- a/arch/ia64/kernel/vmlinux.lds.S
1382 +++ b/arch/ia64/kernel/vmlinux.lds.S
1383 @@ -199,7 +199,7 @@ SECTIONS {
1384 /* Per-cpu data: */
1385 . = ALIGN(PERCPU_PAGE_SIZE);
1386 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1387 - __phys_per_cpu_start = __per_cpu_load;
1388 + __phys_per_cpu_start = per_cpu_load;
1389 /*
1390 * ensure percpu data fits
1391 * into percpu page size
1392 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1393 index 20b3593..1ce77f0 100644
1394 --- a/arch/ia64/mm/fault.c
1395 +++ b/arch/ia64/mm/fault.c
1396 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1397 return pte_present(pte);
1398 }
1399
1400 +#ifdef CONFIG_PAX_PAGEEXEC
1401 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1402 +{
1403 + unsigned long i;
1404 +
1405 + printk(KERN_ERR "PAX: bytes at PC: ");
1406 + for (i = 0; i < 8; i++) {
1407 + unsigned int c;
1408 + if (get_user(c, (unsigned int *)pc+i))
1409 + printk(KERN_CONT "???????? ");
1410 + else
1411 + printk(KERN_CONT "%08x ", c);
1412 + }
1413 + printk("\n");
1414 +}
1415 +#endif
1416 +
1417 void __kprobes
1418 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1419 {
1420 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1421 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1422 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1423
1424 - if ((vma->vm_flags & mask) != mask)
1425 + if ((vma->vm_flags & mask) != mask) {
1426 +
1427 +#ifdef CONFIG_PAX_PAGEEXEC
1428 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1429 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1430 + goto bad_area;
1431 +
1432 + up_read(&mm->mmap_sem);
1433 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1434 + do_group_exit(SIGKILL);
1435 + }
1436 +#endif
1437 +
1438 goto bad_area;
1439
1440 + }
1441 +
1442 /*
1443 * If for any reason at all we couldn't handle the fault, make
1444 * sure we exit gracefully rather than endlessly redo the
1445 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1446 index 5ca674b..e0e1b70 100644
1447 --- a/arch/ia64/mm/hugetlbpage.c
1448 +++ b/arch/ia64/mm/hugetlbpage.c
1449 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1450 /* At this point: (!vmm || addr < vmm->vm_end). */
1451 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1452 return -ENOMEM;
1453 - if (!vmm || (addr + len) <= vmm->vm_start)
1454 + if (check_heap_stack_gap(vmm, addr, len))
1455 return addr;
1456 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1457 }
1458 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1459 index 00cb0e2..2ad8024 100644
1460 --- a/arch/ia64/mm/init.c
1461 +++ b/arch/ia64/mm/init.c
1462 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1463 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1464 vma->vm_end = vma->vm_start + PAGE_SIZE;
1465 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1466 +
1467 +#ifdef CONFIG_PAX_PAGEEXEC
1468 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1469 + vma->vm_flags &= ~VM_EXEC;
1470 +
1471 +#ifdef CONFIG_PAX_MPROTECT
1472 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1473 + vma->vm_flags &= ~VM_MAYEXEC;
1474 +#endif
1475 +
1476 + }
1477 +#endif
1478 +
1479 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1480 down_write(&current->mm->mmap_sem);
1481 if (insert_vm_struct(current->mm, vma)) {
1482 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1483 index 82abd15..d95ae5d 100644
1484 --- a/arch/m32r/lib/usercopy.c
1485 +++ b/arch/m32r/lib/usercopy.c
1486 @@ -14,6 +14,9 @@
1487 unsigned long
1488 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1489 {
1490 + if ((long)n < 0)
1491 + return n;
1492 +
1493 prefetch(from);
1494 if (access_ok(VERIFY_WRITE, to, n))
1495 __copy_user(to,from,n);
1496 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1497 unsigned long
1498 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1499 {
1500 + if ((long)n < 0)
1501 + return n;
1502 +
1503 prefetchw(to);
1504 if (access_ok(VERIFY_READ, from, n))
1505 __copy_user_zeroing(to,from,n);
1506 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1507 index 455c0ac..ad65fbe 100644
1508 --- a/arch/mips/include/asm/elf.h
1509 +++ b/arch/mips/include/asm/elf.h
1510 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1511 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1512 #endif
1513
1514 +#ifdef CONFIG_PAX_ASLR
1515 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1516 +
1517 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1518 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1519 +#endif
1520 +
1521 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1522 struct linux_binprm;
1523 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1524 int uses_interp);
1525
1526 -struct mm_struct;
1527 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1528 -#define arch_randomize_brk arch_randomize_brk
1529 -
1530 #endif /* _ASM_ELF_H */
1531 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1532 index e59cd1a..8e329d6 100644
1533 --- a/arch/mips/include/asm/page.h
1534 +++ b/arch/mips/include/asm/page.h
1535 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1536 #ifdef CONFIG_CPU_MIPS32
1537 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1538 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1539 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1540 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1541 #else
1542 typedef struct { unsigned long long pte; } pte_t;
1543 #define pte_val(x) ((x).pte)
1544 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1545 index 6018c80..7c37203 100644
1546 --- a/arch/mips/include/asm/system.h
1547 +++ b/arch/mips/include/asm/system.h
1548 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1549 */
1550 #define __ARCH_WANT_UNLOCKED_CTXSW
1551
1552 -extern unsigned long arch_align_stack(unsigned long sp);
1553 +#define arch_align_stack(x) ((x) & ~0xfUL)
1554
1555 #endif /* _ASM_SYSTEM_H */
1556 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1557 index 9fdd8bc..4bd7f1a 100644
1558 --- a/arch/mips/kernel/binfmt_elfn32.c
1559 +++ b/arch/mips/kernel/binfmt_elfn32.c
1560 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1561 #undef ELF_ET_DYN_BASE
1562 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1563
1564 +#ifdef CONFIG_PAX_ASLR
1565 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1566 +
1567 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1568 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1569 +#endif
1570 +
1571 #include <asm/processor.h>
1572 #include <linux/module.h>
1573 #include <linux/elfcore.h>
1574 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1575 index ff44823..97f8906 100644
1576 --- a/arch/mips/kernel/binfmt_elfo32.c
1577 +++ b/arch/mips/kernel/binfmt_elfo32.c
1578 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1579 #undef ELF_ET_DYN_BASE
1580 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1581
1582 +#ifdef CONFIG_PAX_ASLR
1583 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1584 +
1585 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1586 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1587 +#endif
1588 +
1589 #include <asm/processor.h>
1590
1591 /*
1592 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1593 index b30cb25..454c0a9 100644
1594 --- a/arch/mips/kernel/process.c
1595 +++ b/arch/mips/kernel/process.c
1596 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1597 out:
1598 return pc;
1599 }
1600 -
1601 -/*
1602 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1603 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1604 - */
1605 -unsigned long arch_align_stack(unsigned long sp)
1606 -{
1607 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1608 - sp -= get_random_int() & ~PAGE_MASK;
1609 -
1610 - return sp & ALMASK;
1611 -}
1612 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1613 index 937cf33..adb39bb 100644
1614 --- a/arch/mips/mm/fault.c
1615 +++ b/arch/mips/mm/fault.c
1616 @@ -28,6 +28,23 @@
1617 #include <asm/highmem.h> /* For VMALLOC_END */
1618 #include <linux/kdebug.h>
1619
1620 +#ifdef CONFIG_PAX_PAGEEXEC
1621 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1622 +{
1623 + unsigned long i;
1624 +
1625 + printk(KERN_ERR "PAX: bytes at PC: ");
1626 + for (i = 0; i < 5; i++) {
1627 + unsigned int c;
1628 + if (get_user(c, (unsigned int *)pc+i))
1629 + printk(KERN_CONT "???????? ");
1630 + else
1631 + printk(KERN_CONT "%08x ", c);
1632 + }
1633 + printk("\n");
1634 +}
1635 +#endif
1636 +
1637 /*
1638 * This routine handles page faults. It determines the address,
1639 * and the problem, and then passes it off to one of the appropriate
1640 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1641 index 302d779..7d35bf8 100644
1642 --- a/arch/mips/mm/mmap.c
1643 +++ b/arch/mips/mm/mmap.c
1644 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1645 do_color_align = 1;
1646
1647 /* requesting a specific address */
1648 +
1649 +#ifdef CONFIG_PAX_RANDMMAP
1650 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1651 +#endif
1652 +
1653 if (addr) {
1654 if (do_color_align)
1655 addr = COLOUR_ALIGN(addr, pgoff);
1656 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1657 addr = PAGE_ALIGN(addr);
1658
1659 vma = find_vma(mm, addr);
1660 - if (TASK_SIZE - len >= addr &&
1661 - (!vma || addr + len <= vma->vm_start))
1662 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1663 return addr;
1664 }
1665
1666 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1667 /* At this point: (!vma || addr < vma->vm_end). */
1668 if (TASK_SIZE - len < addr)
1669 return -ENOMEM;
1670 - if (!vma || addr + len <= vma->vm_start)
1671 + if (check_heap_stack_gap(vmm, addr, len))
1672 return addr;
1673 addr = vma->vm_end;
1674 if (do_color_align)
1675 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1676 /* make sure it can fit in the remaining address space */
1677 if (likely(addr > len)) {
1678 vma = find_vma(mm, addr - len);
1679 - if (!vma || addr <= vma->vm_start) {
1680 + if (check_heap_stack_gap(vmm, addr - len, len))
1681 /* cache the address as a hint for next time */
1682 return mm->free_area_cache = addr - len;
1683 }
1684 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1685 * return with success:
1686 */
1687 vma = find_vma(mm, addr);
1688 - if (likely(!vma || addr + len <= vma->vm_start)) {
1689 + if (check_heap_stack_gap(vmm, addr, len)) {
1690 /* cache the address as a hint for next time */
1691 return mm->free_area_cache = addr;
1692 }
1693 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1694 mm->unmap_area = arch_unmap_area_topdown;
1695 }
1696 }
1697 -
1698 -static inline unsigned long brk_rnd(void)
1699 -{
1700 - unsigned long rnd = get_random_int();
1701 -
1702 - rnd = rnd << PAGE_SHIFT;
1703 - /* 8MB for 32bit, 256MB for 64bit */
1704 - if (TASK_IS_32BIT_ADDR)
1705 - rnd = rnd & 0x7ffffful;
1706 - else
1707 - rnd = rnd & 0xffffffful;
1708 -
1709 - return rnd;
1710 -}
1711 -
1712 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1713 -{
1714 - unsigned long base = mm->brk;
1715 - unsigned long ret;
1716 -
1717 - ret = PAGE_ALIGN(base + brk_rnd());
1718 -
1719 - if (ret < mm->brk)
1720 - return mm->brk;
1721 -
1722 - return ret;
1723 -}
1724 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1725 index 19f6cb1..6c78cf2 100644
1726 --- a/arch/parisc/include/asm/elf.h
1727 +++ b/arch/parisc/include/asm/elf.h
1728 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1729
1730 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1731
1732 +#ifdef CONFIG_PAX_ASLR
1733 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1734 +
1735 +#define PAX_DELTA_MMAP_LEN 16
1736 +#define PAX_DELTA_STACK_LEN 16
1737 +#endif
1738 +
1739 /* This yields a mask that user programs can use to figure out what
1740 instruction set this CPU supports. This could be done in user space,
1741 but it's not easy, and we've already done it here. */
1742 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1743 index 22dadeb..f6c2be4 100644
1744 --- a/arch/parisc/include/asm/pgtable.h
1745 +++ b/arch/parisc/include/asm/pgtable.h
1746 @@ -210,6 +210,17 @@ struct vm_area_struct;
1747 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1748 #define PAGE_COPY PAGE_EXECREAD
1749 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1750 +
1751 +#ifdef CONFIG_PAX_PAGEEXEC
1752 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1753 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1754 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1755 +#else
1756 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1757 +# define PAGE_COPY_NOEXEC PAGE_COPY
1758 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1759 +#endif
1760 +
1761 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1762 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1763 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1764 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1765 index 5e34ccf..672bc9c 100644
1766 --- a/arch/parisc/kernel/module.c
1767 +++ b/arch/parisc/kernel/module.c
1768 @@ -98,16 +98,38 @@
1769
1770 /* three functions to determine where in the module core
1771 * or init pieces the location is */
1772 +static inline int in_init_rx(struct module *me, void *loc)
1773 +{
1774 + return (loc >= me->module_init_rx &&
1775 + loc < (me->module_init_rx + me->init_size_rx));
1776 +}
1777 +
1778 +static inline int in_init_rw(struct module *me, void *loc)
1779 +{
1780 + return (loc >= me->module_init_rw &&
1781 + loc < (me->module_init_rw + me->init_size_rw));
1782 +}
1783 +
1784 static inline int in_init(struct module *me, void *loc)
1785 {
1786 - return (loc >= me->module_init &&
1787 - loc <= (me->module_init + me->init_size));
1788 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1789 +}
1790 +
1791 +static inline int in_core_rx(struct module *me, void *loc)
1792 +{
1793 + return (loc >= me->module_core_rx &&
1794 + loc < (me->module_core_rx + me->core_size_rx));
1795 +}
1796 +
1797 +static inline int in_core_rw(struct module *me, void *loc)
1798 +{
1799 + return (loc >= me->module_core_rw &&
1800 + loc < (me->module_core_rw + me->core_size_rw));
1801 }
1802
1803 static inline int in_core(struct module *me, void *loc)
1804 {
1805 - return (loc >= me->module_core &&
1806 - loc <= (me->module_core + me->core_size));
1807 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1808 }
1809
1810 static inline int in_local(struct module *me, void *loc)
1811 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1812 }
1813
1814 /* align things a bit */
1815 - me->core_size = ALIGN(me->core_size, 16);
1816 - me->arch.got_offset = me->core_size;
1817 - me->core_size += gots * sizeof(struct got_entry);
1818 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1819 + me->arch.got_offset = me->core_size_rw;
1820 + me->core_size_rw += gots * sizeof(struct got_entry);
1821
1822 - me->core_size = ALIGN(me->core_size, 16);
1823 - me->arch.fdesc_offset = me->core_size;
1824 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1825 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1826 + me->arch.fdesc_offset = me->core_size_rw;
1827 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1828
1829 me->arch.got_max = gots;
1830 me->arch.fdesc_max = fdescs;
1831 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1832
1833 BUG_ON(value == 0);
1834
1835 - got = me->module_core + me->arch.got_offset;
1836 + got = me->module_core_rw + me->arch.got_offset;
1837 for (i = 0; got[i].addr; i++)
1838 if (got[i].addr == value)
1839 goto out;
1840 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1841 #ifdef CONFIG_64BIT
1842 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1843 {
1844 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1845 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1846
1847 if (!value) {
1848 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1849 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1850
1851 /* Create new one */
1852 fdesc->addr = value;
1853 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1854 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1855 return (Elf_Addr)fdesc;
1856 }
1857 #endif /* CONFIG_64BIT */
1858 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1859
1860 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1861 end = table + sechdrs[me->arch.unwind_section].sh_size;
1862 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1863 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1864
1865 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1866 me->arch.unwind_section, table, end, gp);
1867 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1868 index c9b9322..02d8940 100644
1869 --- a/arch/parisc/kernel/sys_parisc.c
1870 +++ b/arch/parisc/kernel/sys_parisc.c
1871 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1872 /* At this point: (!vma || addr < vma->vm_end). */
1873 if (TASK_SIZE - len < addr)
1874 return -ENOMEM;
1875 - if (!vma || addr + len <= vma->vm_start)
1876 + if (check_heap_stack_gap(vma, addr, len))
1877 return addr;
1878 addr = vma->vm_end;
1879 }
1880 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1881 /* At this point: (!vma || addr < vma->vm_end). */
1882 if (TASK_SIZE - len < addr)
1883 return -ENOMEM;
1884 - if (!vma || addr + len <= vma->vm_start)
1885 + if (check_heap_stack_gap(vma, addr, len))
1886 return addr;
1887 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1888 if (addr < vma->vm_end) /* handle wraparound */
1889 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1890 if (flags & MAP_FIXED)
1891 return addr;
1892 if (!addr)
1893 - addr = TASK_UNMAPPED_BASE;
1894 + addr = current->mm->mmap_base;
1895
1896 if (filp) {
1897 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1898 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1899 index f19e660..414fe24 100644
1900 --- a/arch/parisc/kernel/traps.c
1901 +++ b/arch/parisc/kernel/traps.c
1902 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1903
1904 down_read(&current->mm->mmap_sem);
1905 vma = find_vma(current->mm,regs->iaoq[0]);
1906 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1907 - && (vma->vm_flags & VM_EXEC)) {
1908 -
1909 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1910 fault_address = regs->iaoq[0];
1911 fault_space = regs->iasq[0];
1912
1913 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1914 index 18162ce..94de376 100644
1915 --- a/arch/parisc/mm/fault.c
1916 +++ b/arch/parisc/mm/fault.c
1917 @@ -15,6 +15,7 @@
1918 #include <linux/sched.h>
1919 #include <linux/interrupt.h>
1920 #include <linux/module.h>
1921 +#include <linux/unistd.h>
1922
1923 #include <asm/uaccess.h>
1924 #include <asm/traps.h>
1925 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1926 static unsigned long
1927 parisc_acctyp(unsigned long code, unsigned int inst)
1928 {
1929 - if (code == 6 || code == 16)
1930 + if (code == 6 || code == 7 || code == 16)
1931 return VM_EXEC;
1932
1933 switch (inst & 0xf0000000) {
1934 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1935 }
1936 #endif
1937
1938 +#ifdef CONFIG_PAX_PAGEEXEC
1939 +/*
1940 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1941 + *
1942 + * returns 1 when task should be killed
1943 + * 2 when rt_sigreturn trampoline was detected
1944 + * 3 when unpatched PLT trampoline was detected
1945 + */
1946 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1947 +{
1948 +
1949 +#ifdef CONFIG_PAX_EMUPLT
1950 + int err;
1951 +
1952 + do { /* PaX: unpatched PLT emulation */
1953 + unsigned int bl, depwi;
1954 +
1955 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1956 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1957 +
1958 + if (err)
1959 + break;
1960 +
1961 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1962 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1963 +
1964 + err = get_user(ldw, (unsigned int *)addr);
1965 + err |= get_user(bv, (unsigned int *)(addr+4));
1966 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1967 +
1968 + if (err)
1969 + break;
1970 +
1971 + if (ldw == 0x0E801096U &&
1972 + bv == 0xEAC0C000U &&
1973 + ldw2 == 0x0E881095U)
1974 + {
1975 + unsigned int resolver, map;
1976 +
1977 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1978 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1979 + if (err)
1980 + break;
1981 +
1982 + regs->gr[20] = instruction_pointer(regs)+8;
1983 + regs->gr[21] = map;
1984 + regs->gr[22] = resolver;
1985 + regs->iaoq[0] = resolver | 3UL;
1986 + regs->iaoq[1] = regs->iaoq[0] + 4;
1987 + return 3;
1988 + }
1989 + }
1990 + } while (0);
1991 +#endif
1992 +
1993 +#ifdef CONFIG_PAX_EMUTRAMP
1994 +
1995 +#ifndef CONFIG_PAX_EMUSIGRT
1996 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1997 + return 1;
1998 +#endif
1999 +
2000 + do { /* PaX: rt_sigreturn emulation */
2001 + unsigned int ldi1, ldi2, bel, nop;
2002 +
2003 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2004 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2005 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2006 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2007 +
2008 + if (err)
2009 + break;
2010 +
2011 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2012 + ldi2 == 0x3414015AU &&
2013 + bel == 0xE4008200U &&
2014 + nop == 0x08000240U)
2015 + {
2016 + regs->gr[25] = (ldi1 & 2) >> 1;
2017 + regs->gr[20] = __NR_rt_sigreturn;
2018 + regs->gr[31] = regs->iaoq[1] + 16;
2019 + regs->sr[0] = regs->iasq[1];
2020 + regs->iaoq[0] = 0x100UL;
2021 + regs->iaoq[1] = regs->iaoq[0] + 4;
2022 + regs->iasq[0] = regs->sr[2];
2023 + regs->iasq[1] = regs->sr[2];
2024 + return 2;
2025 + }
2026 + } while (0);
2027 +#endif
2028 +
2029 + return 1;
2030 +}
2031 +
2032 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2033 +{
2034 + unsigned long i;
2035 +
2036 + printk(KERN_ERR "PAX: bytes at PC: ");
2037 + for (i = 0; i < 5; i++) {
2038 + unsigned int c;
2039 + if (get_user(c, (unsigned int *)pc+i))
2040 + printk(KERN_CONT "???????? ");
2041 + else
2042 + printk(KERN_CONT "%08x ", c);
2043 + }
2044 + printk("\n");
2045 +}
2046 +#endif
2047 +
2048 int fixup_exception(struct pt_regs *regs)
2049 {
2050 const struct exception_table_entry *fix;
2051 @@ -192,8 +303,33 @@ good_area:
2052
2053 acc_type = parisc_acctyp(code,regs->iir);
2054
2055 - if ((vma->vm_flags & acc_type) != acc_type)
2056 + if ((vma->vm_flags & acc_type) != acc_type) {
2057 +
2058 +#ifdef CONFIG_PAX_PAGEEXEC
2059 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2060 + (address & ~3UL) == instruction_pointer(regs))
2061 + {
2062 + up_read(&mm->mmap_sem);
2063 + switch (pax_handle_fetch_fault(regs)) {
2064 +
2065 +#ifdef CONFIG_PAX_EMUPLT
2066 + case 3:
2067 + return;
2068 +#endif
2069 +
2070 +#ifdef CONFIG_PAX_EMUTRAMP
2071 + case 2:
2072 + return;
2073 +#endif
2074 +
2075 + }
2076 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2077 + do_group_exit(SIGKILL);
2078 + }
2079 +#endif
2080 +
2081 goto bad_area;
2082 + }
2083
2084 /*
2085 * If for any reason at all we couldn't handle the fault, make
2086 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2087 index 3bf9cca..e7457d0 100644
2088 --- a/arch/powerpc/include/asm/elf.h
2089 +++ b/arch/powerpc/include/asm/elf.h
2090 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2091 the loader. We need to make sure that it is out of the way of the program
2092 that it will "exec", and that there is sufficient room for the brk. */
2093
2094 -extern unsigned long randomize_et_dyn(unsigned long base);
2095 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2096 +#define ELF_ET_DYN_BASE (0x20000000)
2097 +
2098 +#ifdef CONFIG_PAX_ASLR
2099 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2100 +
2101 +#ifdef __powerpc64__
2102 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2103 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2104 +#else
2105 +#define PAX_DELTA_MMAP_LEN 15
2106 +#define PAX_DELTA_STACK_LEN 15
2107 +#endif
2108 +#endif
2109
2110 /*
2111 * Our registers are always unsigned longs, whether we're a 32 bit
2112 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2113 (0x7ff >> (PAGE_SHIFT - 12)) : \
2114 (0x3ffff >> (PAGE_SHIFT - 12)))
2115
2116 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2117 -#define arch_randomize_brk arch_randomize_brk
2118 -
2119 #endif /* __KERNEL__ */
2120
2121 /*
2122 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2123 index bca8fdc..61e9580 100644
2124 --- a/arch/powerpc/include/asm/kmap_types.h
2125 +++ b/arch/powerpc/include/asm/kmap_types.h
2126 @@ -27,6 +27,7 @@ enum km_type {
2127 KM_PPC_SYNC_PAGE,
2128 KM_PPC_SYNC_ICACHE,
2129 KM_KDB,
2130 + KM_CLEARPAGE,
2131 KM_TYPE_NR
2132 };
2133
2134 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2135 index d4a7f64..451de1c 100644
2136 --- a/arch/powerpc/include/asm/mman.h
2137 +++ b/arch/powerpc/include/asm/mman.h
2138 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2139 }
2140 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2141
2142 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2143 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2144 {
2145 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2146 }
2147 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2148 index 2cd664e..1d2e8a7 100644
2149 --- a/arch/powerpc/include/asm/page.h
2150 +++ b/arch/powerpc/include/asm/page.h
2151 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2152 * and needs to be executable. This means the whole heap ends
2153 * up being executable.
2154 */
2155 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2156 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2157 +#define VM_DATA_DEFAULT_FLAGS32 \
2158 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2159 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2160
2161 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2162 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2163 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2164 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2165 #endif
2166
2167 +#define ktla_ktva(addr) (addr)
2168 +#define ktva_ktla(addr) (addr)
2169 +
2170 #ifndef __ASSEMBLY__
2171
2172 #undef STRICT_MM_TYPECHECKS
2173 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2174 index 9356262..ea96148 100644
2175 --- a/arch/powerpc/include/asm/page_64.h
2176 +++ b/arch/powerpc/include/asm/page_64.h
2177 @@ -155,15 +155,18 @@ do { \
2178 * stack by default, so in the absence of a PT_GNU_STACK program header
2179 * we turn execute permission off.
2180 */
2181 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2182 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2183 +#define VM_STACK_DEFAULT_FLAGS32 \
2184 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2185 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2186
2187 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2188 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2189
2190 +#ifndef CONFIG_PAX_PAGEEXEC
2191 #define VM_STACK_DEFAULT_FLAGS \
2192 (is_32bit_task() ? \
2193 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2194 +#endif
2195
2196 #include <asm-generic/getorder.h>
2197
2198 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2199 index 88b0bd9..e32bc67 100644
2200 --- a/arch/powerpc/include/asm/pgtable.h
2201 +++ b/arch/powerpc/include/asm/pgtable.h
2202 @@ -2,6 +2,7 @@
2203 #define _ASM_POWERPC_PGTABLE_H
2204 #ifdef __KERNEL__
2205
2206 +#include <linux/const.h>
2207 #ifndef __ASSEMBLY__
2208 #include <asm/processor.h> /* For TASK_SIZE */
2209 #include <asm/mmu.h>
2210 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2211 index 4aad413..85d86bf 100644
2212 --- a/arch/powerpc/include/asm/pte-hash32.h
2213 +++ b/arch/powerpc/include/asm/pte-hash32.h
2214 @@ -21,6 +21,7 @@
2215 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2216 #define _PAGE_USER 0x004 /* usermode access allowed */
2217 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2218 +#define _PAGE_EXEC _PAGE_GUARDED
2219 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2220 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2221 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2222 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2223 index 559da19..7e5835c 100644
2224 --- a/arch/powerpc/include/asm/reg.h
2225 +++ b/arch/powerpc/include/asm/reg.h
2226 @@ -212,6 +212,7 @@
2227 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2228 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2229 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2230 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2231 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2232 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2233 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2234 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2235 index e30a13d..2b7d994 100644
2236 --- a/arch/powerpc/include/asm/system.h
2237 +++ b/arch/powerpc/include/asm/system.h
2238 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2239 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2240 #endif
2241
2242 -extern unsigned long arch_align_stack(unsigned long sp);
2243 +#define arch_align_stack(x) ((x) & ~0xfUL)
2244
2245 /* Used in very early kernel initialization. */
2246 extern unsigned long reloc_offset(void);
2247 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2248 index bd0fb84..a42a14b 100644
2249 --- a/arch/powerpc/include/asm/uaccess.h
2250 +++ b/arch/powerpc/include/asm/uaccess.h
2251 @@ -13,6 +13,8 @@
2252 #define VERIFY_READ 0
2253 #define VERIFY_WRITE 1
2254
2255 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2256 +
2257 /*
2258 * The fs value determines whether argument validity checking should be
2259 * performed or not. If get_fs() == USER_DS, checking is performed, with
2260 @@ -327,52 +329,6 @@ do { \
2261 extern unsigned long __copy_tofrom_user(void __user *to,
2262 const void __user *from, unsigned long size);
2263
2264 -#ifndef __powerpc64__
2265 -
2266 -static inline unsigned long copy_from_user(void *to,
2267 - const void __user *from, unsigned long n)
2268 -{
2269 - unsigned long over;
2270 -
2271 - if (access_ok(VERIFY_READ, from, n))
2272 - return __copy_tofrom_user((__force void __user *)to, from, n);
2273 - if ((unsigned long)from < TASK_SIZE) {
2274 - over = (unsigned long)from + n - TASK_SIZE;
2275 - return __copy_tofrom_user((__force void __user *)to, from,
2276 - n - over) + over;
2277 - }
2278 - return n;
2279 -}
2280 -
2281 -static inline unsigned long copy_to_user(void __user *to,
2282 - const void *from, unsigned long n)
2283 -{
2284 - unsigned long over;
2285 -
2286 - if (access_ok(VERIFY_WRITE, to, n))
2287 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2288 - if ((unsigned long)to < TASK_SIZE) {
2289 - over = (unsigned long)to + n - TASK_SIZE;
2290 - return __copy_tofrom_user(to, (__force void __user *)from,
2291 - n - over) + over;
2292 - }
2293 - return n;
2294 -}
2295 -
2296 -#else /* __powerpc64__ */
2297 -
2298 -#define __copy_in_user(to, from, size) \
2299 - __copy_tofrom_user((to), (from), (size))
2300 -
2301 -extern unsigned long copy_from_user(void *to, const void __user *from,
2302 - unsigned long n);
2303 -extern unsigned long copy_to_user(void __user *to, const void *from,
2304 - unsigned long n);
2305 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2306 - unsigned long n);
2307 -
2308 -#endif /* __powerpc64__ */
2309 -
2310 static inline unsigned long __copy_from_user_inatomic(void *to,
2311 const void __user *from, unsigned long n)
2312 {
2313 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2314 if (ret == 0)
2315 return 0;
2316 }
2317 +
2318 + if (!__builtin_constant_p(n))
2319 + check_object_size(to, n, false);
2320 +
2321 return __copy_tofrom_user((__force void __user *)to, from, n);
2322 }
2323
2324 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2325 if (ret == 0)
2326 return 0;
2327 }
2328 +
2329 + if (!__builtin_constant_p(n))
2330 + check_object_size(from, n, true);
2331 +
2332 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2333 }
2334
2335 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2336 return __copy_to_user_inatomic(to, from, size);
2337 }
2338
2339 +#ifndef __powerpc64__
2340 +
2341 +static inline unsigned long __must_check copy_from_user(void *to,
2342 + const void __user *from, unsigned long n)
2343 +{
2344 + unsigned long over;
2345 +
2346 + if ((long)n < 0)
2347 + return n;
2348 +
2349 + if (access_ok(VERIFY_READ, from, n)) {
2350 + if (!__builtin_constant_p(n))
2351 + check_object_size(to, n, false);
2352 + return __copy_tofrom_user((__force void __user *)to, from, n);
2353 + }
2354 + if ((unsigned long)from < TASK_SIZE) {
2355 + over = (unsigned long)from + n - TASK_SIZE;
2356 + if (!__builtin_constant_p(n - over))
2357 + check_object_size(to, n - over, false);
2358 + return __copy_tofrom_user((__force void __user *)to, from,
2359 + n - over) + over;
2360 + }
2361 + return n;
2362 +}
2363 +
2364 +static inline unsigned long __must_check copy_to_user(void __user *to,
2365 + const void *from, unsigned long n)
2366 +{
2367 + unsigned long over;
2368 +
2369 + if ((long)n < 0)
2370 + return n;
2371 +
2372 + if (access_ok(VERIFY_WRITE, to, n)) {
2373 + if (!__builtin_constant_p(n))
2374 + check_object_size(from, n, true);
2375 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2376 + }
2377 + if ((unsigned long)to < TASK_SIZE) {
2378 + over = (unsigned long)to + n - TASK_SIZE;
2379 + if (!__builtin_constant_p(n))
2380 + check_object_size(from, n - over, true);
2381 + return __copy_tofrom_user(to, (__force void __user *)from,
2382 + n - over) + over;
2383 + }
2384 + return n;
2385 +}
2386 +
2387 +#else /* __powerpc64__ */
2388 +
2389 +#define __copy_in_user(to, from, size) \
2390 + __copy_tofrom_user((to), (from), (size))
2391 +
2392 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2393 +{
2394 + if ((long)n < 0 || n > INT_MAX)
2395 + return n;
2396 +
2397 + if (!__builtin_constant_p(n))
2398 + check_object_size(to, n, false);
2399 +
2400 + if (likely(access_ok(VERIFY_READ, from, n)))
2401 + n = __copy_from_user(to, from, n);
2402 + else
2403 + memset(to, 0, n);
2404 + return n;
2405 +}
2406 +
2407 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2408 +{
2409 + if ((long)n < 0 || n > INT_MAX)
2410 + return n;
2411 +
2412 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2413 + if (!__builtin_constant_p(n))
2414 + check_object_size(from, n, true);
2415 + n = __copy_to_user(to, from, n);
2416 + }
2417 + return n;
2418 +}
2419 +
2420 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2421 + unsigned long n);
2422 +
2423 +#endif /* __powerpc64__ */
2424 +
2425 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2426
2427 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2428 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2429 index 429983c..7af363b 100644
2430 --- a/arch/powerpc/kernel/exceptions-64e.S
2431 +++ b/arch/powerpc/kernel/exceptions-64e.S
2432 @@ -587,6 +587,7 @@ storage_fault_common:
2433 std r14,_DAR(r1)
2434 std r15,_DSISR(r1)
2435 addi r3,r1,STACK_FRAME_OVERHEAD
2436 + bl .save_nvgprs
2437 mr r4,r14
2438 mr r5,r15
2439 ld r14,PACA_EXGEN+EX_R14(r13)
2440 @@ -596,8 +597,7 @@ storage_fault_common:
2441 cmpdi r3,0
2442 bne- 1f
2443 b .ret_from_except_lite
2444 -1: bl .save_nvgprs
2445 - mr r5,r3
2446 +1: mr r5,r3
2447 addi r3,r1,STACK_FRAME_OVERHEAD
2448 ld r4,_DAR(r1)
2449 bl .bad_page_fault
2450 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2451 index 41b02c7..05e76fb 100644
2452 --- a/arch/powerpc/kernel/exceptions-64s.S
2453 +++ b/arch/powerpc/kernel/exceptions-64s.S
2454 @@ -1014,10 +1014,10 @@ handle_page_fault:
2455 11: ld r4,_DAR(r1)
2456 ld r5,_DSISR(r1)
2457 addi r3,r1,STACK_FRAME_OVERHEAD
2458 + bl .save_nvgprs
2459 bl .do_page_fault
2460 cmpdi r3,0
2461 beq+ 13f
2462 - bl .save_nvgprs
2463 mr r5,r3
2464 addi r3,r1,STACK_FRAME_OVERHEAD
2465 lwz r4,_DAR(r1)
2466 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2467 index 0b6d796..d760ddb 100644
2468 --- a/arch/powerpc/kernel/module_32.c
2469 +++ b/arch/powerpc/kernel/module_32.c
2470 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2471 me->arch.core_plt_section = i;
2472 }
2473 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2474 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2475 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2476 return -ENOEXEC;
2477 }
2478
2479 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2480
2481 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2482 /* Init, or core PLT? */
2483 - if (location >= mod->module_core
2484 - && location < mod->module_core + mod->core_size)
2485 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2486 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2487 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2488 - else
2489 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2490 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2491 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2492 + else {
2493 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2494 + return ~0UL;
2495 + }
2496
2497 /* Find this entry, or if that fails, the next avail. entry */
2498 while (entry->jump[0]) {
2499 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2500 index 8f53954..a704ad6 100644
2501 --- a/arch/powerpc/kernel/process.c
2502 +++ b/arch/powerpc/kernel/process.c
2503 @@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2504 * Lookup NIP late so we have the best change of getting the
2505 * above info out without failing
2506 */
2507 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2508 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2509 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2510 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2511 #endif
2512 show_stack(current, (unsigned long *) regs->gpr[1]);
2513 if (!user_mode(regs))
2514 @@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2515 newsp = stack[0];
2516 ip = stack[STACK_FRAME_LR_SAVE];
2517 if (!firstframe || ip != lr) {
2518 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2519 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2520 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2521 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2522 - printk(" (%pS)",
2523 + printk(" (%pA)",
2524 (void *)current->ret_stack[curr_frame].ret);
2525 curr_frame--;
2526 }
2527 @@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2528 struct pt_regs *regs = (struct pt_regs *)
2529 (sp + STACK_FRAME_OVERHEAD);
2530 lr = regs->link;
2531 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2532 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2533 regs->trap, (void *)regs->nip, (void *)lr);
2534 firstframe = 1;
2535 }
2536 @@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2537 }
2538
2539 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2540 -
2541 -unsigned long arch_align_stack(unsigned long sp)
2542 -{
2543 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2544 - sp -= get_random_int() & ~PAGE_MASK;
2545 - return sp & ~0xf;
2546 -}
2547 -
2548 -static inline unsigned long brk_rnd(void)
2549 -{
2550 - unsigned long rnd = 0;
2551 -
2552 - /* 8MB for 32bit, 1GB for 64bit */
2553 - if (is_32bit_task())
2554 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2555 - else
2556 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2557 -
2558 - return rnd << PAGE_SHIFT;
2559 -}
2560 -
2561 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2562 -{
2563 - unsigned long base = mm->brk;
2564 - unsigned long ret;
2565 -
2566 -#ifdef CONFIG_PPC_STD_MMU_64
2567 - /*
2568 - * If we are using 1TB segments and we are allowed to randomise
2569 - * the heap, we can put it above 1TB so it is backed by a 1TB
2570 - * segment. Otherwise the heap will be in the bottom 1TB
2571 - * which always uses 256MB segments and this may result in a
2572 - * performance penalty.
2573 - */
2574 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2575 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2576 -#endif
2577 -
2578 - ret = PAGE_ALIGN(base + brk_rnd());
2579 -
2580 - if (ret < mm->brk)
2581 - return mm->brk;
2582 -
2583 - return ret;
2584 -}
2585 -
2586 -unsigned long randomize_et_dyn(unsigned long base)
2587 -{
2588 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2589 -
2590 - if (ret < base)
2591 - return base;
2592 -
2593 - return ret;
2594 -}
2595 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2596 index 78b76dc..7f232ef 100644
2597 --- a/arch/powerpc/kernel/signal_32.c
2598 +++ b/arch/powerpc/kernel/signal_32.c
2599 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2600 /* Save user registers on the stack */
2601 frame = &rt_sf->uc.uc_mcontext;
2602 addr = frame;
2603 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2604 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2605 if (save_user_regs(regs, frame, 0, 1))
2606 goto badframe;
2607 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2608 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2609 index e91c736..742ec06 100644
2610 --- a/arch/powerpc/kernel/signal_64.c
2611 +++ b/arch/powerpc/kernel/signal_64.c
2612 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2613 current->thread.fpscr.val = 0;
2614
2615 /* Set up to return from userspace. */
2616 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2617 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2618 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2619 } else {
2620 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2621 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2622 index f19d977..8ac286e 100644
2623 --- a/arch/powerpc/kernel/traps.c
2624 +++ b/arch/powerpc/kernel/traps.c
2625 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2626 static inline void pmac_backlight_unblank(void) { }
2627 #endif
2628
2629 +extern void gr_handle_kernel_exploit(void);
2630 +
2631 int die(const char *str, struct pt_regs *regs, long err)
2632 {
2633 static struct {
2634 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2635 if (panic_on_oops)
2636 panic("Fatal exception");
2637
2638 + gr_handle_kernel_exploit();
2639 +
2640 oops_exit();
2641 do_exit(err);
2642
2643 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2644 index 142ab10..236e61a 100644
2645 --- a/arch/powerpc/kernel/vdso.c
2646 +++ b/arch/powerpc/kernel/vdso.c
2647 @@ -36,6 +36,7 @@
2648 #include <asm/firmware.h>
2649 #include <asm/vdso.h>
2650 #include <asm/vdso_datapage.h>
2651 +#include <asm/mman.h>
2652
2653 #include "setup.h"
2654
2655 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2656 vdso_base = VDSO32_MBASE;
2657 #endif
2658
2659 - current->mm->context.vdso_base = 0;
2660 + current->mm->context.vdso_base = ~0UL;
2661
2662 /* vDSO has a problem and was disabled, just don't "enable" it for the
2663 * process
2664 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2665 vdso_base = get_unmapped_area(NULL, vdso_base,
2666 (vdso_pages << PAGE_SHIFT) +
2667 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2668 - 0, 0);
2669 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2670 if (IS_ERR_VALUE(vdso_base)) {
2671 rc = vdso_base;
2672 goto fail_mmapsem;
2673 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2674 index 5eea6f3..5d10396 100644
2675 --- a/arch/powerpc/lib/usercopy_64.c
2676 +++ b/arch/powerpc/lib/usercopy_64.c
2677 @@ -9,22 +9,6 @@
2678 #include <linux/module.h>
2679 #include <asm/uaccess.h>
2680
2681 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2682 -{
2683 - if (likely(access_ok(VERIFY_READ, from, n)))
2684 - n = __copy_from_user(to, from, n);
2685 - else
2686 - memset(to, 0, n);
2687 - return n;
2688 -}
2689 -
2690 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2691 -{
2692 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2693 - n = __copy_to_user(to, from, n);
2694 - return n;
2695 -}
2696 -
2697 unsigned long copy_in_user(void __user *to, const void __user *from,
2698 unsigned long n)
2699 {
2700 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2701 return n;
2702 }
2703
2704 -EXPORT_SYMBOL(copy_from_user);
2705 -EXPORT_SYMBOL(copy_to_user);
2706 EXPORT_SYMBOL(copy_in_user);
2707
2708 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2709 index 5efe8c9..db9ceef 100644
2710 --- a/arch/powerpc/mm/fault.c
2711 +++ b/arch/powerpc/mm/fault.c
2712 @@ -32,6 +32,10 @@
2713 #include <linux/perf_event.h>
2714 #include <linux/magic.h>
2715 #include <linux/ratelimit.h>
2716 +#include <linux/slab.h>
2717 +#include <linux/pagemap.h>
2718 +#include <linux/compiler.h>
2719 +#include <linux/unistd.h>
2720
2721 #include <asm/firmware.h>
2722 #include <asm/page.h>
2723 @@ -43,6 +47,7 @@
2724 #include <asm/tlbflush.h>
2725 #include <asm/siginfo.h>
2726 #include <mm/mmu_decl.h>
2727 +#include <asm/ptrace.h>
2728
2729 #ifdef CONFIG_KPROBES
2730 static inline int notify_page_fault(struct pt_regs *regs)
2731 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2732 }
2733 #endif
2734
2735 +#ifdef CONFIG_PAX_PAGEEXEC
2736 +/*
2737 + * PaX: decide what to do with offenders (regs->nip = fault address)
2738 + *
2739 + * returns 1 when task should be killed
2740 + */
2741 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2742 +{
2743 + return 1;
2744 +}
2745 +
2746 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2747 +{
2748 + unsigned long i;
2749 +
2750 + printk(KERN_ERR "PAX: bytes at PC: ");
2751 + for (i = 0; i < 5; i++) {
2752 + unsigned int c;
2753 + if (get_user(c, (unsigned int __user *)pc+i))
2754 + printk(KERN_CONT "???????? ");
2755 + else
2756 + printk(KERN_CONT "%08x ", c);
2757 + }
2758 + printk("\n");
2759 +}
2760 +#endif
2761 +
2762 /*
2763 * Check whether the instruction at regs->nip is a store using
2764 * an update addressing form which will update r1.
2765 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2766 * indicate errors in DSISR but can validly be set in SRR1.
2767 */
2768 if (trap == 0x400)
2769 - error_code &= 0x48200000;
2770 + error_code &= 0x58200000;
2771 else
2772 is_write = error_code & DSISR_ISSTORE;
2773 #else
2774 @@ -259,7 +291,7 @@ good_area:
2775 * "undefined". Of those that can be set, this is the only
2776 * one which seems bad.
2777 */
2778 - if (error_code & 0x10000000)
2779 + if (error_code & DSISR_GUARDED)
2780 /* Guarded storage error. */
2781 goto bad_area;
2782 #endif /* CONFIG_8xx */
2783 @@ -274,7 +306,7 @@ good_area:
2784 * processors use the same I/D cache coherency mechanism
2785 * as embedded.
2786 */
2787 - if (error_code & DSISR_PROTFAULT)
2788 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2789 goto bad_area;
2790 #endif /* CONFIG_PPC_STD_MMU */
2791
2792 @@ -343,6 +375,23 @@ bad_area:
2793 bad_area_nosemaphore:
2794 /* User mode accesses cause a SIGSEGV */
2795 if (user_mode(regs)) {
2796 +
2797 +#ifdef CONFIG_PAX_PAGEEXEC
2798 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2799 +#ifdef CONFIG_PPC_STD_MMU
2800 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2801 +#else
2802 + if (is_exec && regs->nip == address) {
2803 +#endif
2804 + switch (pax_handle_fetch_fault(regs)) {
2805 + }
2806 +
2807 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2808 + do_group_exit(SIGKILL);
2809 + }
2810 + }
2811 +#endif
2812 +
2813 _exception(SIGSEGV, regs, code, address);
2814 return 0;
2815 }
2816 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2817 index 5a783d8..c23e14b 100644
2818 --- a/arch/powerpc/mm/mmap_64.c
2819 +++ b/arch/powerpc/mm/mmap_64.c
2820 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2821 */
2822 if (mmap_is_legacy()) {
2823 mm->mmap_base = TASK_UNMAPPED_BASE;
2824 +
2825 +#ifdef CONFIG_PAX_RANDMMAP
2826 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2827 + mm->mmap_base += mm->delta_mmap;
2828 +#endif
2829 +
2830 mm->get_unmapped_area = arch_get_unmapped_area;
2831 mm->unmap_area = arch_unmap_area;
2832 } else {
2833 mm->mmap_base = mmap_base();
2834 +
2835 +#ifdef CONFIG_PAX_RANDMMAP
2836 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2837 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2838 +#endif
2839 +
2840 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2841 mm->unmap_area = arch_unmap_area_topdown;
2842 }
2843 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2844 index ba51948..23009d9 100644
2845 --- a/arch/powerpc/mm/slice.c
2846 +++ b/arch/powerpc/mm/slice.c
2847 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2848 if ((mm->task_size - len) < addr)
2849 return 0;
2850 vma = find_vma(mm, addr);
2851 - return (!vma || (addr + len) <= vma->vm_start);
2852 + return check_heap_stack_gap(vma, addr, len);
2853 }
2854
2855 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2856 @@ -256,7 +256,7 @@ full_search:
2857 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2858 continue;
2859 }
2860 - if (!vma || addr + len <= vma->vm_start) {
2861 + if (check_heap_stack_gap(vma, addr, len)) {
2862 /*
2863 * Remember the place where we stopped the search:
2864 */
2865 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2866 }
2867 }
2868
2869 - addr = mm->mmap_base;
2870 - while (addr > len) {
2871 + if (mm->mmap_base < len)
2872 + addr = -ENOMEM;
2873 + else
2874 + addr = mm->mmap_base - len;
2875 +
2876 + while (!IS_ERR_VALUE(addr)) {
2877 /* Go down by chunk size */
2878 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2879 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2880
2881 /* Check for hit with different page size */
2882 mask = slice_range_to_mask(addr, len);
2883 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2884 * return with success:
2885 */
2886 vma = find_vma(mm, addr);
2887 - if (!vma || (addr + len) <= vma->vm_start) {
2888 + if (check_heap_stack_gap(vma, addr, len)) {
2889 /* remember the address as a hint for next time */
2890 if (use_cache)
2891 mm->free_area_cache = addr;
2892 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2893 mm->cached_hole_size = vma->vm_start - addr;
2894
2895 /* try just below the current vma->vm_start */
2896 - addr = vma->vm_start;
2897 + addr = skip_heap_stack_gap(vma, len);
2898 }
2899
2900 /*
2901 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2902 if (fixed && addr > (mm->task_size - len))
2903 return -EINVAL;
2904
2905 +#ifdef CONFIG_PAX_RANDMMAP
2906 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2907 + addr = 0;
2908 +#endif
2909 +
2910 /* If hint, make sure it matches our alignment restrictions */
2911 if (!fixed && addr) {
2912 addr = _ALIGN_UP(addr, 1ul << pshift);
2913 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2914 index 547f1a6..3fff354 100644
2915 --- a/arch/s390/include/asm/elf.h
2916 +++ b/arch/s390/include/asm/elf.h
2917 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2918 the loader. We need to make sure that it is out of the way of the program
2919 that it will "exec", and that there is sufficient room for the brk. */
2920
2921 -extern unsigned long randomize_et_dyn(unsigned long base);
2922 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2923 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2924 +
2925 +#ifdef CONFIG_PAX_ASLR
2926 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2927 +
2928 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2929 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2930 +#endif
2931
2932 /* This yields a mask that user programs can use to figure out what
2933 instruction set this CPU supports. */
2934 @@ -211,7 +217,4 @@ struct linux_binprm;
2935 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2936 int arch_setup_additional_pages(struct linux_binprm *, int);
2937
2938 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2939 -#define arch_randomize_brk arch_randomize_brk
2940 -
2941 #endif
2942 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2943 index 6582f69..b69906f 100644
2944 --- a/arch/s390/include/asm/system.h
2945 +++ b/arch/s390/include/asm/system.h
2946 @@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *command);
2947 extern void (*_machine_halt)(void);
2948 extern void (*_machine_power_off)(void);
2949
2950 -extern unsigned long arch_align_stack(unsigned long sp);
2951 +#define arch_align_stack(x) ((x) & ~0xfUL)
2952
2953 static inline int tprot(unsigned long addr)
2954 {
2955 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2956 index 2b23885..e136e31 100644
2957 --- a/arch/s390/include/asm/uaccess.h
2958 +++ b/arch/s390/include/asm/uaccess.h
2959 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2960 copy_to_user(void __user *to, const void *from, unsigned long n)
2961 {
2962 might_fault();
2963 +
2964 + if ((long)n < 0)
2965 + return n;
2966 +
2967 if (access_ok(VERIFY_WRITE, to, n))
2968 n = __copy_to_user(to, from, n);
2969 return n;
2970 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2971 static inline unsigned long __must_check
2972 __copy_from_user(void *to, const void __user *from, unsigned long n)
2973 {
2974 + if ((long)n < 0)
2975 + return n;
2976 +
2977 if (__builtin_constant_p(n) && (n <= 256))
2978 return uaccess.copy_from_user_small(n, from, to);
2979 else
2980 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2981 unsigned int sz = __compiletime_object_size(to);
2982
2983 might_fault();
2984 +
2985 + if ((long)n < 0)
2986 + return n;
2987 +
2988 if (unlikely(sz != -1 && sz < n)) {
2989 copy_from_user_overflow();
2990 return n;
2991 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2992 index dfcb343..eda788a 100644
2993 --- a/arch/s390/kernel/module.c
2994 +++ b/arch/s390/kernel/module.c
2995 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2996
2997 /* Increase core size by size of got & plt and set start
2998 offsets for got and plt. */
2999 - me->core_size = ALIGN(me->core_size, 4);
3000 - me->arch.got_offset = me->core_size;
3001 - me->core_size += me->arch.got_size;
3002 - me->arch.plt_offset = me->core_size;
3003 - me->core_size += me->arch.plt_size;
3004 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3005 + me->arch.got_offset = me->core_size_rw;
3006 + me->core_size_rw += me->arch.got_size;
3007 + me->arch.plt_offset = me->core_size_rx;
3008 + me->core_size_rx += me->arch.plt_size;
3009 return 0;
3010 }
3011
3012 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3013 if (info->got_initialized == 0) {
3014 Elf_Addr *gotent;
3015
3016 - gotent = me->module_core + me->arch.got_offset +
3017 + gotent = me->module_core_rw + me->arch.got_offset +
3018 info->got_offset;
3019 *gotent = val;
3020 info->got_initialized = 1;
3021 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3022 else if (r_type == R_390_GOTENT ||
3023 r_type == R_390_GOTPLTENT)
3024 *(unsigned int *) loc =
3025 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3026 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3027 else if (r_type == R_390_GOT64 ||
3028 r_type == R_390_GOTPLT64)
3029 *(unsigned long *) loc = val;
3030 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3031 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3032 if (info->plt_initialized == 0) {
3033 unsigned int *ip;
3034 - ip = me->module_core + me->arch.plt_offset +
3035 + ip = me->module_core_rx + me->arch.plt_offset +
3036 info->plt_offset;
3037 #ifndef CONFIG_64BIT
3038 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3039 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3040 val - loc + 0xffffUL < 0x1ffffeUL) ||
3041 (r_type == R_390_PLT32DBL &&
3042 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3043 - val = (Elf_Addr) me->module_core +
3044 + val = (Elf_Addr) me->module_core_rx +
3045 me->arch.plt_offset +
3046 info->plt_offset;
3047 val += rela->r_addend - loc;
3048 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3049 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3050 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3051 val = val + rela->r_addend -
3052 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3053 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3054 if (r_type == R_390_GOTOFF16)
3055 *(unsigned short *) loc = val;
3056 else if (r_type == R_390_GOTOFF32)
3057 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3058 break;
3059 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3060 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3061 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3062 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3063 rela->r_addend - loc;
3064 if (r_type == R_390_GOTPC)
3065 *(unsigned int *) loc = val;
3066 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3067 index 541a750..8739853 100644
3068 --- a/arch/s390/kernel/process.c
3069 +++ b/arch/s390/kernel/process.c
3070 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_struct *p)
3071 }
3072 return 0;
3073 }
3074 -
3075 -unsigned long arch_align_stack(unsigned long sp)
3076 -{
3077 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3078 - sp -= get_random_int() & ~PAGE_MASK;
3079 - return sp & ~0xf;
3080 -}
3081 -
3082 -static inline unsigned long brk_rnd(void)
3083 -{
3084 - /* 8MB for 32bit, 1GB for 64bit */
3085 - if (is_32bit_task())
3086 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3087 - else
3088 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3089 -}
3090 -
3091 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3092 -{
3093 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3094 -
3095 - if (ret < mm->brk)
3096 - return mm->brk;
3097 - return ret;
3098 -}
3099 -
3100 -unsigned long randomize_et_dyn(unsigned long base)
3101 -{
3102 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3103 -
3104 - if (!(current->flags & PF_RANDOMIZE))
3105 - return base;
3106 - if (ret < base)
3107 - return base;
3108 - return ret;
3109 -}
3110 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3111 index 7b371c3..ad06cf1 100644
3112 --- a/arch/s390/kernel/setup.c
3113 +++ b/arch/s390/kernel/setup.c
3114 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *p)
3115 }
3116 early_param("mem", early_parse_mem);
3117
3118 -unsigned int user_mode = HOME_SPACE_MODE;
3119 +unsigned int user_mode = SECONDARY_SPACE_MODE;
3120 EXPORT_SYMBOL_GPL(user_mode);
3121
3122 static int set_amode_and_uaccess(unsigned long user_amode,
3123 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3124 index c9a9f7f..60d0315 100644
3125 --- a/arch/s390/mm/mmap.c
3126 +++ b/arch/s390/mm/mmap.c
3127 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3128 */
3129 if (mmap_is_legacy()) {
3130 mm->mmap_base = TASK_UNMAPPED_BASE;
3131 +
3132 +#ifdef CONFIG_PAX_RANDMMAP
3133 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3134 + mm->mmap_base += mm->delta_mmap;
3135 +#endif
3136 +
3137 mm->get_unmapped_area = arch_get_unmapped_area;
3138 mm->unmap_area = arch_unmap_area;
3139 } else {
3140 mm->mmap_base = mmap_base();
3141 +
3142 +#ifdef CONFIG_PAX_RANDMMAP
3143 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3144 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3145 +#endif
3146 +
3147 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3148 mm->unmap_area = arch_unmap_area_topdown;
3149 }
3150 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3151 */
3152 if (mmap_is_legacy()) {
3153 mm->mmap_base = TASK_UNMAPPED_BASE;
3154 +
3155 +#ifdef CONFIG_PAX_RANDMMAP
3156 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3157 + mm->mmap_base += mm->delta_mmap;
3158 +#endif
3159 +
3160 mm->get_unmapped_area = s390_get_unmapped_area;
3161 mm->unmap_area = arch_unmap_area;
3162 } else {
3163 mm->mmap_base = mmap_base();
3164 +
3165 +#ifdef CONFIG_PAX_RANDMMAP
3166 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3167 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3168 +#endif
3169 +
3170 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3171 mm->unmap_area = arch_unmap_area_topdown;
3172 }
3173 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3174 index 589d5c7..669e274 100644
3175 --- a/arch/score/include/asm/system.h
3176 +++ b/arch/score/include/asm/system.h
3177 @@ -17,7 +17,7 @@ do { \
3178 #define finish_arch_switch(prev) do {} while (0)
3179
3180 typedef void (*vi_handler_t)(void);
3181 -extern unsigned long arch_align_stack(unsigned long sp);
3182 +#define arch_align_stack(x) (x)
3183
3184 #define mb() barrier()
3185 #define rmb() barrier()
3186 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3187 index 25d0803..d6c8e36 100644
3188 --- a/arch/score/kernel/process.c
3189 +++ b/arch/score/kernel/process.c
3190 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3191
3192 return task_pt_regs(task)->cp0_epc;
3193 }
3194 -
3195 -unsigned long arch_align_stack(unsigned long sp)
3196 -{
3197 - return sp;
3198 -}
3199 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3200 index afeb710..d1d1289 100644
3201 --- a/arch/sh/mm/mmap.c
3202 +++ b/arch/sh/mm/mmap.c
3203 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3204 addr = PAGE_ALIGN(addr);
3205
3206 vma = find_vma(mm, addr);
3207 - if (TASK_SIZE - len >= addr &&
3208 - (!vma || addr + len <= vma->vm_start))
3209 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3210 return addr;
3211 }
3212
3213 @@ -106,7 +105,7 @@ full_search:
3214 }
3215 return -ENOMEM;
3216 }
3217 - if (likely(!vma || addr + len <= vma->vm_start)) {
3218 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3219 /*
3220 * Remember the place where we stopped the search:
3221 */
3222 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3223 addr = PAGE_ALIGN(addr);
3224
3225 vma = find_vma(mm, addr);
3226 - if (TASK_SIZE - len >= addr &&
3227 - (!vma || addr + len <= vma->vm_start))
3228 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3229 return addr;
3230 }
3231
3232 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3233 /* make sure it can fit in the remaining address space */
3234 if (likely(addr > len)) {
3235 vma = find_vma(mm, addr-len);
3236 - if (!vma || addr <= vma->vm_start) {
3237 + if (check_heap_stack_gap(vma, addr - len, len)) {
3238 /* remember the address as a hint for next time */
3239 return (mm->free_area_cache = addr-len);
3240 }
3241 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3242 if (unlikely(mm->mmap_base < len))
3243 goto bottomup;
3244
3245 - addr = mm->mmap_base-len;
3246 - if (do_colour_align)
3247 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3248 + addr = mm->mmap_base - len;
3249
3250 do {
3251 + if (do_colour_align)
3252 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3253 /*
3254 * Lookup failure means no vma is above this address,
3255 * else if new region fits below vma->vm_start,
3256 * return with success:
3257 */
3258 vma = find_vma(mm, addr);
3259 - if (likely(!vma || addr+len <= vma->vm_start)) {
3260 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3261 /* remember the address as a hint for next time */
3262 return (mm->free_area_cache = addr);
3263 }
3264 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3265 mm->cached_hole_size = vma->vm_start - addr;
3266
3267 /* try just below the current vma->vm_start */
3268 - addr = vma->vm_start-len;
3269 - if (do_colour_align)
3270 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3271 - } while (likely(len < vma->vm_start));
3272 + addr = skip_heap_stack_gap(vma, len);
3273 + } while (!IS_ERR_VALUE(addr));
3274
3275 bottomup:
3276 /*
3277 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3278 index ad1fb5d..fc5315b 100644
3279 --- a/arch/sparc/Makefile
3280 +++ b/arch/sparc/Makefile
3281 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3282 # Export what is needed by arch/sparc/boot/Makefile
3283 export VMLINUX_INIT VMLINUX_MAIN
3284 VMLINUX_INIT := $(head-y) $(init-y)
3285 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3286 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3287 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3288 VMLINUX_MAIN += $(drivers-y) $(net-y)
3289
3290 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3291 index 9f421df..b81fc12 100644
3292 --- a/arch/sparc/include/asm/atomic_64.h
3293 +++ b/arch/sparc/include/asm/atomic_64.h
3294 @@ -14,18 +14,40 @@
3295 #define ATOMIC64_INIT(i) { (i) }
3296
3297 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3298 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3299 +{
3300 + return v->counter;
3301 +}
3302 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3303 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3304 +{
3305 + return v->counter;
3306 +}
3307
3308 #define atomic_set(v, i) (((v)->counter) = i)
3309 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3310 +{
3311 + v->counter = i;
3312 +}
3313 #define atomic64_set(v, i) (((v)->counter) = i)
3314 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3315 +{
3316 + v->counter = i;
3317 +}
3318
3319 extern void atomic_add(int, atomic_t *);
3320 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3321 extern void atomic64_add(long, atomic64_t *);
3322 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3323 extern void atomic_sub(int, atomic_t *);
3324 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3325 extern void atomic64_sub(long, atomic64_t *);
3326 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3327
3328 extern int atomic_add_ret(int, atomic_t *);
3329 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3330 extern long atomic64_add_ret(long, atomic64_t *);
3331 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3332 extern int atomic_sub_ret(int, atomic_t *);
3333 extern long atomic64_sub_ret(long, atomic64_t *);
3334
3335 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3336 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3337
3338 #define atomic_inc_return(v) atomic_add_ret(1, v)
3339 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3340 +{
3341 + return atomic_add_ret_unchecked(1, v);
3342 +}
3343 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3344 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3345 +{
3346 + return atomic64_add_ret_unchecked(1, v);
3347 +}
3348
3349 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3350 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3351
3352 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3353 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3354 +{
3355 + return atomic_add_ret_unchecked(i, v);
3356 +}
3357 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3358 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3359 +{
3360 + return atomic64_add_ret_unchecked(i, v);
3361 +}
3362
3363 /*
3364 * atomic_inc_and_test - increment and test
3365 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3366 * other cases.
3367 */
3368 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3369 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3370 +{
3371 + return atomic_inc_return_unchecked(v) == 0;
3372 +}
3373 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3374
3375 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3376 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3377 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3378
3379 #define atomic_inc(v) atomic_add(1, v)
3380 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3381 +{
3382 + atomic_add_unchecked(1, v);
3383 +}
3384 #define atomic64_inc(v) atomic64_add(1, v)
3385 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3386 +{
3387 + atomic64_add_unchecked(1, v);
3388 +}
3389
3390 #define atomic_dec(v) atomic_sub(1, v)
3391 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3392 +{
3393 + atomic_sub_unchecked(1, v);
3394 +}
3395 #define atomic64_dec(v) atomic64_sub(1, v)
3396 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3397 +{
3398 + atomic64_sub_unchecked(1, v);
3399 +}
3400
3401 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3402 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3403
3404 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3405 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3406 +{
3407 + return cmpxchg(&v->counter, old, new);
3408 +}
3409 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3410 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3411 +{
3412 + return xchg(&v->counter, new);
3413 +}
3414
3415 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3416 {
3417 - int c, old;
3418 + int c, old, new;
3419 c = atomic_read(v);
3420 for (;;) {
3421 - if (unlikely(c == (u)))
3422 + if (unlikely(c == u))
3423 break;
3424 - old = atomic_cmpxchg((v), c, c + (a));
3425 +
3426 + asm volatile("addcc %2, %0, %0\n"
3427 +
3428 +#ifdef CONFIG_PAX_REFCOUNT
3429 + "tvs %%icc, 6\n"
3430 +#endif
3431 +
3432 + : "=r" (new)
3433 + : "0" (c), "ir" (a)
3434 + : "cc");
3435 +
3436 + old = atomic_cmpxchg(v, c, new);
3437 if (likely(old == c))
3438 break;
3439 c = old;
3440 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3441 #define atomic64_cmpxchg(v, o, n) \
3442 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3443 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3444 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3445 +{
3446 + return xchg(&v->counter, new);
3447 +}
3448
3449 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3450 {
3451 - long c, old;
3452 + long c, old, new;
3453 c = atomic64_read(v);
3454 for (;;) {
3455 - if (unlikely(c == (u)))
3456 + if (unlikely(c == u))
3457 break;
3458 - old = atomic64_cmpxchg((v), c, c + (a));
3459 +
3460 + asm volatile("addcc %2, %0, %0\n"
3461 +
3462 +#ifdef CONFIG_PAX_REFCOUNT
3463 + "tvs %%xcc, 6\n"
3464 +#endif
3465 +
3466 + : "=r" (new)
3467 + : "0" (c), "ir" (a)
3468 + : "cc");
3469 +
3470 + old = atomic64_cmpxchg(v, c, new);
3471 if (likely(old == c))
3472 break;
3473 c = old;
3474 }
3475 - return c != (u);
3476 + return c != u;
3477 }
3478
3479 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3480 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3481 index 69358b5..17b4745 100644
3482 --- a/arch/sparc/include/asm/cache.h
3483 +++ b/arch/sparc/include/asm/cache.h
3484 @@ -10,7 +10,7 @@
3485 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3486
3487 #define L1_CACHE_SHIFT 5
3488 -#define L1_CACHE_BYTES 32
3489 +#define L1_CACHE_BYTES 32UL
3490
3491 #ifdef CONFIG_SPARC32
3492 #define SMP_CACHE_BYTES_SHIFT 5
3493 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3494 index 4269ca6..e3da77f 100644
3495 --- a/arch/sparc/include/asm/elf_32.h
3496 +++ b/arch/sparc/include/asm/elf_32.h
3497 @@ -114,6 +114,13 @@ typedef struct {
3498
3499 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3500
3501 +#ifdef CONFIG_PAX_ASLR
3502 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3503 +
3504 +#define PAX_DELTA_MMAP_LEN 16
3505 +#define PAX_DELTA_STACK_LEN 16
3506 +#endif
3507 +
3508 /* This yields a mask that user programs can use to figure out what
3509 instruction set this cpu supports. This can NOT be done in userspace
3510 on Sparc. */
3511 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3512 index 7df8b7f..4946269 100644
3513 --- a/arch/sparc/include/asm/elf_64.h
3514 +++ b/arch/sparc/include/asm/elf_64.h
3515 @@ -180,6 +180,13 @@ typedef struct {
3516 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3517 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3518
3519 +#ifdef CONFIG_PAX_ASLR
3520 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3521 +
3522 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3523 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3524 +#endif
3525 +
3526 extern unsigned long sparc64_elf_hwcap;
3527 #define ELF_HWCAP sparc64_elf_hwcap
3528
3529 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3530 index a790cc6..091ed94 100644
3531 --- a/arch/sparc/include/asm/pgtable_32.h
3532 +++ b/arch/sparc/include/asm/pgtable_32.h
3533 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3534 BTFIXUPDEF_INT(page_none)
3535 BTFIXUPDEF_INT(page_copy)
3536 BTFIXUPDEF_INT(page_readonly)
3537 +
3538 +#ifdef CONFIG_PAX_PAGEEXEC
3539 +BTFIXUPDEF_INT(page_shared_noexec)
3540 +BTFIXUPDEF_INT(page_copy_noexec)
3541 +BTFIXUPDEF_INT(page_readonly_noexec)
3542 +#endif
3543 +
3544 BTFIXUPDEF_INT(page_kernel)
3545
3546 #define PMD_SHIFT SUN4C_PMD_SHIFT
3547 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3548 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3549 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3550
3551 +#ifdef CONFIG_PAX_PAGEEXEC
3552 +extern pgprot_t PAGE_SHARED_NOEXEC;
3553 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3554 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3555 +#else
3556 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3557 +# define PAGE_COPY_NOEXEC PAGE_COPY
3558 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3559 +#endif
3560 +
3561 extern unsigned long page_kernel;
3562
3563 #ifdef MODULE
3564 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3565 index f6ae2b2..b03ffc7 100644
3566 --- a/arch/sparc/include/asm/pgtsrmmu.h
3567 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3568 @@ -115,6 +115,13 @@
3569 SRMMU_EXEC | SRMMU_REF)
3570 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3571 SRMMU_EXEC | SRMMU_REF)
3572 +
3573 +#ifdef CONFIG_PAX_PAGEEXEC
3574 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3575 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3576 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3577 +#endif
3578 +
3579 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3580 SRMMU_DIRTY | SRMMU_REF)
3581
3582 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3583 index 9689176..63c18ea 100644
3584 --- a/arch/sparc/include/asm/spinlock_64.h
3585 +++ b/arch/sparc/include/asm/spinlock_64.h
3586 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3587
3588 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3589
3590 -static void inline arch_read_lock(arch_rwlock_t *lock)
3591 +static inline void arch_read_lock(arch_rwlock_t *lock)
3592 {
3593 unsigned long tmp1, tmp2;
3594
3595 __asm__ __volatile__ (
3596 "1: ldsw [%2], %0\n"
3597 " brlz,pn %0, 2f\n"
3598 -"4: add %0, 1, %1\n"
3599 +"4: addcc %0, 1, %1\n"
3600 +
3601 +#ifdef CONFIG_PAX_REFCOUNT
3602 +" tvs %%icc, 6\n"
3603 +#endif
3604 +
3605 " cas [%2], %0, %1\n"
3606 " cmp %0, %1\n"
3607 " bne,pn %%icc, 1b\n"
3608 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3609 " .previous"
3610 : "=&r" (tmp1), "=&r" (tmp2)
3611 : "r" (lock)
3612 - : "memory");
3613 + : "memory", "cc");
3614 }
3615
3616 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3617 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3618 {
3619 int tmp1, tmp2;
3620
3621 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3622 "1: ldsw [%2], %0\n"
3623 " brlz,a,pn %0, 2f\n"
3624 " mov 0, %0\n"
3625 -" add %0, 1, %1\n"
3626 +" addcc %0, 1, %1\n"
3627 +
3628 +#ifdef CONFIG_PAX_REFCOUNT
3629 +" tvs %%icc, 6\n"
3630 +#endif
3631 +
3632 " cas [%2], %0, %1\n"
3633 " cmp %0, %1\n"
3634 " bne,pn %%icc, 1b\n"
3635 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3636 return tmp1;
3637 }
3638
3639 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3640 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3641 {
3642 unsigned long tmp1, tmp2;
3643
3644 __asm__ __volatile__(
3645 "1: lduw [%2], %0\n"
3646 -" sub %0, 1, %1\n"
3647 +" subcc %0, 1, %1\n"
3648 +
3649 +#ifdef CONFIG_PAX_REFCOUNT
3650 +" tvs %%icc, 6\n"
3651 +#endif
3652 +
3653 " cas [%2], %0, %1\n"
3654 " cmp %0, %1\n"
3655 " bne,pn %%xcc, 1b\n"
3656 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3657 : "memory");
3658 }
3659
3660 -static void inline arch_write_lock(arch_rwlock_t *lock)
3661 +static inline void arch_write_lock(arch_rwlock_t *lock)
3662 {
3663 unsigned long mask, tmp1, tmp2;
3664
3665 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3666 : "memory");
3667 }
3668
3669 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3670 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3671 {
3672 __asm__ __volatile__(
3673 " stw %%g0, [%0]"
3674 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3675 : "memory");
3676 }
3677
3678 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3679 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3680 {
3681 unsigned long mask, tmp1, tmp2, result;
3682
3683 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3684 index fa57532..e1a4c53 100644
3685 --- a/arch/sparc/include/asm/thread_info_32.h
3686 +++ b/arch/sparc/include/asm/thread_info_32.h
3687 @@ -50,6 +50,8 @@ struct thread_info {
3688 unsigned long w_saved;
3689
3690 struct restart_block restart_block;
3691 +
3692 + unsigned long lowest_stack;
3693 };
3694
3695 /*
3696 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3697 index 60d86be..952dea1 100644
3698 --- a/arch/sparc/include/asm/thread_info_64.h
3699 +++ b/arch/sparc/include/asm/thread_info_64.h
3700 @@ -63,6 +63,8 @@ struct thread_info {
3701 struct pt_regs *kern_una_regs;
3702 unsigned int kern_una_insn;
3703
3704 + unsigned long lowest_stack;
3705 +
3706 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3707 };
3708
3709 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3710 index e88fbe5..96b0ce5 100644
3711 --- a/arch/sparc/include/asm/uaccess.h
3712 +++ b/arch/sparc/include/asm/uaccess.h
3713 @@ -1,5 +1,13 @@
3714 #ifndef ___ASM_SPARC_UACCESS_H
3715 #define ___ASM_SPARC_UACCESS_H
3716 +
3717 +#ifdef __KERNEL__
3718 +#ifndef __ASSEMBLY__
3719 +#include <linux/types.h>
3720 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3721 +#endif
3722 +#endif
3723 +
3724 #if defined(__sparc__) && defined(__arch64__)
3725 #include <asm/uaccess_64.h>
3726 #else
3727 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3728 index 8303ac4..07f333d 100644
3729 --- a/arch/sparc/include/asm/uaccess_32.h
3730 +++ b/arch/sparc/include/asm/uaccess_32.h
3731 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3732
3733 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3734 {
3735 - if (n && __access_ok((unsigned long) to, n))
3736 + if ((long)n < 0)
3737 + return n;
3738 +
3739 + if (n && __access_ok((unsigned long) to, n)) {
3740 + if (!__builtin_constant_p(n))
3741 + check_object_size(from, n, true);
3742 return __copy_user(to, (__force void __user *) from, n);
3743 - else
3744 + } else
3745 return n;
3746 }
3747
3748 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3749 {
3750 + if ((long)n < 0)
3751 + return n;
3752 +
3753 + if (!__builtin_constant_p(n))
3754 + check_object_size(from, n, true);
3755 +
3756 return __copy_user(to, (__force void __user *) from, n);
3757 }
3758
3759 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3760 {
3761 - if (n && __access_ok((unsigned long) from, n))
3762 + if ((long)n < 0)
3763 + return n;
3764 +
3765 + if (n && __access_ok((unsigned long) from, n)) {
3766 + if (!__builtin_constant_p(n))
3767 + check_object_size(to, n, false);
3768 return __copy_user((__force void __user *) to, from, n);
3769 - else
3770 + } else
3771 return n;
3772 }
3773
3774 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3775 {
3776 + if ((long)n < 0)
3777 + return n;
3778 +
3779 return __copy_user((__force void __user *) to, from, n);
3780 }
3781
3782 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3783 index 3e1449f..5293a0e 100644
3784 --- a/arch/sparc/include/asm/uaccess_64.h
3785 +++ b/arch/sparc/include/asm/uaccess_64.h
3786 @@ -10,6 +10,7 @@
3787 #include <linux/compiler.h>
3788 #include <linux/string.h>
3789 #include <linux/thread_info.h>
3790 +#include <linux/kernel.h>
3791 #include <asm/asi.h>
3792 #include <asm/system.h>
3793 #include <asm/spitfire.h>
3794 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3795 static inline unsigned long __must_check
3796 copy_from_user(void *to, const void __user *from, unsigned long size)
3797 {
3798 - unsigned long ret = ___copy_from_user(to, from, size);
3799 + unsigned long ret;
3800
3801 + if ((long)size < 0 || size > INT_MAX)
3802 + return size;
3803 +
3804 + if (!__builtin_constant_p(size))
3805 + check_object_size(to, size, false);
3806 +
3807 + ret = ___copy_from_user(to, from, size);
3808 if (unlikely(ret))
3809 ret = copy_from_user_fixup(to, from, size);
3810
3811 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3812 static inline unsigned long __must_check
3813 copy_to_user(void __user *to, const void *from, unsigned long size)
3814 {
3815 - unsigned long ret = ___copy_to_user(to, from, size);
3816 + unsigned long ret;
3817
3818 + if ((long)size < 0 || size > INT_MAX)
3819 + return size;
3820 +
3821 + if (!__builtin_constant_p(size))
3822 + check_object_size(from, size, true);
3823 +
3824 + ret = ___copy_to_user(to, from, size);
3825 if (unlikely(ret))
3826 ret = copy_to_user_fixup(to, from, size);
3827 return ret;
3828 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3829 index cb85458..e063f17 100644
3830 --- a/arch/sparc/kernel/Makefile
3831 +++ b/arch/sparc/kernel/Makefile
3832 @@ -3,7 +3,7 @@
3833 #
3834
3835 asflags-y := -ansi
3836 -ccflags-y := -Werror
3837 +#ccflags-y := -Werror
3838
3839 extra-y := head_$(BITS).o
3840 extra-y += init_task.o
3841 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3842 index f793742..4d880af 100644
3843 --- a/arch/sparc/kernel/process_32.c
3844 +++ b/arch/sparc/kernel/process_32.c
3845 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3846 rw->ins[4], rw->ins[5],
3847 rw->ins[6],
3848 rw->ins[7]);
3849 - printk("%pS\n", (void *) rw->ins[7]);
3850 + printk("%pA\n", (void *) rw->ins[7]);
3851 rw = (struct reg_window32 *) rw->ins[6];
3852 }
3853 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3854 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3855
3856 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3857 r->psr, r->pc, r->npc, r->y, print_tainted());
3858 - printk("PC: <%pS>\n", (void *) r->pc);
3859 + printk("PC: <%pA>\n", (void *) r->pc);
3860 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3861 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3862 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3863 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3864 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3865 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3866 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3867 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3868
3869 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3870 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3871 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3872 rw = (struct reg_window32 *) fp;
3873 pc = rw->ins[7];
3874 printk("[%08lx : ", pc);
3875 - printk("%pS ] ", (void *) pc);
3876 + printk("%pA ] ", (void *) pc);
3877 fp = rw->ins[6];
3878 } while (++count < 16);
3879 printk("\n");
3880 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3881 index d959cd0..7b42812 100644
3882 --- a/arch/sparc/kernel/process_64.c
3883 +++ b/arch/sparc/kernel/process_64.c
3884 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3885 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3886 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3887 if (regs->tstate & TSTATE_PRIV)
3888 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3889 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3890 }
3891
3892 void show_regs(struct pt_regs *regs)
3893 {
3894 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3895 regs->tpc, regs->tnpc, regs->y, print_tainted());
3896 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3897 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3898 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3899 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3900 regs->u_regs[3]);
3901 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3902 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3903 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3904 regs->u_regs[15]);
3905 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3906 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3907 show_regwindow(regs);
3908 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3909 }
3910 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3911 ((tp && tp->task) ? tp->task->pid : -1));
3912
3913 if (gp->tstate & TSTATE_PRIV) {
3914 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3915 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3916 (void *) gp->tpc,
3917 (void *) gp->o7,
3918 (void *) gp->i7,
3919 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3920 index 42b282f..28ce9f2 100644
3921 --- a/arch/sparc/kernel/sys_sparc_32.c
3922 +++ b/arch/sparc/kernel/sys_sparc_32.c
3923 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3924 if (ARCH_SUN4C && len > 0x20000000)
3925 return -ENOMEM;
3926 if (!addr)
3927 - addr = TASK_UNMAPPED_BASE;
3928 + addr = current->mm->mmap_base;
3929
3930 if (flags & MAP_SHARED)
3931 addr = COLOUR_ALIGN(addr);
3932 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3933 }
3934 if (TASK_SIZE - PAGE_SIZE - len < addr)
3935 return -ENOMEM;
3936 - if (!vmm || addr + len <= vmm->vm_start)
3937 + if (check_heap_stack_gap(vmm, addr, len))
3938 return addr;
3939 addr = vmm->vm_end;
3940 if (flags & MAP_SHARED)
3941 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3942 index 908b47a..aa9e584 100644
3943 --- a/arch/sparc/kernel/sys_sparc_64.c
3944 +++ b/arch/sparc/kernel/sys_sparc_64.c
3945 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3946 /* We do not accept a shared mapping if it would violate
3947 * cache aliasing constraints.
3948 */
3949 - if ((flags & MAP_SHARED) &&
3950 + if ((filp || (flags & MAP_SHARED)) &&
3951 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3952 return -EINVAL;
3953 return addr;
3954 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3955 if (filp || (flags & MAP_SHARED))
3956 do_color_align = 1;
3957
3958 +#ifdef CONFIG_PAX_RANDMMAP
3959 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3960 +#endif
3961 +
3962 if (addr) {
3963 if (do_color_align)
3964 addr = COLOUR_ALIGN(addr, pgoff);
3965 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3966 addr = PAGE_ALIGN(addr);
3967
3968 vma = find_vma(mm, addr);
3969 - if (task_size - len >= addr &&
3970 - (!vma || addr + len <= vma->vm_start))
3971 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3972 return addr;
3973 }
3974
3975 if (len > mm->cached_hole_size) {
3976 - start_addr = addr = mm->free_area_cache;
3977 + start_addr = addr = mm->free_area_cache;
3978 } else {
3979 - start_addr = addr = TASK_UNMAPPED_BASE;
3980 + start_addr = addr = mm->mmap_base;
3981 mm->cached_hole_size = 0;
3982 }
3983
3984 @@ -174,14 +177,14 @@ full_search:
3985 vma = find_vma(mm, VA_EXCLUDE_END);
3986 }
3987 if (unlikely(task_size < addr)) {
3988 - if (start_addr != TASK_UNMAPPED_BASE) {
3989 - start_addr = addr = TASK_UNMAPPED_BASE;
3990 + if (start_addr != mm->mmap_base) {
3991 + start_addr = addr = mm->mmap_base;
3992 mm->cached_hole_size = 0;
3993 goto full_search;
3994 }
3995 return -ENOMEM;
3996 }
3997 - if (likely(!vma || addr + len <= vma->vm_start)) {
3998 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3999 /*
4000 * Remember the place where we stopped the search:
4001 */
4002 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4003 /* We do not accept a shared mapping if it would violate
4004 * cache aliasing constraints.
4005 */
4006 - if ((flags & MAP_SHARED) &&
4007 + if ((filp || (flags & MAP_SHARED)) &&
4008 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4009 return -EINVAL;
4010 return addr;
4011 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4012 addr = PAGE_ALIGN(addr);
4013
4014 vma = find_vma(mm, addr);
4015 - if (task_size - len >= addr &&
4016 - (!vma || addr + len <= vma->vm_start))
4017 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4018 return addr;
4019 }
4020
4021 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4022 /* make sure it can fit in the remaining address space */
4023 if (likely(addr > len)) {
4024 vma = find_vma(mm, addr-len);
4025 - if (!vma || addr <= vma->vm_start) {
4026 + if (check_heap_stack_gap(vma, addr - len, len)) {
4027 /* remember the address as a hint for next time */
4028 return (mm->free_area_cache = addr-len);
4029 }
4030 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4031 if (unlikely(mm->mmap_base < len))
4032 goto bottomup;
4033
4034 - addr = mm->mmap_base-len;
4035 - if (do_color_align)
4036 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4037 + addr = mm->mmap_base - len;
4038
4039 do {
4040 + if (do_color_align)
4041 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4042 /*
4043 * Lookup failure means no vma is above this address,
4044 * else if new region fits below vma->vm_start,
4045 * return with success:
4046 */
4047 vma = find_vma(mm, addr);
4048 - if (likely(!vma || addr+len <= vma->vm_start)) {
4049 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4050 /* remember the address as a hint for next time */
4051 return (mm->free_area_cache = addr);
4052 }
4053 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4054 mm->cached_hole_size = vma->vm_start - addr;
4055
4056 /* try just below the current vma->vm_start */
4057 - addr = vma->vm_start-len;
4058 - if (do_color_align)
4059 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4060 - } while (likely(len < vma->vm_start));
4061 + addr = skip_heap_stack_gap(vma, len);
4062 + } while (!IS_ERR_VALUE(addr));
4063
4064 bottomup:
4065 /*
4066 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4067 gap == RLIM_INFINITY ||
4068 sysctl_legacy_va_layout) {
4069 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4070 +
4071 +#ifdef CONFIG_PAX_RANDMMAP
4072 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4073 + mm->mmap_base += mm->delta_mmap;
4074 +#endif
4075 +
4076 mm->get_unmapped_area = arch_get_unmapped_area;
4077 mm->unmap_area = arch_unmap_area;
4078 } else {
4079 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4080 gap = (task_size / 6 * 5);
4081
4082 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4083 +
4084 +#ifdef CONFIG_PAX_RANDMMAP
4085 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4086 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4087 +#endif
4088 +
4089 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4090 mm->unmap_area = arch_unmap_area_topdown;
4091 }
4092 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4093 index c0490c7..84959d1 100644
4094 --- a/arch/sparc/kernel/traps_32.c
4095 +++ b/arch/sparc/kernel/traps_32.c
4096 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
4097 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4098 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4099
4100 +extern void gr_handle_kernel_exploit(void);
4101 +
4102 void die_if_kernel(char *str, struct pt_regs *regs)
4103 {
4104 static int die_counter;
4105 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4106 count++ < 30 &&
4107 (((unsigned long) rw) >= PAGE_OFFSET) &&
4108 !(((unsigned long) rw) & 0x7)) {
4109 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4110 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4111 (void *) rw->ins[7]);
4112 rw = (struct reg_window32 *)rw->ins[6];
4113 }
4114 }
4115 printk("Instruction DUMP:");
4116 instruction_dump ((unsigned long *) regs->pc);
4117 - if(regs->psr & PSR_PS)
4118 + if(regs->psr & PSR_PS) {
4119 + gr_handle_kernel_exploit();
4120 do_exit(SIGKILL);
4121 + }
4122 do_exit(SIGSEGV);
4123 }
4124
4125 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4126 index 0cbdaa4..438e4c9 100644
4127 --- a/arch/sparc/kernel/traps_64.c
4128 +++ b/arch/sparc/kernel/traps_64.c
4129 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4130 i + 1,
4131 p->trapstack[i].tstate, p->trapstack[i].tpc,
4132 p->trapstack[i].tnpc, p->trapstack[i].tt);
4133 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4134 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4135 }
4136 }
4137
4138 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4139
4140 lvl -= 0x100;
4141 if (regs->tstate & TSTATE_PRIV) {
4142 +
4143 +#ifdef CONFIG_PAX_REFCOUNT
4144 + if (lvl == 6)
4145 + pax_report_refcount_overflow(regs);
4146 +#endif
4147 +
4148 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4149 die_if_kernel(buffer, regs);
4150 }
4151 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4152 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4153 {
4154 char buffer[32];
4155 -
4156 +
4157 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4158 0, lvl, SIGTRAP) == NOTIFY_STOP)
4159 return;
4160
4161 +#ifdef CONFIG_PAX_REFCOUNT
4162 + if (lvl == 6)
4163 + pax_report_refcount_overflow(regs);
4164 +#endif
4165 +
4166 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4167
4168 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4169 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4170 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4171 printk("%s" "ERROR(%d): ",
4172 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4173 - printk("TPC<%pS>\n", (void *) regs->tpc);
4174 + printk("TPC<%pA>\n", (void *) regs->tpc);
4175 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4176 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4177 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4178 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4179 smp_processor_id(),
4180 (type & 0x1) ? 'I' : 'D',
4181 regs->tpc);
4182 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4183 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4184 panic("Irrecoverable Cheetah+ parity error.");
4185 }
4186
4187 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4188 smp_processor_id(),
4189 (type & 0x1) ? 'I' : 'D',
4190 regs->tpc);
4191 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4192 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4193 }
4194
4195 struct sun4v_error_entry {
4196 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4197
4198 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4199 regs->tpc, tl);
4200 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4201 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4202 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4203 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4204 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4205 (void *) regs->u_regs[UREG_I7]);
4206 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4207 "pte[%lx] error[%lx]\n",
4208 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4209
4210 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4211 regs->tpc, tl);
4212 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4213 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4214 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4215 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4216 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4217 (void *) regs->u_regs[UREG_I7]);
4218 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4219 "pte[%lx] error[%lx]\n",
4220 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4221 fp = (unsigned long)sf->fp + STACK_BIAS;
4222 }
4223
4224 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4225 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4226 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4227 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4228 int index = tsk->curr_ret_stack;
4229 if (tsk->ret_stack && index >= graph) {
4230 pc = tsk->ret_stack[index - graph].ret;
4231 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4232 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4233 graph++;
4234 }
4235 }
4236 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4237 return (struct reg_window *) (fp + STACK_BIAS);
4238 }
4239
4240 +extern void gr_handle_kernel_exploit(void);
4241 +
4242 void die_if_kernel(char *str, struct pt_regs *regs)
4243 {
4244 static int die_counter;
4245 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4246 while (rw &&
4247 count++ < 30 &&
4248 kstack_valid(tp, (unsigned long) rw)) {
4249 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4250 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4251 (void *) rw->ins[7]);
4252
4253 rw = kernel_stack_up(rw);
4254 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4255 }
4256 user_instruction_dump ((unsigned int __user *) regs->tpc);
4257 }
4258 - if (regs->tstate & TSTATE_PRIV)
4259 + if (regs->tstate & TSTATE_PRIV) {
4260 + gr_handle_kernel_exploit();
4261 do_exit(SIGKILL);
4262 + }
4263 do_exit(SIGSEGV);
4264 }
4265 EXPORT_SYMBOL(die_if_kernel);
4266 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4267 index 76e4ac1..78f8bb1 100644
4268 --- a/arch/sparc/kernel/unaligned_64.c
4269 +++ b/arch/sparc/kernel/unaligned_64.c
4270 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4271 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4272
4273 if (__ratelimit(&ratelimit)) {
4274 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4275 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4276 regs->tpc, (void *) regs->tpc);
4277 }
4278 }
4279 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4280 index a3fc437..fea9957 100644
4281 --- a/arch/sparc/lib/Makefile
4282 +++ b/arch/sparc/lib/Makefile
4283 @@ -2,7 +2,7 @@
4284 #
4285
4286 asflags-y := -ansi -DST_DIV0=0x02
4287 -ccflags-y := -Werror
4288 +#ccflags-y := -Werror
4289
4290 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4291 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4292 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4293 index 59186e0..f747d7a 100644
4294 --- a/arch/sparc/lib/atomic_64.S
4295 +++ b/arch/sparc/lib/atomic_64.S
4296 @@ -18,7 +18,12 @@
4297 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4298 BACKOFF_SETUP(%o2)
4299 1: lduw [%o1], %g1
4300 - add %g1, %o0, %g7
4301 + addcc %g1, %o0, %g7
4302 +
4303 +#ifdef CONFIG_PAX_REFCOUNT
4304 + tvs %icc, 6
4305 +#endif
4306 +
4307 cas [%o1], %g1, %g7
4308 cmp %g1, %g7
4309 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4310 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4311 2: BACKOFF_SPIN(%o2, %o3, 1b)
4312 .size atomic_add, .-atomic_add
4313
4314 + .globl atomic_add_unchecked
4315 + .type atomic_add_unchecked,#function
4316 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4317 + BACKOFF_SETUP(%o2)
4318 +1: lduw [%o1], %g1
4319 + add %g1, %o0, %g7
4320 + cas [%o1], %g1, %g7
4321 + cmp %g1, %g7
4322 + bne,pn %icc, 2f
4323 + nop
4324 + retl
4325 + nop
4326 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4327 + .size atomic_add_unchecked, .-atomic_add_unchecked
4328 +
4329 .globl atomic_sub
4330 .type atomic_sub,#function
4331 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4332 BACKOFF_SETUP(%o2)
4333 1: lduw [%o1], %g1
4334 - sub %g1, %o0, %g7
4335 + subcc %g1, %o0, %g7
4336 +
4337 +#ifdef CONFIG_PAX_REFCOUNT
4338 + tvs %icc, 6
4339 +#endif
4340 +
4341 cas [%o1], %g1, %g7
4342 cmp %g1, %g7
4343 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4344 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4345 2: BACKOFF_SPIN(%o2, %o3, 1b)
4346 .size atomic_sub, .-atomic_sub
4347
4348 + .globl atomic_sub_unchecked
4349 + .type atomic_sub_unchecked,#function
4350 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4351 + BACKOFF_SETUP(%o2)
4352 +1: lduw [%o1], %g1
4353 + sub %g1, %o0, %g7
4354 + cas [%o1], %g1, %g7
4355 + cmp %g1, %g7
4356 + bne,pn %icc, 2f
4357 + nop
4358 + retl
4359 + nop
4360 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4361 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4362 +
4363 .globl atomic_add_ret
4364 .type atomic_add_ret,#function
4365 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4366 BACKOFF_SETUP(%o2)
4367 1: lduw [%o1], %g1
4368 - add %g1, %o0, %g7
4369 + addcc %g1, %o0, %g7
4370 +
4371 +#ifdef CONFIG_PAX_REFCOUNT
4372 + tvs %icc, 6
4373 +#endif
4374 +
4375 cas [%o1], %g1, %g7
4376 cmp %g1, %g7
4377 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4378 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4379 2: BACKOFF_SPIN(%o2, %o3, 1b)
4380 .size atomic_add_ret, .-atomic_add_ret
4381
4382 + .globl atomic_add_ret_unchecked
4383 + .type atomic_add_ret_unchecked,#function
4384 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4385 + BACKOFF_SETUP(%o2)
4386 +1: lduw [%o1], %g1
4387 + addcc %g1, %o0, %g7
4388 + cas [%o1], %g1, %g7
4389 + cmp %g1, %g7
4390 + bne,pn %icc, 2f
4391 + add %g7, %o0, %g7
4392 + sra %g7, 0, %o0
4393 + retl
4394 + nop
4395 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4396 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4397 +
4398 .globl atomic_sub_ret
4399 .type atomic_sub_ret,#function
4400 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4401 BACKOFF_SETUP(%o2)
4402 1: lduw [%o1], %g1
4403 - sub %g1, %o0, %g7
4404 + subcc %g1, %o0, %g7
4405 +
4406 +#ifdef CONFIG_PAX_REFCOUNT
4407 + tvs %icc, 6
4408 +#endif
4409 +
4410 cas [%o1], %g1, %g7
4411 cmp %g1, %g7
4412 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4413 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4414 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4415 BACKOFF_SETUP(%o2)
4416 1: ldx [%o1], %g1
4417 - add %g1, %o0, %g7
4418 + addcc %g1, %o0, %g7
4419 +
4420 +#ifdef CONFIG_PAX_REFCOUNT
4421 + tvs %xcc, 6
4422 +#endif
4423 +
4424 casx [%o1], %g1, %g7
4425 cmp %g1, %g7
4426 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4427 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4428 2: BACKOFF_SPIN(%o2, %o3, 1b)
4429 .size atomic64_add, .-atomic64_add
4430
4431 + .globl atomic64_add_unchecked
4432 + .type atomic64_add_unchecked,#function
4433 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4434 + BACKOFF_SETUP(%o2)
4435 +1: ldx [%o1], %g1
4436 + addcc %g1, %o0, %g7
4437 + casx [%o1], %g1, %g7
4438 + cmp %g1, %g7
4439 + bne,pn %xcc, 2f
4440 + nop
4441 + retl
4442 + nop
4443 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4444 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4445 +
4446 .globl atomic64_sub
4447 .type atomic64_sub,#function
4448 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4449 BACKOFF_SETUP(%o2)
4450 1: ldx [%o1], %g1
4451 - sub %g1, %o0, %g7
4452 + subcc %g1, %o0, %g7
4453 +
4454 +#ifdef CONFIG_PAX_REFCOUNT
4455 + tvs %xcc, 6
4456 +#endif
4457 +
4458 casx [%o1], %g1, %g7
4459 cmp %g1, %g7
4460 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4461 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4462 2: BACKOFF_SPIN(%o2, %o3, 1b)
4463 .size atomic64_sub, .-atomic64_sub
4464
4465 + .globl atomic64_sub_unchecked
4466 + .type atomic64_sub_unchecked,#function
4467 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4468 + BACKOFF_SETUP(%o2)
4469 +1: ldx [%o1], %g1
4470 + subcc %g1, %o0, %g7
4471 + casx [%o1], %g1, %g7
4472 + cmp %g1, %g7
4473 + bne,pn %xcc, 2f
4474 + nop
4475 + retl
4476 + nop
4477 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4478 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4479 +
4480 .globl atomic64_add_ret
4481 .type atomic64_add_ret,#function
4482 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4483 BACKOFF_SETUP(%o2)
4484 1: ldx [%o1], %g1
4485 - add %g1, %o0, %g7
4486 + addcc %g1, %o0, %g7
4487 +
4488 +#ifdef CONFIG_PAX_REFCOUNT
4489 + tvs %xcc, 6
4490 +#endif
4491 +
4492 casx [%o1], %g1, %g7
4493 cmp %g1, %g7
4494 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4495 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4496 2: BACKOFF_SPIN(%o2, %o3, 1b)
4497 .size atomic64_add_ret, .-atomic64_add_ret
4498
4499 + .globl atomic64_add_ret_unchecked
4500 + .type atomic64_add_ret_unchecked,#function
4501 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4502 + BACKOFF_SETUP(%o2)
4503 +1: ldx [%o1], %g1
4504 + addcc %g1, %o0, %g7
4505 + casx [%o1], %g1, %g7
4506 + cmp %g1, %g7
4507 + bne,pn %xcc, 2f
4508 + add %g7, %o0, %g7
4509 + mov %g7, %o0
4510 + retl
4511 + nop
4512 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4513 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4514 +
4515 .globl atomic64_sub_ret
4516 .type atomic64_sub_ret,#function
4517 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4518 BACKOFF_SETUP(%o2)
4519 1: ldx [%o1], %g1
4520 - sub %g1, %o0, %g7
4521 + subcc %g1, %o0, %g7
4522 +
4523 +#ifdef CONFIG_PAX_REFCOUNT
4524 + tvs %xcc, 6
4525 +#endif
4526 +
4527 casx [%o1], %g1, %g7
4528 cmp %g1, %g7
4529 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4530 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4531 index 1b30bb3..b4a16c7 100644
4532 --- a/arch/sparc/lib/ksyms.c
4533 +++ b/arch/sparc/lib/ksyms.c
4534 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4535
4536 /* Atomic counter implementation. */
4537 EXPORT_SYMBOL(atomic_add);
4538 +EXPORT_SYMBOL(atomic_add_unchecked);
4539 EXPORT_SYMBOL(atomic_add_ret);
4540 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4541 EXPORT_SYMBOL(atomic_sub);
4542 +EXPORT_SYMBOL(atomic_sub_unchecked);
4543 EXPORT_SYMBOL(atomic_sub_ret);
4544 EXPORT_SYMBOL(atomic64_add);
4545 +EXPORT_SYMBOL(atomic64_add_unchecked);
4546 EXPORT_SYMBOL(atomic64_add_ret);
4547 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4548 EXPORT_SYMBOL(atomic64_sub);
4549 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4550 EXPORT_SYMBOL(atomic64_sub_ret);
4551
4552 /* Atomic bit operations. */
4553 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4554 index 301421c..e2535d1 100644
4555 --- a/arch/sparc/mm/Makefile
4556 +++ b/arch/sparc/mm/Makefile
4557 @@ -2,7 +2,7 @@
4558 #
4559
4560 asflags-y := -ansi
4561 -ccflags-y := -Werror
4562 +#ccflags-y := -Werror
4563
4564 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4565 obj-y += fault_$(BITS).o
4566 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4567 index aa1c1b1..f93e28f 100644
4568 --- a/arch/sparc/mm/fault_32.c
4569 +++ b/arch/sparc/mm/fault_32.c
4570 @@ -22,6 +22,9 @@
4571 #include <linux/interrupt.h>
4572 #include <linux/module.h>
4573 #include <linux/kdebug.h>
4574 +#include <linux/slab.h>
4575 +#include <linux/pagemap.h>
4576 +#include <linux/compiler.h>
4577
4578 #include <asm/system.h>
4579 #include <asm/page.h>
4580 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4581 return safe_compute_effective_address(regs, insn);
4582 }
4583
4584 +#ifdef CONFIG_PAX_PAGEEXEC
4585 +#ifdef CONFIG_PAX_DLRESOLVE
4586 +static void pax_emuplt_close(struct vm_area_struct *vma)
4587 +{
4588 + vma->vm_mm->call_dl_resolve = 0UL;
4589 +}
4590 +
4591 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4592 +{
4593 + unsigned int *kaddr;
4594 +
4595 + vmf->page = alloc_page(GFP_HIGHUSER);
4596 + if (!vmf->page)
4597 + return VM_FAULT_OOM;
4598 +
4599 + kaddr = kmap(vmf->page);
4600 + memset(kaddr, 0, PAGE_SIZE);
4601 + kaddr[0] = 0x9DE3BFA8U; /* save */
4602 + flush_dcache_page(vmf->page);
4603 + kunmap(vmf->page);
4604 + return VM_FAULT_MAJOR;
4605 +}
4606 +
4607 +static const struct vm_operations_struct pax_vm_ops = {
4608 + .close = pax_emuplt_close,
4609 + .fault = pax_emuplt_fault
4610 +};
4611 +
4612 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4613 +{
4614 + int ret;
4615 +
4616 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4617 + vma->vm_mm = current->mm;
4618 + vma->vm_start = addr;
4619 + vma->vm_end = addr + PAGE_SIZE;
4620 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4621 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4622 + vma->vm_ops = &pax_vm_ops;
4623 +
4624 + ret = insert_vm_struct(current->mm, vma);
4625 + if (ret)
4626 + return ret;
4627 +
4628 + ++current->mm->total_vm;
4629 + return 0;
4630 +}
4631 +#endif
4632 +
4633 +/*
4634 + * PaX: decide what to do with offenders (regs->pc = fault address)
4635 + *
4636 + * returns 1 when task should be killed
4637 + * 2 when patched PLT trampoline was detected
4638 + * 3 when unpatched PLT trampoline was detected
4639 + */
4640 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4641 +{
4642 +
4643 +#ifdef CONFIG_PAX_EMUPLT
4644 + int err;
4645 +
4646 + do { /* PaX: patched PLT emulation #1 */
4647 + unsigned int sethi1, sethi2, jmpl;
4648 +
4649 + err = get_user(sethi1, (unsigned int *)regs->pc);
4650 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4651 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4652 +
4653 + if (err)
4654 + break;
4655 +
4656 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4657 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4658 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4659 + {
4660 + unsigned int addr;
4661 +
4662 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4663 + addr = regs->u_regs[UREG_G1];
4664 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4665 + regs->pc = addr;
4666 + regs->npc = addr+4;
4667 + return 2;
4668 + }
4669 + } while (0);
4670 +
4671 + { /* PaX: patched PLT emulation #2 */
4672 + unsigned int ba;
4673 +
4674 + err = get_user(ba, (unsigned int *)regs->pc);
4675 +
4676 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4677 + unsigned int addr;
4678 +
4679 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4680 + regs->pc = addr;
4681 + regs->npc = addr+4;
4682 + return 2;
4683 + }
4684 + }
4685 +
4686 + do { /* PaX: patched PLT emulation #3 */
4687 + unsigned int sethi, jmpl, nop;
4688 +
4689 + err = get_user(sethi, (unsigned int *)regs->pc);
4690 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4691 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4692 +
4693 + if (err)
4694 + break;
4695 +
4696 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4697 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4698 + nop == 0x01000000U)
4699 + {
4700 + unsigned int addr;
4701 +
4702 + addr = (sethi & 0x003FFFFFU) << 10;
4703 + regs->u_regs[UREG_G1] = addr;
4704 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4705 + regs->pc = addr;
4706 + regs->npc = addr+4;
4707 + return 2;
4708 + }
4709 + } while (0);
4710 +
4711 + do { /* PaX: unpatched PLT emulation step 1 */
4712 + unsigned int sethi, ba, nop;
4713 +
4714 + err = get_user(sethi, (unsigned int *)regs->pc);
4715 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4716 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4717 +
4718 + if (err)
4719 + break;
4720 +
4721 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4722 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4723 + nop == 0x01000000U)
4724 + {
4725 + unsigned int addr, save, call;
4726 +
4727 + if ((ba & 0xFFC00000U) == 0x30800000U)
4728 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4729 + else
4730 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4731 +
4732 + err = get_user(save, (unsigned int *)addr);
4733 + err |= get_user(call, (unsigned int *)(addr+4));
4734 + err |= get_user(nop, (unsigned int *)(addr+8));
4735 + if (err)
4736 + break;
4737 +
4738 +#ifdef CONFIG_PAX_DLRESOLVE
4739 + if (save == 0x9DE3BFA8U &&
4740 + (call & 0xC0000000U) == 0x40000000U &&
4741 + nop == 0x01000000U)
4742 + {
4743 + struct vm_area_struct *vma;
4744 + unsigned long call_dl_resolve;
4745 +
4746 + down_read(&current->mm->mmap_sem);
4747 + call_dl_resolve = current->mm->call_dl_resolve;
4748 + up_read(&current->mm->mmap_sem);
4749 + if (likely(call_dl_resolve))
4750 + goto emulate;
4751 +
4752 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4753 +
4754 + down_write(&current->mm->mmap_sem);
4755 + if (current->mm->call_dl_resolve) {
4756 + call_dl_resolve = current->mm->call_dl_resolve;
4757 + up_write(&current->mm->mmap_sem);
4758 + if (vma)
4759 + kmem_cache_free(vm_area_cachep, vma);
4760 + goto emulate;
4761 + }
4762 +
4763 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4764 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4765 + up_write(&current->mm->mmap_sem);
4766 + if (vma)
4767 + kmem_cache_free(vm_area_cachep, vma);
4768 + return 1;
4769 + }
4770 +
4771 + if (pax_insert_vma(vma, call_dl_resolve)) {
4772 + up_write(&current->mm->mmap_sem);
4773 + kmem_cache_free(vm_area_cachep, vma);
4774 + return 1;
4775 + }
4776 +
4777 + current->mm->call_dl_resolve = call_dl_resolve;
4778 + up_write(&current->mm->mmap_sem);
4779 +
4780 +emulate:
4781 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4782 + regs->pc = call_dl_resolve;
4783 + regs->npc = addr+4;
4784 + return 3;
4785 + }
4786 +#endif
4787 +
4788 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4789 + if ((save & 0xFFC00000U) == 0x05000000U &&
4790 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4791 + nop == 0x01000000U)
4792 + {
4793 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4794 + regs->u_regs[UREG_G2] = addr + 4;
4795 + addr = (save & 0x003FFFFFU) << 10;
4796 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4797 + regs->pc = addr;
4798 + regs->npc = addr+4;
4799 + return 3;
4800 + }
4801 + }
4802 + } while (0);
4803 +
4804 + do { /* PaX: unpatched PLT emulation step 2 */
4805 + unsigned int save, call, nop;
4806 +
4807 + err = get_user(save, (unsigned int *)(regs->pc-4));
4808 + err |= get_user(call, (unsigned int *)regs->pc);
4809 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4810 + if (err)
4811 + break;
4812 +
4813 + if (save == 0x9DE3BFA8U &&
4814 + (call & 0xC0000000U) == 0x40000000U &&
4815 + nop == 0x01000000U)
4816 + {
4817 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4818 +
4819 + regs->u_regs[UREG_RETPC] = regs->pc;
4820 + regs->pc = dl_resolve;
4821 + regs->npc = dl_resolve+4;
4822 + return 3;
4823 + }
4824 + } while (0);
4825 +#endif
4826 +
4827 + return 1;
4828 +}
4829 +
4830 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4831 +{
4832 + unsigned long i;
4833 +
4834 + printk(KERN_ERR "PAX: bytes at PC: ");
4835 + for (i = 0; i < 8; i++) {
4836 + unsigned int c;
4837 + if (get_user(c, (unsigned int *)pc+i))
4838 + printk(KERN_CONT "???????? ");
4839 + else
4840 + printk(KERN_CONT "%08x ", c);
4841 + }
4842 + printk("\n");
4843 +}
4844 +#endif
4845 +
4846 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4847 int text_fault)
4848 {
4849 @@ -281,6 +546,24 @@ good_area:
4850 if(!(vma->vm_flags & VM_WRITE))
4851 goto bad_area;
4852 } else {
4853 +
4854 +#ifdef CONFIG_PAX_PAGEEXEC
4855 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4856 + up_read(&mm->mmap_sem);
4857 + switch (pax_handle_fetch_fault(regs)) {
4858 +
4859 +#ifdef CONFIG_PAX_EMUPLT
4860 + case 2:
4861 + case 3:
4862 + return;
4863 +#endif
4864 +
4865 + }
4866 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4867 + do_group_exit(SIGKILL);
4868 + }
4869 +#endif
4870 +
4871 /* Allow reads even for write-only mappings */
4872 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4873 goto bad_area;
4874 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4875 index 504c062..6fcb9c6 100644
4876 --- a/arch/sparc/mm/fault_64.c
4877 +++ b/arch/sparc/mm/fault_64.c
4878 @@ -21,6 +21,9 @@
4879 #include <linux/kprobes.h>
4880 #include <linux/kdebug.h>
4881 #include <linux/percpu.h>
4882 +#include <linux/slab.h>
4883 +#include <linux/pagemap.h>
4884 +#include <linux/compiler.h>
4885
4886 #include <asm/page.h>
4887 #include <asm/pgtable.h>
4888 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4889 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4890 regs->tpc);
4891 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4892 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4893 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4894 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4895 dump_stack();
4896 unhandled_fault(regs->tpc, current, regs);
4897 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4898 show_regs(regs);
4899 }
4900
4901 +#ifdef CONFIG_PAX_PAGEEXEC
4902 +#ifdef CONFIG_PAX_DLRESOLVE
4903 +static void pax_emuplt_close(struct vm_area_struct *vma)
4904 +{
4905 + vma->vm_mm->call_dl_resolve = 0UL;
4906 +}
4907 +
4908 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4909 +{
4910 + unsigned int *kaddr;
4911 +
4912 + vmf->page = alloc_page(GFP_HIGHUSER);
4913 + if (!vmf->page)
4914 + return VM_FAULT_OOM;
4915 +
4916 + kaddr = kmap(vmf->page);
4917 + memset(kaddr, 0, PAGE_SIZE);
4918 + kaddr[0] = 0x9DE3BFA8U; /* save */
4919 + flush_dcache_page(vmf->page);
4920 + kunmap(vmf->page);
4921 + return VM_FAULT_MAJOR;
4922 +}
4923 +
4924 +static const struct vm_operations_struct pax_vm_ops = {
4925 + .close = pax_emuplt_close,
4926 + .fault = pax_emuplt_fault
4927 +};
4928 +
4929 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4930 +{
4931 + int ret;
4932 +
4933 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4934 + vma->vm_mm = current->mm;
4935 + vma->vm_start = addr;
4936 + vma->vm_end = addr + PAGE_SIZE;
4937 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4938 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4939 + vma->vm_ops = &pax_vm_ops;
4940 +
4941 + ret = insert_vm_struct(current->mm, vma);
4942 + if (ret)
4943 + return ret;
4944 +
4945 + ++current->mm->total_vm;
4946 + return 0;
4947 +}
4948 +#endif
4949 +
4950 +/*
4951 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4952 + *
4953 + * returns 1 when task should be killed
4954 + * 2 when patched PLT trampoline was detected
4955 + * 3 when unpatched PLT trampoline was detected
4956 + */
4957 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4958 +{
4959 +
4960 +#ifdef CONFIG_PAX_EMUPLT
4961 + int err;
4962 +
4963 + do { /* PaX: patched PLT emulation #1 */
4964 + unsigned int sethi1, sethi2, jmpl;
4965 +
4966 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4967 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4968 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4969 +
4970 + if (err)
4971 + break;
4972 +
4973 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4974 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4975 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4976 + {
4977 + unsigned long addr;
4978 +
4979 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4980 + addr = regs->u_regs[UREG_G1];
4981 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4982 +
4983 + if (test_thread_flag(TIF_32BIT))
4984 + addr &= 0xFFFFFFFFUL;
4985 +
4986 + regs->tpc = addr;
4987 + regs->tnpc = addr+4;
4988 + return 2;
4989 + }
4990 + } while (0);
4991 +
4992 + { /* PaX: patched PLT emulation #2 */
4993 + unsigned int ba;
4994 +
4995 + err = get_user(ba, (unsigned int *)regs->tpc);
4996 +
4997 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4998 + unsigned long addr;
4999 +
5000 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5001 +
5002 + if (test_thread_flag(TIF_32BIT))
5003 + addr &= 0xFFFFFFFFUL;
5004 +
5005 + regs->tpc = addr;
5006 + regs->tnpc = addr+4;
5007 + return 2;
5008 + }
5009 + }
5010 +
5011 + do { /* PaX: patched PLT emulation #3 */
5012 + unsigned int sethi, jmpl, nop;
5013 +
5014 + err = get_user(sethi, (unsigned int *)regs->tpc);
5015 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5016 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5017 +
5018 + if (err)
5019 + break;
5020 +
5021 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5022 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5023 + nop == 0x01000000U)
5024 + {
5025 + unsigned long addr;
5026 +
5027 + addr = (sethi & 0x003FFFFFU) << 10;
5028 + regs->u_regs[UREG_G1] = addr;
5029 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5030 +
5031 + if (test_thread_flag(TIF_32BIT))
5032 + addr &= 0xFFFFFFFFUL;
5033 +
5034 + regs->tpc = addr;
5035 + regs->tnpc = addr+4;
5036 + return 2;
5037 + }
5038 + } while (0);
5039 +
5040 + do { /* PaX: patched PLT emulation #4 */
5041 + unsigned int sethi, mov1, call, mov2;
5042 +
5043 + err = get_user(sethi, (unsigned int *)regs->tpc);
5044 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5045 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5046 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5047 +
5048 + if (err)
5049 + break;
5050 +
5051 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5052 + mov1 == 0x8210000FU &&
5053 + (call & 0xC0000000U) == 0x40000000U &&
5054 + mov2 == 0x9E100001U)
5055 + {
5056 + unsigned long addr;
5057 +
5058 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5059 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5060 +
5061 + if (test_thread_flag(TIF_32BIT))
5062 + addr &= 0xFFFFFFFFUL;
5063 +
5064 + regs->tpc = addr;
5065 + regs->tnpc = addr+4;
5066 + return 2;
5067 + }
5068 + } while (0);
5069 +
5070 + do { /* PaX: patched PLT emulation #5 */
5071 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5072 +
5073 + err = get_user(sethi, (unsigned int *)regs->tpc);
5074 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5075 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5076 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5077 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5078 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5079 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5080 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5081 +
5082 + if (err)
5083 + break;
5084 +
5085 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5086 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5087 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5088 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5089 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5090 + sllx == 0x83287020U &&
5091 + jmpl == 0x81C04005U &&
5092 + nop == 0x01000000U)
5093 + {
5094 + unsigned long addr;
5095 +
5096 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5097 + regs->u_regs[UREG_G1] <<= 32;
5098 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5099 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5100 + regs->tpc = addr;
5101 + regs->tnpc = addr+4;
5102 + return 2;
5103 + }
5104 + } while (0);
5105 +
5106 + do { /* PaX: patched PLT emulation #6 */
5107 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5108 +
5109 + err = get_user(sethi, (unsigned int *)regs->tpc);
5110 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5111 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5112 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5113 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5114 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5115 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5116 +
5117 + if (err)
5118 + break;
5119 +
5120 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5121 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5122 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5123 + sllx == 0x83287020U &&
5124 + (or & 0xFFFFE000U) == 0x8A116000U &&
5125 + jmpl == 0x81C04005U &&
5126 + nop == 0x01000000U)
5127 + {
5128 + unsigned long addr;
5129 +
5130 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5131 + regs->u_regs[UREG_G1] <<= 32;
5132 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5133 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5134 + regs->tpc = addr;
5135 + regs->tnpc = addr+4;
5136 + return 2;
5137 + }
5138 + } while (0);
5139 +
5140 + do { /* PaX: unpatched PLT emulation step 1 */
5141 + unsigned int sethi, ba, nop;
5142 +
5143 + err = get_user(sethi, (unsigned int *)regs->tpc);
5144 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5145 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5146 +
5147 + if (err)
5148 + break;
5149 +
5150 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5151 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5152 + nop == 0x01000000U)
5153 + {
5154 + unsigned long addr;
5155 + unsigned int save, call;
5156 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5157 +
5158 + if ((ba & 0xFFC00000U) == 0x30800000U)
5159 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5160 + else
5161 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5162 +
5163 + if (test_thread_flag(TIF_32BIT))
5164 + addr &= 0xFFFFFFFFUL;
5165 +
5166 + err = get_user(save, (unsigned int *)addr);
5167 + err |= get_user(call, (unsigned int *)(addr+4));
5168 + err |= get_user(nop, (unsigned int *)(addr+8));
5169 + if (err)
5170 + break;
5171 +
5172 +#ifdef CONFIG_PAX_DLRESOLVE
5173 + if (save == 0x9DE3BFA8U &&
5174 + (call & 0xC0000000U) == 0x40000000U &&
5175 + nop == 0x01000000U)
5176 + {
5177 + struct vm_area_struct *vma;
5178 + unsigned long call_dl_resolve;
5179 +
5180 + down_read(&current->mm->mmap_sem);
5181 + call_dl_resolve = current->mm->call_dl_resolve;
5182 + up_read(&current->mm->mmap_sem);
5183 + if (likely(call_dl_resolve))
5184 + goto emulate;
5185 +
5186 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5187 +
5188 + down_write(&current->mm->mmap_sem);
5189 + if (current->mm->call_dl_resolve) {
5190 + call_dl_resolve = current->mm->call_dl_resolve;
5191 + up_write(&current->mm->mmap_sem);
5192 + if (vma)
5193 + kmem_cache_free(vm_area_cachep, vma);
5194 + goto emulate;
5195 + }
5196 +
5197 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5198 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5199 + up_write(&current->mm->mmap_sem);
5200 + if (vma)
5201 + kmem_cache_free(vm_area_cachep, vma);
5202 + return 1;
5203 + }
5204 +
5205 + if (pax_insert_vma(vma, call_dl_resolve)) {
5206 + up_write(&current->mm->mmap_sem);
5207 + kmem_cache_free(vm_area_cachep, vma);
5208 + return 1;
5209 + }
5210 +
5211 + current->mm->call_dl_resolve = call_dl_resolve;
5212 + up_write(&current->mm->mmap_sem);
5213 +
5214 +emulate:
5215 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5216 + regs->tpc = call_dl_resolve;
5217 + regs->tnpc = addr+4;
5218 + return 3;
5219 + }
5220 +#endif
5221 +
5222 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5223 + if ((save & 0xFFC00000U) == 0x05000000U &&
5224 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5225 + nop == 0x01000000U)
5226 + {
5227 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5228 + regs->u_regs[UREG_G2] = addr + 4;
5229 + addr = (save & 0x003FFFFFU) << 10;
5230 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5231 +
5232 + if (test_thread_flag(TIF_32BIT))
5233 + addr &= 0xFFFFFFFFUL;
5234 +
5235 + regs->tpc = addr;
5236 + regs->tnpc = addr+4;
5237 + return 3;
5238 + }
5239 +
5240 + /* PaX: 64-bit PLT stub */
5241 + err = get_user(sethi1, (unsigned int *)addr);
5242 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5243 + err |= get_user(or1, (unsigned int *)(addr+8));
5244 + err |= get_user(or2, (unsigned int *)(addr+12));
5245 + err |= get_user(sllx, (unsigned int *)(addr+16));
5246 + err |= get_user(add, (unsigned int *)(addr+20));
5247 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5248 + err |= get_user(nop, (unsigned int *)(addr+28));
5249 + if (err)
5250 + break;
5251 +
5252 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5253 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5254 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5255 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5256 + sllx == 0x89293020U &&
5257 + add == 0x8A010005U &&
5258 + jmpl == 0x89C14000U &&
5259 + nop == 0x01000000U)
5260 + {
5261 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5262 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5263 + regs->u_regs[UREG_G4] <<= 32;
5264 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5265 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5266 + regs->u_regs[UREG_G4] = addr + 24;
5267 + addr = regs->u_regs[UREG_G5];
5268 + regs->tpc = addr;
5269 + regs->tnpc = addr+4;
5270 + return 3;
5271 + }
5272 + }
5273 + } while (0);
5274 +
5275 +#ifdef CONFIG_PAX_DLRESOLVE
5276 + do { /* PaX: unpatched PLT emulation step 2 */
5277 + unsigned int save, call, nop;
5278 +
5279 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5280 + err |= get_user(call, (unsigned int *)regs->tpc);
5281 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5282 + if (err)
5283 + break;
5284 +
5285 + if (save == 0x9DE3BFA8U &&
5286 + (call & 0xC0000000U) == 0x40000000U &&
5287 + nop == 0x01000000U)
5288 + {
5289 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5290 +
5291 + if (test_thread_flag(TIF_32BIT))
5292 + dl_resolve &= 0xFFFFFFFFUL;
5293 +
5294 + regs->u_regs[UREG_RETPC] = regs->tpc;
5295 + regs->tpc = dl_resolve;
5296 + regs->tnpc = dl_resolve+4;
5297 + return 3;
5298 + }
5299 + } while (0);
5300 +#endif
5301 +
5302 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5303 + unsigned int sethi, ba, nop;
5304 +
5305 + err = get_user(sethi, (unsigned int *)regs->tpc);
5306 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5307 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5308 +
5309 + if (err)
5310 + break;
5311 +
5312 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5313 + (ba & 0xFFF00000U) == 0x30600000U &&
5314 + nop == 0x01000000U)
5315 + {
5316 + unsigned long addr;
5317 +
5318 + addr = (sethi & 0x003FFFFFU) << 10;
5319 + regs->u_regs[UREG_G1] = addr;
5320 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5321 +
5322 + if (test_thread_flag(TIF_32BIT))
5323 + addr &= 0xFFFFFFFFUL;
5324 +
5325 + regs->tpc = addr;
5326 + regs->tnpc = addr+4;
5327 + return 2;
5328 + }
5329 + } while (0);
5330 +
5331 +#endif
5332 +
5333 + return 1;
5334 +}
5335 +
5336 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5337 +{
5338 + unsigned long i;
5339 +
5340 + printk(KERN_ERR "PAX: bytes at PC: ");
5341 + for (i = 0; i < 8; i++) {
5342 + unsigned int c;
5343 + if (get_user(c, (unsigned int *)pc+i))
5344 + printk(KERN_CONT "???????? ");
5345 + else
5346 + printk(KERN_CONT "%08x ", c);
5347 + }
5348 + printk("\n");
5349 +}
5350 +#endif
5351 +
5352 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5353 {
5354 struct mm_struct *mm = current->mm;
5355 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5356 if (!vma)
5357 goto bad_area;
5358
5359 +#ifdef CONFIG_PAX_PAGEEXEC
5360 + /* PaX: detect ITLB misses on non-exec pages */
5361 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5362 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5363 + {
5364 + if (address != regs->tpc)
5365 + goto good_area;
5366 +
5367 + up_read(&mm->mmap_sem);
5368 + switch (pax_handle_fetch_fault(regs)) {
5369 +
5370 +#ifdef CONFIG_PAX_EMUPLT
5371 + case 2:
5372 + case 3:
5373 + return;
5374 +#endif
5375 +
5376 + }
5377 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5378 + do_group_exit(SIGKILL);
5379 + }
5380 +#endif
5381 +
5382 /* Pure DTLB misses do not tell us whether the fault causing
5383 * load/store/atomic was a write or not, it only says that there
5384 * was no match. So in such a case we (carefully) read the
5385 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5386 index f4e9764..5682724 100644
5387 --- a/arch/sparc/mm/hugetlbpage.c
5388 +++ b/arch/sparc/mm/hugetlbpage.c
5389 @@ -68,7 +68,7 @@ full_search:
5390 }
5391 return -ENOMEM;
5392 }
5393 - if (likely(!vma || addr + len <= vma->vm_start)) {
5394 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5395 /*
5396 * Remember the place where we stopped the search:
5397 */
5398 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5399 /* make sure it can fit in the remaining address space */
5400 if (likely(addr > len)) {
5401 vma = find_vma(mm, addr-len);
5402 - if (!vma || addr <= vma->vm_start) {
5403 + if (check_heap_stack_gap(vma, addr - len, len)) {
5404 /* remember the address as a hint for next time */
5405 return (mm->free_area_cache = addr-len);
5406 }
5407 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5408 if (unlikely(mm->mmap_base < len))
5409 goto bottomup;
5410
5411 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5412 + addr = mm->mmap_base - len;
5413
5414 do {
5415 + addr &= HPAGE_MASK;
5416 /*
5417 * Lookup failure means no vma is above this address,
5418 * else if new region fits below vma->vm_start,
5419 * return with success:
5420 */
5421 vma = find_vma(mm, addr);
5422 - if (likely(!vma || addr+len <= vma->vm_start)) {
5423 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5424 /* remember the address as a hint for next time */
5425 return (mm->free_area_cache = addr);
5426 }
5427 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5428 mm->cached_hole_size = vma->vm_start - addr;
5429
5430 /* try just below the current vma->vm_start */
5431 - addr = (vma->vm_start-len) & HPAGE_MASK;
5432 - } while (likely(len < vma->vm_start));
5433 + addr = skip_heap_stack_gap(vma, len);
5434 + } while (!IS_ERR_VALUE(addr));
5435
5436 bottomup:
5437 /*
5438 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5439 if (addr) {
5440 addr = ALIGN(addr, HPAGE_SIZE);
5441 vma = find_vma(mm, addr);
5442 - if (task_size - len >= addr &&
5443 - (!vma || addr + len <= vma->vm_start))
5444 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5445 return addr;
5446 }
5447 if (mm->get_unmapped_area == arch_get_unmapped_area)
5448 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5449 index 7b00de6..78239f4 100644
5450 --- a/arch/sparc/mm/init_32.c
5451 +++ b/arch/sparc/mm/init_32.c
5452 @@ -316,6 +316,9 @@ extern void device_scan(void);
5453 pgprot_t PAGE_SHARED __read_mostly;
5454 EXPORT_SYMBOL(PAGE_SHARED);
5455
5456 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5457 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5458 +
5459 void __init paging_init(void)
5460 {
5461 switch(sparc_cpu_model) {
5462 @@ -344,17 +347,17 @@ void __init paging_init(void)
5463
5464 /* Initialize the protection map with non-constant, MMU dependent values. */
5465 protection_map[0] = PAGE_NONE;
5466 - protection_map[1] = PAGE_READONLY;
5467 - protection_map[2] = PAGE_COPY;
5468 - protection_map[3] = PAGE_COPY;
5469 + protection_map[1] = PAGE_READONLY_NOEXEC;
5470 + protection_map[2] = PAGE_COPY_NOEXEC;
5471 + protection_map[3] = PAGE_COPY_NOEXEC;
5472 protection_map[4] = PAGE_READONLY;
5473 protection_map[5] = PAGE_READONLY;
5474 protection_map[6] = PAGE_COPY;
5475 protection_map[7] = PAGE_COPY;
5476 protection_map[8] = PAGE_NONE;
5477 - protection_map[9] = PAGE_READONLY;
5478 - protection_map[10] = PAGE_SHARED;
5479 - protection_map[11] = PAGE_SHARED;
5480 + protection_map[9] = PAGE_READONLY_NOEXEC;
5481 + protection_map[10] = PAGE_SHARED_NOEXEC;
5482 + protection_map[11] = PAGE_SHARED_NOEXEC;
5483 protection_map[12] = PAGE_READONLY;
5484 protection_map[13] = PAGE_READONLY;
5485 protection_map[14] = PAGE_SHARED;
5486 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5487 index cbef74e..c38fead 100644
5488 --- a/arch/sparc/mm/srmmu.c
5489 +++ b/arch/sparc/mm/srmmu.c
5490 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5491 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5492 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5493 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5494 +
5495 +#ifdef CONFIG_PAX_PAGEEXEC
5496 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5497 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5498 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5499 +#endif
5500 +
5501 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5502 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5503
5504 diff --git a/arch/um/Makefile b/arch/um/Makefile
5505 index c0f712c..3a5c4c9 100644
5506 --- a/arch/um/Makefile
5507 +++ b/arch/um/Makefile
5508 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5509 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5510 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5511
5512 +ifdef CONSTIFY_PLUGIN
5513 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5514 +endif
5515 +
5516 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5517
5518 #This will adjust *FLAGS accordingly to the platform.
5519 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5520 index 6c03acd..a5e0215 100644
5521 --- a/arch/um/include/asm/kmap_types.h
5522 +++ b/arch/um/include/asm/kmap_types.h
5523 @@ -23,6 +23,7 @@ enum km_type {
5524 KM_IRQ1,
5525 KM_SOFTIRQ0,
5526 KM_SOFTIRQ1,
5527 + KM_CLEARPAGE,
5528 KM_TYPE_NR
5529 };
5530
5531 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5532 index 4cc9b6c..02e5029 100644
5533 --- a/arch/um/include/asm/page.h
5534 +++ b/arch/um/include/asm/page.h
5535 @@ -14,6 +14,9 @@
5536 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5537 #define PAGE_MASK (~(PAGE_SIZE-1))
5538
5539 +#define ktla_ktva(addr) (addr)
5540 +#define ktva_ktla(addr) (addr)
5541 +
5542 #ifndef __ASSEMBLY__
5543
5544 struct page;
5545 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5546 index 21c1ae7..4640aaa 100644
5547 --- a/arch/um/kernel/process.c
5548 +++ b/arch/um/kernel/process.c
5549 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5550 return 2;
5551 }
5552
5553 -/*
5554 - * Only x86 and x86_64 have an arch_align_stack().
5555 - * All other arches have "#define arch_align_stack(x) (x)"
5556 - * in their asm/system.h
5557 - * As this is included in UML from asm-um/system-generic.h,
5558 - * we can use it to behave as the subarch does.
5559 - */
5560 -#ifndef arch_align_stack
5561 -unsigned long arch_align_stack(unsigned long sp)
5562 -{
5563 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5564 - sp -= get_random_int() % 8192;
5565 - return sp & ~0xf;
5566 -}
5567 -#endif
5568 -
5569 unsigned long get_wchan(struct task_struct *p)
5570 {
5571 unsigned long stack_page, sp, ip;
5572 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
5573 index d1b93c4..ae1b7fd 100644
5574 --- a/arch/um/sys-i386/shared/sysdep/system.h
5575 +++ b/arch/um/sys-i386/shared/sysdep/system.h
5576 @@ -17,7 +17,7 @@
5577 # define AT_VECTOR_SIZE_ARCH 1
5578 #endif
5579
5580 -extern unsigned long arch_align_stack(unsigned long sp);
5581 +#define arch_align_stack(x) ((x) & ~0xfUL)
5582
5583 void default_idle(void);
5584
5585 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
5586 index 70ca357..728d1cc 100644
5587 --- a/arch/um/sys-i386/syscalls.c
5588 +++ b/arch/um/sys-i386/syscalls.c
5589 @@ -11,6 +11,21 @@
5590 #include "asm/uaccess.h"
5591 #include "asm/unistd.h"
5592
5593 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5594 +{
5595 + unsigned long pax_task_size = TASK_SIZE;
5596 +
5597 +#ifdef CONFIG_PAX_SEGMEXEC
5598 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5599 + pax_task_size = SEGMEXEC_TASK_SIZE;
5600 +#endif
5601 +
5602 + if (len > pax_task_size || addr > pax_task_size - len)
5603 + return -EINVAL;
5604 +
5605 + return 0;
5606 +}
5607 +
5608 /*
5609 * The prototype on i386 is:
5610 *
5611 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
5612 index d1b93c4..ae1b7fd 100644
5613 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
5614 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
5615 @@ -17,7 +17,7 @@
5616 # define AT_VECTOR_SIZE_ARCH 1
5617 #endif
5618
5619 -extern unsigned long arch_align_stack(unsigned long sp);
5620 +#define arch_align_stack(x) ((x) & ~0xfUL)
5621
5622 void default_idle(void);
5623
5624 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5625 index 6a47bb2..dc9a868 100644
5626 --- a/arch/x86/Kconfig
5627 +++ b/arch/x86/Kconfig
5628 @@ -236,7 +236,7 @@ config X86_HT
5629
5630 config X86_32_LAZY_GS
5631 def_bool y
5632 - depends on X86_32 && !CC_STACKPROTECTOR
5633 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5634
5635 config ARCH_HWEIGHT_CFLAGS
5636 string
5637 @@ -1019,7 +1019,7 @@ choice
5638
5639 config NOHIGHMEM
5640 bool "off"
5641 - depends on !X86_NUMAQ
5642 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5643 ---help---
5644 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5645 However, the address space of 32-bit x86 processors is only 4
5646 @@ -1056,7 +1056,7 @@ config NOHIGHMEM
5647
5648 config HIGHMEM4G
5649 bool "4GB"
5650 - depends on !X86_NUMAQ
5651 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5652 ---help---
5653 Select this if you have a 32-bit processor and between 1 and 4
5654 gigabytes of physical RAM.
5655 @@ -1110,7 +1110,7 @@ config PAGE_OFFSET
5656 hex
5657 default 0xB0000000 if VMSPLIT_3G_OPT
5658 default 0x80000000 if VMSPLIT_2G
5659 - default 0x78000000 if VMSPLIT_2G_OPT
5660 + default 0x70000000 if VMSPLIT_2G_OPT
5661 default 0x40000000 if VMSPLIT_1G
5662 default 0xC0000000
5663 depends on X86_32
5664 @@ -1484,6 +1484,7 @@ config SECCOMP
5665
5666 config CC_STACKPROTECTOR
5667 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5668 + depends on X86_64 || !PAX_MEMORY_UDEREF
5669 ---help---
5670 This option turns on the -fstack-protector GCC feature. This
5671 feature puts, at the beginning of functions, a canary value on
5672 @@ -1541,6 +1542,7 @@ config KEXEC_JUMP
5673 config PHYSICAL_START
5674 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5675 default "0x1000000"
5676 + range 0x400000 0x40000000
5677 ---help---
5678 This gives the physical address where the kernel is loaded.
5679
5680 @@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
5681 config PHYSICAL_ALIGN
5682 hex "Alignment value to which kernel should be aligned" if X86_32
5683 default "0x1000000"
5684 + range 0x400000 0x1000000 if PAX_KERNEXEC
5685 range 0x2000 0x1000000
5686 ---help---
5687 This value puts the alignment restrictions on physical address
5688 @@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
5689 Say N if you want to disable CPU hotplug.
5690
5691 config COMPAT_VDSO
5692 - def_bool y
5693 + def_bool n
5694 prompt "Compat VDSO support"
5695 depends on X86_32 || IA32_EMULATION
5696 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5697 ---help---
5698 Map the 32-bit VDSO to the predictable old-style address too.
5699
5700 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5701 index e3ca7e0..b30b28a 100644
5702 --- a/arch/x86/Kconfig.cpu
5703 +++ b/arch/x86/Kconfig.cpu
5704 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5705
5706 config X86_F00F_BUG
5707 def_bool y
5708 - depends on M586MMX || M586TSC || M586 || M486 || M386
5709 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5710
5711 config X86_INVD_BUG
5712 def_bool y
5713 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5714
5715 config X86_ALIGNMENT_16
5716 def_bool y
5717 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5718 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5719
5720 config X86_INTEL_USERCOPY
5721 def_bool y
5722 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5723 # generates cmov.
5724 config X86_CMOV
5725 def_bool y
5726 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5727 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5728
5729 config X86_MINIMUM_CPU_FAMILY
5730 int
5731 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5732 index c0f8a5c..6404f61 100644
5733 --- a/arch/x86/Kconfig.debug
5734 +++ b/arch/x86/Kconfig.debug
5735 @@ -81,7 +81,7 @@ config X86_PTDUMP
5736 config DEBUG_RODATA
5737 bool "Write protect kernel read-only data structures"
5738 default y
5739 - depends on DEBUG_KERNEL
5740 + depends on DEBUG_KERNEL && BROKEN
5741 ---help---
5742 Mark the kernel read-only data as write-protected in the pagetables,
5743 in order to catch accidental (and incorrect) writes to such const
5744 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5745
5746 config DEBUG_SET_MODULE_RONX
5747 bool "Set loadable kernel module data as NX and text as RO"
5748 - depends on MODULES
5749 + depends on MODULES && BROKEN
5750 ---help---
5751 This option helps catch unintended modifications to loadable
5752 kernel module's text and read-only data. It also prevents execution
5753 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5754 index b02e509..2631e48 100644
5755 --- a/arch/x86/Makefile
5756 +++ b/arch/x86/Makefile
5757 @@ -46,6 +46,7 @@ else
5758 UTS_MACHINE := x86_64
5759 CHECKFLAGS += -D__x86_64__ -m64
5760
5761 + biarch := $(call cc-option,-m64)
5762 KBUILD_AFLAGS += -m64
5763 KBUILD_CFLAGS += -m64
5764
5765 @@ -195,3 +196,12 @@ define archhelp
5766 echo ' FDARGS="..." arguments for the booted kernel'
5767 echo ' FDINITRD=file initrd for the booted kernel'
5768 endef
5769 +
5770 +define OLD_LD
5771 +
5772 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5773 +*** Please upgrade your binutils to 2.18 or newer
5774 +endef
5775 +
5776 +archprepare:
5777 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5778 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5779 index 95365a8..52f857b 100644
5780 --- a/arch/x86/boot/Makefile
5781 +++ b/arch/x86/boot/Makefile
5782 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5783 $(call cc-option, -fno-stack-protector) \
5784 $(call cc-option, -mpreferred-stack-boundary=2)
5785 KBUILD_CFLAGS += $(call cc-option, -m32)
5786 +ifdef CONSTIFY_PLUGIN
5787 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5788 +endif
5789 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5790 GCOV_PROFILE := n
5791
5792 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5793 index 878e4b9..20537ab 100644
5794 --- a/arch/x86/boot/bitops.h
5795 +++ b/arch/x86/boot/bitops.h
5796 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5797 u8 v;
5798 const u32 *p = (const u32 *)addr;
5799
5800 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5801 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5802 return v;
5803 }
5804
5805 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5806
5807 static inline void set_bit(int nr, void *addr)
5808 {
5809 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5810 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5811 }
5812
5813 #endif /* BOOT_BITOPS_H */
5814 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5815 index c7093bd..d4247ffe0 100644
5816 --- a/arch/x86/boot/boot.h
5817 +++ b/arch/x86/boot/boot.h
5818 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5819 static inline u16 ds(void)
5820 {
5821 u16 seg;
5822 - asm("movw %%ds,%0" : "=rm" (seg));
5823 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5824 return seg;
5825 }
5826
5827 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5828 static inline int memcmp(const void *s1, const void *s2, size_t len)
5829 {
5830 u8 diff;
5831 - asm("repe; cmpsb; setnz %0"
5832 + asm volatile("repe; cmpsb; setnz %0"
5833 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5834 return diff;
5835 }
5836 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5837 index 09664ef..edc5d03 100644
5838 --- a/arch/x86/boot/compressed/Makefile
5839 +++ b/arch/x86/boot/compressed/Makefile
5840 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5841 KBUILD_CFLAGS += $(cflags-y)
5842 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5843 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5844 +ifdef CONSTIFY_PLUGIN
5845 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5846 +endif
5847
5848 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5849 GCOV_PROFILE := n
5850 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5851 index 67a655a..b924059 100644
5852 --- a/arch/x86/boot/compressed/head_32.S
5853 +++ b/arch/x86/boot/compressed/head_32.S
5854 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5855 notl %eax
5856 andl %eax, %ebx
5857 #else
5858 - movl $LOAD_PHYSICAL_ADDR, %ebx
5859 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5860 #endif
5861
5862 /* Target address to relocate to for decompression */
5863 @@ -162,7 +162,7 @@ relocated:
5864 * and where it was actually loaded.
5865 */
5866 movl %ebp, %ebx
5867 - subl $LOAD_PHYSICAL_ADDR, %ebx
5868 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5869 jz 2f /* Nothing to be done if loaded at compiled addr. */
5870 /*
5871 * Process relocations.
5872 @@ -170,8 +170,7 @@ relocated:
5873
5874 1: subl $4, %edi
5875 movl (%edi), %ecx
5876 - testl %ecx, %ecx
5877 - jz 2f
5878 + jecxz 2f
5879 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5880 jmp 1b
5881 2:
5882 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5883 index 35af09d..99c9676 100644
5884 --- a/arch/x86/boot/compressed/head_64.S
5885 +++ b/arch/x86/boot/compressed/head_64.S
5886 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5887 notl %eax
5888 andl %eax, %ebx
5889 #else
5890 - movl $LOAD_PHYSICAL_ADDR, %ebx
5891 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5892 #endif
5893
5894 /* Target address to relocate to for decompression */
5895 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5896 notq %rax
5897 andq %rax, %rbp
5898 #else
5899 - movq $LOAD_PHYSICAL_ADDR, %rbp
5900 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5901 #endif
5902
5903 /* Target address to relocate to for decompression */
5904 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5905 index 3a19d04..7c1d55a 100644
5906 --- a/arch/x86/boot/compressed/misc.c
5907 +++ b/arch/x86/boot/compressed/misc.c
5908 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5909 case PT_LOAD:
5910 #ifdef CONFIG_RELOCATABLE
5911 dest = output;
5912 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5913 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5914 #else
5915 dest = (void *)(phdr->p_paddr);
5916 #endif
5917 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5918 error("Destination address too large");
5919 #endif
5920 #ifndef CONFIG_RELOCATABLE
5921 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5922 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5923 error("Wrong destination address");
5924 #endif
5925
5926 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5927 index 89bbf4e..869908e 100644
5928 --- a/arch/x86/boot/compressed/relocs.c
5929 +++ b/arch/x86/boot/compressed/relocs.c
5930 @@ -13,8 +13,11 @@
5931
5932 static void die(char *fmt, ...);
5933
5934 +#include "../../../../include/generated/autoconf.h"
5935 +
5936 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5937 static Elf32_Ehdr ehdr;
5938 +static Elf32_Phdr *phdr;
5939 static unsigned long reloc_count, reloc_idx;
5940 static unsigned long *relocs;
5941
5942 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5943 }
5944 }
5945
5946 +static void read_phdrs(FILE *fp)
5947 +{
5948 + unsigned int i;
5949 +
5950 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5951 + if (!phdr) {
5952 + die("Unable to allocate %d program headers\n",
5953 + ehdr.e_phnum);
5954 + }
5955 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5956 + die("Seek to %d failed: %s\n",
5957 + ehdr.e_phoff, strerror(errno));
5958 + }
5959 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5960 + die("Cannot read ELF program headers: %s\n",
5961 + strerror(errno));
5962 + }
5963 + for(i = 0; i < ehdr.e_phnum; i++) {
5964 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5965 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5966 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5967 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5968 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5969 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5970 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5971 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5972 + }
5973 +
5974 +}
5975 +
5976 static void read_shdrs(FILE *fp)
5977 {
5978 - int i;
5979 + unsigned int i;
5980 Elf32_Shdr shdr;
5981
5982 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5983 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5984
5985 static void read_strtabs(FILE *fp)
5986 {
5987 - int i;
5988 + unsigned int i;
5989 for (i = 0; i < ehdr.e_shnum; i++) {
5990 struct section *sec = &secs[i];
5991 if (sec->shdr.sh_type != SHT_STRTAB) {
5992 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5993
5994 static void read_symtabs(FILE *fp)
5995 {
5996 - int i,j;
5997 + unsigned int i,j;
5998 for (i = 0; i < ehdr.e_shnum; i++) {
5999 struct section *sec = &secs[i];
6000 if (sec->shdr.sh_type != SHT_SYMTAB) {
6001 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
6002
6003 static void read_relocs(FILE *fp)
6004 {
6005 - int i,j;
6006 + unsigned int i,j;
6007 + uint32_t base;
6008 +
6009 for (i = 0; i < ehdr.e_shnum; i++) {
6010 struct section *sec = &secs[i];
6011 if (sec->shdr.sh_type != SHT_REL) {
6012 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6013 die("Cannot read symbol table: %s\n",
6014 strerror(errno));
6015 }
6016 + base = 0;
6017 + for (j = 0; j < ehdr.e_phnum; j++) {
6018 + if (phdr[j].p_type != PT_LOAD )
6019 + continue;
6020 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6021 + continue;
6022 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6023 + break;
6024 + }
6025 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6026 Elf32_Rel *rel = &sec->reltab[j];
6027 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6028 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6029 rel->r_info = elf32_to_cpu(rel->r_info);
6030 }
6031 }
6032 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6033
6034 static void print_absolute_symbols(void)
6035 {
6036 - int i;
6037 + unsigned int i;
6038 printf("Absolute symbols\n");
6039 printf(" Num: Value Size Type Bind Visibility Name\n");
6040 for (i = 0; i < ehdr.e_shnum; i++) {
6041 struct section *sec = &secs[i];
6042 char *sym_strtab;
6043 Elf32_Sym *sh_symtab;
6044 - int j;
6045 + unsigned int j;
6046
6047 if (sec->shdr.sh_type != SHT_SYMTAB) {
6048 continue;
6049 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6050
6051 static void print_absolute_relocs(void)
6052 {
6053 - int i, printed = 0;
6054 + unsigned int i, printed = 0;
6055
6056 for (i = 0; i < ehdr.e_shnum; i++) {
6057 struct section *sec = &secs[i];
6058 struct section *sec_applies, *sec_symtab;
6059 char *sym_strtab;
6060 Elf32_Sym *sh_symtab;
6061 - int j;
6062 + unsigned int j;
6063 if (sec->shdr.sh_type != SHT_REL) {
6064 continue;
6065 }
6066 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6067
6068 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6069 {
6070 - int i;
6071 + unsigned int i;
6072 /* Walk through the relocations */
6073 for (i = 0; i < ehdr.e_shnum; i++) {
6074 char *sym_strtab;
6075 Elf32_Sym *sh_symtab;
6076 struct section *sec_applies, *sec_symtab;
6077 - int j;
6078 + unsigned int j;
6079 struct section *sec = &secs[i];
6080
6081 if (sec->shdr.sh_type != SHT_REL) {
6082 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6083 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6084 continue;
6085 }
6086 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6087 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6088 + continue;
6089 +
6090 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6091 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6092 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6093 + continue;
6094 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6095 + continue;
6096 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6097 + continue;
6098 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6099 + continue;
6100 +#endif
6101 +
6102 switch (r_type) {
6103 case R_386_NONE:
6104 case R_386_PC32:
6105 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6106
6107 static void emit_relocs(int as_text)
6108 {
6109 - int i;
6110 + unsigned int i;
6111 /* Count how many relocations I have and allocate space for them. */
6112 reloc_count = 0;
6113 walk_relocs(count_reloc);
6114 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6115 fname, strerror(errno));
6116 }
6117 read_ehdr(fp);
6118 + read_phdrs(fp);
6119 read_shdrs(fp);
6120 read_strtabs(fp);
6121 read_symtabs(fp);
6122 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6123 index 4d3ff03..e4972ff 100644
6124 --- a/arch/x86/boot/cpucheck.c
6125 +++ b/arch/x86/boot/cpucheck.c
6126 @@ -74,7 +74,7 @@ static int has_fpu(void)
6127 u16 fcw = -1, fsw = -1;
6128 u32 cr0;
6129
6130 - asm("movl %%cr0,%0" : "=r" (cr0));
6131 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6132 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6133 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6134 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6135 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6136 {
6137 u32 f0, f1;
6138
6139 - asm("pushfl ; "
6140 + asm volatile("pushfl ; "
6141 "pushfl ; "
6142 "popl %0 ; "
6143 "movl %0,%1 ; "
6144 @@ -115,7 +115,7 @@ static void get_flags(void)
6145 set_bit(X86_FEATURE_FPU, cpu.flags);
6146
6147 if (has_eflag(X86_EFLAGS_ID)) {
6148 - asm("cpuid"
6149 + asm volatile("cpuid"
6150 : "=a" (max_intel_level),
6151 "=b" (cpu_vendor[0]),
6152 "=d" (cpu_vendor[1]),
6153 @@ -124,7 +124,7 @@ static void get_flags(void)
6154
6155 if (max_intel_level >= 0x00000001 &&
6156 max_intel_level <= 0x0000ffff) {
6157 - asm("cpuid"
6158 + asm volatile("cpuid"
6159 : "=a" (tfms),
6160 "=c" (cpu.flags[4]),
6161 "=d" (cpu.flags[0])
6162 @@ -136,7 +136,7 @@ static void get_flags(void)
6163 cpu.model += ((tfms >> 16) & 0xf) << 4;
6164 }
6165
6166 - asm("cpuid"
6167 + asm volatile("cpuid"
6168 : "=a" (max_amd_level)
6169 : "a" (0x80000000)
6170 : "ebx", "ecx", "edx");
6171 @@ -144,7 +144,7 @@ static void get_flags(void)
6172 if (max_amd_level >= 0x80000001 &&
6173 max_amd_level <= 0x8000ffff) {
6174 u32 eax = 0x80000001;
6175 - asm("cpuid"
6176 + asm volatile("cpuid"
6177 : "+a" (eax),
6178 "=c" (cpu.flags[6]),
6179 "=d" (cpu.flags[1])
6180 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6181 u32 ecx = MSR_K7_HWCR;
6182 u32 eax, edx;
6183
6184 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6185 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6186 eax &= ~(1 << 15);
6187 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6188 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6189
6190 get_flags(); /* Make sure it really did something */
6191 err = check_flags();
6192 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6193 u32 ecx = MSR_VIA_FCR;
6194 u32 eax, edx;
6195
6196 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6197 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6198 eax |= (1<<1)|(1<<7);
6199 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6200 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6201
6202 set_bit(X86_FEATURE_CX8, cpu.flags);
6203 err = check_flags();
6204 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6205 u32 eax, edx;
6206 u32 level = 1;
6207
6208 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6209 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6210 - asm("cpuid"
6211 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6212 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6213 + asm volatile("cpuid"
6214 : "+a" (level), "=d" (cpu.flags[0])
6215 : : "ecx", "ebx");
6216 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6217 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6218
6219 err = check_flags();
6220 }
6221 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6222 index 93e689f..504ba09 100644
6223 --- a/arch/x86/boot/header.S
6224 +++ b/arch/x86/boot/header.S
6225 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6226 # single linked list of
6227 # struct setup_data
6228
6229 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6230 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6231
6232 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6233 #define VO_INIT_SIZE (VO__end - VO__text)
6234 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6235 index db75d07..8e6d0af 100644
6236 --- a/arch/x86/boot/memory.c
6237 +++ b/arch/x86/boot/memory.c
6238 @@ -19,7 +19,7 @@
6239
6240 static int detect_memory_e820(void)
6241 {
6242 - int count = 0;
6243 + unsigned int count = 0;
6244 struct biosregs ireg, oreg;
6245 struct e820entry *desc = boot_params.e820_map;
6246 static struct e820entry buf; /* static so it is zeroed */
6247 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6248 index 11e8c6e..fdbb1ed 100644
6249 --- a/arch/x86/boot/video-vesa.c
6250 +++ b/arch/x86/boot/video-vesa.c
6251 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6252
6253 boot_params.screen_info.vesapm_seg = oreg.es;
6254 boot_params.screen_info.vesapm_off = oreg.di;
6255 + boot_params.screen_info.vesapm_size = oreg.cx;
6256 }
6257
6258 /*
6259 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6260 index 43eda28..5ab5fdb 100644
6261 --- a/arch/x86/boot/video.c
6262 +++ b/arch/x86/boot/video.c
6263 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6264 static unsigned int get_entry(void)
6265 {
6266 char entry_buf[4];
6267 - int i, len = 0;
6268 + unsigned int i, len = 0;
6269 int key;
6270 unsigned int v;
6271
6272 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6273 index 5b577d5..3c1fed4 100644
6274 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6275 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6276 @@ -8,6 +8,8 @@
6277 * including this sentence is retained in full.
6278 */
6279
6280 +#include <asm/alternative-asm.h>
6281 +
6282 .extern crypto_ft_tab
6283 .extern crypto_it_tab
6284 .extern crypto_fl_tab
6285 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6286 je B192; \
6287 leaq 32(r9),r9;
6288
6289 +#define ret pax_force_retaddr 0, 1; ret
6290 +
6291 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6292 movq r1,r2; \
6293 movq r3,r4; \
6294 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6295 index be6d9e3..21fbbca 100644
6296 --- a/arch/x86/crypto/aesni-intel_asm.S
6297 +++ b/arch/x86/crypto/aesni-intel_asm.S
6298 @@ -31,6 +31,7 @@
6299
6300 #include <linux/linkage.h>
6301 #include <asm/inst.h>
6302 +#include <asm/alternative-asm.h>
6303
6304 #ifdef __x86_64__
6305 .data
6306 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6307 pop %r14
6308 pop %r13
6309 pop %r12
6310 + pax_force_retaddr 0, 1
6311 ret
6312 +ENDPROC(aesni_gcm_dec)
6313
6314
6315 /*****************************************************************************
6316 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6317 pop %r14
6318 pop %r13
6319 pop %r12
6320 + pax_force_retaddr 0, 1
6321 ret
6322 +ENDPROC(aesni_gcm_enc)
6323
6324 #endif
6325
6326 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6327 pxor %xmm1, %xmm0
6328 movaps %xmm0, (TKEYP)
6329 add $0x10, TKEYP
6330 + pax_force_retaddr_bts
6331 ret
6332
6333 .align 4
6334 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6335 shufps $0b01001110, %xmm2, %xmm1
6336 movaps %xmm1, 0x10(TKEYP)
6337 add $0x20, TKEYP
6338 + pax_force_retaddr_bts
6339 ret
6340
6341 .align 4
6342 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6343
6344 movaps %xmm0, (TKEYP)
6345 add $0x10, TKEYP
6346 + pax_force_retaddr_bts
6347 ret
6348
6349 .align 4
6350 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6351 pxor %xmm1, %xmm2
6352 movaps %xmm2, (TKEYP)
6353 add $0x10, TKEYP
6354 + pax_force_retaddr_bts
6355 ret
6356
6357 /*
6358 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6359 #ifndef __x86_64__
6360 popl KEYP
6361 #endif
6362 + pax_force_retaddr 0, 1
6363 ret
6364 +ENDPROC(aesni_set_key)
6365
6366 /*
6367 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6368 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6369 popl KLEN
6370 popl KEYP
6371 #endif
6372 + pax_force_retaddr 0, 1
6373 ret
6374 +ENDPROC(aesni_enc)
6375
6376 /*
6377 * _aesni_enc1: internal ABI
6378 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6379 AESENC KEY STATE
6380 movaps 0x70(TKEYP), KEY
6381 AESENCLAST KEY STATE
6382 + pax_force_retaddr_bts
6383 ret
6384
6385 /*
6386 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6387 AESENCLAST KEY STATE2
6388 AESENCLAST KEY STATE3
6389 AESENCLAST KEY STATE4
6390 + pax_force_retaddr_bts
6391 ret
6392
6393 /*
6394 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6395 popl KLEN
6396 popl KEYP
6397 #endif
6398 + pax_force_retaddr 0, 1
6399 ret
6400 +ENDPROC(aesni_dec)
6401
6402 /*
6403 * _aesni_dec1: internal ABI
6404 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6405 AESDEC KEY STATE
6406 movaps 0x70(TKEYP), KEY
6407 AESDECLAST KEY STATE
6408 + pax_force_retaddr_bts
6409 ret
6410
6411 /*
6412 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6413 AESDECLAST KEY STATE2
6414 AESDECLAST KEY STATE3
6415 AESDECLAST KEY STATE4
6416 + pax_force_retaddr_bts
6417 ret
6418
6419 /*
6420 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6421 popl KEYP
6422 popl LEN
6423 #endif
6424 + pax_force_retaddr 0, 1
6425 ret
6426 +ENDPROC(aesni_ecb_enc)
6427
6428 /*
6429 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6430 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6431 popl KEYP
6432 popl LEN
6433 #endif
6434 + pax_force_retaddr 0, 1
6435 ret
6436 +ENDPROC(aesni_ecb_dec)
6437
6438 /*
6439 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6440 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6441 popl LEN
6442 popl IVP
6443 #endif
6444 + pax_force_retaddr 0, 1
6445 ret
6446 +ENDPROC(aesni_cbc_enc)
6447
6448 /*
6449 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6450 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6451 popl LEN
6452 popl IVP
6453 #endif
6454 + pax_force_retaddr 0, 1
6455 ret
6456 +ENDPROC(aesni_cbc_dec)
6457
6458 #ifdef __x86_64__
6459 .align 16
6460 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6461 mov $1, TCTR_LOW
6462 MOVQ_R64_XMM TCTR_LOW INC
6463 MOVQ_R64_XMM CTR TCTR_LOW
6464 + pax_force_retaddr_bts
6465 ret
6466
6467 /*
6468 @@ -2552,6 +2580,7 @@ _aesni_inc:
6469 .Linc_low:
6470 movaps CTR, IV
6471 PSHUFB_XMM BSWAP_MASK IV
6472 + pax_force_retaddr_bts
6473 ret
6474
6475 /*
6476 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6477 .Lctr_enc_ret:
6478 movups IV, (IVP)
6479 .Lctr_enc_just_ret:
6480 + pax_force_retaddr 0, 1
6481 ret
6482 +ENDPROC(aesni_ctr_enc)
6483 #endif
6484 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6485 index 6214a9b..1f4fc9a 100644
6486 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6487 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6488 @@ -1,3 +1,5 @@
6489 +#include <asm/alternative-asm.h>
6490 +
6491 # enter ECRYPT_encrypt_bytes
6492 .text
6493 .p2align 5
6494 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6495 add %r11,%rsp
6496 mov %rdi,%rax
6497 mov %rsi,%rdx
6498 + pax_force_retaddr 0, 1
6499 ret
6500 # bytesatleast65:
6501 ._bytesatleast65:
6502 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6503 add %r11,%rsp
6504 mov %rdi,%rax
6505 mov %rsi,%rdx
6506 + pax_force_retaddr
6507 ret
6508 # enter ECRYPT_ivsetup
6509 .text
6510 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6511 add %r11,%rsp
6512 mov %rdi,%rax
6513 mov %rsi,%rdx
6514 + pax_force_retaddr
6515 ret
6516 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6517 index 573aa10..b73ad89 100644
6518 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6519 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6520 @@ -21,6 +21,7 @@
6521 .text
6522
6523 #include <asm/asm-offsets.h>
6524 +#include <asm/alternative-asm.h>
6525
6526 #define a_offset 0
6527 #define b_offset 4
6528 @@ -269,6 +270,7 @@ twofish_enc_blk:
6529
6530 popq R1
6531 movq $1,%rax
6532 + pax_force_retaddr 0, 1
6533 ret
6534
6535 twofish_dec_blk:
6536 @@ -321,4 +323,5 @@ twofish_dec_blk:
6537
6538 popq R1
6539 movq $1,%rax
6540 + pax_force_retaddr 0, 1
6541 ret
6542 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6543 index fd84387..0b4af7d 100644
6544 --- a/arch/x86/ia32/ia32_aout.c
6545 +++ b/arch/x86/ia32/ia32_aout.c
6546 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6547 unsigned long dump_start, dump_size;
6548 struct user32 dump;
6549
6550 + memset(&dump, 0, sizeof(dump));
6551 +
6552 fs = get_fs();
6553 set_fs(KERNEL_DS);
6554 has_dumped = 1;
6555 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6556 index 6557769..ef6ae89 100644
6557 --- a/arch/x86/ia32/ia32_signal.c
6558 +++ b/arch/x86/ia32/ia32_signal.c
6559 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6560 }
6561 seg = get_fs();
6562 set_fs(KERNEL_DS);
6563 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6564 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6565 set_fs(seg);
6566 if (ret >= 0 && uoss_ptr) {
6567 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6568 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6569 */
6570 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6571 size_t frame_size,
6572 - void **fpstate)
6573 + void __user **fpstate)
6574 {
6575 unsigned long sp;
6576
6577 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6578
6579 if (used_math()) {
6580 sp = sp - sig_xstate_ia32_size;
6581 - *fpstate = (struct _fpstate_ia32 *) sp;
6582 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6583 if (save_i387_xstate_ia32(*fpstate) < 0)
6584 return (void __user *) -1L;
6585 }
6586 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6587 sp -= frame_size;
6588 /* Align the stack pointer according to the i386 ABI,
6589 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6590 - sp = ((sp + 4) & -16ul) - 4;
6591 + sp = ((sp - 12) & -16ul) - 4;
6592 return (void __user *) sp;
6593 }
6594
6595 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6596 * These are actually not used anymore, but left because some
6597 * gdb versions depend on them as a marker.
6598 */
6599 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6600 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6601 } put_user_catch(err);
6602
6603 if (err)
6604 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6605 0xb8,
6606 __NR_ia32_rt_sigreturn,
6607 0x80cd,
6608 - 0,
6609 + 0
6610 };
6611
6612 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6613 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6614
6615 if (ka->sa.sa_flags & SA_RESTORER)
6616 restorer = ka->sa.sa_restorer;
6617 + else if (current->mm->context.vdso)
6618 + /* Return stub is in 32bit vsyscall page */
6619 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6620 else
6621 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6622 - rt_sigreturn);
6623 + restorer = &frame->retcode;
6624 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6625
6626 /*
6627 * Not actually used anymore, but left because some gdb
6628 * versions need it.
6629 */
6630 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6631 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6632 } put_user_catch(err);
6633
6634 if (err)
6635 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6636 index 54edb207..f5101b9 100644
6637 --- a/arch/x86/ia32/ia32entry.S
6638 +++ b/arch/x86/ia32/ia32entry.S
6639 @@ -13,7 +13,9 @@
6640 #include <asm/thread_info.h>
6641 #include <asm/segment.h>
6642 #include <asm/irqflags.h>
6643 +#include <asm/pgtable.h>
6644 #include <linux/linkage.h>
6645 +#include <asm/alternative-asm.h>
6646
6647 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6648 #include <linux/elf-em.h>
6649 @@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
6650 ENDPROC(native_irq_enable_sysexit)
6651 #endif
6652
6653 + .macro pax_enter_kernel_user
6654 + pax_set_fptr_mask
6655 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6656 + call pax_enter_kernel_user
6657 +#endif
6658 + .endm
6659 +
6660 + .macro pax_exit_kernel_user
6661 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6662 + call pax_exit_kernel_user
6663 +#endif
6664 +#ifdef CONFIG_PAX_RANDKSTACK
6665 + pushq %rax
6666 + pushq %r11
6667 + call pax_randomize_kstack
6668 + popq %r11
6669 + popq %rax
6670 +#endif
6671 + .endm
6672 +
6673 +.macro pax_erase_kstack
6674 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6675 + call pax_erase_kstack
6676 +#endif
6677 +.endm
6678 +
6679 /*
6680 * 32bit SYSENTER instruction entry.
6681 *
6682 @@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
6683 CFI_REGISTER rsp,rbp
6684 SWAPGS_UNSAFE_STACK
6685 movq PER_CPU_VAR(kernel_stack), %rsp
6686 - addq $(KERNEL_STACK_OFFSET),%rsp
6687 - /*
6688 - * No need to follow this irqs on/off section: the syscall
6689 - * disabled irqs, here we enable it straight after entry:
6690 - */
6691 - ENABLE_INTERRUPTS(CLBR_NONE)
6692 movl %ebp,%ebp /* zero extension */
6693 pushq_cfi $__USER32_DS
6694 /*CFI_REL_OFFSET ss,0*/
6695 @@ -134,25 +156,38 @@ ENTRY(ia32_sysenter_target)
6696 CFI_REL_OFFSET rsp,0
6697 pushfq_cfi
6698 /*CFI_REL_OFFSET rflags,0*/
6699 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6700 - CFI_REGISTER rip,r10
6701 + GET_THREAD_INFO(%r11)
6702 + movl TI_sysenter_return(%r11), %r11d
6703 + CFI_REGISTER rip,r11
6704 pushq_cfi $__USER32_CS
6705 /*CFI_REL_OFFSET cs,0*/
6706 movl %eax, %eax
6707 - pushq_cfi %r10
6708 + pushq_cfi %r11
6709 CFI_REL_OFFSET rip,0
6710 pushq_cfi %rax
6711 cld
6712 SAVE_ARGS 0,1,0
6713 + pax_enter_kernel_user
6714 + /*
6715 + * No need to follow this irqs on/off section: the syscall
6716 + * disabled irqs, here we enable it straight after entry:
6717 + */
6718 + ENABLE_INTERRUPTS(CLBR_NONE)
6719 /* no need to do an access_ok check here because rbp has been
6720 32bit zero extended */
6721 +
6722 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6723 + mov $PAX_USER_SHADOW_BASE,%r11
6724 + add %r11,%rbp
6725 +#endif
6726 +
6727 1: movl (%rbp),%ebp
6728 .section __ex_table,"a"
6729 .quad 1b,ia32_badarg
6730 .previous
6731 - GET_THREAD_INFO(%r10)
6732 - orl $TS_COMPAT,TI_status(%r10)
6733 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6734 + GET_THREAD_INFO(%r11)
6735 + orl $TS_COMPAT,TI_status(%r11)
6736 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6737 CFI_REMEMBER_STATE
6738 jnz sysenter_tracesys
6739 cmpq $(IA32_NR_syscalls-1),%rax
6740 @@ -162,13 +197,15 @@ sysenter_do_call:
6741 sysenter_dispatch:
6742 call *ia32_sys_call_table(,%rax,8)
6743 movq %rax,RAX-ARGOFFSET(%rsp)
6744 - GET_THREAD_INFO(%r10)
6745 + GET_THREAD_INFO(%r11)
6746 DISABLE_INTERRUPTS(CLBR_NONE)
6747 TRACE_IRQS_OFF
6748 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6749 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6750 jnz sysexit_audit
6751 sysexit_from_sys_call:
6752 - andl $~TS_COMPAT,TI_status(%r10)
6753 + pax_exit_kernel_user
6754 + pax_erase_kstack
6755 + andl $~TS_COMPAT,TI_status(%r11)
6756 /* clear IF, that popfq doesn't enable interrupts early */
6757 andl $~0x200,EFLAGS-R11(%rsp)
6758 movl RIP-R11(%rsp),%edx /* User %eip */
6759 @@ -194,6 +231,9 @@ sysexit_from_sys_call:
6760 movl %eax,%esi /* 2nd arg: syscall number */
6761 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6762 call audit_syscall_entry
6763 +
6764 + pax_erase_kstack
6765 +
6766 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6767 cmpq $(IA32_NR_syscalls-1),%rax
6768 ja ia32_badsys
6769 @@ -205,7 +245,7 @@ sysexit_from_sys_call:
6770 .endm
6771
6772 .macro auditsys_exit exit
6773 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6774 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6775 jnz ia32_ret_from_sys_call
6776 TRACE_IRQS_ON
6777 sti
6778 @@ -215,12 +255,12 @@ sysexit_from_sys_call:
6779 movzbl %al,%edi /* zero-extend that into %edi */
6780 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6781 call audit_syscall_exit
6782 - GET_THREAD_INFO(%r10)
6783 + GET_THREAD_INFO(%r11)
6784 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6785 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6786 cli
6787 TRACE_IRQS_OFF
6788 - testl %edi,TI_flags(%r10)
6789 + testl %edi,TI_flags(%r11)
6790 jz \exit
6791 CLEAR_RREGS -ARGOFFSET
6792 jmp int_with_check
6793 @@ -238,7 +278,7 @@ sysexit_audit:
6794
6795 sysenter_tracesys:
6796 #ifdef CONFIG_AUDITSYSCALL
6797 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6798 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6799 jz sysenter_auditsys
6800 #endif
6801 SAVE_REST
6802 @@ -246,6 +286,9 @@ sysenter_tracesys:
6803 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6804 movq %rsp,%rdi /* &pt_regs -> arg1 */
6805 call syscall_trace_enter
6806 +
6807 + pax_erase_kstack
6808 +
6809 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6810 RESTORE_REST
6811 cmpq $(IA32_NR_syscalls-1),%rax
6812 @@ -277,19 +320,20 @@ ENDPROC(ia32_sysenter_target)
6813 ENTRY(ia32_cstar_target)
6814 CFI_STARTPROC32 simple
6815 CFI_SIGNAL_FRAME
6816 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6817 + CFI_DEF_CFA rsp,0
6818 CFI_REGISTER rip,rcx
6819 /*CFI_REGISTER rflags,r11*/
6820 SWAPGS_UNSAFE_STACK
6821 movl %esp,%r8d
6822 CFI_REGISTER rsp,r8
6823 movq PER_CPU_VAR(kernel_stack),%rsp
6824 + SAVE_ARGS 8*6,0,0
6825 + pax_enter_kernel_user
6826 /*
6827 * No need to follow this irqs on/off section: the syscall
6828 * disabled irqs and here we enable it straight after entry:
6829 */
6830 ENABLE_INTERRUPTS(CLBR_NONE)
6831 - SAVE_ARGS 8,0,0
6832 movl %eax,%eax /* zero extension */
6833 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6834 movq %rcx,RIP-ARGOFFSET(%rsp)
6835 @@ -305,13 +349,19 @@ ENTRY(ia32_cstar_target)
6836 /* no need to do an access_ok check here because r8 has been
6837 32bit zero extended */
6838 /* hardware stack frame is complete now */
6839 +
6840 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6841 + mov $PAX_USER_SHADOW_BASE,%r11
6842 + add %r11,%r8
6843 +#endif
6844 +
6845 1: movl (%r8),%r9d
6846 .section __ex_table,"a"
6847 .quad 1b,ia32_badarg
6848 .previous
6849 - GET_THREAD_INFO(%r10)
6850 - orl $TS_COMPAT,TI_status(%r10)
6851 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6852 + GET_THREAD_INFO(%r11)
6853 + orl $TS_COMPAT,TI_status(%r11)
6854 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6855 CFI_REMEMBER_STATE
6856 jnz cstar_tracesys
6857 cmpq $IA32_NR_syscalls-1,%rax
6858 @@ -321,13 +371,15 @@ cstar_do_call:
6859 cstar_dispatch:
6860 call *ia32_sys_call_table(,%rax,8)
6861 movq %rax,RAX-ARGOFFSET(%rsp)
6862 - GET_THREAD_INFO(%r10)
6863 + GET_THREAD_INFO(%r11)
6864 DISABLE_INTERRUPTS(CLBR_NONE)
6865 TRACE_IRQS_OFF
6866 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6867 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6868 jnz sysretl_audit
6869 sysretl_from_sys_call:
6870 - andl $~TS_COMPAT,TI_status(%r10)
6871 + pax_exit_kernel_user
6872 + pax_erase_kstack
6873 + andl $~TS_COMPAT,TI_status(%r11)
6874 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6875 movl RIP-ARGOFFSET(%rsp),%ecx
6876 CFI_REGISTER rip,rcx
6877 @@ -355,7 +407,7 @@ sysretl_audit:
6878
6879 cstar_tracesys:
6880 #ifdef CONFIG_AUDITSYSCALL
6881 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6882 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6883 jz cstar_auditsys
6884 #endif
6885 xchgl %r9d,%ebp
6886 @@ -364,6 +416,9 @@ cstar_tracesys:
6887 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6888 movq %rsp,%rdi /* &pt_regs -> arg1 */
6889 call syscall_trace_enter
6890 +
6891 + pax_erase_kstack
6892 +
6893 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6894 RESTORE_REST
6895 xchgl %ebp,%r9d
6896 @@ -409,20 +464,21 @@ ENTRY(ia32_syscall)
6897 CFI_REL_OFFSET rip,RIP-RIP
6898 PARAVIRT_ADJUST_EXCEPTION_FRAME
6899 SWAPGS
6900 - /*
6901 - * No need to follow this irqs on/off section: the syscall
6902 - * disabled irqs and here we enable it straight after entry:
6903 - */
6904 - ENABLE_INTERRUPTS(CLBR_NONE)
6905 movl %eax,%eax
6906 pushq_cfi %rax
6907 cld
6908 /* note the registers are not zero extended to the sf.
6909 this could be a problem. */
6910 SAVE_ARGS 0,1,0
6911 - GET_THREAD_INFO(%r10)
6912 - orl $TS_COMPAT,TI_status(%r10)
6913 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6914 + pax_enter_kernel_user
6915 + /*
6916 + * No need to follow this irqs on/off section: the syscall
6917 + * disabled irqs and here we enable it straight after entry:
6918 + */
6919 + ENABLE_INTERRUPTS(CLBR_NONE)
6920 + GET_THREAD_INFO(%r11)
6921 + orl $TS_COMPAT,TI_status(%r11)
6922 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6923 jnz ia32_tracesys
6924 cmpq $(IA32_NR_syscalls-1),%rax
6925 ja ia32_badsys
6926 @@ -441,6 +497,9 @@ ia32_tracesys:
6927 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6928 movq %rsp,%rdi /* &pt_regs -> arg1 */
6929 call syscall_trace_enter
6930 +
6931 + pax_erase_kstack
6932 +
6933 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6934 RESTORE_REST
6935 cmpq $(IA32_NR_syscalls-1),%rax
6936 @@ -455,6 +514,7 @@ ia32_badsys:
6937
6938 quiet_ni_syscall:
6939 movq $-ENOSYS,%rax
6940 + pax_force_retaddr
6941 ret
6942 CFI_ENDPROC
6943
6944 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6945 index f6f5c53..b358b28 100644
6946 --- a/arch/x86/ia32/sys_ia32.c
6947 +++ b/arch/x86/ia32/sys_ia32.c
6948 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6949 */
6950 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6951 {
6952 - typeof(ubuf->st_uid) uid = 0;
6953 - typeof(ubuf->st_gid) gid = 0;
6954 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
6955 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
6956 SET_UID(uid, stat->uid);
6957 SET_GID(gid, stat->gid);
6958 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6959 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
6960 }
6961 set_fs(KERNEL_DS);
6962 ret = sys_rt_sigprocmask(how,
6963 - set ? (sigset_t __user *)&s : NULL,
6964 - oset ? (sigset_t __user *)&s : NULL,
6965 + set ? (sigset_t __force_user *)&s : NULL,
6966 + oset ? (sigset_t __force_user *)&s : NULL,
6967 sigsetsize);
6968 set_fs(old_fs);
6969 if (ret)
6970 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
6971 return alarm_setitimer(seconds);
6972 }
6973
6974 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6975 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6976 int options)
6977 {
6978 return compat_sys_wait4(pid, stat_addr, options, NULL);
6979 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
6980 mm_segment_t old_fs = get_fs();
6981
6982 set_fs(KERNEL_DS);
6983 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6984 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6985 set_fs(old_fs);
6986 if (put_compat_timespec(&t, interval))
6987 return -EFAULT;
6988 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
6989 mm_segment_t old_fs = get_fs();
6990
6991 set_fs(KERNEL_DS);
6992 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6993 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6994 set_fs(old_fs);
6995 if (!ret) {
6996 switch (_NSIG_WORDS) {
6997 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
6998 if (copy_siginfo_from_user32(&info, uinfo))
6999 return -EFAULT;
7000 set_fs(KERNEL_DS);
7001 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
7002 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
7003 set_fs(old_fs);
7004 return ret;
7005 }
7006 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7007 return -EFAULT;
7008
7009 set_fs(KERNEL_DS);
7010 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7011 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7012 count);
7013 set_fs(old_fs);
7014
7015 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7016 index 091508b..0ee32ec 100644
7017 --- a/arch/x86/include/asm/alternative-asm.h
7018 +++ b/arch/x86/include/asm/alternative-asm.h
7019 @@ -15,6 +15,45 @@
7020 .endm
7021 #endif
7022
7023 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7024 + .macro pax_force_retaddr_bts rip=0
7025 + btsq $63,\rip(%rsp)
7026 + .endm
7027 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7028 + .macro pax_force_retaddr rip=0, reload=0
7029 + btsq $63,\rip(%rsp)
7030 + .endm
7031 + .macro pax_force_fptr ptr
7032 + btsq $63,\ptr
7033 + .endm
7034 + .macro pax_set_fptr_mask
7035 + .endm
7036 +#endif
7037 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7038 + .macro pax_force_retaddr rip=0, reload=0
7039 + .if \reload
7040 + pax_set_fptr_mask
7041 + .endif
7042 + orq %r10,\rip(%rsp)
7043 + .endm
7044 + .macro pax_force_fptr ptr
7045 + orq %r10,\ptr
7046 + .endm
7047 + .macro pax_set_fptr_mask
7048 + movabs $0x8000000000000000,%r10
7049 + .endm
7050 +#endif
7051 +#else
7052 + .macro pax_force_retaddr rip=0, reload=0
7053 + .endm
7054 + .macro pax_force_fptr ptr
7055 + .endm
7056 + .macro pax_force_retaddr_bts rip=0
7057 + .endm
7058 + .macro pax_set_fptr_mask
7059 + .endm
7060 +#endif
7061 +
7062 .macro altinstruction_entry orig alt feature orig_len alt_len
7063 .long \orig - .
7064 .long \alt - .
7065 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7066 index 37ad100..7d47faa 100644
7067 --- a/arch/x86/include/asm/alternative.h
7068 +++ b/arch/x86/include/asm/alternative.h
7069 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7070 ".section .discard,\"aw\",@progbits\n" \
7071 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7072 ".previous\n" \
7073 - ".section .altinstr_replacement, \"ax\"\n" \
7074 + ".section .altinstr_replacement, \"a\"\n" \
7075 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7076 ".previous"
7077
7078 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7079 index 9b7273c..e9fcc24 100644
7080 --- a/arch/x86/include/asm/apic.h
7081 +++ b/arch/x86/include/asm/apic.h
7082 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7083
7084 #ifdef CONFIG_X86_LOCAL_APIC
7085
7086 -extern unsigned int apic_verbosity;
7087 +extern int apic_verbosity;
7088 extern int local_apic_timer_c2_ok;
7089
7090 extern int disable_apic;
7091 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7092 index 20370c6..a2eb9b0 100644
7093 --- a/arch/x86/include/asm/apm.h
7094 +++ b/arch/x86/include/asm/apm.h
7095 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7096 __asm__ __volatile__(APM_DO_ZERO_SEGS
7097 "pushl %%edi\n\t"
7098 "pushl %%ebp\n\t"
7099 - "lcall *%%cs:apm_bios_entry\n\t"
7100 + "lcall *%%ss:apm_bios_entry\n\t"
7101 "setc %%al\n\t"
7102 "popl %%ebp\n\t"
7103 "popl %%edi\n\t"
7104 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7105 __asm__ __volatile__(APM_DO_ZERO_SEGS
7106 "pushl %%edi\n\t"
7107 "pushl %%ebp\n\t"
7108 - "lcall *%%cs:apm_bios_entry\n\t"
7109 + "lcall *%%ss:apm_bios_entry\n\t"
7110 "setc %%bl\n\t"
7111 "popl %%ebp\n\t"
7112 "popl %%edi\n\t"
7113 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7114 index 10572e3..2618d91 100644
7115 --- a/arch/x86/include/asm/atomic.h
7116 +++ b/arch/x86/include/asm/atomic.h
7117 @@ -22,7 +22,18 @@
7118 */
7119 static inline int atomic_read(const atomic_t *v)
7120 {
7121 - return (*(volatile int *)&(v)->counter);
7122 + return (*(volatile const int *)&(v)->counter);
7123 +}
7124 +
7125 +/**
7126 + * atomic_read_unchecked - read atomic variable
7127 + * @v: pointer of type atomic_unchecked_t
7128 + *
7129 + * Atomically reads the value of @v.
7130 + */
7131 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7132 +{
7133 + return (*(volatile const int *)&(v)->counter);
7134 }
7135
7136 /**
7137 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7138 }
7139
7140 /**
7141 + * atomic_set_unchecked - set atomic variable
7142 + * @v: pointer of type atomic_unchecked_t
7143 + * @i: required value
7144 + *
7145 + * Atomically sets the value of @v to @i.
7146 + */
7147 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7148 +{
7149 + v->counter = i;
7150 +}
7151 +
7152 +/**
7153 * atomic_add - add integer to atomic variable
7154 * @i: integer value to add
7155 * @v: pointer of type atomic_t
7156 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7157 */
7158 static inline void atomic_add(int i, atomic_t *v)
7159 {
7160 - asm volatile(LOCK_PREFIX "addl %1,%0"
7161 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7162 +
7163 +#ifdef CONFIG_PAX_REFCOUNT
7164 + "jno 0f\n"
7165 + LOCK_PREFIX "subl %1,%0\n"
7166 + "int $4\n0:\n"
7167 + _ASM_EXTABLE(0b, 0b)
7168 +#endif
7169 +
7170 + : "+m" (v->counter)
7171 + : "ir" (i));
7172 +}
7173 +
7174 +/**
7175 + * atomic_add_unchecked - add integer to atomic variable
7176 + * @i: integer value to add
7177 + * @v: pointer of type atomic_unchecked_t
7178 + *
7179 + * Atomically adds @i to @v.
7180 + */
7181 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7182 +{
7183 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7184 : "+m" (v->counter)
7185 : "ir" (i));
7186 }
7187 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7188 */
7189 static inline void atomic_sub(int i, atomic_t *v)
7190 {
7191 - asm volatile(LOCK_PREFIX "subl %1,%0"
7192 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7193 +
7194 +#ifdef CONFIG_PAX_REFCOUNT
7195 + "jno 0f\n"
7196 + LOCK_PREFIX "addl %1,%0\n"
7197 + "int $4\n0:\n"
7198 + _ASM_EXTABLE(0b, 0b)
7199 +#endif
7200 +
7201 + : "+m" (v->counter)
7202 + : "ir" (i));
7203 +}
7204 +
7205 +/**
7206 + * atomic_sub_unchecked - subtract integer from atomic variable
7207 + * @i: integer value to subtract
7208 + * @v: pointer of type atomic_unchecked_t
7209 + *
7210 + * Atomically subtracts @i from @v.
7211 + */
7212 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7213 +{
7214 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7215 : "+m" (v->counter)
7216 : "ir" (i));
7217 }
7218 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7219 {
7220 unsigned char c;
7221
7222 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7223 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7224 +
7225 +#ifdef CONFIG_PAX_REFCOUNT
7226 + "jno 0f\n"
7227 + LOCK_PREFIX "addl %2,%0\n"
7228 + "int $4\n0:\n"
7229 + _ASM_EXTABLE(0b, 0b)
7230 +#endif
7231 +
7232 + "sete %1\n"
7233 : "+m" (v->counter), "=qm" (c)
7234 : "ir" (i) : "memory");
7235 return c;
7236 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7237 */
7238 static inline void atomic_inc(atomic_t *v)
7239 {
7240 - asm volatile(LOCK_PREFIX "incl %0"
7241 + asm volatile(LOCK_PREFIX "incl %0\n"
7242 +
7243 +#ifdef CONFIG_PAX_REFCOUNT
7244 + "jno 0f\n"
7245 + LOCK_PREFIX "decl %0\n"
7246 + "int $4\n0:\n"
7247 + _ASM_EXTABLE(0b, 0b)
7248 +#endif
7249 +
7250 + : "+m" (v->counter));
7251 +}
7252 +
7253 +/**
7254 + * atomic_inc_unchecked - increment atomic variable
7255 + * @v: pointer of type atomic_unchecked_t
7256 + *
7257 + * Atomically increments @v by 1.
7258 + */
7259 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7260 +{
7261 + asm volatile(LOCK_PREFIX "incl %0\n"
7262 : "+m" (v->counter));
7263 }
7264
7265 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7266 */
7267 static inline void atomic_dec(atomic_t *v)
7268 {
7269 - asm volatile(LOCK_PREFIX "decl %0"
7270 + asm volatile(LOCK_PREFIX "decl %0\n"
7271 +
7272 +#ifdef CONFIG_PAX_REFCOUNT
7273 + "jno 0f\n"
7274 + LOCK_PREFIX "incl %0\n"
7275 + "int $4\n0:\n"
7276 + _ASM_EXTABLE(0b, 0b)
7277 +#endif
7278 +
7279 + : "+m" (v->counter));
7280 +}
7281 +
7282 +/**
7283 + * atomic_dec_unchecked - decrement atomic variable
7284 + * @v: pointer of type atomic_unchecked_t
7285 + *
7286 + * Atomically decrements @v by 1.
7287 + */
7288 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7289 +{
7290 + asm volatile(LOCK_PREFIX "decl %0\n"
7291 : "+m" (v->counter));
7292 }
7293
7294 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7295 {
7296 unsigned char c;
7297
7298 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7299 + asm volatile(LOCK_PREFIX "decl %0\n"
7300 +
7301 +#ifdef CONFIG_PAX_REFCOUNT
7302 + "jno 0f\n"
7303 + LOCK_PREFIX "incl %0\n"
7304 + "int $4\n0:\n"
7305 + _ASM_EXTABLE(0b, 0b)
7306 +#endif
7307 +
7308 + "sete %1\n"
7309 : "+m" (v->counter), "=qm" (c)
7310 : : "memory");
7311 return c != 0;
7312 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7313 {
7314 unsigned char c;
7315
7316 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7317 + asm volatile(LOCK_PREFIX "incl %0\n"
7318 +
7319 +#ifdef CONFIG_PAX_REFCOUNT
7320 + "jno 0f\n"
7321 + LOCK_PREFIX "decl %0\n"
7322 + "int $4\n0:\n"
7323 + _ASM_EXTABLE(0b, 0b)
7324 +#endif
7325 +
7326 + "sete %1\n"
7327 + : "+m" (v->counter), "=qm" (c)
7328 + : : "memory");
7329 + return c != 0;
7330 +}
7331 +
7332 +/**
7333 + * atomic_inc_and_test_unchecked - increment and test
7334 + * @v: pointer of type atomic_unchecked_t
7335 + *
7336 + * Atomically increments @v by 1
7337 + * and returns true if the result is zero, or false for all
7338 + * other cases.
7339 + */
7340 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7341 +{
7342 + unsigned char c;
7343 +
7344 + asm volatile(LOCK_PREFIX "incl %0\n"
7345 + "sete %1\n"
7346 : "+m" (v->counter), "=qm" (c)
7347 : : "memory");
7348 return c != 0;
7349 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7350 {
7351 unsigned char c;
7352
7353 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7354 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7355 +
7356 +#ifdef CONFIG_PAX_REFCOUNT
7357 + "jno 0f\n"
7358 + LOCK_PREFIX "subl %2,%0\n"
7359 + "int $4\n0:\n"
7360 + _ASM_EXTABLE(0b, 0b)
7361 +#endif
7362 +
7363 + "sets %1\n"
7364 : "+m" (v->counter), "=qm" (c)
7365 : "ir" (i) : "memory");
7366 return c;
7367 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
7368 #endif
7369 /* Modern 486+ processor */
7370 __i = i;
7371 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7372 +
7373 +#ifdef CONFIG_PAX_REFCOUNT
7374 + "jno 0f\n"
7375 + "movl %0, %1\n"
7376 + "int $4\n0:\n"
7377 + _ASM_EXTABLE(0b, 0b)
7378 +#endif
7379 +
7380 + : "+r" (i), "+m" (v->counter)
7381 + : : "memory");
7382 + return i + __i;
7383 +
7384 +#ifdef CONFIG_M386
7385 +no_xadd: /* Legacy 386 processor */
7386 + local_irq_save(flags);
7387 + __i = atomic_read(v);
7388 + atomic_set(v, i + __i);
7389 + local_irq_restore(flags);
7390 + return i + __i;
7391 +#endif
7392 +}
7393 +
7394 +/**
7395 + * atomic_add_return_unchecked - add integer and return
7396 + * @v: pointer of type atomic_unchecked_t
7397 + * @i: integer value to add
7398 + *
7399 + * Atomically adds @i to @v and returns @i + @v
7400 + */
7401 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7402 +{
7403 + int __i;
7404 +#ifdef CONFIG_M386
7405 + unsigned long flags;
7406 + if (unlikely(boot_cpu_data.x86 <= 3))
7407 + goto no_xadd;
7408 +#endif
7409 + /* Modern 486+ processor */
7410 + __i = i;
7411 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7412 : "+r" (i), "+m" (v->counter)
7413 : : "memory");
7414 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7415 }
7416
7417 #define atomic_inc_return(v) (atomic_add_return(1, v))
7418 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7419 +{
7420 + return atomic_add_return_unchecked(1, v);
7421 +}
7422 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7423
7424 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7425 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7426 return cmpxchg(&v->counter, old, new);
7427 }
7428
7429 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7430 +{
7431 + return cmpxchg(&v->counter, old, new);
7432 +}
7433 +
7434 static inline int atomic_xchg(atomic_t *v, int new)
7435 {
7436 return xchg(&v->counter, new);
7437 }
7438
7439 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7440 +{
7441 + return xchg(&v->counter, new);
7442 +}
7443 +
7444 /**
7445 * __atomic_add_unless - add unless the number is already a given value
7446 * @v: pointer of type atomic_t
7447 @@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7448 */
7449 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7450 {
7451 - int c, old;
7452 + int c, old, new;
7453 c = atomic_read(v);
7454 for (;;) {
7455 - if (unlikely(c == (u)))
7456 + if (unlikely(c == u))
7457 break;
7458 - old = atomic_cmpxchg((v), c, c + (a));
7459 +
7460 + asm volatile("addl %2,%0\n"
7461 +
7462 +#ifdef CONFIG_PAX_REFCOUNT
7463 + "jno 0f\n"
7464 + "subl %2,%0\n"
7465 + "int $4\n0:\n"
7466 + _ASM_EXTABLE(0b, 0b)
7467 +#endif
7468 +
7469 + : "=r" (new)
7470 + : "0" (c), "ir" (a));
7471 +
7472 + old = atomic_cmpxchg(v, c, new);
7473 if (likely(old == c))
7474 break;
7475 c = old;
7476 @@ -244,6 +473,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7477 return c;
7478 }
7479
7480 +/**
7481 + * atomic_inc_not_zero_hint - increment if not null
7482 + * @v: pointer of type atomic_t
7483 + * @hint: probable value of the atomic before the increment
7484 + *
7485 + * This version of atomic_inc_not_zero() gives a hint of probable
7486 + * value of the atomic. This helps processor to not read the memory
7487 + * before doing the atomic read/modify/write cycle, lowering
7488 + * number of bus transactions on some arches.
7489 + *
7490 + * Returns: 0 if increment was not done, 1 otherwise.
7491 + */
7492 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7493 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7494 +{
7495 + int val, c = hint, new;
7496 +
7497 + /* sanity test, should be removed by compiler if hint is a constant */
7498 + if (!hint)
7499 + return __atomic_add_unless(v, 1, 0);
7500 +
7501 + do {
7502 + asm volatile("incl %0\n"
7503 +
7504 +#ifdef CONFIG_PAX_REFCOUNT
7505 + "jno 0f\n"
7506 + "decl %0\n"
7507 + "int $4\n0:\n"
7508 + _ASM_EXTABLE(0b, 0b)
7509 +#endif
7510 +
7511 + : "=r" (new)
7512 + : "0" (c));
7513 +
7514 + val = atomic_cmpxchg(v, c, new);
7515 + if (val == c)
7516 + return 1;
7517 + c = val;
7518 + } while (c);
7519 +
7520 + return 0;
7521 +}
7522
7523 /*
7524 * atomic_dec_if_positive - decrement by 1 if old value positive
7525 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7526 index 24098aa..1e37723 100644
7527 --- a/arch/x86/include/asm/atomic64_32.h
7528 +++ b/arch/x86/include/asm/atomic64_32.h
7529 @@ -12,6 +12,14 @@ typedef struct {
7530 u64 __aligned(8) counter;
7531 } atomic64_t;
7532
7533 +#ifdef CONFIG_PAX_REFCOUNT
7534 +typedef struct {
7535 + u64 __aligned(8) counter;
7536 +} atomic64_unchecked_t;
7537 +#else
7538 +typedef atomic64_t atomic64_unchecked_t;
7539 +#endif
7540 +
7541 #define ATOMIC64_INIT(val) { (val) }
7542
7543 #ifdef CONFIG_X86_CMPXCHG64
7544 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7545 }
7546
7547 /**
7548 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7549 + * @p: pointer to type atomic64_unchecked_t
7550 + * @o: expected value
7551 + * @n: new value
7552 + *
7553 + * Atomically sets @v to @n if it was equal to @o and returns
7554 + * the old value.
7555 + */
7556 +
7557 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7558 +{
7559 + return cmpxchg64(&v->counter, o, n);
7560 +}
7561 +
7562 +/**
7563 * atomic64_xchg - xchg atomic64 variable
7564 * @v: pointer to type atomic64_t
7565 * @n: value to assign
7566 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7567 }
7568
7569 /**
7570 + * atomic64_set_unchecked - set atomic64 variable
7571 + * @v: pointer to type atomic64_unchecked_t
7572 + * @n: value to assign
7573 + *
7574 + * Atomically sets the value of @v to @n.
7575 + */
7576 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7577 +{
7578 + unsigned high = (unsigned)(i >> 32);
7579 + unsigned low = (unsigned)i;
7580 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7581 + : "+b" (low), "+c" (high)
7582 + : "S" (v)
7583 + : "eax", "edx", "memory"
7584 + );
7585 +}
7586 +
7587 +/**
7588 * atomic64_read - read atomic64 variable
7589 * @v: pointer to type atomic64_t
7590 *
7591 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7592 }
7593
7594 /**
7595 + * atomic64_read_unchecked - read atomic64 variable
7596 + * @v: pointer to type atomic64_unchecked_t
7597 + *
7598 + * Atomically reads the value of @v and returns it.
7599 + */
7600 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7601 +{
7602 + long long r;
7603 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7604 + : "=A" (r), "+c" (v)
7605 + : : "memory"
7606 + );
7607 + return r;
7608 + }
7609 +
7610 +/**
7611 * atomic64_add_return - add and return
7612 * @i: integer value to add
7613 * @v: pointer to type atomic64_t
7614 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7615 return i;
7616 }
7617
7618 +/**
7619 + * atomic64_add_return_unchecked - add and return
7620 + * @i: integer value to add
7621 + * @v: pointer to type atomic64_unchecked_t
7622 + *
7623 + * Atomically adds @i to @v and returns @i + *@v
7624 + */
7625 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7626 +{
7627 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7628 + : "+A" (i), "+c" (v)
7629 + : : "memory"
7630 + );
7631 + return i;
7632 +}
7633 +
7634 /*
7635 * Other variants with different arithmetic operators:
7636 */
7637 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7638 return a;
7639 }
7640
7641 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7642 +{
7643 + long long a;
7644 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7645 + : "=A" (a)
7646 + : "S" (v)
7647 + : "memory", "ecx"
7648 + );
7649 + return a;
7650 +}
7651 +
7652 static inline long long atomic64_dec_return(atomic64_t *v)
7653 {
7654 long long a;
7655 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7656 }
7657
7658 /**
7659 + * atomic64_add_unchecked - add integer to atomic64 variable
7660 + * @i: integer value to add
7661 + * @v: pointer to type atomic64_unchecked_t
7662 + *
7663 + * Atomically adds @i to @v.
7664 + */
7665 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7666 +{
7667 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7668 + : "+A" (i), "+c" (v)
7669 + : : "memory"
7670 + );
7671 + return i;
7672 +}
7673 +
7674 +/**
7675 * atomic64_sub - subtract the atomic64 variable
7676 * @i: integer value to subtract
7677 * @v: pointer to type atomic64_t
7678 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7679 index 017594d..d3fcf72 100644
7680 --- a/arch/x86/include/asm/atomic64_64.h
7681 +++ b/arch/x86/include/asm/atomic64_64.h
7682 @@ -18,7 +18,19 @@
7683 */
7684 static inline long atomic64_read(const atomic64_t *v)
7685 {
7686 - return (*(volatile long *)&(v)->counter);
7687 + return (*(volatile const long *)&(v)->counter);
7688 +}
7689 +
7690 +/**
7691 + * atomic64_read_unchecked - read atomic64 variable
7692 + * @v: pointer of type atomic64_unchecked_t
7693 + *
7694 + * Atomically reads the value of @v.
7695 + * Doesn't imply a read memory barrier.
7696 + */
7697 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7698 +{
7699 + return (*(volatile const long *)&(v)->counter);
7700 }
7701
7702 /**
7703 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7704 }
7705
7706 /**
7707 + * atomic64_set_unchecked - set atomic64 variable
7708 + * @v: pointer to type atomic64_unchecked_t
7709 + * @i: required value
7710 + *
7711 + * Atomically sets the value of @v to @i.
7712 + */
7713 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7714 +{
7715 + v->counter = i;
7716 +}
7717 +
7718 +/**
7719 * atomic64_add - add integer to atomic64 variable
7720 * @i: integer value to add
7721 * @v: pointer to type atomic64_t
7722 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7723 */
7724 static inline void atomic64_add(long i, atomic64_t *v)
7725 {
7726 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7727 +
7728 +#ifdef CONFIG_PAX_REFCOUNT
7729 + "jno 0f\n"
7730 + LOCK_PREFIX "subq %1,%0\n"
7731 + "int $4\n0:\n"
7732 + _ASM_EXTABLE(0b, 0b)
7733 +#endif
7734 +
7735 + : "=m" (v->counter)
7736 + : "er" (i), "m" (v->counter));
7737 +}
7738 +
7739 +/**
7740 + * atomic64_add_unchecked - add integer to atomic64 variable
7741 + * @i: integer value to add
7742 + * @v: pointer to type atomic64_unchecked_t
7743 + *
7744 + * Atomically adds @i to @v.
7745 + */
7746 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7747 +{
7748 asm volatile(LOCK_PREFIX "addq %1,%0"
7749 : "=m" (v->counter)
7750 : "er" (i), "m" (v->counter));
7751 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7752 */
7753 static inline void atomic64_sub(long i, atomic64_t *v)
7754 {
7755 - asm volatile(LOCK_PREFIX "subq %1,%0"
7756 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7757 +
7758 +#ifdef CONFIG_PAX_REFCOUNT
7759 + "jno 0f\n"
7760 + LOCK_PREFIX "addq %1,%0\n"
7761 + "int $4\n0:\n"
7762 + _ASM_EXTABLE(0b, 0b)
7763 +#endif
7764 +
7765 + : "=m" (v->counter)
7766 + : "er" (i), "m" (v->counter));
7767 +}
7768 +
7769 +/**
7770 + * atomic64_sub_unchecked - subtract the atomic64 variable
7771 + * @i: integer value to subtract
7772 + * @v: pointer to type atomic64_unchecked_t
7773 + *
7774 + * Atomically subtracts @i from @v.
7775 + */
7776 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7777 +{
7778 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7779 : "=m" (v->counter)
7780 : "er" (i), "m" (v->counter));
7781 }
7782 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7783 {
7784 unsigned char c;
7785
7786 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7787 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7788 +
7789 +#ifdef CONFIG_PAX_REFCOUNT
7790 + "jno 0f\n"
7791 + LOCK_PREFIX "addq %2,%0\n"
7792 + "int $4\n0:\n"
7793 + _ASM_EXTABLE(0b, 0b)
7794 +#endif
7795 +
7796 + "sete %1\n"
7797 : "=m" (v->counter), "=qm" (c)
7798 : "er" (i), "m" (v->counter) : "memory");
7799 return c;
7800 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7801 */
7802 static inline void atomic64_inc(atomic64_t *v)
7803 {
7804 + asm volatile(LOCK_PREFIX "incq %0\n"
7805 +
7806 +#ifdef CONFIG_PAX_REFCOUNT
7807 + "jno 0f\n"
7808 + LOCK_PREFIX "decq %0\n"
7809 + "int $4\n0:\n"
7810 + _ASM_EXTABLE(0b, 0b)
7811 +#endif
7812 +
7813 + : "=m" (v->counter)
7814 + : "m" (v->counter));
7815 +}
7816 +
7817 +/**
7818 + * atomic64_inc_unchecked - increment atomic64 variable
7819 + * @v: pointer to type atomic64_unchecked_t
7820 + *
7821 + * Atomically increments @v by 1.
7822 + */
7823 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7824 +{
7825 asm volatile(LOCK_PREFIX "incq %0"
7826 : "=m" (v->counter)
7827 : "m" (v->counter));
7828 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7829 */
7830 static inline void atomic64_dec(atomic64_t *v)
7831 {
7832 - asm volatile(LOCK_PREFIX "decq %0"
7833 + asm volatile(LOCK_PREFIX "decq %0\n"
7834 +
7835 +#ifdef CONFIG_PAX_REFCOUNT
7836 + "jno 0f\n"
7837 + LOCK_PREFIX "incq %0\n"
7838 + "int $4\n0:\n"
7839 + _ASM_EXTABLE(0b, 0b)
7840 +#endif
7841 +
7842 + : "=m" (v->counter)
7843 + : "m" (v->counter));
7844 +}
7845 +
7846 +/**
7847 + * atomic64_dec_unchecked - decrement atomic64 variable
7848 + * @v: pointer to type atomic64_t
7849 + *
7850 + * Atomically decrements @v by 1.
7851 + */
7852 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7853 +{
7854 + asm volatile(LOCK_PREFIX "decq %0\n"
7855 : "=m" (v->counter)
7856 : "m" (v->counter));
7857 }
7858 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7859 {
7860 unsigned char c;
7861
7862 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7863 + asm volatile(LOCK_PREFIX "decq %0\n"
7864 +
7865 +#ifdef CONFIG_PAX_REFCOUNT
7866 + "jno 0f\n"
7867 + LOCK_PREFIX "incq %0\n"
7868 + "int $4\n0:\n"
7869 + _ASM_EXTABLE(0b, 0b)
7870 +#endif
7871 +
7872 + "sete %1\n"
7873 : "=m" (v->counter), "=qm" (c)
7874 : "m" (v->counter) : "memory");
7875 return c != 0;
7876 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7877 {
7878 unsigned char c;
7879
7880 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7881 + asm volatile(LOCK_PREFIX "incq %0\n"
7882 +
7883 +#ifdef CONFIG_PAX_REFCOUNT
7884 + "jno 0f\n"
7885 + LOCK_PREFIX "decq %0\n"
7886 + "int $4\n0:\n"
7887 + _ASM_EXTABLE(0b, 0b)
7888 +#endif
7889 +
7890 + "sete %1\n"
7891 : "=m" (v->counter), "=qm" (c)
7892 : "m" (v->counter) : "memory");
7893 return c != 0;
7894 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7895 {
7896 unsigned char c;
7897
7898 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7899 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7900 +
7901 +#ifdef CONFIG_PAX_REFCOUNT
7902 + "jno 0f\n"
7903 + LOCK_PREFIX "subq %2,%0\n"
7904 + "int $4\n0:\n"
7905 + _ASM_EXTABLE(0b, 0b)
7906 +#endif
7907 +
7908 + "sets %1\n"
7909 : "=m" (v->counter), "=qm" (c)
7910 : "er" (i), "m" (v->counter) : "memory");
7911 return c;
7912 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7913 static inline long atomic64_add_return(long i, atomic64_t *v)
7914 {
7915 long __i = i;
7916 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7917 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7918 +
7919 +#ifdef CONFIG_PAX_REFCOUNT
7920 + "jno 0f\n"
7921 + "movq %0, %1\n"
7922 + "int $4\n0:\n"
7923 + _ASM_EXTABLE(0b, 0b)
7924 +#endif
7925 +
7926 + : "+r" (i), "+m" (v->counter)
7927 + : : "memory");
7928 + return i + __i;
7929 +}
7930 +
7931 +/**
7932 + * atomic64_add_return_unchecked - add and return
7933 + * @i: integer value to add
7934 + * @v: pointer to type atomic64_unchecked_t
7935 + *
7936 + * Atomically adds @i to @v and returns @i + @v
7937 + */
7938 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7939 +{
7940 + long __i = i;
7941 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7942 : "+r" (i), "+m" (v->counter)
7943 : : "memory");
7944 return i + __i;
7945 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7946 }
7947
7948 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7949 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7950 +{
7951 + return atomic64_add_return_unchecked(1, v);
7952 +}
7953 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7954
7955 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7956 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7957 return cmpxchg(&v->counter, old, new);
7958 }
7959
7960 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7961 +{
7962 + return cmpxchg(&v->counter, old, new);
7963 +}
7964 +
7965 static inline long atomic64_xchg(atomic64_t *v, long new)
7966 {
7967 return xchg(&v->counter, new);
7968 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
7969 */
7970 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7971 {
7972 - long c, old;
7973 + long c, old, new;
7974 c = atomic64_read(v);
7975 for (;;) {
7976 - if (unlikely(c == (u)))
7977 + if (unlikely(c == u))
7978 break;
7979 - old = atomic64_cmpxchg((v), c, c + (a));
7980 +
7981 + asm volatile("add %2,%0\n"
7982 +
7983 +#ifdef CONFIG_PAX_REFCOUNT
7984 + "jno 0f\n"
7985 + "sub %2,%0\n"
7986 + "int $4\n0:\n"
7987 + _ASM_EXTABLE(0b, 0b)
7988 +#endif
7989 +
7990 + : "=r" (new)
7991 + : "0" (c), "ir" (a));
7992 +
7993 + old = atomic64_cmpxchg(v, c, new);
7994 if (likely(old == c))
7995 break;
7996 c = old;
7997 }
7998 - return c != (u);
7999 + return c != u;
8000 }
8001
8002 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8003 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
8004 index 1775d6e..b65017f 100644
8005 --- a/arch/x86/include/asm/bitops.h
8006 +++ b/arch/x86/include/asm/bitops.h
8007 @@ -38,7 +38,7 @@
8008 * a mask operation on a byte.
8009 */
8010 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8011 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8012 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8013 #define CONST_MASK(nr) (1 << ((nr) & 7))
8014
8015 /**
8016 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8017 index 5e1a2ee..c9f9533 100644
8018 --- a/arch/x86/include/asm/boot.h
8019 +++ b/arch/x86/include/asm/boot.h
8020 @@ -11,10 +11,15 @@
8021 #include <asm/pgtable_types.h>
8022
8023 /* Physical address where kernel should be loaded. */
8024 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8025 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8026 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8027 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8028
8029 +#ifndef __ASSEMBLY__
8030 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8031 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8032 +#endif
8033 +
8034 /* Minimum kernel alignment, as a power of two */
8035 #ifdef CONFIG_X86_64
8036 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8037 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8038 index 48f99f1..d78ebf9 100644
8039 --- a/arch/x86/include/asm/cache.h
8040 +++ b/arch/x86/include/asm/cache.h
8041 @@ -5,12 +5,13 @@
8042
8043 /* L1 cache line size */
8044 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8045 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8046 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8047
8048 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8049 +#define __read_only __attribute__((__section__(".data..read_only")))
8050
8051 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8052 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8053 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8054
8055 #ifdef CONFIG_X86_VSMP
8056 #ifdef CONFIG_SMP
8057 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8058 index 4e12668..501d239 100644
8059 --- a/arch/x86/include/asm/cacheflush.h
8060 +++ b/arch/x86/include/asm/cacheflush.h
8061 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8062 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8063
8064 if (pg_flags == _PGMT_DEFAULT)
8065 - return -1;
8066 + return ~0UL;
8067 else if (pg_flags == _PGMT_WC)
8068 return _PAGE_CACHE_WC;
8069 else if (pg_flags == _PGMT_UC_MINUS)
8070 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8071 index 46fc474..b02b0f9 100644
8072 --- a/arch/x86/include/asm/checksum_32.h
8073 +++ b/arch/x86/include/asm/checksum_32.h
8074 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8075 int len, __wsum sum,
8076 int *src_err_ptr, int *dst_err_ptr);
8077
8078 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8079 + int len, __wsum sum,
8080 + int *src_err_ptr, int *dst_err_ptr);
8081 +
8082 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8083 + int len, __wsum sum,
8084 + int *src_err_ptr, int *dst_err_ptr);
8085 +
8086 /*
8087 * Note: when you get a NULL pointer exception here this means someone
8088 * passed in an incorrect kernel address to one of these functions.
8089 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8090 int *err_ptr)
8091 {
8092 might_sleep();
8093 - return csum_partial_copy_generic((__force void *)src, dst,
8094 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8095 len, sum, err_ptr, NULL);
8096 }
8097
8098 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8099 {
8100 might_sleep();
8101 if (access_ok(VERIFY_WRITE, dst, len))
8102 - return csum_partial_copy_generic(src, (__force void *)dst,
8103 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8104 len, sum, NULL, err_ptr);
8105
8106 if (len)
8107 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8108 index 88b23a4..d2e5f9f 100644
8109 --- a/arch/x86/include/asm/cpufeature.h
8110 +++ b/arch/x86/include/asm/cpufeature.h
8111 @@ -358,7 +358,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8112 ".section .discard,\"aw\",@progbits\n"
8113 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8114 ".previous\n"
8115 - ".section .altinstr_replacement,\"ax\"\n"
8116 + ".section .altinstr_replacement,\"a\"\n"
8117 "3: movb $1,%0\n"
8118 "4:\n"
8119 ".previous\n"
8120 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8121 index 41935fa..3b40db8 100644
8122 --- a/arch/x86/include/asm/desc.h
8123 +++ b/arch/x86/include/asm/desc.h
8124 @@ -4,6 +4,7 @@
8125 #include <asm/desc_defs.h>
8126 #include <asm/ldt.h>
8127 #include <asm/mmu.h>
8128 +#include <asm/pgtable.h>
8129
8130 #include <linux/smp.h>
8131
8132 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8133
8134 desc->type = (info->read_exec_only ^ 1) << 1;
8135 desc->type |= info->contents << 2;
8136 + desc->type |= info->seg_not_present ^ 1;
8137
8138 desc->s = 1;
8139 desc->dpl = 0x3;
8140 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8141 }
8142
8143 extern struct desc_ptr idt_descr;
8144 -extern gate_desc idt_table[];
8145 -
8146 -struct gdt_page {
8147 - struct desc_struct gdt[GDT_ENTRIES];
8148 -} __attribute__((aligned(PAGE_SIZE)));
8149 -
8150 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8151 +extern gate_desc idt_table[256];
8152
8153 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8154 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8155 {
8156 - return per_cpu(gdt_page, cpu).gdt;
8157 + return cpu_gdt_table[cpu];
8158 }
8159
8160 #ifdef CONFIG_X86_64
8161 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8162 unsigned long base, unsigned dpl, unsigned flags,
8163 unsigned short seg)
8164 {
8165 - gate->a = (seg << 16) | (base & 0xffff);
8166 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8167 + gate->gate.offset_low = base;
8168 + gate->gate.seg = seg;
8169 + gate->gate.reserved = 0;
8170 + gate->gate.type = type;
8171 + gate->gate.s = 0;
8172 + gate->gate.dpl = dpl;
8173 + gate->gate.p = 1;
8174 + gate->gate.offset_high = base >> 16;
8175 }
8176
8177 #endif
8178 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8179
8180 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8181 {
8182 + pax_open_kernel();
8183 memcpy(&idt[entry], gate, sizeof(*gate));
8184 + pax_close_kernel();
8185 }
8186
8187 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8188 {
8189 + pax_open_kernel();
8190 memcpy(&ldt[entry], desc, 8);
8191 + pax_close_kernel();
8192 }
8193
8194 static inline void
8195 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8196 default: size = sizeof(*gdt); break;
8197 }
8198
8199 + pax_open_kernel();
8200 memcpy(&gdt[entry], desc, size);
8201 + pax_close_kernel();
8202 }
8203
8204 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8205 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8206
8207 static inline void native_load_tr_desc(void)
8208 {
8209 + pax_open_kernel();
8210 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8211 + pax_close_kernel();
8212 }
8213
8214 static inline void native_load_gdt(const struct desc_ptr *dtr)
8215 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8216 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8217 unsigned int i;
8218
8219 + pax_open_kernel();
8220 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8221 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8222 + pax_close_kernel();
8223 }
8224
8225 #define _LDT_empty(info) \
8226 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8227 desc->limit = (limit >> 16) & 0xf;
8228 }
8229
8230 -static inline void _set_gate(int gate, unsigned type, void *addr,
8231 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8232 unsigned dpl, unsigned ist, unsigned seg)
8233 {
8234 gate_desc s;
8235 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8236 * Pentium F0 0F bugfix can have resulted in the mapped
8237 * IDT being write-protected.
8238 */
8239 -static inline void set_intr_gate(unsigned int n, void *addr)
8240 +static inline void set_intr_gate(unsigned int n, const void *addr)
8241 {
8242 BUG_ON((unsigned)n > 0xFF);
8243 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8244 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8245 /*
8246 * This routine sets up an interrupt gate at directory privilege level 3.
8247 */
8248 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8249 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8250 {
8251 BUG_ON((unsigned)n > 0xFF);
8252 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8253 }
8254
8255 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8256 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8257 {
8258 BUG_ON((unsigned)n > 0xFF);
8259 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8260 }
8261
8262 -static inline void set_trap_gate(unsigned int n, void *addr)
8263 +static inline void set_trap_gate(unsigned int n, const void *addr)
8264 {
8265 BUG_ON((unsigned)n > 0xFF);
8266 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8267 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8268 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8269 {
8270 BUG_ON((unsigned)n > 0xFF);
8271 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8272 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8273 }
8274
8275 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8276 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8277 {
8278 BUG_ON((unsigned)n > 0xFF);
8279 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8280 }
8281
8282 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8283 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8284 {
8285 BUG_ON((unsigned)n > 0xFF);
8286 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8287 }
8288
8289 +#ifdef CONFIG_X86_32
8290 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8291 +{
8292 + struct desc_struct d;
8293 +
8294 + if (likely(limit))
8295 + limit = (limit - 1UL) >> PAGE_SHIFT;
8296 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8297 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8298 +}
8299 +#endif
8300 +
8301 #endif /* _ASM_X86_DESC_H */
8302 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8303 index 278441f..b95a174 100644
8304 --- a/arch/x86/include/asm/desc_defs.h
8305 +++ b/arch/x86/include/asm/desc_defs.h
8306 @@ -31,6 +31,12 @@ struct desc_struct {
8307 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8308 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8309 };
8310 + struct {
8311 + u16 offset_low;
8312 + u16 seg;
8313 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8314 + unsigned offset_high: 16;
8315 + } gate;
8316 };
8317 } __attribute__((packed));
8318
8319 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8320 index 908b969..a1f4eb4 100644
8321 --- a/arch/x86/include/asm/e820.h
8322 +++ b/arch/x86/include/asm/e820.h
8323 @@ -69,7 +69,7 @@ struct e820map {
8324 #define ISA_START_ADDRESS 0xa0000
8325 #define ISA_END_ADDRESS 0x100000
8326
8327 -#define BIOS_BEGIN 0x000a0000
8328 +#define BIOS_BEGIN 0x000c0000
8329 #define BIOS_END 0x00100000
8330
8331 #define BIOS_ROM_BASE 0xffe00000
8332 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8333 index f2ad216..eb24c96 100644
8334 --- a/arch/x86/include/asm/elf.h
8335 +++ b/arch/x86/include/asm/elf.h
8336 @@ -237,7 +237,25 @@ extern int force_personality32;
8337 the loader. We need to make sure that it is out of the way of the program
8338 that it will "exec", and that there is sufficient room for the brk. */
8339
8340 +#ifdef CONFIG_PAX_SEGMEXEC
8341 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8342 +#else
8343 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8344 +#endif
8345 +
8346 +#ifdef CONFIG_PAX_ASLR
8347 +#ifdef CONFIG_X86_32
8348 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8349 +
8350 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8351 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8352 +#else
8353 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8354 +
8355 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8356 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8357 +#endif
8358 +#endif
8359
8360 /* This yields a mask that user programs can use to figure out what
8361 instruction set this CPU supports. This could be done in user space,
8362 @@ -290,9 +308,7 @@ do { \
8363
8364 #define ARCH_DLINFO \
8365 do { \
8366 - if (vdso_enabled) \
8367 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8368 - (unsigned long)current->mm->context.vdso); \
8369 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8370 } while (0)
8371
8372 #define AT_SYSINFO 32
8373 @@ -303,7 +319,7 @@ do { \
8374
8375 #endif /* !CONFIG_X86_32 */
8376
8377 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8378 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8379
8380 #define VDSO_ENTRY \
8381 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8382 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8383 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8384 #define compat_arch_setup_additional_pages syscall32_setup_pages
8385
8386 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8387 -#define arch_randomize_brk arch_randomize_brk
8388 -
8389 #endif /* _ASM_X86_ELF_H */
8390 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8391 index cc70c1c..d96d011 100644
8392 --- a/arch/x86/include/asm/emergency-restart.h
8393 +++ b/arch/x86/include/asm/emergency-restart.h
8394 @@ -15,6 +15,6 @@ enum reboot_type {
8395
8396 extern enum reboot_type reboot_type;
8397
8398 -extern void machine_emergency_restart(void);
8399 +extern void machine_emergency_restart(void) __noreturn;
8400
8401 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8402 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8403 index d09bb03..4ea4194 100644
8404 --- a/arch/x86/include/asm/futex.h
8405 +++ b/arch/x86/include/asm/futex.h
8406 @@ -12,16 +12,18 @@
8407 #include <asm/system.h>
8408
8409 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8410 + typecheck(u32 __user *, uaddr); \
8411 asm volatile("1:\t" insn "\n" \
8412 "2:\t.section .fixup,\"ax\"\n" \
8413 "3:\tmov\t%3, %1\n" \
8414 "\tjmp\t2b\n" \
8415 "\t.previous\n" \
8416 _ASM_EXTABLE(1b, 3b) \
8417 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8418 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8419 : "i" (-EFAULT), "0" (oparg), "1" (0))
8420
8421 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8422 + typecheck(u32 __user *, uaddr); \
8423 asm volatile("1:\tmovl %2, %0\n" \
8424 "\tmovl\t%0, %3\n" \
8425 "\t" insn "\n" \
8426 @@ -34,7 +36,7 @@
8427 _ASM_EXTABLE(1b, 4b) \
8428 _ASM_EXTABLE(2b, 4b) \
8429 : "=&a" (oldval), "=&r" (ret), \
8430 - "+m" (*uaddr), "=&r" (tem) \
8431 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8432 : "r" (oparg), "i" (-EFAULT), "1" (0))
8433
8434 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8435 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8436
8437 switch (op) {
8438 case FUTEX_OP_SET:
8439 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8440 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8441 break;
8442 case FUTEX_OP_ADD:
8443 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8444 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8445 uaddr, oparg);
8446 break;
8447 case FUTEX_OP_OR:
8448 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8449 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8450 return -EFAULT;
8451
8452 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8453 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8454 "2:\t.section .fixup, \"ax\"\n"
8455 "3:\tmov %3, %0\n"
8456 "\tjmp 2b\n"
8457 "\t.previous\n"
8458 _ASM_EXTABLE(1b, 3b)
8459 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8460 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8461 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8462 : "memory"
8463 );
8464 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8465 index 0919905..2cf38d6 100644
8466 --- a/arch/x86/include/asm/hw_irq.h
8467 +++ b/arch/x86/include/asm/hw_irq.h
8468 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8469 extern void enable_IO_APIC(void);
8470
8471 /* Statistics */
8472 -extern atomic_t irq_err_count;
8473 -extern atomic_t irq_mis_count;
8474 +extern atomic_unchecked_t irq_err_count;
8475 +extern atomic_unchecked_t irq_mis_count;
8476
8477 /* EISA */
8478 extern void eisa_set_level_irq(unsigned int irq);
8479 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8480 index c9e09ea..73888df 100644
8481 --- a/arch/x86/include/asm/i387.h
8482 +++ b/arch/x86/include/asm/i387.h
8483 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8484 {
8485 int err;
8486
8487 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8488 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8489 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8490 +#endif
8491 +
8492 /* See comment in fxsave() below. */
8493 #ifdef CONFIG_AS_FXSAVEQ
8494 asm volatile("1: fxrstorq %[fx]\n\t"
8495 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8496 {
8497 int err;
8498
8499 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8500 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8501 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8502 +#endif
8503 +
8504 /*
8505 * Clear the bytes not touched by the fxsave and reserved
8506 * for the SW usage.
8507 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8508 #endif /* CONFIG_X86_64 */
8509
8510 /* We need a safe address that is cheap to find and that is already
8511 - in L1 during context switch. The best choices are unfortunately
8512 - different for UP and SMP */
8513 -#ifdef CONFIG_SMP
8514 -#define safe_address (__per_cpu_offset[0])
8515 -#else
8516 -#define safe_address (kstat_cpu(0).cpustat.user)
8517 -#endif
8518 + in L1 during context switch. */
8519 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8520
8521 /*
8522 * These must be called with preempt disabled
8523 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8524 struct thread_info *me = current_thread_info();
8525 preempt_disable();
8526 if (me->status & TS_USEDFPU)
8527 - __save_init_fpu(me->task);
8528 + __save_init_fpu(current);
8529 else
8530 clts();
8531 }
8532 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8533 index d8e8eef..99f81ae 100644
8534 --- a/arch/x86/include/asm/io.h
8535 +++ b/arch/x86/include/asm/io.h
8536 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8537
8538 #include <linux/vmalloc.h>
8539
8540 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8541 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8542 +{
8543 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8544 +}
8545 +
8546 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8547 +{
8548 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8549 +}
8550 +
8551 /*
8552 * Convert a virtual cached pointer to an uncached pointer
8553 */
8554 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8555 index bba3cf8..06bc8da 100644
8556 --- a/arch/x86/include/asm/irqflags.h
8557 +++ b/arch/x86/include/asm/irqflags.h
8558 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8559 sti; \
8560 sysexit
8561
8562 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8563 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8564 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8565 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8566 +
8567 #else
8568 #define INTERRUPT_RETURN iret
8569 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8570 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8571 index 5478825..839e88c 100644
8572 --- a/arch/x86/include/asm/kprobes.h
8573 +++ b/arch/x86/include/asm/kprobes.h
8574 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8575 #define RELATIVEJUMP_SIZE 5
8576 #define RELATIVECALL_OPCODE 0xe8
8577 #define RELATIVE_ADDR_SIZE 4
8578 -#define MAX_STACK_SIZE 64
8579 -#define MIN_STACK_SIZE(ADDR) \
8580 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8581 - THREAD_SIZE - (unsigned long)(ADDR))) \
8582 - ? (MAX_STACK_SIZE) \
8583 - : (((unsigned long)current_thread_info()) + \
8584 - THREAD_SIZE - (unsigned long)(ADDR)))
8585 +#define MAX_STACK_SIZE 64UL
8586 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8587
8588 #define flush_insn_slot(p) do { } while (0)
8589
8590 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8591 index dd51c83..66cbfac 100644
8592 --- a/arch/x86/include/asm/kvm_host.h
8593 +++ b/arch/x86/include/asm/kvm_host.h
8594 @@ -456,7 +456,7 @@ struct kvm_arch {
8595 unsigned int n_requested_mmu_pages;
8596 unsigned int n_max_mmu_pages;
8597 unsigned int indirect_shadow_pages;
8598 - atomic_t invlpg_counter;
8599 + atomic_unchecked_t invlpg_counter;
8600 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8601 /*
8602 * Hash table of struct kvm_mmu_page.
8603 @@ -636,7 +636,7 @@ struct kvm_x86_ops {
8604 enum x86_intercept_stage stage);
8605
8606 const struct trace_print_flags *exit_reasons_str;
8607 -};
8608 +} __do_const;
8609
8610 struct kvm_arch_async_pf {
8611 u32 token;
8612 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8613 index 9cdae5d..300d20f 100644
8614 --- a/arch/x86/include/asm/local.h
8615 +++ b/arch/x86/include/asm/local.h
8616 @@ -18,26 +18,58 @@ typedef struct {
8617
8618 static inline void local_inc(local_t *l)
8619 {
8620 - asm volatile(_ASM_INC "%0"
8621 + asm volatile(_ASM_INC "%0\n"
8622 +
8623 +#ifdef CONFIG_PAX_REFCOUNT
8624 + "jno 0f\n"
8625 + _ASM_DEC "%0\n"
8626 + "int $4\n0:\n"
8627 + _ASM_EXTABLE(0b, 0b)
8628 +#endif
8629 +
8630 : "+m" (l->a.counter));
8631 }
8632
8633 static inline void local_dec(local_t *l)
8634 {
8635 - asm volatile(_ASM_DEC "%0"
8636 + asm volatile(_ASM_DEC "%0\n"
8637 +
8638 +#ifdef CONFIG_PAX_REFCOUNT
8639 + "jno 0f\n"
8640 + _ASM_INC "%0\n"
8641 + "int $4\n0:\n"
8642 + _ASM_EXTABLE(0b, 0b)
8643 +#endif
8644 +
8645 : "+m" (l->a.counter));
8646 }
8647
8648 static inline void local_add(long i, local_t *l)
8649 {
8650 - asm volatile(_ASM_ADD "%1,%0"
8651 + asm volatile(_ASM_ADD "%1,%0\n"
8652 +
8653 +#ifdef CONFIG_PAX_REFCOUNT
8654 + "jno 0f\n"
8655 + _ASM_SUB "%1,%0\n"
8656 + "int $4\n0:\n"
8657 + _ASM_EXTABLE(0b, 0b)
8658 +#endif
8659 +
8660 : "+m" (l->a.counter)
8661 : "ir" (i));
8662 }
8663
8664 static inline void local_sub(long i, local_t *l)
8665 {
8666 - asm volatile(_ASM_SUB "%1,%0"
8667 + asm volatile(_ASM_SUB "%1,%0\n"
8668 +
8669 +#ifdef CONFIG_PAX_REFCOUNT
8670 + "jno 0f\n"
8671 + _ASM_ADD "%1,%0\n"
8672 + "int $4\n0:\n"
8673 + _ASM_EXTABLE(0b, 0b)
8674 +#endif
8675 +
8676 : "+m" (l->a.counter)
8677 : "ir" (i));
8678 }
8679 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8680 {
8681 unsigned char c;
8682
8683 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8684 + asm volatile(_ASM_SUB "%2,%0\n"
8685 +
8686 +#ifdef CONFIG_PAX_REFCOUNT
8687 + "jno 0f\n"
8688 + _ASM_ADD "%2,%0\n"
8689 + "int $4\n0:\n"
8690 + _ASM_EXTABLE(0b, 0b)
8691 +#endif
8692 +
8693 + "sete %1\n"
8694 : "+m" (l->a.counter), "=qm" (c)
8695 : "ir" (i) : "memory");
8696 return c;
8697 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8698 {
8699 unsigned char c;
8700
8701 - asm volatile(_ASM_DEC "%0; sete %1"
8702 + asm volatile(_ASM_DEC "%0\n"
8703 +
8704 +#ifdef CONFIG_PAX_REFCOUNT
8705 + "jno 0f\n"
8706 + _ASM_INC "%0\n"
8707 + "int $4\n0:\n"
8708 + _ASM_EXTABLE(0b, 0b)
8709 +#endif
8710 +
8711 + "sete %1\n"
8712 : "+m" (l->a.counter), "=qm" (c)
8713 : : "memory");
8714 return c != 0;
8715 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8716 {
8717 unsigned char c;
8718
8719 - asm volatile(_ASM_INC "%0; sete %1"
8720 + asm volatile(_ASM_INC "%0\n"
8721 +
8722 +#ifdef CONFIG_PAX_REFCOUNT
8723 + "jno 0f\n"
8724 + _ASM_DEC "%0\n"
8725 + "int $4\n0:\n"
8726 + _ASM_EXTABLE(0b, 0b)
8727 +#endif
8728 +
8729 + "sete %1\n"
8730 : "+m" (l->a.counter), "=qm" (c)
8731 : : "memory");
8732 return c != 0;
8733 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8734 {
8735 unsigned char c;
8736
8737 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8738 + asm volatile(_ASM_ADD "%2,%0\n"
8739 +
8740 +#ifdef CONFIG_PAX_REFCOUNT
8741 + "jno 0f\n"
8742 + _ASM_SUB "%2,%0\n"
8743 + "int $4\n0:\n"
8744 + _ASM_EXTABLE(0b, 0b)
8745 +#endif
8746 +
8747 + "sets %1\n"
8748 : "+m" (l->a.counter), "=qm" (c)
8749 : "ir" (i) : "memory");
8750 return c;
8751 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8752 #endif
8753 /* Modern 486+ processor */
8754 __i = i;
8755 - asm volatile(_ASM_XADD "%0, %1;"
8756 + asm volatile(_ASM_XADD "%0, %1\n"
8757 +
8758 +#ifdef CONFIG_PAX_REFCOUNT
8759 + "jno 0f\n"
8760 + _ASM_MOV "%0,%1\n"
8761 + "int $4\n0:\n"
8762 + _ASM_EXTABLE(0b, 0b)
8763 +#endif
8764 +
8765 : "+r" (i), "+m" (l->a.counter)
8766 : : "memory");
8767 return i + __i;
8768 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8769 index 593e51d..fa69c9a 100644
8770 --- a/arch/x86/include/asm/mman.h
8771 +++ b/arch/x86/include/asm/mman.h
8772 @@ -5,4 +5,14 @@
8773
8774 #include <asm-generic/mman.h>
8775
8776 +#ifdef __KERNEL__
8777 +#ifndef __ASSEMBLY__
8778 +#ifdef CONFIG_X86_32
8779 +#define arch_mmap_check i386_mmap_check
8780 +int i386_mmap_check(unsigned long addr, unsigned long len,
8781 + unsigned long flags);
8782 +#endif
8783 +#endif
8784 +#endif
8785 +
8786 #endif /* _ASM_X86_MMAN_H */
8787 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8788 index 5f55e69..e20bfb1 100644
8789 --- a/arch/x86/include/asm/mmu.h
8790 +++ b/arch/x86/include/asm/mmu.h
8791 @@ -9,7 +9,7 @@
8792 * we put the segment information here.
8793 */
8794 typedef struct {
8795 - void *ldt;
8796 + struct desc_struct *ldt;
8797 int size;
8798
8799 #ifdef CONFIG_X86_64
8800 @@ -18,7 +18,19 @@ typedef struct {
8801 #endif
8802
8803 struct mutex lock;
8804 - void *vdso;
8805 + unsigned long vdso;
8806 +
8807 +#ifdef CONFIG_X86_32
8808 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8809 + unsigned long user_cs_base;
8810 + unsigned long user_cs_limit;
8811 +
8812 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8813 + cpumask_t cpu_user_cs_mask;
8814 +#endif
8815 +
8816 +#endif
8817 +#endif
8818 } mm_context_t;
8819
8820 #ifdef CONFIG_SMP
8821 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8822 index 6902152..399f3a2 100644
8823 --- a/arch/x86/include/asm/mmu_context.h
8824 +++ b/arch/x86/include/asm/mmu_context.h
8825 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8826
8827 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8828 {
8829 +
8830 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8831 + unsigned int i;
8832 + pgd_t *pgd;
8833 +
8834 + pax_open_kernel();
8835 + pgd = get_cpu_pgd(smp_processor_id());
8836 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8837 + set_pgd_batched(pgd+i, native_make_pgd(0));
8838 + pax_close_kernel();
8839 +#endif
8840 +
8841 #ifdef CONFIG_SMP
8842 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8843 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8844 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8845 struct task_struct *tsk)
8846 {
8847 unsigned cpu = smp_processor_id();
8848 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8849 + int tlbstate = TLBSTATE_OK;
8850 +#endif
8851
8852 if (likely(prev != next)) {
8853 #ifdef CONFIG_SMP
8854 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8855 + tlbstate = percpu_read(cpu_tlbstate.state);
8856 +#endif
8857 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8858 percpu_write(cpu_tlbstate.active_mm, next);
8859 #endif
8860 cpumask_set_cpu(cpu, mm_cpumask(next));
8861
8862 /* Re-load page tables */
8863 +#ifdef CONFIG_PAX_PER_CPU_PGD
8864 + pax_open_kernel();
8865 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8866 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8867 + pax_close_kernel();
8868 + load_cr3(get_cpu_pgd(cpu));
8869 +#else
8870 load_cr3(next->pgd);
8871 +#endif
8872
8873 /* stop flush ipis for the previous mm */
8874 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8875 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8876 */
8877 if (unlikely(prev->context.ldt != next->context.ldt))
8878 load_LDT_nolock(&next->context);
8879 - }
8880 +
8881 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8882 + if (!(__supported_pte_mask & _PAGE_NX)) {
8883 + smp_mb__before_clear_bit();
8884 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8885 + smp_mb__after_clear_bit();
8886 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8887 + }
8888 +#endif
8889 +
8890 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8891 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8892 + prev->context.user_cs_limit != next->context.user_cs_limit))
8893 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8894 #ifdef CONFIG_SMP
8895 + else if (unlikely(tlbstate != TLBSTATE_OK))
8896 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8897 +#endif
8898 +#endif
8899 +
8900 + }
8901 else {
8902 +
8903 +#ifdef CONFIG_PAX_PER_CPU_PGD
8904 + pax_open_kernel();
8905 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8906 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8907 + pax_close_kernel();
8908 + load_cr3(get_cpu_pgd(cpu));
8909 +#endif
8910 +
8911 +#ifdef CONFIG_SMP
8912 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8913 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8914
8915 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8916 * tlb flush IPI delivery. We must reload CR3
8917 * to make sure to use no freed page tables.
8918 */
8919 +
8920 +#ifndef CONFIG_PAX_PER_CPU_PGD
8921 load_cr3(next->pgd);
8922 +#endif
8923 +
8924 load_LDT_nolock(&next->context);
8925 +
8926 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8927 + if (!(__supported_pte_mask & _PAGE_NX))
8928 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8929 +#endif
8930 +
8931 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8932 +#ifdef CONFIG_PAX_PAGEEXEC
8933 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
8934 +#endif
8935 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8936 +#endif
8937 +
8938 }
8939 +#endif
8940 }
8941 -#endif
8942 }
8943
8944 #define activate_mm(prev, next) \
8945 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
8946 index 9eae775..c914fea 100644
8947 --- a/arch/x86/include/asm/module.h
8948 +++ b/arch/x86/include/asm/module.h
8949 @@ -5,6 +5,7 @@
8950
8951 #ifdef CONFIG_X86_64
8952 /* X86_64 does not define MODULE_PROC_FAMILY */
8953 +#define MODULE_PROC_FAMILY ""
8954 #elif defined CONFIG_M386
8955 #define MODULE_PROC_FAMILY "386 "
8956 #elif defined CONFIG_M486
8957 @@ -59,8 +60,20 @@
8958 #error unknown processor family
8959 #endif
8960
8961 -#ifdef CONFIG_X86_32
8962 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
8963 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8964 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
8965 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
8966 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
8967 +#else
8968 +#define MODULE_PAX_KERNEXEC ""
8969 #endif
8970
8971 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8972 +#define MODULE_PAX_UDEREF "UDEREF "
8973 +#else
8974 +#define MODULE_PAX_UDEREF ""
8975 +#endif
8976 +
8977 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
8978 +
8979 #endif /* _ASM_X86_MODULE_H */
8980 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
8981 index 7639dbf..e08a58c 100644
8982 --- a/arch/x86/include/asm/page_64_types.h
8983 +++ b/arch/x86/include/asm/page_64_types.h
8984 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8985
8986 /* duplicated to the one in bootmem.h */
8987 extern unsigned long max_pfn;
8988 -extern unsigned long phys_base;
8989 +extern const unsigned long phys_base;
8990
8991 extern unsigned long __phys_addr(unsigned long);
8992 #define __phys_reloc_hide(x) (x)
8993 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
8994 index a7d2db9..edb023e 100644
8995 --- a/arch/x86/include/asm/paravirt.h
8996 +++ b/arch/x86/include/asm/paravirt.h
8997 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
8998 val);
8999 }
9000
9001 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9002 +{
9003 + pgdval_t val = native_pgd_val(pgd);
9004 +
9005 + if (sizeof(pgdval_t) > sizeof(long))
9006 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9007 + val, (u64)val >> 32);
9008 + else
9009 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9010 + val);
9011 +}
9012 +
9013 static inline void pgd_clear(pgd_t *pgdp)
9014 {
9015 set_pgd(pgdp, __pgd(0));
9016 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9017 pv_mmu_ops.set_fixmap(idx, phys, flags);
9018 }
9019
9020 +#ifdef CONFIG_PAX_KERNEXEC
9021 +static inline unsigned long pax_open_kernel(void)
9022 +{
9023 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9024 +}
9025 +
9026 +static inline unsigned long pax_close_kernel(void)
9027 +{
9028 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9029 +}
9030 +#else
9031 +static inline unsigned long pax_open_kernel(void) { return 0; }
9032 +static inline unsigned long pax_close_kernel(void) { return 0; }
9033 +#endif
9034 +
9035 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9036
9037 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9038 @@ -964,7 +991,7 @@ extern void default_banner(void);
9039
9040 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9041 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9042 -#define PARA_INDIRECT(addr) *%cs:addr
9043 +#define PARA_INDIRECT(addr) *%ss:addr
9044 #endif
9045
9046 #define INTERRUPT_RETURN \
9047 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9048 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9049 CLBR_NONE, \
9050 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9051 +
9052 +#define GET_CR0_INTO_RDI \
9053 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9054 + mov %rax,%rdi
9055 +
9056 +#define SET_RDI_INTO_CR0 \
9057 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9058 +
9059 +#define GET_CR3_INTO_RDI \
9060 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9061 + mov %rax,%rdi
9062 +
9063 +#define SET_RDI_INTO_CR3 \
9064 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9065 +
9066 #endif /* CONFIG_X86_32 */
9067
9068 #endif /* __ASSEMBLY__ */
9069 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9070 index 8e8b9a4..f07d725 100644
9071 --- a/arch/x86/include/asm/paravirt_types.h
9072 +++ b/arch/x86/include/asm/paravirt_types.h
9073 @@ -84,20 +84,20 @@ struct pv_init_ops {
9074 */
9075 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9076 unsigned long addr, unsigned len);
9077 -};
9078 +} __no_const;
9079
9080
9081 struct pv_lazy_ops {
9082 /* Set deferred update mode, used for batching operations. */
9083 void (*enter)(void);
9084 void (*leave)(void);
9085 -};
9086 +} __no_const;
9087
9088 struct pv_time_ops {
9089 unsigned long long (*sched_clock)(void);
9090 unsigned long long (*steal_clock)(int cpu);
9091 unsigned long (*get_tsc_khz)(void);
9092 -};
9093 +} __no_const;
9094
9095 struct pv_cpu_ops {
9096 /* hooks for various privileged instructions */
9097 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9098
9099 void (*start_context_switch)(struct task_struct *prev);
9100 void (*end_context_switch)(struct task_struct *next);
9101 -};
9102 +} __no_const;
9103
9104 struct pv_irq_ops {
9105 /*
9106 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9107 unsigned long start_eip,
9108 unsigned long start_esp);
9109 #endif
9110 -};
9111 +} __no_const;
9112
9113 struct pv_mmu_ops {
9114 unsigned long (*read_cr2)(void);
9115 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9116 struct paravirt_callee_save make_pud;
9117
9118 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9119 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9120 #endif /* PAGETABLE_LEVELS == 4 */
9121 #endif /* PAGETABLE_LEVELS >= 3 */
9122
9123 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9124 an mfn. We can tell which is which from the index. */
9125 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9126 phys_addr_t phys, pgprot_t flags);
9127 +
9128 +#ifdef CONFIG_PAX_KERNEXEC
9129 + unsigned long (*pax_open_kernel)(void);
9130 + unsigned long (*pax_close_kernel)(void);
9131 +#endif
9132 +
9133 };
9134
9135 struct arch_spinlock;
9136 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9137 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9138 int (*spin_trylock)(struct arch_spinlock *lock);
9139 void (*spin_unlock)(struct arch_spinlock *lock);
9140 -};
9141 +} __no_const;
9142
9143 /* This contains all the paravirt structures: we get a convenient
9144 * number for each function using the offset which we use to indicate
9145 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9146 index b4389a4..b7ff22c 100644
9147 --- a/arch/x86/include/asm/pgalloc.h
9148 +++ b/arch/x86/include/asm/pgalloc.h
9149 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9150 pmd_t *pmd, pte_t *pte)
9151 {
9152 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9153 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9154 +}
9155 +
9156 +static inline void pmd_populate_user(struct mm_struct *mm,
9157 + pmd_t *pmd, pte_t *pte)
9158 +{
9159 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9160 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9161 }
9162
9163 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9164 index 98391db..8f6984e 100644
9165 --- a/arch/x86/include/asm/pgtable-2level.h
9166 +++ b/arch/x86/include/asm/pgtable-2level.h
9167 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9168
9169 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9170 {
9171 + pax_open_kernel();
9172 *pmdp = pmd;
9173 + pax_close_kernel();
9174 }
9175
9176 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9177 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9178 index effff47..f9e4035 100644
9179 --- a/arch/x86/include/asm/pgtable-3level.h
9180 +++ b/arch/x86/include/asm/pgtable-3level.h
9181 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9182
9183 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9184 {
9185 + pax_open_kernel();
9186 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9187 + pax_close_kernel();
9188 }
9189
9190 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9191 {
9192 + pax_open_kernel();
9193 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9194 + pax_close_kernel();
9195 }
9196
9197 /*
9198 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9199 index 18601c8..3d716d1 100644
9200 --- a/arch/x86/include/asm/pgtable.h
9201 +++ b/arch/x86/include/asm/pgtable.h
9202 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9203
9204 #ifndef __PAGETABLE_PUD_FOLDED
9205 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9206 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9207 #define pgd_clear(pgd) native_pgd_clear(pgd)
9208 #endif
9209
9210 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9211
9212 #define arch_end_context_switch(prev) do {} while(0)
9213
9214 +#define pax_open_kernel() native_pax_open_kernel()
9215 +#define pax_close_kernel() native_pax_close_kernel()
9216 #endif /* CONFIG_PARAVIRT */
9217
9218 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9219 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9220 +
9221 +#ifdef CONFIG_PAX_KERNEXEC
9222 +static inline unsigned long native_pax_open_kernel(void)
9223 +{
9224 + unsigned long cr0;
9225 +
9226 + preempt_disable();
9227 + barrier();
9228 + cr0 = read_cr0() ^ X86_CR0_WP;
9229 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9230 + write_cr0(cr0);
9231 + return cr0 ^ X86_CR0_WP;
9232 +}
9233 +
9234 +static inline unsigned long native_pax_close_kernel(void)
9235 +{
9236 + unsigned long cr0;
9237 +
9238 + cr0 = read_cr0() ^ X86_CR0_WP;
9239 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9240 + write_cr0(cr0);
9241 + barrier();
9242 + preempt_enable_no_resched();
9243 + return cr0 ^ X86_CR0_WP;
9244 +}
9245 +#else
9246 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9247 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9248 +#endif
9249 +
9250 /*
9251 * The following only work if pte_present() is true.
9252 * Undefined behaviour if not..
9253 */
9254 +static inline int pte_user(pte_t pte)
9255 +{
9256 + return pte_val(pte) & _PAGE_USER;
9257 +}
9258 +
9259 static inline int pte_dirty(pte_t pte)
9260 {
9261 return pte_flags(pte) & _PAGE_DIRTY;
9262 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9263 return pte_clear_flags(pte, _PAGE_RW);
9264 }
9265
9266 +static inline pte_t pte_mkread(pte_t pte)
9267 +{
9268 + return __pte(pte_val(pte) | _PAGE_USER);
9269 +}
9270 +
9271 static inline pte_t pte_mkexec(pte_t pte)
9272 {
9273 - return pte_clear_flags(pte, _PAGE_NX);
9274 +#ifdef CONFIG_X86_PAE
9275 + if (__supported_pte_mask & _PAGE_NX)
9276 + return pte_clear_flags(pte, _PAGE_NX);
9277 + else
9278 +#endif
9279 + return pte_set_flags(pte, _PAGE_USER);
9280 +}
9281 +
9282 +static inline pte_t pte_exprotect(pte_t pte)
9283 +{
9284 +#ifdef CONFIG_X86_PAE
9285 + if (__supported_pte_mask & _PAGE_NX)
9286 + return pte_set_flags(pte, _PAGE_NX);
9287 + else
9288 +#endif
9289 + return pte_clear_flags(pte, _PAGE_USER);
9290 }
9291
9292 static inline pte_t pte_mkdirty(pte_t pte)
9293 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9294 #endif
9295
9296 #ifndef __ASSEMBLY__
9297 +
9298 +#ifdef CONFIG_PAX_PER_CPU_PGD
9299 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9300 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9301 +{
9302 + return cpu_pgd[cpu];
9303 +}
9304 +#endif
9305 +
9306 #include <linux/mm_types.h>
9307
9308 static inline int pte_none(pte_t pte)
9309 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9310
9311 static inline int pgd_bad(pgd_t pgd)
9312 {
9313 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9314 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9315 }
9316
9317 static inline int pgd_none(pgd_t pgd)
9318 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9319 * pgd_offset() returns a (pgd_t *)
9320 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9321 */
9322 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9323 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9324 +
9325 +#ifdef CONFIG_PAX_PER_CPU_PGD
9326 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9327 +#endif
9328 +
9329 /*
9330 * a shortcut which implies the use of the kernel's pgd, instead
9331 * of a process's
9332 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9333 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9334 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9335
9336 +#ifdef CONFIG_X86_32
9337 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9338 +#else
9339 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9340 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9341 +
9342 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9343 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9344 +#else
9345 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9346 +#endif
9347 +
9348 +#endif
9349 +
9350 #ifndef __ASSEMBLY__
9351
9352 extern int direct_gbpages;
9353 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9354 * dst and src can be on the same page, but the range must not overlap,
9355 * and must not cross a page boundary.
9356 */
9357 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9358 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9359 {
9360 - memcpy(dst, src, count * sizeof(pgd_t));
9361 + pax_open_kernel();
9362 + while (count--)
9363 + *dst++ = *src++;
9364 + pax_close_kernel();
9365 }
9366
9367 +#ifdef CONFIG_PAX_PER_CPU_PGD
9368 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9369 +#endif
9370 +
9371 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9372 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9373 +#else
9374 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9375 +#endif
9376
9377 #include <asm-generic/pgtable.h>
9378 #endif /* __ASSEMBLY__ */
9379 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9380 index 0c92113..34a77c6 100644
9381 --- a/arch/x86/include/asm/pgtable_32.h
9382 +++ b/arch/x86/include/asm/pgtable_32.h
9383 @@ -25,9 +25,6 @@
9384 struct mm_struct;
9385 struct vm_area_struct;
9386
9387 -extern pgd_t swapper_pg_dir[1024];
9388 -extern pgd_t initial_page_table[1024];
9389 -
9390 static inline void pgtable_cache_init(void) { }
9391 static inline void check_pgt_cache(void) { }
9392 void paging_init(void);
9393 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9394 # include <asm/pgtable-2level.h>
9395 #endif
9396
9397 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9398 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9399 +#ifdef CONFIG_X86_PAE
9400 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9401 +#endif
9402 +
9403 #if defined(CONFIG_HIGHPTE)
9404 #define pte_offset_map(dir, address) \
9405 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9406 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9407 /* Clear a kernel PTE and flush it from the TLB */
9408 #define kpte_clear_flush(ptep, vaddr) \
9409 do { \
9410 + pax_open_kernel(); \
9411 pte_clear(&init_mm, (vaddr), (ptep)); \
9412 + pax_close_kernel(); \
9413 __flush_tlb_one((vaddr)); \
9414 } while (0)
9415
9416 @@ -74,6 +79,9 @@ do { \
9417
9418 #endif /* !__ASSEMBLY__ */
9419
9420 +#define HAVE_ARCH_UNMAPPED_AREA
9421 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9422 +
9423 /*
9424 * kern_addr_valid() is (1) for FLATMEM and (0) for
9425 * SPARSEMEM and DISCONTIGMEM
9426 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9427 index ed5903b..c7fe163 100644
9428 --- a/arch/x86/include/asm/pgtable_32_types.h
9429 +++ b/arch/x86/include/asm/pgtable_32_types.h
9430 @@ -8,7 +8,7 @@
9431 */
9432 #ifdef CONFIG_X86_PAE
9433 # include <asm/pgtable-3level_types.h>
9434 -# define PMD_SIZE (1UL << PMD_SHIFT)
9435 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9436 # define PMD_MASK (~(PMD_SIZE - 1))
9437 #else
9438 # include <asm/pgtable-2level_types.h>
9439 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9440 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9441 #endif
9442
9443 +#ifdef CONFIG_PAX_KERNEXEC
9444 +#ifndef __ASSEMBLY__
9445 +extern unsigned char MODULES_EXEC_VADDR[];
9446 +extern unsigned char MODULES_EXEC_END[];
9447 +#endif
9448 +#include <asm/boot.h>
9449 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9450 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9451 +#else
9452 +#define ktla_ktva(addr) (addr)
9453 +#define ktva_ktla(addr) (addr)
9454 +#endif
9455 +
9456 #define MODULES_VADDR VMALLOC_START
9457 #define MODULES_END VMALLOC_END
9458 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9459 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9460 index 975f709..107976d 100644
9461 --- a/arch/x86/include/asm/pgtable_64.h
9462 +++ b/arch/x86/include/asm/pgtable_64.h
9463 @@ -16,10 +16,14 @@
9464
9465 extern pud_t level3_kernel_pgt[512];
9466 extern pud_t level3_ident_pgt[512];
9467 +extern pud_t level3_vmalloc_start_pgt[512];
9468 +extern pud_t level3_vmalloc_end_pgt[512];
9469 +extern pud_t level3_vmemmap_pgt[512];
9470 +extern pud_t level2_vmemmap_pgt[512];
9471 extern pmd_t level2_kernel_pgt[512];
9472 extern pmd_t level2_fixmap_pgt[512];
9473 -extern pmd_t level2_ident_pgt[512];
9474 -extern pgd_t init_level4_pgt[];
9475 +extern pmd_t level2_ident_pgt[512*2];
9476 +extern pgd_t init_level4_pgt[512];
9477
9478 #define swapper_pg_dir init_level4_pgt
9479
9480 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9481
9482 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9483 {
9484 + pax_open_kernel();
9485 *pmdp = pmd;
9486 + pax_close_kernel();
9487 }
9488
9489 static inline void native_pmd_clear(pmd_t *pmd)
9490 @@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
9491
9492 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9493 {
9494 + pax_open_kernel();
9495 + *pgdp = pgd;
9496 + pax_close_kernel();
9497 +}
9498 +
9499 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9500 +{
9501 *pgdp = pgd;
9502 }
9503
9504 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9505 index 766ea16..5b96cb3 100644
9506 --- a/arch/x86/include/asm/pgtable_64_types.h
9507 +++ b/arch/x86/include/asm/pgtable_64_types.h
9508 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9509 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9510 #define MODULES_END _AC(0xffffffffff000000, UL)
9511 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9512 +#define MODULES_EXEC_VADDR MODULES_VADDR
9513 +#define MODULES_EXEC_END MODULES_END
9514 +
9515 +#define ktla_ktva(addr) (addr)
9516 +#define ktva_ktla(addr) (addr)
9517
9518 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9519 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9520 index 013286a..8b42f4f 100644
9521 --- a/arch/x86/include/asm/pgtable_types.h
9522 +++ b/arch/x86/include/asm/pgtable_types.h
9523 @@ -16,13 +16,12 @@
9524 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9525 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9526 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9527 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9528 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9529 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9530 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9531 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9532 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9533 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9534 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9535 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9536 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9537 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9538
9539 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9540 @@ -40,7 +39,6 @@
9541 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9542 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9543 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9544 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9545 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9546 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9547 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9548 @@ -57,8 +55,10 @@
9549
9550 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9551 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9552 -#else
9553 +#elif defined(CONFIG_KMEMCHECK)
9554 #define _PAGE_NX (_AT(pteval_t, 0))
9555 +#else
9556 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9557 #endif
9558
9559 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9560 @@ -96,6 +96,9 @@
9561 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9562 _PAGE_ACCESSED)
9563
9564 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9565 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9566 +
9567 #define __PAGE_KERNEL_EXEC \
9568 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9569 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9570 @@ -106,7 +109,7 @@
9571 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9572 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9573 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9574 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9575 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9576 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9577 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9578 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9579 @@ -168,8 +171,8 @@
9580 * bits are combined, this will alow user to access the high address mapped
9581 * VDSO in the presence of CONFIG_COMPAT_VDSO
9582 */
9583 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9584 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9585 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9586 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9587 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9588 #endif
9589
9590 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9591 {
9592 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9593 }
9594 +#endif
9595
9596 +#if PAGETABLE_LEVELS == 3
9597 +#include <asm-generic/pgtable-nopud.h>
9598 +#endif
9599 +
9600 +#if PAGETABLE_LEVELS == 2
9601 +#include <asm-generic/pgtable-nopmd.h>
9602 +#endif
9603 +
9604 +#ifndef __ASSEMBLY__
9605 #if PAGETABLE_LEVELS > 3
9606 typedef struct { pudval_t pud; } pud_t;
9607
9608 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9609 return pud.pud;
9610 }
9611 #else
9612 -#include <asm-generic/pgtable-nopud.h>
9613 -
9614 static inline pudval_t native_pud_val(pud_t pud)
9615 {
9616 return native_pgd_val(pud.pgd);
9617 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9618 return pmd.pmd;
9619 }
9620 #else
9621 -#include <asm-generic/pgtable-nopmd.h>
9622 -
9623 static inline pmdval_t native_pmd_val(pmd_t pmd)
9624 {
9625 return native_pgd_val(pmd.pud.pgd);
9626 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9627
9628 extern pteval_t __supported_pte_mask;
9629 extern void set_nx(void);
9630 -extern int nx_enabled;
9631
9632 #define pgprot_writecombine pgprot_writecombine
9633 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9634 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9635 index 0d1171c..36571a9 100644
9636 --- a/arch/x86/include/asm/processor.h
9637 +++ b/arch/x86/include/asm/processor.h
9638 @@ -266,7 +266,7 @@ struct tss_struct {
9639
9640 } ____cacheline_aligned;
9641
9642 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9643 +extern struct tss_struct init_tss[NR_CPUS];
9644
9645 /*
9646 * Save the original ist values for checking stack pointers during debugging
9647 @@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(const void *x)
9648 */
9649 #define TASK_SIZE PAGE_OFFSET
9650 #define TASK_SIZE_MAX TASK_SIZE
9651 +
9652 +#ifdef CONFIG_PAX_SEGMEXEC
9653 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9654 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9655 +#else
9656 #define STACK_TOP TASK_SIZE
9657 -#define STACK_TOP_MAX STACK_TOP
9658 +#endif
9659 +
9660 +#define STACK_TOP_MAX TASK_SIZE
9661
9662 #define INIT_THREAD { \
9663 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9664 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9665 .vm86_info = NULL, \
9666 .sysenter_cs = __KERNEL_CS, \
9667 .io_bitmap_ptr = NULL, \
9668 @@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(const void *x)
9669 */
9670 #define INIT_TSS { \
9671 .x86_tss = { \
9672 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9673 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9674 .ss0 = __KERNEL_DS, \
9675 .ss1 = __KERNEL_CS, \
9676 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9677 @@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(const void *x)
9678 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9679
9680 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9681 -#define KSTK_TOP(info) \
9682 -({ \
9683 - unsigned long *__ptr = (unsigned long *)(info); \
9684 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9685 -})
9686 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9687
9688 /*
9689 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9690 @@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9691 #define task_pt_regs(task) \
9692 ({ \
9693 struct pt_regs *__regs__; \
9694 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9695 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9696 __regs__ - 1; \
9697 })
9698
9699 @@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9700 /*
9701 * User space process size. 47bits minus one guard page.
9702 */
9703 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9704 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9705
9706 /* This decides where the kernel will search for a free chunk of vm
9707 * space during mmap's.
9708 */
9709 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9710 - 0xc0000000 : 0xFFFFe000)
9711 + 0xc0000000 : 0xFFFFf000)
9712
9713 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9714 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9715 @@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9716 #define STACK_TOP_MAX TASK_SIZE_MAX
9717
9718 #define INIT_THREAD { \
9719 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9720 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9721 }
9722
9723 #define INIT_TSS { \
9724 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9725 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9726 }
9727
9728 /*
9729 @@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9730 */
9731 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9732
9733 +#ifdef CONFIG_PAX_SEGMEXEC
9734 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9735 +#endif
9736 +
9737 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9738
9739 /* Get/set a process' ability to use the timestamp counter instruction */
9740 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9741 index 3566454..4bdfb8c 100644
9742 --- a/arch/x86/include/asm/ptrace.h
9743 +++ b/arch/x86/include/asm/ptrace.h
9744 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9745 }
9746
9747 /*
9748 - * user_mode_vm(regs) determines whether a register set came from user mode.
9749 + * user_mode(regs) determines whether a register set came from user mode.
9750 * This is true if V8086 mode was enabled OR if the register set was from
9751 * protected mode with RPL-3 CS value. This tricky test checks that with
9752 * one comparison. Many places in the kernel can bypass this full check
9753 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9754 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9755 + * be used.
9756 */
9757 -static inline int user_mode(struct pt_regs *regs)
9758 +static inline int user_mode_novm(struct pt_regs *regs)
9759 {
9760 #ifdef CONFIG_X86_32
9761 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9762 #else
9763 - return !!(regs->cs & 3);
9764 + return !!(regs->cs & SEGMENT_RPL_MASK);
9765 #endif
9766 }
9767
9768 -static inline int user_mode_vm(struct pt_regs *regs)
9769 +static inline int user_mode(struct pt_regs *regs)
9770 {
9771 #ifdef CONFIG_X86_32
9772 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9773 USER_RPL;
9774 #else
9775 - return user_mode(regs);
9776 + return user_mode_novm(regs);
9777 #endif
9778 }
9779
9780 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9781 #ifdef CONFIG_X86_64
9782 static inline bool user_64bit_mode(struct pt_regs *regs)
9783 {
9784 + unsigned long cs = regs->cs & 0xffff;
9785 #ifndef CONFIG_PARAVIRT
9786 /*
9787 * On non-paravirt systems, this is the only long mode CPL 3
9788 * selector. We do not allow long mode selectors in the LDT.
9789 */
9790 - return regs->cs == __USER_CS;
9791 + return cs == __USER_CS;
9792 #else
9793 /* Headers are too twisted for this to go in paravirt.h. */
9794 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9795 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9796 #endif
9797 }
9798 #endif
9799 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9800 index 3250e3d..20db631 100644
9801 --- a/arch/x86/include/asm/reboot.h
9802 +++ b/arch/x86/include/asm/reboot.h
9803 @@ -6,19 +6,19 @@
9804 struct pt_regs;
9805
9806 struct machine_ops {
9807 - void (*restart)(char *cmd);
9808 - void (*halt)(void);
9809 - void (*power_off)(void);
9810 + void (* __noreturn restart)(char *cmd);
9811 + void (* __noreturn halt)(void);
9812 + void (* __noreturn power_off)(void);
9813 void (*shutdown)(void);
9814 void (*crash_shutdown)(struct pt_regs *);
9815 - void (*emergency_restart)(void);
9816 -};
9817 + void (* __noreturn emergency_restart)(void);
9818 +} __no_const;
9819
9820 extern struct machine_ops machine_ops;
9821
9822 void native_machine_crash_shutdown(struct pt_regs *regs);
9823 void native_machine_shutdown(void);
9824 -void machine_real_restart(unsigned int type);
9825 +void machine_real_restart(unsigned int type) __noreturn;
9826 /* These must match dispatch_table in reboot_32.S */
9827 #define MRR_BIOS 0
9828 #define MRR_APM 1
9829 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9830 index df4cd32..27ae072 100644
9831 --- a/arch/x86/include/asm/rwsem.h
9832 +++ b/arch/x86/include/asm/rwsem.h
9833 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9834 {
9835 asm volatile("# beginning down_read\n\t"
9836 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9837 +
9838 +#ifdef CONFIG_PAX_REFCOUNT
9839 + "jno 0f\n"
9840 + LOCK_PREFIX _ASM_DEC "(%1)\n"
9841 + "int $4\n0:\n"
9842 + _ASM_EXTABLE(0b, 0b)
9843 +#endif
9844 +
9845 /* adds 0x00000001 */
9846 " jns 1f\n"
9847 " call call_rwsem_down_read_failed\n"
9848 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9849 "1:\n\t"
9850 " mov %1,%2\n\t"
9851 " add %3,%2\n\t"
9852 +
9853 +#ifdef CONFIG_PAX_REFCOUNT
9854 + "jno 0f\n"
9855 + "sub %3,%2\n"
9856 + "int $4\n0:\n"
9857 + _ASM_EXTABLE(0b, 0b)
9858 +#endif
9859 +
9860 " jle 2f\n\t"
9861 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9862 " jnz 1b\n\t"
9863 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9864 long tmp;
9865 asm volatile("# beginning down_write\n\t"
9866 LOCK_PREFIX " xadd %1,(%2)\n\t"
9867 +
9868 +#ifdef CONFIG_PAX_REFCOUNT
9869 + "jno 0f\n"
9870 + "mov %1,(%2)\n"
9871 + "int $4\n0:\n"
9872 + _ASM_EXTABLE(0b, 0b)
9873 +#endif
9874 +
9875 /* adds 0xffff0001, returns the old value */
9876 " test %1,%1\n\t"
9877 /* was the count 0 before? */
9878 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9879 long tmp;
9880 asm volatile("# beginning __up_read\n\t"
9881 LOCK_PREFIX " xadd %1,(%2)\n\t"
9882 +
9883 +#ifdef CONFIG_PAX_REFCOUNT
9884 + "jno 0f\n"
9885 + "mov %1,(%2)\n"
9886 + "int $4\n0:\n"
9887 + _ASM_EXTABLE(0b, 0b)
9888 +#endif
9889 +
9890 /* subtracts 1, returns the old value */
9891 " jns 1f\n\t"
9892 " call call_rwsem_wake\n" /* expects old value in %edx */
9893 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9894 long tmp;
9895 asm volatile("# beginning __up_write\n\t"
9896 LOCK_PREFIX " xadd %1,(%2)\n\t"
9897 +
9898 +#ifdef CONFIG_PAX_REFCOUNT
9899 + "jno 0f\n"
9900 + "mov %1,(%2)\n"
9901 + "int $4\n0:\n"
9902 + _ASM_EXTABLE(0b, 0b)
9903 +#endif
9904 +
9905 /* subtracts 0xffff0001, returns the old value */
9906 " jns 1f\n\t"
9907 " call call_rwsem_wake\n" /* expects old value in %edx */
9908 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9909 {
9910 asm volatile("# beginning __downgrade_write\n\t"
9911 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9912 +
9913 +#ifdef CONFIG_PAX_REFCOUNT
9914 + "jno 0f\n"
9915 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9916 + "int $4\n0:\n"
9917 + _ASM_EXTABLE(0b, 0b)
9918 +#endif
9919 +
9920 /*
9921 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9922 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9923 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9924 */
9925 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
9926 {
9927 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9928 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9929 +
9930 +#ifdef CONFIG_PAX_REFCOUNT
9931 + "jno 0f\n"
9932 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9933 + "int $4\n0:\n"
9934 + _ASM_EXTABLE(0b, 0b)
9935 +#endif
9936 +
9937 : "+m" (sem->count)
9938 : "er" (delta));
9939 }
9940 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
9941 {
9942 long tmp = delta;
9943
9944 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9945 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9946 +
9947 +#ifdef CONFIG_PAX_REFCOUNT
9948 + "jno 0f\n"
9949 + "mov %0,%1\n"
9950 + "int $4\n0:\n"
9951 + _ASM_EXTABLE(0b, 0b)
9952 +#endif
9953 +
9954 : "+r" (tmp), "+m" (sem->count)
9955 : : "memory");
9956
9957 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
9958 index 5e64171..f58957e 100644
9959 --- a/arch/x86/include/asm/segment.h
9960 +++ b/arch/x86/include/asm/segment.h
9961 @@ -64,10 +64,15 @@
9962 * 26 - ESPFIX small SS
9963 * 27 - per-cpu [ offset to per-cpu data area ]
9964 * 28 - stack_canary-20 [ for stack protector ]
9965 - * 29 - unused
9966 - * 30 - unused
9967 + * 29 - PCI BIOS CS
9968 + * 30 - PCI BIOS DS
9969 * 31 - TSS for double fault handler
9970 */
9971 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
9972 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
9973 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
9974 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
9975 +
9976 #define GDT_ENTRY_TLS_MIN 6
9977 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9978
9979 @@ -79,6 +84,8 @@
9980
9981 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
9982
9983 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9984 +
9985 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
9986
9987 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
9988 @@ -104,6 +111,12 @@
9989 #define __KERNEL_STACK_CANARY 0
9990 #endif
9991
9992 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
9993 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9994 +
9995 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
9996 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9997 +
9998 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9999
10000 /*
10001 @@ -141,7 +154,7 @@
10002 */
10003
10004 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10005 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10006 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10007
10008
10009 #else
10010 @@ -165,6 +178,8 @@
10011 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10012 #define __USER32_DS __USER_DS
10013
10014 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10015 +
10016 #define GDT_ENTRY_TSS 8 /* needs two entries */
10017 #define GDT_ENTRY_LDT 10 /* needs two entries */
10018 #define GDT_ENTRY_TLS_MIN 12
10019 @@ -185,6 +200,7 @@
10020 #endif
10021
10022 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10023 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10024 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10025 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10026 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10027 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10028 index 73b11bc..d4a3b63 100644
10029 --- a/arch/x86/include/asm/smp.h
10030 +++ b/arch/x86/include/asm/smp.h
10031 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10032 /* cpus sharing the last level cache: */
10033 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10034 DECLARE_PER_CPU(u16, cpu_llc_id);
10035 -DECLARE_PER_CPU(int, cpu_number);
10036 +DECLARE_PER_CPU(unsigned int, cpu_number);
10037
10038 static inline struct cpumask *cpu_sibling_mask(int cpu)
10039 {
10040 @@ -77,7 +77,7 @@ struct smp_ops {
10041
10042 void (*send_call_func_ipi)(const struct cpumask *mask);
10043 void (*send_call_func_single_ipi)(int cpu);
10044 -};
10045 +} __no_const;
10046
10047 /* Globals due to paravirt */
10048 extern void set_cpu_sibling_map(int cpu);
10049 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10050 extern int safe_smp_processor_id(void);
10051
10052 #elif defined(CONFIG_X86_64_SMP)
10053 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10054 -
10055 -#define stack_smp_processor_id() \
10056 -({ \
10057 - struct thread_info *ti; \
10058 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10059 - ti->cpu; \
10060 -})
10061 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10062 +#define stack_smp_processor_id() raw_smp_processor_id()
10063 #define safe_smp_processor_id() smp_processor_id()
10064
10065 #endif
10066 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10067 index ee67edf..49c796b 100644
10068 --- a/arch/x86/include/asm/spinlock.h
10069 +++ b/arch/x86/include/asm/spinlock.h
10070 @@ -248,6 +248,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10071 static inline void arch_read_lock(arch_rwlock_t *rw)
10072 {
10073 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10074 +
10075 +#ifdef CONFIG_PAX_REFCOUNT
10076 + "jno 0f\n"
10077 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10078 + "int $4\n0:\n"
10079 + _ASM_EXTABLE(0b, 0b)
10080 +#endif
10081 +
10082 "jns 1f\n"
10083 "call __read_lock_failed\n\t"
10084 "1:\n"
10085 @@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10086 static inline void arch_write_lock(arch_rwlock_t *rw)
10087 {
10088 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10089 +
10090 +#ifdef CONFIG_PAX_REFCOUNT
10091 + "jno 0f\n"
10092 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10093 + "int $4\n0:\n"
10094 + _ASM_EXTABLE(0b, 0b)
10095 +#endif
10096 +
10097 "jz 1f\n"
10098 "call __write_lock_failed\n\t"
10099 "1:\n"
10100 @@ -286,13 +302,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10101
10102 static inline void arch_read_unlock(arch_rwlock_t *rw)
10103 {
10104 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10105 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10106 +
10107 +#ifdef CONFIG_PAX_REFCOUNT
10108 + "jno 0f\n"
10109 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10110 + "int $4\n0:\n"
10111 + _ASM_EXTABLE(0b, 0b)
10112 +#endif
10113 +
10114 :"+m" (rw->lock) : : "memory");
10115 }
10116
10117 static inline void arch_write_unlock(arch_rwlock_t *rw)
10118 {
10119 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10120 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10121 +
10122 +#ifdef CONFIG_PAX_REFCOUNT
10123 + "jno 0f\n"
10124 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10125 + "int $4\n0:\n"
10126 + _ASM_EXTABLE(0b, 0b)
10127 +#endif
10128 +
10129 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10130 }
10131
10132 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10133 index 1575177..cb23f52 100644
10134 --- a/arch/x86/include/asm/stackprotector.h
10135 +++ b/arch/x86/include/asm/stackprotector.h
10136 @@ -48,7 +48,7 @@
10137 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10138 */
10139 #define GDT_STACK_CANARY_INIT \
10140 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10141 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10142
10143 /*
10144 * Initialize the stackprotector canary value.
10145 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10146
10147 static inline void load_stack_canary_segment(void)
10148 {
10149 -#ifdef CONFIG_X86_32
10150 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10151 asm volatile ("mov %0, %%gs" : : "r" (0));
10152 #endif
10153 }
10154 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10155 index 70bbe39..4ae2bd4 100644
10156 --- a/arch/x86/include/asm/stacktrace.h
10157 +++ b/arch/x86/include/asm/stacktrace.h
10158 @@ -11,28 +11,20 @@
10159
10160 extern int kstack_depth_to_print;
10161
10162 -struct thread_info;
10163 +struct task_struct;
10164 struct stacktrace_ops;
10165
10166 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10167 - unsigned long *stack,
10168 - unsigned long bp,
10169 - const struct stacktrace_ops *ops,
10170 - void *data,
10171 - unsigned long *end,
10172 - int *graph);
10173 +typedef unsigned long walk_stack_t(struct task_struct *task,
10174 + void *stack_start,
10175 + unsigned long *stack,
10176 + unsigned long bp,
10177 + const struct stacktrace_ops *ops,
10178 + void *data,
10179 + unsigned long *end,
10180 + int *graph);
10181
10182 -extern unsigned long
10183 -print_context_stack(struct thread_info *tinfo,
10184 - unsigned long *stack, unsigned long bp,
10185 - const struct stacktrace_ops *ops, void *data,
10186 - unsigned long *end, int *graph);
10187 -
10188 -extern unsigned long
10189 -print_context_stack_bp(struct thread_info *tinfo,
10190 - unsigned long *stack, unsigned long bp,
10191 - const struct stacktrace_ops *ops, void *data,
10192 - unsigned long *end, int *graph);
10193 +extern walk_stack_t print_context_stack;
10194 +extern walk_stack_t print_context_stack_bp;
10195
10196 /* Generic stack tracer with callbacks */
10197
10198 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10199 void (*address)(void *data, unsigned long address, int reliable);
10200 /* On negative return stop dumping */
10201 int (*stack)(void *data, char *name);
10202 - walk_stack_t walk_stack;
10203 + walk_stack_t *walk_stack;
10204 };
10205
10206 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10207 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10208 index cb23852..2dde194 100644
10209 --- a/arch/x86/include/asm/sys_ia32.h
10210 +++ b/arch/x86/include/asm/sys_ia32.h
10211 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10212 compat_sigset_t __user *, unsigned int);
10213 asmlinkage long sys32_alarm(unsigned int);
10214
10215 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10216 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10217 asmlinkage long sys32_sysfs(int, u32, u32);
10218
10219 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10220 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10221 index 2d2f01c..f985723 100644
10222 --- a/arch/x86/include/asm/system.h
10223 +++ b/arch/x86/include/asm/system.h
10224 @@ -129,7 +129,7 @@ do { \
10225 "call __switch_to\n\t" \
10226 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10227 __switch_canary \
10228 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10229 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10230 "movq %%rax,%%rdi\n\t" \
10231 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10232 "jnz ret_from_fork\n\t" \
10233 @@ -140,7 +140,7 @@ do { \
10234 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10235 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10236 [_tif_fork] "i" (_TIF_FORK), \
10237 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10238 + [thread_info] "m" (current_tinfo), \
10239 [current_task] "m" (current_task) \
10240 __switch_canary_iparam \
10241 : "memory", "cc" __EXTRA_CLOBBER)
10242 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10243 {
10244 unsigned long __limit;
10245 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10246 - return __limit + 1;
10247 + return __limit;
10248 }
10249
10250 static inline void native_clts(void)
10251 @@ -397,13 +397,13 @@ void enable_hlt(void);
10252
10253 void cpu_idle_wait(void);
10254
10255 -extern unsigned long arch_align_stack(unsigned long sp);
10256 +#define arch_align_stack(x) ((x) & ~0xfUL)
10257 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10258
10259 void default_idle(void);
10260 bool set_pm_idle_to_default(void);
10261
10262 -void stop_this_cpu(void *dummy);
10263 +void stop_this_cpu(void *dummy) __noreturn;
10264
10265 /*
10266 * Force strict CPU ordering.
10267 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10268 index a1fe5c1..ee326d8 100644
10269 --- a/arch/x86/include/asm/thread_info.h
10270 +++ b/arch/x86/include/asm/thread_info.h
10271 @@ -10,6 +10,7 @@
10272 #include <linux/compiler.h>
10273 #include <asm/page.h>
10274 #include <asm/types.h>
10275 +#include <asm/percpu.h>
10276
10277 /*
10278 * low level task data that entry.S needs immediate access to
10279 @@ -24,7 +25,6 @@ struct exec_domain;
10280 #include <linux/atomic.h>
10281
10282 struct thread_info {
10283 - struct task_struct *task; /* main task structure */
10284 struct exec_domain *exec_domain; /* execution domain */
10285 __u32 flags; /* low level flags */
10286 __u32 status; /* thread synchronous flags */
10287 @@ -34,18 +34,12 @@ struct thread_info {
10288 mm_segment_t addr_limit;
10289 struct restart_block restart_block;
10290 void __user *sysenter_return;
10291 -#ifdef CONFIG_X86_32
10292 - unsigned long previous_esp; /* ESP of the previous stack in
10293 - case of nested (IRQ) stacks
10294 - */
10295 - __u8 supervisor_stack[0];
10296 -#endif
10297 + unsigned long lowest_stack;
10298 int uaccess_err;
10299 };
10300
10301 -#define INIT_THREAD_INFO(tsk) \
10302 +#define INIT_THREAD_INFO \
10303 { \
10304 - .task = &tsk, \
10305 .exec_domain = &default_exec_domain, \
10306 .flags = 0, \
10307 .cpu = 0, \
10308 @@ -56,7 +50,7 @@ struct thread_info {
10309 }, \
10310 }
10311
10312 -#define init_thread_info (init_thread_union.thread_info)
10313 +#define init_thread_info (init_thread_union.stack)
10314 #define init_stack (init_thread_union.stack)
10315
10316 #else /* !__ASSEMBLY__ */
10317 @@ -170,45 +164,40 @@ struct thread_info {
10318 ret; \
10319 })
10320
10321 -#ifdef CONFIG_X86_32
10322 -
10323 -#define STACK_WARN (THREAD_SIZE/8)
10324 -/*
10325 - * macros/functions for gaining access to the thread information structure
10326 - *
10327 - * preempt_count needs to be 1 initially, until the scheduler is functional.
10328 - */
10329 -#ifndef __ASSEMBLY__
10330 -
10331 -
10332 -/* how to get the current stack pointer from C */
10333 -register unsigned long current_stack_pointer asm("esp") __used;
10334 -
10335 -/* how to get the thread information struct from C */
10336 -static inline struct thread_info *current_thread_info(void)
10337 -{
10338 - return (struct thread_info *)
10339 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10340 -}
10341 -
10342 -#else /* !__ASSEMBLY__ */
10343 -
10344 +#ifdef __ASSEMBLY__
10345 /* how to get the thread information struct from ASM */
10346 #define GET_THREAD_INFO(reg) \
10347 - movl $-THREAD_SIZE, reg; \
10348 - andl %esp, reg
10349 + mov PER_CPU_VAR(current_tinfo), reg
10350
10351 /* use this one if reg already contains %esp */
10352 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10353 - andl $-THREAD_SIZE, reg
10354 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10355 +#else
10356 +/* how to get the thread information struct from C */
10357 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10358 +
10359 +static __always_inline struct thread_info *current_thread_info(void)
10360 +{
10361 + return percpu_read_stable(current_tinfo);
10362 +}
10363 +#endif
10364 +
10365 +#ifdef CONFIG_X86_32
10366 +
10367 +#define STACK_WARN (THREAD_SIZE/8)
10368 +/*
10369 + * macros/functions for gaining access to the thread information structure
10370 + *
10371 + * preempt_count needs to be 1 initially, until the scheduler is functional.
10372 + */
10373 +#ifndef __ASSEMBLY__
10374 +
10375 +/* how to get the current stack pointer from C */
10376 +register unsigned long current_stack_pointer asm("esp") __used;
10377
10378 #endif
10379
10380 #else /* X86_32 */
10381
10382 -#include <asm/percpu.h>
10383 -#define KERNEL_STACK_OFFSET (5*8)
10384 -
10385 /*
10386 * macros/functions for gaining access to the thread information structure
10387 * preempt_count needs to be 1 initially, until the scheduler is functional.
10388 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10389 #ifndef __ASSEMBLY__
10390 DECLARE_PER_CPU(unsigned long, kernel_stack);
10391
10392 -static inline struct thread_info *current_thread_info(void)
10393 -{
10394 - struct thread_info *ti;
10395 - ti = (void *)(percpu_read_stable(kernel_stack) +
10396 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10397 - return ti;
10398 -}
10399 -
10400 -#else /* !__ASSEMBLY__ */
10401 -
10402 -/* how to get the thread information struct from ASM */
10403 -#define GET_THREAD_INFO(reg) \
10404 - movq PER_CPU_VAR(kernel_stack),reg ; \
10405 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10406 -
10407 +/* how to get the current stack pointer from C */
10408 +register unsigned long current_stack_pointer asm("rsp") __used;
10409 #endif
10410
10411 #endif /* !X86_32 */
10412 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10413 extern void free_thread_info(struct thread_info *ti);
10414 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10415 #define arch_task_cache_init arch_task_cache_init
10416 +
10417 +#define __HAVE_THREAD_FUNCTIONS
10418 +#define task_thread_info(task) (&(task)->tinfo)
10419 +#define task_stack_page(task) ((task)->stack)
10420 +#define setup_thread_stack(p, org) do {} while (0)
10421 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10422 +
10423 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10424 +extern struct task_struct *alloc_task_struct_node(int node);
10425 +extern void free_task_struct(struct task_struct *);
10426 +
10427 #endif
10428 #endif /* _ASM_X86_THREAD_INFO_H */
10429 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10430 index 36361bf..324f262 100644
10431 --- a/arch/x86/include/asm/uaccess.h
10432 +++ b/arch/x86/include/asm/uaccess.h
10433 @@ -7,12 +7,15 @@
10434 #include <linux/compiler.h>
10435 #include <linux/thread_info.h>
10436 #include <linux/string.h>
10437 +#include <linux/sched.h>
10438 #include <asm/asm.h>
10439 #include <asm/page.h>
10440
10441 #define VERIFY_READ 0
10442 #define VERIFY_WRITE 1
10443
10444 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10445 +
10446 /*
10447 * The fs value determines whether argument validity checking should be
10448 * performed or not. If get_fs() == USER_DS, checking is performed, with
10449 @@ -28,7 +31,12 @@
10450
10451 #define get_ds() (KERNEL_DS)
10452 #define get_fs() (current_thread_info()->addr_limit)
10453 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10454 +void __set_fs(mm_segment_t x);
10455 +void set_fs(mm_segment_t x);
10456 +#else
10457 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10458 +#endif
10459
10460 #define segment_eq(a, b) ((a).seg == (b).seg)
10461
10462 @@ -76,7 +84,33 @@
10463 * checks that the pointer is in the user space range - after calling
10464 * this function, memory access functions may still return -EFAULT.
10465 */
10466 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10467 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10468 +#define access_ok(type, addr, size) \
10469 +({ \
10470 + long __size = size; \
10471 + unsigned long __addr = (unsigned long)addr; \
10472 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10473 + unsigned long __end_ao = __addr + __size - 1; \
10474 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10475 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10476 + while(__addr_ao <= __end_ao) { \
10477 + char __c_ao; \
10478 + __addr_ao += PAGE_SIZE; \
10479 + if (__size > PAGE_SIZE) \
10480 + cond_resched(); \
10481 + if (__get_user(__c_ao, (char __user *)__addr)) \
10482 + break; \
10483 + if (type != VERIFY_WRITE) { \
10484 + __addr = __addr_ao; \
10485 + continue; \
10486 + } \
10487 + if (__put_user(__c_ao, (char __user *)__addr)) \
10488 + break; \
10489 + __addr = __addr_ao; \
10490 + } \
10491 + } \
10492 + __ret_ao; \
10493 +})
10494
10495 /*
10496 * The exception table consists of pairs of addresses: the first is the
10497 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10498 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10499 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10500
10501 -
10502 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10503 +#define __copyuser_seg "gs;"
10504 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10505 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10506 +#else
10507 +#define __copyuser_seg
10508 +#define __COPYUSER_SET_ES
10509 +#define __COPYUSER_RESTORE_ES
10510 +#endif
10511
10512 #ifdef CONFIG_X86_32
10513 #define __put_user_asm_u64(x, addr, err, errret) \
10514 - asm volatile("1: movl %%eax,0(%2)\n" \
10515 - "2: movl %%edx,4(%2)\n" \
10516 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10517 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10518 "3:\n" \
10519 ".section .fixup,\"ax\"\n" \
10520 "4: movl %3,%0\n" \
10521 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10522 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10523
10524 #define __put_user_asm_ex_u64(x, addr) \
10525 - asm volatile("1: movl %%eax,0(%1)\n" \
10526 - "2: movl %%edx,4(%1)\n" \
10527 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10528 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10529 "3:\n" \
10530 _ASM_EXTABLE(1b, 2b - 1b) \
10531 _ASM_EXTABLE(2b, 3b - 2b) \
10532 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10533 __typeof__(*(ptr)) __pu_val; \
10534 __chk_user_ptr(ptr); \
10535 might_fault(); \
10536 - __pu_val = x; \
10537 + __pu_val = (x); \
10538 switch (sizeof(*(ptr))) { \
10539 case 1: \
10540 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10541 @@ -373,7 +415,7 @@ do { \
10542 } while (0)
10543
10544 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10545 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10546 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10547 "2:\n" \
10548 ".section .fixup,\"ax\"\n" \
10549 "3: mov %3,%0\n" \
10550 @@ -381,7 +423,7 @@ do { \
10551 " jmp 2b\n" \
10552 ".previous\n" \
10553 _ASM_EXTABLE(1b, 3b) \
10554 - : "=r" (err), ltype(x) \
10555 + : "=r" (err), ltype (x) \
10556 : "m" (__m(addr)), "i" (errret), "0" (err))
10557
10558 #define __get_user_size_ex(x, ptr, size) \
10559 @@ -406,7 +448,7 @@ do { \
10560 } while (0)
10561
10562 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10563 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10564 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10565 "2:\n" \
10566 _ASM_EXTABLE(1b, 2b - 1b) \
10567 : ltype(x) : "m" (__m(addr)))
10568 @@ -423,13 +465,24 @@ do { \
10569 int __gu_err; \
10570 unsigned long __gu_val; \
10571 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10572 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10573 + (x) = (__typeof__(*(ptr)))__gu_val; \
10574 __gu_err; \
10575 })
10576
10577 /* FIXME: this hack is definitely wrong -AK */
10578 struct __large_struct { unsigned long buf[100]; };
10579 -#define __m(x) (*(struct __large_struct __user *)(x))
10580 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10581 +#define ____m(x) \
10582 +({ \
10583 + unsigned long ____x = (unsigned long)(x); \
10584 + if (____x < PAX_USER_SHADOW_BASE) \
10585 + ____x += PAX_USER_SHADOW_BASE; \
10586 + (void __user *)____x; \
10587 +})
10588 +#else
10589 +#define ____m(x) (x)
10590 +#endif
10591 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10592
10593 /*
10594 * Tell gcc we read from memory instead of writing: this is because
10595 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10596 * aliasing issues.
10597 */
10598 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10599 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10600 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10601 "2:\n" \
10602 ".section .fixup,\"ax\"\n" \
10603 "3: mov %3,%0\n" \
10604 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10605 ".previous\n" \
10606 _ASM_EXTABLE(1b, 3b) \
10607 : "=r"(err) \
10608 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10609 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10610
10611 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10612 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10613 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10614 "2:\n" \
10615 _ASM_EXTABLE(1b, 2b - 1b) \
10616 : : ltype(x), "m" (__m(addr)))
10617 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10618 * On error, the variable @x is set to zero.
10619 */
10620
10621 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10622 +#define __get_user(x, ptr) get_user((x), (ptr))
10623 +#else
10624 #define __get_user(x, ptr) \
10625 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10626 +#endif
10627
10628 /**
10629 * __put_user: - Write a simple value into user space, with less checking.
10630 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10631 * Returns zero on success, or -EFAULT on error.
10632 */
10633
10634 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10635 +#define __put_user(x, ptr) put_user((x), (ptr))
10636 +#else
10637 #define __put_user(x, ptr) \
10638 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10639 +#endif
10640
10641 #define __get_user_unaligned __get_user
10642 #define __put_user_unaligned __put_user
10643 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10644 #define get_user_ex(x, ptr) do { \
10645 unsigned long __gue_val; \
10646 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10647 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10648 + (x) = (__typeof__(*(ptr)))__gue_val; \
10649 } while (0)
10650
10651 #ifdef CONFIG_X86_WP_WORKS_OK
10652 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10653 index 566e803..89f1e60 100644
10654 --- a/arch/x86/include/asm/uaccess_32.h
10655 +++ b/arch/x86/include/asm/uaccess_32.h
10656 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10657 static __always_inline unsigned long __must_check
10658 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10659 {
10660 + pax_track_stack();
10661 +
10662 + if ((long)n < 0)
10663 + return n;
10664 +
10665 if (__builtin_constant_p(n)) {
10666 unsigned long ret;
10667
10668 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10669 return ret;
10670 }
10671 }
10672 + if (!__builtin_constant_p(n))
10673 + check_object_size(from, n, true);
10674 return __copy_to_user_ll(to, from, n);
10675 }
10676
10677 @@ -82,12 +89,16 @@ static __always_inline unsigned long __must_check
10678 __copy_to_user(void __user *to, const void *from, unsigned long n)
10679 {
10680 might_fault();
10681 +
10682 return __copy_to_user_inatomic(to, from, n);
10683 }
10684
10685 static __always_inline unsigned long
10686 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10687 {
10688 + if ((long)n < 0)
10689 + return n;
10690 +
10691 /* Avoid zeroing the tail if the copy fails..
10692 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10693 * but as the zeroing behaviour is only significant when n is not
10694 @@ -137,6 +148,12 @@ static __always_inline unsigned long
10695 __copy_from_user(void *to, const void __user *from, unsigned long n)
10696 {
10697 might_fault();
10698 +
10699 + pax_track_stack();
10700 +
10701 + if ((long)n < 0)
10702 + return n;
10703 +
10704 if (__builtin_constant_p(n)) {
10705 unsigned long ret;
10706
10707 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10708 return ret;
10709 }
10710 }
10711 + if (!__builtin_constant_p(n))
10712 + check_object_size(to, n, false);
10713 return __copy_from_user_ll(to, from, n);
10714 }
10715
10716 @@ -159,6 +178,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10717 const void __user *from, unsigned long n)
10718 {
10719 might_fault();
10720 +
10721 + if ((long)n < 0)
10722 + return n;
10723 +
10724 if (__builtin_constant_p(n)) {
10725 unsigned long ret;
10726
10727 @@ -181,15 +204,19 @@ static __always_inline unsigned long
10728 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10729 unsigned long n)
10730 {
10731 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10732 + if ((long)n < 0)
10733 + return n;
10734 +
10735 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10736 }
10737
10738 -unsigned long __must_check copy_to_user(void __user *to,
10739 - const void *from, unsigned long n);
10740 -unsigned long __must_check _copy_from_user(void *to,
10741 - const void __user *from,
10742 - unsigned long n);
10743 -
10744 +extern void copy_to_user_overflow(void)
10745 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10746 + __compiletime_error("copy_to_user() buffer size is not provably correct")
10747 +#else
10748 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
10749 +#endif
10750 +;
10751
10752 extern void copy_from_user_overflow(void)
10753 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10754 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void)
10755 #endif
10756 ;
10757
10758 -static inline unsigned long __must_check copy_from_user(void *to,
10759 - const void __user *from,
10760 - unsigned long n)
10761 +/**
10762 + * copy_to_user: - Copy a block of data into user space.
10763 + * @to: Destination address, in user space.
10764 + * @from: Source address, in kernel space.
10765 + * @n: Number of bytes to copy.
10766 + *
10767 + * Context: User context only. This function may sleep.
10768 + *
10769 + * Copy data from kernel space to user space.
10770 + *
10771 + * Returns number of bytes that could not be copied.
10772 + * On success, this will be zero.
10773 + */
10774 +static inline unsigned long __must_check
10775 +copy_to_user(void __user *to, const void *from, unsigned long n)
10776 +{
10777 + int sz = __compiletime_object_size(from);
10778 +
10779 + if (unlikely(sz != -1 && sz < n))
10780 + copy_to_user_overflow();
10781 + else if (access_ok(VERIFY_WRITE, to, n))
10782 + n = __copy_to_user(to, from, n);
10783 + return n;
10784 +}
10785 +
10786 +/**
10787 + * copy_from_user: - Copy a block of data from user space.
10788 + * @to: Destination address, in kernel space.
10789 + * @from: Source address, in user space.
10790 + * @n: Number of bytes to copy.
10791 + *
10792 + * Context: User context only. This function may sleep.
10793 + *
10794 + * Copy data from user space to kernel space.
10795 + *
10796 + * Returns number of bytes that could not be copied.
10797 + * On success, this will be zero.
10798 + *
10799 + * If some data could not be copied, this function will pad the copied
10800 + * data to the requested size using zero bytes.
10801 + */
10802 +static inline unsigned long __must_check
10803 +copy_from_user(void *to, const void __user *from, unsigned long n)
10804 {
10805 int sz = __compiletime_object_size(to);
10806
10807 - if (likely(sz == -1 || sz >= n))
10808 - n = _copy_from_user(to, from, n);
10809 - else
10810 + if (unlikely(sz != -1 && sz < n))
10811 copy_from_user_overflow();
10812 -
10813 + else if (access_ok(VERIFY_READ, from, n))
10814 + n = __copy_from_user(to, from, n);
10815 + else if ((long)n > 0) {
10816 + if (!__builtin_constant_p(n))
10817 + check_object_size(to, n, false);
10818 + memset(to, 0, n);
10819 + }
10820 return n;
10821 }
10822
10823 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10824 index 1c66d30..59bd7d4 100644
10825 --- a/arch/x86/include/asm/uaccess_64.h
10826 +++ b/arch/x86/include/asm/uaccess_64.h
10827 @@ -10,6 +10,9 @@
10828 #include <asm/alternative.h>
10829 #include <asm/cpufeature.h>
10830 #include <asm/page.h>
10831 +#include <asm/pgtable.h>
10832 +
10833 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10834
10835 /*
10836 * Copy To/From Userspace
10837 @@ -17,12 +20,12 @@
10838
10839 /* Handles exceptions in both to and from, but doesn't do access_ok */
10840 __must_check unsigned long
10841 -copy_user_generic_string(void *to, const void *from, unsigned len);
10842 +copy_user_generic_string(void *to, const void *from, unsigned long len);
10843 __must_check unsigned long
10844 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10845 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10846
10847 static __always_inline __must_check unsigned long
10848 -copy_user_generic(void *to, const void *from, unsigned len)
10849 +copy_user_generic(void *to, const void *from, unsigned long len)
10850 {
10851 unsigned ret;
10852
10853 @@ -36,138 +39,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
10854 return ret;
10855 }
10856
10857 +static __always_inline __must_check unsigned long
10858 +__copy_to_user(void __user *to, const void *from, unsigned long len);
10859 +static __always_inline __must_check unsigned long
10860 +__copy_from_user(void *to, const void __user *from, unsigned long len);
10861 __must_check unsigned long
10862 -_copy_to_user(void __user *to, const void *from, unsigned len);
10863 -__must_check unsigned long
10864 -_copy_from_user(void *to, const void __user *from, unsigned len);
10865 -__must_check unsigned long
10866 -copy_in_user(void __user *to, const void __user *from, unsigned len);
10867 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
10868
10869 static inline unsigned long __must_check copy_from_user(void *to,
10870 const void __user *from,
10871 - unsigned long n)
10872 + unsigned n)
10873 {
10874 - int sz = __compiletime_object_size(to);
10875 -
10876 might_fault();
10877 - if (likely(sz == -1 || sz >= n))
10878 - n = _copy_from_user(to, from, n);
10879 -#ifdef CONFIG_DEBUG_VM
10880 - else
10881 - WARN(1, "Buffer overflow detected!\n");
10882 -#endif
10883 +
10884 + if (access_ok(VERIFY_READ, from, n))
10885 + n = __copy_from_user(to, from, n);
10886 + else if (n < INT_MAX) {
10887 + if (!__builtin_constant_p(n))
10888 + check_object_size(to, n, false);
10889 + memset(to, 0, n);
10890 + }
10891 return n;
10892 }
10893
10894 static __always_inline __must_check
10895 -int copy_to_user(void __user *dst, const void *src, unsigned size)
10896 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
10897 {
10898 might_fault();
10899
10900 - return _copy_to_user(dst, src, size);
10901 + if (access_ok(VERIFY_WRITE, dst, size))
10902 + size = __copy_to_user(dst, src, size);
10903 + return size;
10904 }
10905
10906 static __always_inline __must_check
10907 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10908 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10909 {
10910 - int ret = 0;
10911 + int sz = __compiletime_object_size(dst);
10912 + unsigned ret = 0;
10913
10914 might_fault();
10915 - if (!__builtin_constant_p(size))
10916 - return copy_user_generic(dst, (__force void *)src, size);
10917 +
10918 + pax_track_stack();
10919 +
10920 + if (size > INT_MAX)
10921 + return size;
10922 +
10923 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10924 + if (!__access_ok(VERIFY_READ, src, size))
10925 + return size;
10926 +#endif
10927 +
10928 + if (unlikely(sz != -1 && sz < size)) {
10929 +#ifdef CONFIG_DEBUG_VM
10930 + WARN(1, "Buffer overflow detected!\n");
10931 +#endif
10932 + return size;
10933 + }
10934 +
10935 + if (!__builtin_constant_p(size)) {
10936 + check_object_size(dst, size, false);
10937 +
10938 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10939 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10940 + src += PAX_USER_SHADOW_BASE;
10941 +#endif
10942 +
10943 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
10944 + }
10945 switch (size) {
10946 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10947 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10948 ret, "b", "b", "=q", 1);
10949 return ret;
10950 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10951 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10952 ret, "w", "w", "=r", 2);
10953 return ret;
10954 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10955 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10956 ret, "l", "k", "=r", 4);
10957 return ret;
10958 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10959 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10960 ret, "q", "", "=r", 8);
10961 return ret;
10962 case 10:
10963 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10964 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10965 ret, "q", "", "=r", 10);
10966 if (unlikely(ret))
10967 return ret;
10968 __get_user_asm(*(u16 *)(8 + (char *)dst),
10969 - (u16 __user *)(8 + (char __user *)src),
10970 + (const u16 __user *)(8 + (const char __user *)src),
10971 ret, "w", "w", "=r", 2);
10972 return ret;
10973 case 16:
10974 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10975 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10976 ret, "q", "", "=r", 16);
10977 if (unlikely(ret))
10978 return ret;
10979 __get_user_asm(*(u64 *)(8 + (char *)dst),
10980 - (u64 __user *)(8 + (char __user *)src),
10981 + (const u64 __user *)(8 + (const char __user *)src),
10982 ret, "q", "", "=r", 8);
10983 return ret;
10984 default:
10985 - return copy_user_generic(dst, (__force void *)src, size);
10986 +
10987 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10988 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10989 + src += PAX_USER_SHADOW_BASE;
10990 +#endif
10991 +
10992 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
10993 }
10994 }
10995
10996 static __always_inline __must_check
10997 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10998 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
10999 {
11000 - int ret = 0;
11001 + int sz = __compiletime_object_size(src);
11002 + unsigned ret = 0;
11003
11004 might_fault();
11005 - if (!__builtin_constant_p(size))
11006 - return copy_user_generic((__force void *)dst, src, size);
11007 +
11008 + pax_track_stack();
11009 +
11010 + if (size > INT_MAX)
11011 + return size;
11012 +
11013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11014 + if (!__access_ok(VERIFY_WRITE, dst, size))
11015 + return size;
11016 +#endif
11017 +
11018 + if (unlikely(sz != -1 && sz < size)) {
11019 +#ifdef CONFIG_DEBUG_VM
11020 + WARN(1, "Buffer overflow detected!\n");
11021 +#endif
11022 + return size;
11023 + }
11024 +
11025 + if (!__builtin_constant_p(size)) {
11026 + check_object_size(src, size, true);
11027 +
11028 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11029 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11030 + dst += PAX_USER_SHADOW_BASE;
11031 +#endif
11032 +
11033 + return copy_user_generic((__force_kernel void *)dst, src, size);
11034 + }
11035 switch (size) {
11036 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11037 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11038 ret, "b", "b", "iq", 1);
11039 return ret;
11040 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11041 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11042 ret, "w", "w", "ir", 2);
11043 return ret;
11044 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11045 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11046 ret, "l", "k", "ir", 4);
11047 return ret;
11048 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11049 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11050 ret, "q", "", "er", 8);
11051 return ret;
11052 case 10:
11053 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11054 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11055 ret, "q", "", "er", 10);
11056 if (unlikely(ret))
11057 return ret;
11058 asm("":::"memory");
11059 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11060 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11061 ret, "w", "w", "ir", 2);
11062 return ret;
11063 case 16:
11064 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11065 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11066 ret, "q", "", "er", 16);
11067 if (unlikely(ret))
11068 return ret;
11069 asm("":::"memory");
11070 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11071 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11072 ret, "q", "", "er", 8);
11073 return ret;
11074 default:
11075 - return copy_user_generic((__force void *)dst, src, size);
11076 +
11077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11078 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11079 + dst += PAX_USER_SHADOW_BASE;
11080 +#endif
11081 +
11082 + return copy_user_generic((__force_kernel void *)dst, src, size);
11083 }
11084 }
11085
11086 static __always_inline __must_check
11087 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11088 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11089 {
11090 - int ret = 0;
11091 + unsigned ret = 0;
11092
11093 might_fault();
11094 - if (!__builtin_constant_p(size))
11095 - return copy_user_generic((__force void *)dst,
11096 - (__force void *)src, size);
11097 +
11098 + if (size > INT_MAX)
11099 + return size;
11100 +
11101 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11102 + if (!__access_ok(VERIFY_READ, src, size))
11103 + return size;
11104 + if (!__access_ok(VERIFY_WRITE, dst, size))
11105 + return size;
11106 +#endif
11107 +
11108 + if (!__builtin_constant_p(size)) {
11109 +
11110 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11111 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11112 + src += PAX_USER_SHADOW_BASE;
11113 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11114 + dst += PAX_USER_SHADOW_BASE;
11115 +#endif
11116 +
11117 + return copy_user_generic((__force_kernel void *)dst,
11118 + (__force_kernel const void *)src, size);
11119 + }
11120 switch (size) {
11121 case 1: {
11122 u8 tmp;
11123 - __get_user_asm(tmp, (u8 __user *)src,
11124 + __get_user_asm(tmp, (const u8 __user *)src,
11125 ret, "b", "b", "=q", 1);
11126 if (likely(!ret))
11127 __put_user_asm(tmp, (u8 __user *)dst,
11128 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11129 }
11130 case 2: {
11131 u16 tmp;
11132 - __get_user_asm(tmp, (u16 __user *)src,
11133 + __get_user_asm(tmp, (const u16 __user *)src,
11134 ret, "w", "w", "=r", 2);
11135 if (likely(!ret))
11136 __put_user_asm(tmp, (u16 __user *)dst,
11137 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11138
11139 case 4: {
11140 u32 tmp;
11141 - __get_user_asm(tmp, (u32 __user *)src,
11142 + __get_user_asm(tmp, (const u32 __user *)src,
11143 ret, "l", "k", "=r", 4);
11144 if (likely(!ret))
11145 __put_user_asm(tmp, (u32 __user *)dst,
11146 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11147 }
11148 case 8: {
11149 u64 tmp;
11150 - __get_user_asm(tmp, (u64 __user *)src,
11151 + __get_user_asm(tmp, (const u64 __user *)src,
11152 ret, "q", "", "=r", 8);
11153 if (likely(!ret))
11154 __put_user_asm(tmp, (u64 __user *)dst,
11155 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11156 return ret;
11157 }
11158 default:
11159 - return copy_user_generic((__force void *)dst,
11160 - (__force void *)src, size);
11161 +
11162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11163 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11164 + src += PAX_USER_SHADOW_BASE;
11165 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11166 + dst += PAX_USER_SHADOW_BASE;
11167 +#endif
11168 +
11169 + return copy_user_generic((__force_kernel void *)dst,
11170 + (__force_kernel const void *)src, size);
11171 }
11172 }
11173
11174 @@ -219,35 +318,74 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11175 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11176
11177 static __must_check __always_inline int
11178 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11179 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11180 {
11181 - return copy_user_generic(dst, (__force const void *)src, size);
11182 + pax_track_stack();
11183 +
11184 + if (size > INT_MAX)
11185 + return size;
11186 +
11187 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11188 + if (!__access_ok(VERIFY_READ, src, size))
11189 + return size;
11190 +
11191 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11192 + src += PAX_USER_SHADOW_BASE;
11193 +#endif
11194 +
11195 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11196 }
11197
11198 -static __must_check __always_inline int
11199 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11200 +static __must_check __always_inline unsigned long
11201 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11202 {
11203 - return copy_user_generic((__force void *)dst, src, size);
11204 + if (size > INT_MAX)
11205 + return size;
11206 +
11207 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11208 + if (!__access_ok(VERIFY_WRITE, dst, size))
11209 + return size;
11210 +
11211 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11212 + dst += PAX_USER_SHADOW_BASE;
11213 +#endif
11214 +
11215 + return copy_user_generic((__force_kernel void *)dst, src, size);
11216 }
11217
11218 -extern long __copy_user_nocache(void *dst, const void __user *src,
11219 - unsigned size, int zerorest);
11220 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11221 + unsigned long size, int zerorest);
11222
11223 -static inline int
11224 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11225 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11226 {
11227 might_sleep();
11228 +
11229 + if (size > INT_MAX)
11230 + return size;
11231 +
11232 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11233 + if (!__access_ok(VERIFY_READ, src, size))
11234 + return size;
11235 +#endif
11236 +
11237 return __copy_user_nocache(dst, src, size, 1);
11238 }
11239
11240 -static inline int
11241 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11242 - unsigned size)
11243 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11244 + unsigned long size)
11245 {
11246 + if (size > INT_MAX)
11247 + return size;
11248 +
11249 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11250 + if (!__access_ok(VERIFY_READ, src, size))
11251 + return size;
11252 +#endif
11253 +
11254 return __copy_user_nocache(dst, src, size, 0);
11255 }
11256
11257 -unsigned long
11258 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11259 +extern unsigned long
11260 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11261
11262 #endif /* _ASM_X86_UACCESS_64_H */
11263 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11264 index bb05228..d763d5b 100644
11265 --- a/arch/x86/include/asm/vdso.h
11266 +++ b/arch/x86/include/asm/vdso.h
11267 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11268 #define VDSO32_SYMBOL(base, name) \
11269 ({ \
11270 extern const char VDSO32_##name[]; \
11271 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11272 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11273 })
11274 #endif
11275
11276 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11277 index d3d8590..d296b5f 100644
11278 --- a/arch/x86/include/asm/x86_init.h
11279 +++ b/arch/x86/include/asm/x86_init.h
11280 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11281 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11282 void (*find_smp_config)(void);
11283 void (*get_smp_config)(unsigned int early);
11284 -};
11285 +} __no_const;
11286
11287 /**
11288 * struct x86_init_resources - platform specific resource related ops
11289 @@ -42,7 +42,7 @@ struct x86_init_resources {
11290 void (*probe_roms)(void);
11291 void (*reserve_resources)(void);
11292 char *(*memory_setup)(void);
11293 -};
11294 +} __no_const;
11295
11296 /**
11297 * struct x86_init_irqs - platform specific interrupt setup
11298 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11299 void (*pre_vector_init)(void);
11300 void (*intr_init)(void);
11301 void (*trap_init)(void);
11302 -};
11303 +} __no_const;
11304
11305 /**
11306 * struct x86_init_oem - oem platform specific customizing functions
11307 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11308 struct x86_init_oem {
11309 void (*arch_setup)(void);
11310 void (*banner)(void);
11311 -};
11312 +} __no_const;
11313
11314 /**
11315 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11316 @@ -76,7 +76,7 @@ struct x86_init_oem {
11317 */
11318 struct x86_init_mapping {
11319 void (*pagetable_reserve)(u64 start, u64 end);
11320 -};
11321 +} __no_const;
11322
11323 /**
11324 * struct x86_init_paging - platform specific paging functions
11325 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11326 struct x86_init_paging {
11327 void (*pagetable_setup_start)(pgd_t *base);
11328 void (*pagetable_setup_done)(pgd_t *base);
11329 -};
11330 +} __no_const;
11331
11332 /**
11333 * struct x86_init_timers - platform specific timer setup
11334 @@ -101,7 +101,7 @@ struct x86_init_timers {
11335 void (*tsc_pre_init)(void);
11336 void (*timer_init)(void);
11337 void (*wallclock_init)(void);
11338 -};
11339 +} __no_const;
11340
11341 /**
11342 * struct x86_init_iommu - platform specific iommu setup
11343 @@ -109,7 +109,7 @@ struct x86_init_timers {
11344 */
11345 struct x86_init_iommu {
11346 int (*iommu_init)(void);
11347 -};
11348 +} __no_const;
11349
11350 /**
11351 * struct x86_init_pci - platform specific pci init functions
11352 @@ -123,7 +123,7 @@ struct x86_init_pci {
11353 int (*init)(void);
11354 void (*init_irq)(void);
11355 void (*fixup_irqs)(void);
11356 -};
11357 +} __no_const;
11358
11359 /**
11360 * struct x86_init_ops - functions for platform specific setup
11361 @@ -139,7 +139,7 @@ struct x86_init_ops {
11362 struct x86_init_timers timers;
11363 struct x86_init_iommu iommu;
11364 struct x86_init_pci pci;
11365 -};
11366 +} __no_const;
11367
11368 /**
11369 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11370 @@ -147,7 +147,7 @@ struct x86_init_ops {
11371 */
11372 struct x86_cpuinit_ops {
11373 void (*setup_percpu_clockev)(void);
11374 -};
11375 +} __no_const;
11376
11377 /**
11378 * struct x86_platform_ops - platform specific runtime functions
11379 @@ -166,7 +166,7 @@ struct x86_platform_ops {
11380 bool (*is_untracked_pat_range)(u64 start, u64 end);
11381 void (*nmi_init)(void);
11382 int (*i8042_detect)(void);
11383 -};
11384 +} __no_const;
11385
11386 struct pci_dev;
11387
11388 @@ -174,7 +174,7 @@ struct x86_msi_ops {
11389 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11390 void (*teardown_msi_irq)(unsigned int irq);
11391 void (*teardown_msi_irqs)(struct pci_dev *dev);
11392 -};
11393 +} __no_const;
11394
11395 extern struct x86_init_ops x86_init;
11396 extern struct x86_cpuinit_ops x86_cpuinit;
11397 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11398 index c6ce245..ffbdab7 100644
11399 --- a/arch/x86/include/asm/xsave.h
11400 +++ b/arch/x86/include/asm/xsave.h
11401 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11402 {
11403 int err;
11404
11405 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11406 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11407 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11408 +#endif
11409 +
11410 /*
11411 * Clear the xsave header first, so that reserved fields are
11412 * initialized to zero.
11413 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11414 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11415 {
11416 int err;
11417 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11418 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11419 u32 lmask = mask;
11420 u32 hmask = mask >> 32;
11421
11422 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11423 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11424 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11425 +#endif
11426 +
11427 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11428 "2:\n"
11429 ".section .fixup,\"ax\"\n"
11430 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11431 index 6a564ac..9b1340c 100644
11432 --- a/arch/x86/kernel/acpi/realmode/Makefile
11433 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11434 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11435 $(call cc-option, -fno-stack-protector) \
11436 $(call cc-option, -mpreferred-stack-boundary=2)
11437 KBUILD_CFLAGS += $(call cc-option, -m32)
11438 +ifdef CONSTIFY_PLUGIN
11439 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11440 +endif
11441 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11442 GCOV_PROFILE := n
11443
11444 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11445 index b4fd836..4358fe3 100644
11446 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11447 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11448 @@ -108,6 +108,9 @@ wakeup_code:
11449 /* Do any other stuff... */
11450
11451 #ifndef CONFIG_64BIT
11452 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11453 + call verify_cpu
11454 +
11455 /* This could also be done in C code... */
11456 movl pmode_cr3, %eax
11457 movl %eax, %cr3
11458 @@ -131,6 +134,7 @@ wakeup_code:
11459 movl pmode_cr0, %eax
11460 movl %eax, %cr0
11461 jmp pmode_return
11462 +# include "../../verify_cpu.S"
11463 #else
11464 pushw $0
11465 pushw trampoline_segment
11466 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11467 index 103b6ab..2004d0a 100644
11468 --- a/arch/x86/kernel/acpi/sleep.c
11469 +++ b/arch/x86/kernel/acpi/sleep.c
11470 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11471 header->trampoline_segment = trampoline_address() >> 4;
11472 #ifdef CONFIG_SMP
11473 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11474 +
11475 + pax_open_kernel();
11476 early_gdt_descr.address =
11477 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11478 + pax_close_kernel();
11479 +
11480 initial_gs = per_cpu_offset(smp_processor_id());
11481 #endif
11482 initial_code = (unsigned long)wakeup_long64;
11483 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11484 index 13ab720..95d5442 100644
11485 --- a/arch/x86/kernel/acpi/wakeup_32.S
11486 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11487 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11488 # and restore the stack ... but you need gdt for this to work
11489 movl saved_context_esp, %esp
11490
11491 - movl %cs:saved_magic, %eax
11492 - cmpl $0x12345678, %eax
11493 + cmpl $0x12345678, saved_magic
11494 jne bogus_magic
11495
11496 # jump to place where we left off
11497 - movl saved_eip, %eax
11498 - jmp *%eax
11499 + jmp *(saved_eip)
11500
11501 bogus_magic:
11502 jmp bogus_magic
11503 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11504 index c638228..16dfa8d 100644
11505 --- a/arch/x86/kernel/alternative.c
11506 +++ b/arch/x86/kernel/alternative.c
11507 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11508 */
11509 for (a = start; a < end; a++) {
11510 instr = (u8 *)&a->instr_offset + a->instr_offset;
11511 +
11512 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11513 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11514 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11515 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11516 +#endif
11517 +
11518 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11519 BUG_ON(a->replacementlen > a->instrlen);
11520 BUG_ON(a->instrlen > sizeof(insnbuf));
11521 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11522 for (poff = start; poff < end; poff++) {
11523 u8 *ptr = (u8 *)poff + *poff;
11524
11525 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11526 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11527 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11528 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11529 +#endif
11530 +
11531 if (!*poff || ptr < text || ptr >= text_end)
11532 continue;
11533 /* turn DS segment override prefix into lock prefix */
11534 - if (*ptr == 0x3e)
11535 + if (*ktla_ktva(ptr) == 0x3e)
11536 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11537 };
11538 mutex_unlock(&text_mutex);
11539 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11540 for (poff = start; poff < end; poff++) {
11541 u8 *ptr = (u8 *)poff + *poff;
11542
11543 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11544 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11545 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11546 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11547 +#endif
11548 +
11549 if (!*poff || ptr < text || ptr >= text_end)
11550 continue;
11551 /* turn lock prefix into DS segment override prefix */
11552 - if (*ptr == 0xf0)
11553 + if (*ktla_ktva(ptr) == 0xf0)
11554 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11555 };
11556 mutex_unlock(&text_mutex);
11557 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11558
11559 BUG_ON(p->len > MAX_PATCH_LEN);
11560 /* prep the buffer with the original instructions */
11561 - memcpy(insnbuf, p->instr, p->len);
11562 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11563 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11564 (unsigned long)p->instr, p->len);
11565
11566 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11567 if (smp_alt_once)
11568 free_init_pages("SMP alternatives",
11569 (unsigned long)__smp_locks,
11570 - (unsigned long)__smp_locks_end);
11571 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11572
11573 restart_nmi();
11574 }
11575 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11576 * instructions. And on the local CPU you need to be protected again NMI or MCE
11577 * handlers seeing an inconsistent instruction while you patch.
11578 */
11579 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11580 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11581 size_t len)
11582 {
11583 unsigned long flags;
11584 local_irq_save(flags);
11585 - memcpy(addr, opcode, len);
11586 +
11587 + pax_open_kernel();
11588 + memcpy(ktla_ktva(addr), opcode, len);
11589 sync_core();
11590 + pax_close_kernel();
11591 +
11592 local_irq_restore(flags);
11593 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11594 that causes hangs on some VIA CPUs. */
11595 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11596 */
11597 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11598 {
11599 - unsigned long flags;
11600 - char *vaddr;
11601 + unsigned char *vaddr = ktla_ktva(addr);
11602 struct page *pages[2];
11603 - int i;
11604 + size_t i;
11605
11606 if (!core_kernel_text((unsigned long)addr)) {
11607 - pages[0] = vmalloc_to_page(addr);
11608 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11609 + pages[0] = vmalloc_to_page(vaddr);
11610 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11611 } else {
11612 - pages[0] = virt_to_page(addr);
11613 + pages[0] = virt_to_page(vaddr);
11614 WARN_ON(!PageReserved(pages[0]));
11615 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11616 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11617 }
11618 BUG_ON(!pages[0]);
11619 - local_irq_save(flags);
11620 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11621 - if (pages[1])
11622 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11623 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11624 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11625 - clear_fixmap(FIX_TEXT_POKE0);
11626 - if (pages[1])
11627 - clear_fixmap(FIX_TEXT_POKE1);
11628 - local_flush_tlb();
11629 - sync_core();
11630 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11631 - that causes hangs on some VIA CPUs. */
11632 + text_poke_early(addr, opcode, len);
11633 for (i = 0; i < len; i++)
11634 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11635 - local_irq_restore(flags);
11636 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11637 return addr;
11638 }
11639
11640 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11641 index 52fa563..5de9d9c 100644
11642 --- a/arch/x86/kernel/apic/apic.c
11643 +++ b/arch/x86/kernel/apic/apic.c
11644 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11645 /*
11646 * Debug level, exported for io_apic.c
11647 */
11648 -unsigned int apic_verbosity;
11649 +int apic_verbosity;
11650
11651 int pic_mode;
11652
11653 @@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11654 apic_write(APIC_ESR, 0);
11655 v1 = apic_read(APIC_ESR);
11656 ack_APIC_irq();
11657 - atomic_inc(&irq_err_count);
11658 + atomic_inc_unchecked(&irq_err_count);
11659
11660 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11661 smp_processor_id(), v0 , v1);
11662 @@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(void)
11663 u16 *bios_cpu_apicid;
11664 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11665
11666 + pax_track_stack();
11667 +
11668 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11669 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11670
11671 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11672 index 8eb863e..32e6934 100644
11673 --- a/arch/x86/kernel/apic/io_apic.c
11674 +++ b/arch/x86/kernel/apic/io_apic.c
11675 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11676 }
11677 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11678
11679 -void lock_vector_lock(void)
11680 +void lock_vector_lock(void) __acquires(vector_lock)
11681 {
11682 /* Used to the online set of cpus does not change
11683 * during assign_irq_vector.
11684 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
11685 raw_spin_lock(&vector_lock);
11686 }
11687
11688 -void unlock_vector_lock(void)
11689 +void unlock_vector_lock(void) __releases(vector_lock)
11690 {
11691 raw_spin_unlock(&vector_lock);
11692 }
11693 @@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_data *data)
11694 ack_APIC_irq();
11695 }
11696
11697 -atomic_t irq_mis_count;
11698 +atomic_unchecked_t irq_mis_count;
11699
11700 /*
11701 * IO-APIC versions below 0x20 don't support EOI register.
11702 @@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_data *data)
11703 * at the cpu.
11704 */
11705 if (!(v & (1 << (i & 0x1f)))) {
11706 - atomic_inc(&irq_mis_count);
11707 + atomic_inc_unchecked(&irq_mis_count);
11708
11709 eoi_ioapic_irq(irq, cfg);
11710 }
11711 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11712 index 0371c48..54cdf63 100644
11713 --- a/arch/x86/kernel/apm_32.c
11714 +++ b/arch/x86/kernel/apm_32.c
11715 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
11716 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11717 * even though they are called in protected mode.
11718 */
11719 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11720 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11721 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11722
11723 static const char driver_version[] = "1.16ac"; /* no spaces */
11724 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
11725 BUG_ON(cpu != 0);
11726 gdt = get_cpu_gdt_table(cpu);
11727 save_desc_40 = gdt[0x40 / 8];
11728 +
11729 + pax_open_kernel();
11730 gdt[0x40 / 8] = bad_bios_desc;
11731 + pax_close_kernel();
11732
11733 apm_irq_save(flags);
11734 APM_DO_SAVE_SEGS;
11735 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
11736 &call->esi);
11737 APM_DO_RESTORE_SEGS;
11738 apm_irq_restore(flags);
11739 +
11740 + pax_open_kernel();
11741 gdt[0x40 / 8] = save_desc_40;
11742 + pax_close_kernel();
11743 +
11744 put_cpu();
11745
11746 return call->eax & 0xff;
11747 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void *_call)
11748 BUG_ON(cpu != 0);
11749 gdt = get_cpu_gdt_table(cpu);
11750 save_desc_40 = gdt[0x40 / 8];
11751 +
11752 + pax_open_kernel();
11753 gdt[0x40 / 8] = bad_bios_desc;
11754 + pax_close_kernel();
11755
11756 apm_irq_save(flags);
11757 APM_DO_SAVE_SEGS;
11758 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void *_call)
11759 &call->eax);
11760 APM_DO_RESTORE_SEGS;
11761 apm_irq_restore(flags);
11762 +
11763 + pax_open_kernel();
11764 gdt[0x40 / 8] = save_desc_40;
11765 + pax_close_kernel();
11766 +
11767 put_cpu();
11768 return error;
11769 }
11770 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
11771 * code to that CPU.
11772 */
11773 gdt = get_cpu_gdt_table(0);
11774 +
11775 + pax_open_kernel();
11776 set_desc_base(&gdt[APM_CS >> 3],
11777 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11778 set_desc_base(&gdt[APM_CS_16 >> 3],
11779 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11780 set_desc_base(&gdt[APM_DS >> 3],
11781 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11782 + pax_close_kernel();
11783
11784 proc_create("apm", 0, NULL, &apm_file_ops);
11785
11786 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11787 index 4f13faf..87db5d2 100644
11788 --- a/arch/x86/kernel/asm-offsets.c
11789 +++ b/arch/x86/kernel/asm-offsets.c
11790 @@ -33,6 +33,8 @@ void common(void) {
11791 OFFSET(TI_status, thread_info, status);
11792 OFFSET(TI_addr_limit, thread_info, addr_limit);
11793 OFFSET(TI_preempt_count, thread_info, preempt_count);
11794 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11795 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11796
11797 BLANK();
11798 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11799 @@ -53,8 +55,26 @@ void common(void) {
11800 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11801 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11802 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11803 +
11804 +#ifdef CONFIG_PAX_KERNEXEC
11805 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11806 #endif
11807
11808 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11809 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11810 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11811 +#ifdef CONFIG_X86_64
11812 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11813 +#endif
11814 +#endif
11815 +
11816 +#endif
11817 +
11818 + BLANK();
11819 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11820 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11821 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11822 +
11823 #ifdef CONFIG_XEN
11824 BLANK();
11825 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11826 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11827 index e72a119..6e2955d 100644
11828 --- a/arch/x86/kernel/asm-offsets_64.c
11829 +++ b/arch/x86/kernel/asm-offsets_64.c
11830 @@ -69,6 +69,7 @@ int main(void)
11831 BLANK();
11832 #undef ENTRY
11833
11834 + DEFINE(TSS_size, sizeof(struct tss_struct));
11835 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11836 BLANK();
11837
11838 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11839 index 6042981..e638266 100644
11840 --- a/arch/x86/kernel/cpu/Makefile
11841 +++ b/arch/x86/kernel/cpu/Makefile
11842 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11843 CFLAGS_REMOVE_perf_event.o = -pg
11844 endif
11845
11846 -# Make sure load_percpu_segment has no stackprotector
11847 -nostackp := $(call cc-option, -fno-stack-protector)
11848 -CFLAGS_common.o := $(nostackp)
11849 -
11850 obj-y := intel_cacheinfo.o scattered.o topology.o
11851 obj-y += proc.o capflags.o powerflags.o common.o
11852 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11853 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11854 index b13ed39..603286c 100644
11855 --- a/arch/x86/kernel/cpu/amd.c
11856 +++ b/arch/x86/kernel/cpu/amd.c
11857 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11858 unsigned int size)
11859 {
11860 /* AMD errata T13 (order #21922) */
11861 - if ((c->x86 == 6)) {
11862 + if (c->x86 == 6) {
11863 /* Duron Rev A0 */
11864 if (c->x86_model == 3 && c->x86_mask == 0)
11865 size = 64;
11866 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11867 index 6218439..ab2e4ab 100644
11868 --- a/arch/x86/kernel/cpu/common.c
11869 +++ b/arch/x86/kernel/cpu/common.c
11870 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11871
11872 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11873
11874 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11875 -#ifdef CONFIG_X86_64
11876 - /*
11877 - * We need valid kernel segments for data and code in long mode too
11878 - * IRET will check the segment types kkeil 2000/10/28
11879 - * Also sysret mandates a special GDT layout
11880 - *
11881 - * TLS descriptors are currently at a different place compared to i386.
11882 - * Hopefully nobody expects them at a fixed place (Wine?)
11883 - */
11884 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11885 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11886 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11887 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11888 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11889 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11890 -#else
11891 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11892 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11893 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11894 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11895 - /*
11896 - * Segments used for calling PnP BIOS have byte granularity.
11897 - * They code segments and data segments have fixed 64k limits,
11898 - * the transfer segment sizes are set at run time.
11899 - */
11900 - /* 32-bit code */
11901 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11902 - /* 16-bit code */
11903 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11904 - /* 16-bit data */
11905 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11906 - /* 16-bit data */
11907 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11908 - /* 16-bit data */
11909 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11910 - /*
11911 - * The APM segments have byte granularity and their bases
11912 - * are set at run time. All have 64k limits.
11913 - */
11914 - /* 32-bit code */
11915 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11916 - /* 16-bit code */
11917 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11918 - /* data */
11919 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11920 -
11921 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11922 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11923 - GDT_STACK_CANARY_INIT
11924 -#endif
11925 -} };
11926 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11927 -
11928 static int __init x86_xsave_setup(char *s)
11929 {
11930 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11931 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
11932 {
11933 struct desc_ptr gdt_descr;
11934
11935 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11936 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11937 gdt_descr.size = GDT_SIZE - 1;
11938 load_gdt(&gdt_descr);
11939 /* Reload the per-cpu base */
11940 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
11941 /* Filter out anything that depends on CPUID levels we don't have */
11942 filter_cpuid_features(c, true);
11943
11944 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
11945 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11946 +#endif
11947 +
11948 /* If the model name is still unset, do table lookup. */
11949 if (!c->x86_model_id[0]) {
11950 const char *p;
11951 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(char *arg)
11952 }
11953 __setup("clearcpuid=", setup_disablecpuid);
11954
11955 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11956 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11957 +
11958 #ifdef CONFIG_X86_64
11959 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11960
11961 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
11962 EXPORT_PER_CPU_SYMBOL(current_task);
11963
11964 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11965 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11966 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11967 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11968
11969 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11970 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
11971 {
11972 memset(regs, 0, sizeof(struct pt_regs));
11973 regs->fs = __KERNEL_PERCPU;
11974 - regs->gs = __KERNEL_STACK_CANARY;
11975 + savesegment(gs, regs->gs);
11976
11977 return regs;
11978 }
11979 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
11980 int i;
11981
11982 cpu = stack_smp_processor_id();
11983 - t = &per_cpu(init_tss, cpu);
11984 + t = init_tss + cpu;
11985 oist = &per_cpu(orig_ist, cpu);
11986
11987 #ifdef CONFIG_NUMA
11988 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
11989 switch_to_new_gdt(cpu);
11990 loadsegment(fs, 0);
11991
11992 - load_idt((const struct desc_ptr *)&idt_descr);
11993 + load_idt(&idt_descr);
11994
11995 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11996 syscall_init();
11997 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
11998 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11999 barrier();
12000
12001 - x86_configure_nx();
12002 if (cpu != 0)
12003 enable_x2apic();
12004
12005 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
12006 {
12007 int cpu = smp_processor_id();
12008 struct task_struct *curr = current;
12009 - struct tss_struct *t = &per_cpu(init_tss, cpu);
12010 + struct tss_struct *t = init_tss + cpu;
12011 struct thread_struct *thread = &curr->thread;
12012
12013 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12014 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
12015 index ed6086e..a1dcf29 100644
12016 --- a/arch/x86/kernel/cpu/intel.c
12017 +++ b/arch/x86/kernel/cpu/intel.c
12018 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12019 * Update the IDT descriptor and reload the IDT so that
12020 * it uses the read-only mapped virtual address.
12021 */
12022 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12023 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12024 load_idt(&idt_descr);
12025 }
12026 #endif
12027 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12028 index 0ed633c..82cef2a 100644
12029 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
12030 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12031 @@ -215,7 +215,9 @@ static int inject_init(void)
12032 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
12033 return -ENOMEM;
12034 printk(KERN_INFO "Machine check injector initialized\n");
12035 - mce_chrdev_ops.write = mce_write;
12036 + pax_open_kernel();
12037 + *(void **)&mce_chrdev_ops.write = mce_write;
12038 + pax_close_kernel();
12039 register_die_notifier(&mce_raise_nb);
12040 return 0;
12041 }
12042 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12043 index 08363b0..ee26113 100644
12044 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12045 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12046 @@ -42,6 +42,7 @@
12047 #include <asm/processor.h>
12048 #include <asm/mce.h>
12049 #include <asm/msr.h>
12050 +#include <asm/local.h>
12051
12052 #include "mce-internal.h"
12053
12054 @@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
12055 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12056 m->cs, m->ip);
12057
12058 - if (m->cs == __KERNEL_CS)
12059 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12060 print_symbol("{%s}", m->ip);
12061 pr_cont("\n");
12062 }
12063 @@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
12064
12065 #define PANIC_TIMEOUT 5 /* 5 seconds */
12066
12067 -static atomic_t mce_paniced;
12068 +static atomic_unchecked_t mce_paniced;
12069
12070 static int fake_panic;
12071 -static atomic_t mce_fake_paniced;
12072 +static atomic_unchecked_t mce_fake_paniced;
12073
12074 /* Panic in progress. Enable interrupts and wait for final IPI */
12075 static void wait_for_panic(void)
12076 @@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12077 /*
12078 * Make sure only one CPU runs in machine check panic
12079 */
12080 - if (atomic_inc_return(&mce_paniced) > 1)
12081 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12082 wait_for_panic();
12083 barrier();
12084
12085 @@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12086 console_verbose();
12087 } else {
12088 /* Don't log too much for fake panic */
12089 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12090 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12091 return;
12092 }
12093 /* First print corrected ones that are still unlogged */
12094 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12095 * might have been modified by someone else.
12096 */
12097 rmb();
12098 - if (atomic_read(&mce_paniced))
12099 + if (atomic_read_unchecked(&mce_paniced))
12100 wait_for_panic();
12101 if (!monarch_timeout)
12102 goto out;
12103 @@ -1392,7 +1393,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12104 }
12105
12106 /* Call the installed machine check handler for this CPU setup. */
12107 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12108 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12109 unexpected_machine_check;
12110
12111 /*
12112 @@ -1415,7 +1416,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12113 return;
12114 }
12115
12116 + pax_open_kernel();
12117 machine_check_vector = do_machine_check;
12118 + pax_close_kernel();
12119
12120 __mcheck_cpu_init_generic();
12121 __mcheck_cpu_init_vendor(c);
12122 @@ -1429,7 +1432,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12123 */
12124
12125 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12126 -static int mce_chrdev_open_count; /* #times opened */
12127 +static local_t mce_chrdev_open_count; /* #times opened */
12128 static int mce_chrdev_open_exclu; /* already open exclusive? */
12129
12130 static int mce_chrdev_open(struct inode *inode, struct file *file)
12131 @@ -1437,7 +1440,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12132 spin_lock(&mce_chrdev_state_lock);
12133
12134 if (mce_chrdev_open_exclu ||
12135 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12136 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12137 spin_unlock(&mce_chrdev_state_lock);
12138
12139 return -EBUSY;
12140 @@ -1445,7 +1448,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12141
12142 if (file->f_flags & O_EXCL)
12143 mce_chrdev_open_exclu = 1;
12144 - mce_chrdev_open_count++;
12145 + local_inc(&mce_chrdev_open_count);
12146
12147 spin_unlock(&mce_chrdev_state_lock);
12148
12149 @@ -1456,7 +1459,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12150 {
12151 spin_lock(&mce_chrdev_state_lock);
12152
12153 - mce_chrdev_open_count--;
12154 + local_dec(&mce_chrdev_open_count);
12155 mce_chrdev_open_exclu = 0;
12156
12157 spin_unlock(&mce_chrdev_state_lock);
12158 @@ -2147,7 +2150,7 @@ struct dentry *mce_get_debugfs_dir(void)
12159 static void mce_reset(void)
12160 {
12161 cpu_missing = 0;
12162 - atomic_set(&mce_fake_paniced, 0);
12163 + atomic_set_unchecked(&mce_fake_paniced, 0);
12164 atomic_set(&mce_executing, 0);
12165 atomic_set(&mce_callin, 0);
12166 atomic_set(&global_nwo, 0);
12167 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12168 index 5c0e653..1e82c7c 100644
12169 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12170 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12171 @@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12172 if (!cpu_has(c, X86_FEATURE_MCE))
12173 return;
12174
12175 + pax_open_kernel();
12176 machine_check_vector = pentium_machine_check;
12177 + pax_close_kernel();
12178 /* Make sure the vector pointer is visible before we enable MCEs: */
12179 wmb();
12180
12181 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12182 index 54060f5..e6ba93d 100644
12183 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12184 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12185 @@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12186 {
12187 u32 lo, hi;
12188
12189 + pax_open_kernel();
12190 machine_check_vector = winchip_machine_check;
12191 + pax_close_kernel();
12192 /* Make sure the vector pointer is visible before we enable MCEs: */
12193 wmb();
12194
12195 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12196 index 6b96110..0da73eb 100644
12197 --- a/arch/x86/kernel/cpu/mtrr/main.c
12198 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12199 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12200 u64 size_or_mask, size_and_mask;
12201 static bool mtrr_aps_delayed_init;
12202
12203 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12204 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12205
12206 const struct mtrr_ops *mtrr_if;
12207
12208 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12209 index df5e41f..816c719 100644
12210 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12211 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12212 @@ -25,7 +25,7 @@ struct mtrr_ops {
12213 int (*validate_add_page)(unsigned long base, unsigned long size,
12214 unsigned int type);
12215 int (*have_wrcomb)(void);
12216 -};
12217 +} __do_const;
12218
12219 extern int generic_get_free_region(unsigned long base, unsigned long size,
12220 int replace_reg);
12221 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12222 index cfa62ec..9250dd7 100644
12223 --- a/arch/x86/kernel/cpu/perf_event.c
12224 +++ b/arch/x86/kernel/cpu/perf_event.c
12225 @@ -795,6 +795,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
12226 int i, j, w, wmax, num = 0;
12227 struct hw_perf_event *hwc;
12228
12229 + pax_track_stack();
12230 +
12231 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
12232
12233 for (i = 0; i < n; i++) {
12234 @@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12235 break;
12236
12237 perf_callchain_store(entry, frame.return_address);
12238 - fp = frame.next_frame;
12239 + fp = (const void __force_user *)frame.next_frame;
12240 }
12241 }
12242
12243 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12244 index 764c7c2..c5d9c7b 100644
12245 --- a/arch/x86/kernel/crash.c
12246 +++ b/arch/x86/kernel/crash.c
12247 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
12248 regs = args->regs;
12249
12250 #ifdef CONFIG_X86_32
12251 - if (!user_mode_vm(regs)) {
12252 + if (!user_mode(regs)) {
12253 crash_fixup_ss_esp(&fixed_regs, regs);
12254 regs = &fixed_regs;
12255 }
12256 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12257 index 37250fe..bf2ec74 100644
12258 --- a/arch/x86/kernel/doublefault_32.c
12259 +++ b/arch/x86/kernel/doublefault_32.c
12260 @@ -11,7 +11,7 @@
12261
12262 #define DOUBLEFAULT_STACKSIZE (1024)
12263 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12264 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12265 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12266
12267 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12268
12269 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12270 unsigned long gdt, tss;
12271
12272 store_gdt(&gdt_desc);
12273 - gdt = gdt_desc.address;
12274 + gdt = (unsigned long)gdt_desc.address;
12275
12276 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12277
12278 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12279 /* 0x2 bit is always set */
12280 .flags = X86_EFLAGS_SF | 0x2,
12281 .sp = STACK_START,
12282 - .es = __USER_DS,
12283 + .es = __KERNEL_DS,
12284 .cs = __KERNEL_CS,
12285 .ss = __KERNEL_DS,
12286 - .ds = __USER_DS,
12287 + .ds = __KERNEL_DS,
12288 .fs = __KERNEL_PERCPU,
12289
12290 .__cr3 = __pa_nodebug(swapper_pg_dir),
12291 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12292 index 1aae78f..aab3a3d 100644
12293 --- a/arch/x86/kernel/dumpstack.c
12294 +++ b/arch/x86/kernel/dumpstack.c
12295 @@ -2,6 +2,9 @@
12296 * Copyright (C) 1991, 1992 Linus Torvalds
12297 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12298 */
12299 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12300 +#define __INCLUDED_BY_HIDESYM 1
12301 +#endif
12302 #include <linux/kallsyms.h>
12303 #include <linux/kprobes.h>
12304 #include <linux/uaccess.h>
12305 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12306 static void
12307 print_ftrace_graph_addr(unsigned long addr, void *data,
12308 const struct stacktrace_ops *ops,
12309 - struct thread_info *tinfo, int *graph)
12310 + struct task_struct *task, int *graph)
12311 {
12312 - struct task_struct *task = tinfo->task;
12313 unsigned long ret_addr;
12314 int index = task->curr_ret_stack;
12315
12316 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12317 static inline void
12318 print_ftrace_graph_addr(unsigned long addr, void *data,
12319 const struct stacktrace_ops *ops,
12320 - struct thread_info *tinfo, int *graph)
12321 + struct task_struct *task, int *graph)
12322 { }
12323 #endif
12324
12325 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12326 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12327 */
12328
12329 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12330 - void *p, unsigned int size, void *end)
12331 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12332 {
12333 - void *t = tinfo;
12334 if (end) {
12335 if (p < end && p >= (end-THREAD_SIZE))
12336 return 1;
12337 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12338 }
12339
12340 unsigned long
12341 -print_context_stack(struct thread_info *tinfo,
12342 +print_context_stack(struct task_struct *task, void *stack_start,
12343 unsigned long *stack, unsigned long bp,
12344 const struct stacktrace_ops *ops, void *data,
12345 unsigned long *end, int *graph)
12346 {
12347 struct stack_frame *frame = (struct stack_frame *)bp;
12348
12349 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12350 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12351 unsigned long addr;
12352
12353 addr = *stack;
12354 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12355 } else {
12356 ops->address(data, addr, 0);
12357 }
12358 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12359 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12360 }
12361 stack++;
12362 }
12363 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12364 EXPORT_SYMBOL_GPL(print_context_stack);
12365
12366 unsigned long
12367 -print_context_stack_bp(struct thread_info *tinfo,
12368 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12369 unsigned long *stack, unsigned long bp,
12370 const struct stacktrace_ops *ops, void *data,
12371 unsigned long *end, int *graph)
12372 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12373 struct stack_frame *frame = (struct stack_frame *)bp;
12374 unsigned long *ret_addr = &frame->return_address;
12375
12376 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12377 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12378 unsigned long addr = *ret_addr;
12379
12380 if (!__kernel_text_address(addr))
12381 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12382 ops->address(data, addr, 1);
12383 frame = frame->next_frame;
12384 ret_addr = &frame->return_address;
12385 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12386 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12387 }
12388
12389 return (unsigned long)frame;
12390 @@ -186,7 +186,7 @@ void dump_stack(void)
12391
12392 bp = stack_frame(current, NULL);
12393 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12394 - current->pid, current->comm, print_tainted(),
12395 + task_pid_nr(current), current->comm, print_tainted(),
12396 init_utsname()->release,
12397 (int)strcspn(init_utsname()->version, " "),
12398 init_utsname()->version);
12399 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12400 }
12401 EXPORT_SYMBOL_GPL(oops_begin);
12402
12403 +extern void gr_handle_kernel_exploit(void);
12404 +
12405 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12406 {
12407 if (regs && kexec_should_crash(current))
12408 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12409 panic("Fatal exception in interrupt");
12410 if (panic_on_oops)
12411 panic("Fatal exception");
12412 - do_exit(signr);
12413 +
12414 + gr_handle_kernel_exploit();
12415 +
12416 + do_group_exit(signr);
12417 }
12418
12419 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12420 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12421
12422 show_registers(regs);
12423 #ifdef CONFIG_X86_32
12424 - if (user_mode_vm(regs)) {
12425 + if (user_mode(regs)) {
12426 sp = regs->sp;
12427 ss = regs->ss & 0xffff;
12428 } else {
12429 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12430 unsigned long flags = oops_begin();
12431 int sig = SIGSEGV;
12432
12433 - if (!user_mode_vm(regs))
12434 + if (!user_mode(regs))
12435 report_bug(regs->ip, regs);
12436
12437 if (__die(str, regs, err))
12438 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12439 index 3b97a80..667ce7a 100644
12440 --- a/arch/x86/kernel/dumpstack_32.c
12441 +++ b/arch/x86/kernel/dumpstack_32.c
12442 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12443 bp = stack_frame(task, regs);
12444
12445 for (;;) {
12446 - struct thread_info *context;
12447 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12448
12449 - context = (struct thread_info *)
12450 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12451 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12452 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12453
12454 - stack = (unsigned long *)context->previous_esp;
12455 - if (!stack)
12456 + if (stack_start == task_stack_page(task))
12457 break;
12458 + stack = *(unsigned long **)stack_start;
12459 if (ops->stack(data, "IRQ") < 0)
12460 break;
12461 touch_nmi_watchdog();
12462 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12463 * When in-kernel, we also print out the stack and code at the
12464 * time of the fault..
12465 */
12466 - if (!user_mode_vm(regs)) {
12467 + if (!user_mode(regs)) {
12468 unsigned int code_prologue = code_bytes * 43 / 64;
12469 unsigned int code_len = code_bytes;
12470 unsigned char c;
12471 u8 *ip;
12472 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12473
12474 printk(KERN_EMERG "Stack:\n");
12475 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12476
12477 printk(KERN_EMERG "Code: ");
12478
12479 - ip = (u8 *)regs->ip - code_prologue;
12480 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12481 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12482 /* try starting at IP */
12483 - ip = (u8 *)regs->ip;
12484 + ip = (u8 *)regs->ip + cs_base;
12485 code_len = code_len - code_prologue + 1;
12486 }
12487 for (i = 0; i < code_len; i++, ip++) {
12488 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12489 printk(" Bad EIP value.");
12490 break;
12491 }
12492 - if (ip == (u8 *)regs->ip)
12493 + if (ip == (u8 *)regs->ip + cs_base)
12494 printk("<%02x> ", c);
12495 else
12496 printk("%02x ", c);
12497 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12498 {
12499 unsigned short ud2;
12500
12501 + ip = ktla_ktva(ip);
12502 if (ip < PAGE_OFFSET)
12503 return 0;
12504 if (probe_kernel_address((unsigned short *)ip, ud2))
12505 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12506
12507 return ud2 == 0x0b0f;
12508 }
12509 +
12510 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12511 +void pax_check_alloca(unsigned long size)
12512 +{
12513 + unsigned long sp = (unsigned long)&sp, stack_left;
12514 +
12515 + /* all kernel stacks are of the same size */
12516 + stack_left = sp & (THREAD_SIZE - 1);
12517 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12518 +}
12519 +EXPORT_SYMBOL(pax_check_alloca);
12520 +#endif
12521 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12522 index 19853ad..508ca79 100644
12523 --- a/arch/x86/kernel/dumpstack_64.c
12524 +++ b/arch/x86/kernel/dumpstack_64.c
12525 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12526 unsigned long *irq_stack_end =
12527 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12528 unsigned used = 0;
12529 - struct thread_info *tinfo;
12530 int graph = 0;
12531 unsigned long dummy;
12532 + void *stack_start;
12533
12534 if (!task)
12535 task = current;
12536 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12537 * current stack address. If the stacks consist of nested
12538 * exceptions
12539 */
12540 - tinfo = task_thread_info(task);
12541 for (;;) {
12542 char *id;
12543 unsigned long *estack_end;
12544 +
12545 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12546 &used, &id);
12547
12548 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12549 if (ops->stack(data, id) < 0)
12550 break;
12551
12552 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12553 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12554 data, estack_end, &graph);
12555 ops->stack(data, "<EOE>");
12556 /*
12557 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12558 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12559 if (ops->stack(data, "IRQ") < 0)
12560 break;
12561 - bp = ops->walk_stack(tinfo, stack, bp,
12562 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12563 ops, data, irq_stack_end, &graph);
12564 /*
12565 * We link to the next stack (which would be
12566 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12567 /*
12568 * This handles the process stack:
12569 */
12570 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12571 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12572 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12573 put_cpu();
12574 }
12575 EXPORT_SYMBOL(dump_trace);
12576 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12577
12578 return ud2 == 0x0b0f;
12579 }
12580 +
12581 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12582 +void pax_check_alloca(unsigned long size)
12583 +{
12584 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12585 + unsigned cpu, used;
12586 + char *id;
12587 +
12588 + /* check the process stack first */
12589 + stack_start = (unsigned long)task_stack_page(current);
12590 + stack_end = stack_start + THREAD_SIZE;
12591 + if (likely(stack_start <= sp && sp < stack_end)) {
12592 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12593 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12594 + return;
12595 + }
12596 +
12597 + cpu = get_cpu();
12598 +
12599 + /* check the irq stacks */
12600 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12601 + stack_start = stack_end - IRQ_STACK_SIZE;
12602 + if (stack_start <= sp && sp < stack_end) {
12603 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12604 + put_cpu();
12605 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12606 + return;
12607 + }
12608 +
12609 + /* check the exception stacks */
12610 + used = 0;
12611 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12612 + stack_start = stack_end - EXCEPTION_STKSZ;
12613 + if (stack_end && stack_start <= sp && sp < stack_end) {
12614 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12615 + put_cpu();
12616 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12617 + return;
12618 + }
12619 +
12620 + put_cpu();
12621 +
12622 + /* unknown stack */
12623 + BUG();
12624 +}
12625 +EXPORT_SYMBOL(pax_check_alloca);
12626 +#endif
12627 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12628 index cd28a35..2601699 100644
12629 --- a/arch/x86/kernel/early_printk.c
12630 +++ b/arch/x86/kernel/early_printk.c
12631 @@ -7,6 +7,7 @@
12632 #include <linux/pci_regs.h>
12633 #include <linux/pci_ids.h>
12634 #include <linux/errno.h>
12635 +#include <linux/sched.h>
12636 #include <asm/io.h>
12637 #include <asm/processor.h>
12638 #include <asm/fcntl.h>
12639 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char *fmt, ...)
12640 int n;
12641 va_list ap;
12642
12643 + pax_track_stack();
12644 +
12645 va_start(ap, fmt);
12646 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12647 early_console->write(early_console, buf, n);
12648 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12649 index f3f6f53..0841b66 100644
12650 --- a/arch/x86/kernel/entry_32.S
12651 +++ b/arch/x86/kernel/entry_32.S
12652 @@ -186,13 +186,146 @@
12653 /*CFI_REL_OFFSET gs, PT_GS*/
12654 .endm
12655 .macro SET_KERNEL_GS reg
12656 +
12657 +#ifdef CONFIG_CC_STACKPROTECTOR
12658 movl $(__KERNEL_STACK_CANARY), \reg
12659 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12660 + movl $(__USER_DS), \reg
12661 +#else
12662 + xorl \reg, \reg
12663 +#endif
12664 +
12665 movl \reg, %gs
12666 .endm
12667
12668 #endif /* CONFIG_X86_32_LAZY_GS */
12669
12670 -.macro SAVE_ALL
12671 +.macro pax_enter_kernel
12672 +#ifdef CONFIG_PAX_KERNEXEC
12673 + call pax_enter_kernel
12674 +#endif
12675 +.endm
12676 +
12677 +.macro pax_exit_kernel
12678 +#ifdef CONFIG_PAX_KERNEXEC
12679 + call pax_exit_kernel
12680 +#endif
12681 +.endm
12682 +
12683 +#ifdef CONFIG_PAX_KERNEXEC
12684 +ENTRY(pax_enter_kernel)
12685 +#ifdef CONFIG_PARAVIRT
12686 + pushl %eax
12687 + pushl %ecx
12688 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12689 + mov %eax, %esi
12690 +#else
12691 + mov %cr0, %esi
12692 +#endif
12693 + bts $16, %esi
12694 + jnc 1f
12695 + mov %cs, %esi
12696 + cmp $__KERNEL_CS, %esi
12697 + jz 3f
12698 + ljmp $__KERNEL_CS, $3f
12699 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12700 +2:
12701 +#ifdef CONFIG_PARAVIRT
12702 + mov %esi, %eax
12703 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12704 +#else
12705 + mov %esi, %cr0
12706 +#endif
12707 +3:
12708 +#ifdef CONFIG_PARAVIRT
12709 + popl %ecx
12710 + popl %eax
12711 +#endif
12712 + ret
12713 +ENDPROC(pax_enter_kernel)
12714 +
12715 +ENTRY(pax_exit_kernel)
12716 +#ifdef CONFIG_PARAVIRT
12717 + pushl %eax
12718 + pushl %ecx
12719 +#endif
12720 + mov %cs, %esi
12721 + cmp $__KERNEXEC_KERNEL_CS, %esi
12722 + jnz 2f
12723 +#ifdef CONFIG_PARAVIRT
12724 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12725 + mov %eax, %esi
12726 +#else
12727 + mov %cr0, %esi
12728 +#endif
12729 + btr $16, %esi
12730 + ljmp $__KERNEL_CS, $1f
12731 +1:
12732 +#ifdef CONFIG_PARAVIRT
12733 + mov %esi, %eax
12734 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12735 +#else
12736 + mov %esi, %cr0
12737 +#endif
12738 +2:
12739 +#ifdef CONFIG_PARAVIRT
12740 + popl %ecx
12741 + popl %eax
12742 +#endif
12743 + ret
12744 +ENDPROC(pax_exit_kernel)
12745 +#endif
12746 +
12747 +.macro pax_erase_kstack
12748 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12749 + call pax_erase_kstack
12750 +#endif
12751 +.endm
12752 +
12753 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12754 +/*
12755 + * ebp: thread_info
12756 + * ecx, edx: can be clobbered
12757 + */
12758 +ENTRY(pax_erase_kstack)
12759 + pushl %edi
12760 + pushl %eax
12761 +
12762 + mov TI_lowest_stack(%ebp), %edi
12763 + mov $-0xBEEF, %eax
12764 + std
12765 +
12766 +1: mov %edi, %ecx
12767 + and $THREAD_SIZE_asm - 1, %ecx
12768 + shr $2, %ecx
12769 + repne scasl
12770 + jecxz 2f
12771 +
12772 + cmp $2*16, %ecx
12773 + jc 2f
12774 +
12775 + mov $2*16, %ecx
12776 + repe scasl
12777 + jecxz 2f
12778 + jne 1b
12779 +
12780 +2: cld
12781 + mov %esp, %ecx
12782 + sub %edi, %ecx
12783 + shr $2, %ecx
12784 + rep stosl
12785 +
12786 + mov TI_task_thread_sp0(%ebp), %edi
12787 + sub $128, %edi
12788 + mov %edi, TI_lowest_stack(%ebp)
12789 +
12790 + popl %eax
12791 + popl %edi
12792 + ret
12793 +ENDPROC(pax_erase_kstack)
12794 +#endif
12795 +
12796 +.macro __SAVE_ALL _DS
12797 cld
12798 PUSH_GS
12799 pushl_cfi %fs
12800 @@ -215,7 +348,7 @@
12801 CFI_REL_OFFSET ecx, 0
12802 pushl_cfi %ebx
12803 CFI_REL_OFFSET ebx, 0
12804 - movl $(__USER_DS), %edx
12805 + movl $\_DS, %edx
12806 movl %edx, %ds
12807 movl %edx, %es
12808 movl $(__KERNEL_PERCPU), %edx
12809 @@ -223,6 +356,15 @@
12810 SET_KERNEL_GS %edx
12811 .endm
12812
12813 +.macro SAVE_ALL
12814 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12815 + __SAVE_ALL __KERNEL_DS
12816 + pax_enter_kernel
12817 +#else
12818 + __SAVE_ALL __USER_DS
12819 +#endif
12820 +.endm
12821 +
12822 .macro RESTORE_INT_REGS
12823 popl_cfi %ebx
12824 CFI_RESTORE ebx
12825 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12826 popfl_cfi
12827 jmp syscall_exit
12828 CFI_ENDPROC
12829 -END(ret_from_fork)
12830 +ENDPROC(ret_from_fork)
12831
12832 /*
12833 * Interrupt exit functions should be protected against kprobes
12834 @@ -333,7 +475,15 @@ check_userspace:
12835 movb PT_CS(%esp), %al
12836 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12837 cmpl $USER_RPL, %eax
12838 +
12839 +#ifdef CONFIG_PAX_KERNEXEC
12840 + jae resume_userspace
12841 +
12842 + PAX_EXIT_KERNEL
12843 + jmp resume_kernel
12844 +#else
12845 jb resume_kernel # not returning to v8086 or userspace
12846 +#endif
12847
12848 ENTRY(resume_userspace)
12849 LOCKDEP_SYS_EXIT
12850 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12851 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12852 # int/exception return?
12853 jne work_pending
12854 - jmp restore_all
12855 -END(ret_from_exception)
12856 + jmp restore_all_pax
12857 +ENDPROC(ret_from_exception)
12858
12859 #ifdef CONFIG_PREEMPT
12860 ENTRY(resume_kernel)
12861 @@ -361,7 +511,7 @@ need_resched:
12862 jz restore_all
12863 call preempt_schedule_irq
12864 jmp need_resched
12865 -END(resume_kernel)
12866 +ENDPROC(resume_kernel)
12867 #endif
12868 CFI_ENDPROC
12869 /*
12870 @@ -395,23 +545,34 @@ sysenter_past_esp:
12871 /*CFI_REL_OFFSET cs, 0*/
12872 /*
12873 * Push current_thread_info()->sysenter_return to the stack.
12874 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12875 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12876 */
12877 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12878 + pushl_cfi $0
12879 CFI_REL_OFFSET eip, 0
12880
12881 pushl_cfi %eax
12882 SAVE_ALL
12883 + GET_THREAD_INFO(%ebp)
12884 + movl TI_sysenter_return(%ebp),%ebp
12885 + movl %ebp,PT_EIP(%esp)
12886 ENABLE_INTERRUPTS(CLBR_NONE)
12887
12888 /*
12889 * Load the potential sixth argument from user stack.
12890 * Careful about security.
12891 */
12892 + movl PT_OLDESP(%esp),%ebp
12893 +
12894 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12895 + mov PT_OLDSS(%esp),%ds
12896 +1: movl %ds:(%ebp),%ebp
12897 + push %ss
12898 + pop %ds
12899 +#else
12900 cmpl $__PAGE_OFFSET-3,%ebp
12901 jae syscall_fault
12902 1: movl (%ebp),%ebp
12903 +#endif
12904 +
12905 movl %ebp,PT_EBP(%esp)
12906 .section __ex_table,"a"
12907 .align 4
12908 @@ -434,12 +595,24 @@ sysenter_do_call:
12909 testl $_TIF_ALLWORK_MASK, %ecx
12910 jne sysexit_audit
12911 sysenter_exit:
12912 +
12913 +#ifdef CONFIG_PAX_RANDKSTACK
12914 + pushl_cfi %eax
12915 + movl %esp, %eax
12916 + call pax_randomize_kstack
12917 + popl_cfi %eax
12918 +#endif
12919 +
12920 + pax_erase_kstack
12921 +
12922 /* if something modifies registers it must also disable sysexit */
12923 movl PT_EIP(%esp), %edx
12924 movl PT_OLDESP(%esp), %ecx
12925 xorl %ebp,%ebp
12926 TRACE_IRQS_ON
12927 1: mov PT_FS(%esp), %fs
12928 +2: mov PT_DS(%esp), %ds
12929 +3: mov PT_ES(%esp), %es
12930 PTGS_TO_GS
12931 ENABLE_INTERRUPTS_SYSEXIT
12932
12933 @@ -456,6 +629,9 @@ sysenter_audit:
12934 movl %eax,%edx /* 2nd arg: syscall number */
12935 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12936 call audit_syscall_entry
12937 +
12938 + pax_erase_kstack
12939 +
12940 pushl_cfi %ebx
12941 movl PT_EAX(%esp),%eax /* reload syscall number */
12942 jmp sysenter_do_call
12943 @@ -482,11 +658,17 @@ sysexit_audit:
12944
12945 CFI_ENDPROC
12946 .pushsection .fixup,"ax"
12947 -2: movl $0,PT_FS(%esp)
12948 +4: movl $0,PT_FS(%esp)
12949 + jmp 1b
12950 +5: movl $0,PT_DS(%esp)
12951 + jmp 1b
12952 +6: movl $0,PT_ES(%esp)
12953 jmp 1b
12954 .section __ex_table,"a"
12955 .align 4
12956 - .long 1b,2b
12957 + .long 1b,4b
12958 + .long 2b,5b
12959 + .long 3b,6b
12960 .popsection
12961 PTGS_TO_GS_EX
12962 ENDPROC(ia32_sysenter_target)
12963 @@ -519,6 +701,15 @@ syscall_exit:
12964 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12965 jne syscall_exit_work
12966
12967 +restore_all_pax:
12968 +
12969 +#ifdef CONFIG_PAX_RANDKSTACK
12970 + movl %esp, %eax
12971 + call pax_randomize_kstack
12972 +#endif
12973 +
12974 + pax_erase_kstack
12975 +
12976 restore_all:
12977 TRACE_IRQS_IRET
12978 restore_all_notrace:
12979 @@ -578,14 +769,34 @@ ldt_ss:
12980 * compensating for the offset by changing to the ESPFIX segment with
12981 * a base address that matches for the difference.
12982 */
12983 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
12984 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
12985 mov %esp, %edx /* load kernel esp */
12986 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12987 mov %dx, %ax /* eax: new kernel esp */
12988 sub %eax, %edx /* offset (low word is 0) */
12989 +#ifdef CONFIG_SMP
12990 + movl PER_CPU_VAR(cpu_number), %ebx
12991 + shll $PAGE_SHIFT_asm, %ebx
12992 + addl $cpu_gdt_table, %ebx
12993 +#else
12994 + movl $cpu_gdt_table, %ebx
12995 +#endif
12996 shr $16, %edx
12997 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
12998 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
12999 +
13000 +#ifdef CONFIG_PAX_KERNEXEC
13001 + mov %cr0, %esi
13002 + btr $16, %esi
13003 + mov %esi, %cr0
13004 +#endif
13005 +
13006 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
13007 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
13008 +
13009 +#ifdef CONFIG_PAX_KERNEXEC
13010 + bts $16, %esi
13011 + mov %esi, %cr0
13012 +#endif
13013 +
13014 pushl_cfi $__ESPFIX_SS
13015 pushl_cfi %eax /* new kernel esp */
13016 /* Disable interrupts, but do not irqtrace this section: we
13017 @@ -614,34 +825,28 @@ work_resched:
13018 movl TI_flags(%ebp), %ecx
13019 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13020 # than syscall tracing?
13021 - jz restore_all
13022 + jz restore_all_pax
13023 testb $_TIF_NEED_RESCHED, %cl
13024 jnz work_resched
13025
13026 work_notifysig: # deal with pending signals and
13027 # notify-resume requests
13028 + movl %esp, %eax
13029 #ifdef CONFIG_VM86
13030 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13031 - movl %esp, %eax
13032 - jne work_notifysig_v86 # returning to kernel-space or
13033 + jz 1f # returning to kernel-space or
13034 # vm86-space
13035 - xorl %edx, %edx
13036 - call do_notify_resume
13037 - jmp resume_userspace_sig
13038
13039 - ALIGN
13040 -work_notifysig_v86:
13041 pushl_cfi %ecx # save ti_flags for do_notify_resume
13042 call save_v86_state # %eax contains pt_regs pointer
13043 popl_cfi %ecx
13044 movl %eax, %esp
13045 -#else
13046 - movl %esp, %eax
13047 +1:
13048 #endif
13049 xorl %edx, %edx
13050 call do_notify_resume
13051 jmp resume_userspace_sig
13052 -END(work_pending)
13053 +ENDPROC(work_pending)
13054
13055 # perform syscall exit tracing
13056 ALIGN
13057 @@ -649,11 +854,14 @@ syscall_trace_entry:
13058 movl $-ENOSYS,PT_EAX(%esp)
13059 movl %esp, %eax
13060 call syscall_trace_enter
13061 +
13062 + pax_erase_kstack
13063 +
13064 /* What it returned is what we'll actually use. */
13065 cmpl $(nr_syscalls), %eax
13066 jnae syscall_call
13067 jmp syscall_exit
13068 -END(syscall_trace_entry)
13069 +ENDPROC(syscall_trace_entry)
13070
13071 # perform syscall exit tracing
13072 ALIGN
13073 @@ -666,20 +874,24 @@ syscall_exit_work:
13074 movl %esp, %eax
13075 call syscall_trace_leave
13076 jmp resume_userspace
13077 -END(syscall_exit_work)
13078 +ENDPROC(syscall_exit_work)
13079 CFI_ENDPROC
13080
13081 RING0_INT_FRAME # can't unwind into user space anyway
13082 syscall_fault:
13083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13084 + push %ss
13085 + pop %ds
13086 +#endif
13087 GET_THREAD_INFO(%ebp)
13088 movl $-EFAULT,PT_EAX(%esp)
13089 jmp resume_userspace
13090 -END(syscall_fault)
13091 +ENDPROC(syscall_fault)
13092
13093 syscall_badsys:
13094 movl $-ENOSYS,PT_EAX(%esp)
13095 jmp resume_userspace
13096 -END(syscall_badsys)
13097 +ENDPROC(syscall_badsys)
13098 CFI_ENDPROC
13099 /*
13100 * End of kprobes section
13101 @@ -753,6 +965,36 @@ ptregs_clone:
13102 CFI_ENDPROC
13103 ENDPROC(ptregs_clone)
13104
13105 + ALIGN;
13106 +ENTRY(kernel_execve)
13107 + CFI_STARTPROC
13108 + pushl_cfi %ebp
13109 + sub $PT_OLDSS+4,%esp
13110 + pushl_cfi %edi
13111 + pushl_cfi %ecx
13112 + pushl_cfi %eax
13113 + lea 3*4(%esp),%edi
13114 + mov $PT_OLDSS/4+1,%ecx
13115 + xorl %eax,%eax
13116 + rep stosl
13117 + popl_cfi %eax
13118 + popl_cfi %ecx
13119 + popl_cfi %edi
13120 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13121 + pushl_cfi %esp
13122 + call sys_execve
13123 + add $4,%esp
13124 + CFI_ADJUST_CFA_OFFSET -4
13125 + GET_THREAD_INFO(%ebp)
13126 + test %eax,%eax
13127 + jz syscall_exit
13128 + add $PT_OLDSS+4,%esp
13129 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13130 + popl_cfi %ebp
13131 + ret
13132 + CFI_ENDPROC
13133 +ENDPROC(kernel_execve)
13134 +
13135 .macro FIXUP_ESPFIX_STACK
13136 /*
13137 * Switch back for ESPFIX stack to the normal zerobased stack
13138 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13139 * normal stack and adjusts ESP with the matching offset.
13140 */
13141 /* fixup the stack */
13142 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13143 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13144 +#ifdef CONFIG_SMP
13145 + movl PER_CPU_VAR(cpu_number), %ebx
13146 + shll $PAGE_SHIFT_asm, %ebx
13147 + addl $cpu_gdt_table, %ebx
13148 +#else
13149 + movl $cpu_gdt_table, %ebx
13150 +#endif
13151 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13152 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13153 shl $16, %eax
13154 addl %esp, %eax /* the adjusted stack pointer */
13155 pushl_cfi $__KERNEL_DS
13156 @@ -816,7 +1065,7 @@ vector=vector+1
13157 .endr
13158 2: jmp common_interrupt
13159 .endr
13160 -END(irq_entries_start)
13161 +ENDPROC(irq_entries_start)
13162
13163 .previous
13164 END(interrupt)
13165 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13166 pushl_cfi $do_coprocessor_error
13167 jmp error_code
13168 CFI_ENDPROC
13169 -END(coprocessor_error)
13170 +ENDPROC(coprocessor_error)
13171
13172 ENTRY(simd_coprocessor_error)
13173 RING0_INT_FRAME
13174 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13175 #endif
13176 jmp error_code
13177 CFI_ENDPROC
13178 -END(simd_coprocessor_error)
13179 +ENDPROC(simd_coprocessor_error)
13180
13181 ENTRY(device_not_available)
13182 RING0_INT_FRAME
13183 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13184 pushl_cfi $do_device_not_available
13185 jmp error_code
13186 CFI_ENDPROC
13187 -END(device_not_available)
13188 +ENDPROC(device_not_available)
13189
13190 #ifdef CONFIG_PARAVIRT
13191 ENTRY(native_iret)
13192 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13193 .align 4
13194 .long native_iret, iret_exc
13195 .previous
13196 -END(native_iret)
13197 +ENDPROC(native_iret)
13198
13199 ENTRY(native_irq_enable_sysexit)
13200 sti
13201 sysexit
13202 -END(native_irq_enable_sysexit)
13203 +ENDPROC(native_irq_enable_sysexit)
13204 #endif
13205
13206 ENTRY(overflow)
13207 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13208 pushl_cfi $do_overflow
13209 jmp error_code
13210 CFI_ENDPROC
13211 -END(overflow)
13212 +ENDPROC(overflow)
13213
13214 ENTRY(bounds)
13215 RING0_INT_FRAME
13216 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13217 pushl_cfi $do_bounds
13218 jmp error_code
13219 CFI_ENDPROC
13220 -END(bounds)
13221 +ENDPROC(bounds)
13222
13223 ENTRY(invalid_op)
13224 RING0_INT_FRAME
13225 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13226 pushl_cfi $do_invalid_op
13227 jmp error_code
13228 CFI_ENDPROC
13229 -END(invalid_op)
13230 +ENDPROC(invalid_op)
13231
13232 ENTRY(coprocessor_segment_overrun)
13233 RING0_INT_FRAME
13234 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13235 pushl_cfi $do_coprocessor_segment_overrun
13236 jmp error_code
13237 CFI_ENDPROC
13238 -END(coprocessor_segment_overrun)
13239 +ENDPROC(coprocessor_segment_overrun)
13240
13241 ENTRY(invalid_TSS)
13242 RING0_EC_FRAME
13243 pushl_cfi $do_invalid_TSS
13244 jmp error_code
13245 CFI_ENDPROC
13246 -END(invalid_TSS)
13247 +ENDPROC(invalid_TSS)
13248
13249 ENTRY(segment_not_present)
13250 RING0_EC_FRAME
13251 pushl_cfi $do_segment_not_present
13252 jmp error_code
13253 CFI_ENDPROC
13254 -END(segment_not_present)
13255 +ENDPROC(segment_not_present)
13256
13257 ENTRY(stack_segment)
13258 RING0_EC_FRAME
13259 pushl_cfi $do_stack_segment
13260 jmp error_code
13261 CFI_ENDPROC
13262 -END(stack_segment)
13263 +ENDPROC(stack_segment)
13264
13265 ENTRY(alignment_check)
13266 RING0_EC_FRAME
13267 pushl_cfi $do_alignment_check
13268 jmp error_code
13269 CFI_ENDPROC
13270 -END(alignment_check)
13271 +ENDPROC(alignment_check)
13272
13273 ENTRY(divide_error)
13274 RING0_INT_FRAME
13275 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13276 pushl_cfi $do_divide_error
13277 jmp error_code
13278 CFI_ENDPROC
13279 -END(divide_error)
13280 +ENDPROC(divide_error)
13281
13282 #ifdef CONFIG_X86_MCE
13283 ENTRY(machine_check)
13284 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13285 pushl_cfi machine_check_vector
13286 jmp error_code
13287 CFI_ENDPROC
13288 -END(machine_check)
13289 +ENDPROC(machine_check)
13290 #endif
13291
13292 ENTRY(spurious_interrupt_bug)
13293 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13294 pushl_cfi $do_spurious_interrupt_bug
13295 jmp error_code
13296 CFI_ENDPROC
13297 -END(spurious_interrupt_bug)
13298 +ENDPROC(spurious_interrupt_bug)
13299 /*
13300 * End of kprobes section
13301 */
13302 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13303
13304 ENTRY(mcount)
13305 ret
13306 -END(mcount)
13307 +ENDPROC(mcount)
13308
13309 ENTRY(ftrace_caller)
13310 cmpl $0, function_trace_stop
13311 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13312 .globl ftrace_stub
13313 ftrace_stub:
13314 ret
13315 -END(ftrace_caller)
13316 +ENDPROC(ftrace_caller)
13317
13318 #else /* ! CONFIG_DYNAMIC_FTRACE */
13319
13320 @@ -1174,7 +1423,7 @@ trace:
13321 popl %ecx
13322 popl %eax
13323 jmp ftrace_stub
13324 -END(mcount)
13325 +ENDPROC(mcount)
13326 #endif /* CONFIG_DYNAMIC_FTRACE */
13327 #endif /* CONFIG_FUNCTION_TRACER */
13328
13329 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13330 popl %ecx
13331 popl %eax
13332 ret
13333 -END(ftrace_graph_caller)
13334 +ENDPROC(ftrace_graph_caller)
13335
13336 .globl return_to_handler
13337 return_to_handler:
13338 @@ -1209,7 +1458,6 @@ return_to_handler:
13339 jmp *%ecx
13340 #endif
13341
13342 -.section .rodata,"a"
13343 #include "syscall_table_32.S"
13344
13345 syscall_table_size=(.-sys_call_table)
13346 @@ -1255,15 +1503,18 @@ error_code:
13347 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13348 REG_TO_PTGS %ecx
13349 SET_KERNEL_GS %ecx
13350 - movl $(__USER_DS), %ecx
13351 + movl $(__KERNEL_DS), %ecx
13352 movl %ecx, %ds
13353 movl %ecx, %es
13354 +
13355 + pax_enter_kernel
13356 +
13357 TRACE_IRQS_OFF
13358 movl %esp,%eax # pt_regs pointer
13359 call *%edi
13360 jmp ret_from_exception
13361 CFI_ENDPROC
13362 -END(page_fault)
13363 +ENDPROC(page_fault)
13364
13365 /*
13366 * Debug traps and NMI can happen at the one SYSENTER instruction
13367 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13368 call do_debug
13369 jmp ret_from_exception
13370 CFI_ENDPROC
13371 -END(debug)
13372 +ENDPROC(debug)
13373
13374 /*
13375 * NMI is doubly nasty. It can happen _while_ we're handling
13376 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13377 xorl %edx,%edx # zero error code
13378 movl %esp,%eax # pt_regs pointer
13379 call do_nmi
13380 +
13381 + pax_exit_kernel
13382 +
13383 jmp restore_all_notrace
13384 CFI_ENDPROC
13385
13386 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13387 FIXUP_ESPFIX_STACK # %eax == %esp
13388 xorl %edx,%edx # zero error code
13389 call do_nmi
13390 +
13391 + pax_exit_kernel
13392 +
13393 RESTORE_REGS
13394 lss 12+4(%esp), %esp # back to espfix stack
13395 CFI_ADJUST_CFA_OFFSET -24
13396 jmp irq_return
13397 CFI_ENDPROC
13398 -END(nmi)
13399 +ENDPROC(nmi)
13400
13401 ENTRY(int3)
13402 RING0_INT_FRAME
13403 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13404 call do_int3
13405 jmp ret_from_exception
13406 CFI_ENDPROC
13407 -END(int3)
13408 +ENDPROC(int3)
13409
13410 ENTRY(general_protection)
13411 RING0_EC_FRAME
13412 pushl_cfi $do_general_protection
13413 jmp error_code
13414 CFI_ENDPROC
13415 -END(general_protection)
13416 +ENDPROC(general_protection)
13417
13418 #ifdef CONFIG_KVM_GUEST
13419 ENTRY(async_page_fault)
13420 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13421 pushl_cfi $do_async_page_fault
13422 jmp error_code
13423 CFI_ENDPROC
13424 -END(async_page_fault)
13425 +ENDPROC(async_page_fault)
13426 #endif
13427
13428 /*
13429 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13430 index 6419bb0..bb59ca4 100644
13431 --- a/arch/x86/kernel/entry_64.S
13432 +++ b/arch/x86/kernel/entry_64.S
13433 @@ -55,6 +55,8 @@
13434 #include <asm/paravirt.h>
13435 #include <asm/ftrace.h>
13436 #include <asm/percpu.h>
13437 +#include <asm/pgtable.h>
13438 +#include <asm/alternative-asm.h>
13439
13440 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13441 #include <linux/elf-em.h>
13442 @@ -68,8 +70,9 @@
13443 #ifdef CONFIG_FUNCTION_TRACER
13444 #ifdef CONFIG_DYNAMIC_FTRACE
13445 ENTRY(mcount)
13446 + pax_force_retaddr
13447 retq
13448 -END(mcount)
13449 +ENDPROC(mcount)
13450
13451 ENTRY(ftrace_caller)
13452 cmpl $0, function_trace_stop
13453 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13454 #endif
13455
13456 GLOBAL(ftrace_stub)
13457 + pax_force_retaddr
13458 retq
13459 -END(ftrace_caller)
13460 +ENDPROC(ftrace_caller)
13461
13462 #else /* ! CONFIG_DYNAMIC_FTRACE */
13463 ENTRY(mcount)
13464 @@ -112,6 +116,7 @@ ENTRY(mcount)
13465 #endif
13466
13467 GLOBAL(ftrace_stub)
13468 + pax_force_retaddr
13469 retq
13470
13471 trace:
13472 @@ -121,12 +126,13 @@ trace:
13473 movq 8(%rbp), %rsi
13474 subq $MCOUNT_INSN_SIZE, %rdi
13475
13476 + pax_force_fptr ftrace_trace_function
13477 call *ftrace_trace_function
13478
13479 MCOUNT_RESTORE_FRAME
13480
13481 jmp ftrace_stub
13482 -END(mcount)
13483 +ENDPROC(mcount)
13484 #endif /* CONFIG_DYNAMIC_FTRACE */
13485 #endif /* CONFIG_FUNCTION_TRACER */
13486
13487 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13488
13489 MCOUNT_RESTORE_FRAME
13490
13491 + pax_force_retaddr
13492 retq
13493 -END(ftrace_graph_caller)
13494 +ENDPROC(ftrace_graph_caller)
13495
13496 GLOBAL(return_to_handler)
13497 subq $24, %rsp
13498 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13499 movq 8(%rsp), %rdx
13500 movq (%rsp), %rax
13501 addq $24, %rsp
13502 + pax_force_fptr %rdi
13503 jmp *%rdi
13504 #endif
13505
13506 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13507 ENDPROC(native_usergs_sysret64)
13508 #endif /* CONFIG_PARAVIRT */
13509
13510 + .macro ljmpq sel, off
13511 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13512 + .byte 0x48; ljmp *1234f(%rip)
13513 + .pushsection .rodata
13514 + .align 16
13515 + 1234: .quad \off; .word \sel
13516 + .popsection
13517 +#else
13518 + pushq $\sel
13519 + pushq $\off
13520 + lretq
13521 +#endif
13522 + .endm
13523 +
13524 + .macro pax_enter_kernel
13525 + pax_set_fptr_mask
13526 +#ifdef CONFIG_PAX_KERNEXEC
13527 + call pax_enter_kernel
13528 +#endif
13529 + .endm
13530 +
13531 + .macro pax_exit_kernel
13532 +#ifdef CONFIG_PAX_KERNEXEC
13533 + call pax_exit_kernel
13534 +#endif
13535 + .endm
13536 +
13537 +#ifdef CONFIG_PAX_KERNEXEC
13538 +ENTRY(pax_enter_kernel)
13539 + pushq %rdi
13540 +
13541 +#ifdef CONFIG_PARAVIRT
13542 + PV_SAVE_REGS(CLBR_RDI)
13543 +#endif
13544 +
13545 + GET_CR0_INTO_RDI
13546 + bts $16,%rdi
13547 + jnc 3f
13548 + mov %cs,%edi
13549 + cmp $__KERNEL_CS,%edi
13550 + jnz 2f
13551 +1:
13552 +
13553 +#ifdef CONFIG_PARAVIRT
13554 + PV_RESTORE_REGS(CLBR_RDI)
13555 +#endif
13556 +
13557 + popq %rdi
13558 + pax_force_retaddr
13559 + retq
13560 +
13561 +2: ljmpq __KERNEL_CS,1f
13562 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13563 +4: SET_RDI_INTO_CR0
13564 + jmp 1b
13565 +ENDPROC(pax_enter_kernel)
13566 +
13567 +ENTRY(pax_exit_kernel)
13568 + pushq %rdi
13569 +
13570 +#ifdef CONFIG_PARAVIRT
13571 + PV_SAVE_REGS(CLBR_RDI)
13572 +#endif
13573 +
13574 + mov %cs,%rdi
13575 + cmp $__KERNEXEC_KERNEL_CS,%edi
13576 + jz 2f
13577 +1:
13578 +
13579 +#ifdef CONFIG_PARAVIRT
13580 + PV_RESTORE_REGS(CLBR_RDI);
13581 +#endif
13582 +
13583 + popq %rdi
13584 + pax_force_retaddr
13585 + retq
13586 +
13587 +2: GET_CR0_INTO_RDI
13588 + btr $16,%rdi
13589 + ljmpq __KERNEL_CS,3f
13590 +3: SET_RDI_INTO_CR0
13591 + jmp 1b
13592 +#ifdef CONFIG_PARAVIRT
13593 + PV_RESTORE_REGS(CLBR_RDI);
13594 +#endif
13595 +
13596 + popq %rdi
13597 + pax_force_retaddr
13598 + retq
13599 +ENDPROC(pax_exit_kernel)
13600 +#endif
13601 +
13602 + .macro pax_enter_kernel_user
13603 + pax_set_fptr_mask
13604 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13605 + call pax_enter_kernel_user
13606 +#endif
13607 + .endm
13608 +
13609 + .macro pax_exit_kernel_user
13610 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13611 + call pax_exit_kernel_user
13612 +#endif
13613 +#ifdef CONFIG_PAX_RANDKSTACK
13614 + pushq %rax
13615 + call pax_randomize_kstack
13616 + popq %rax
13617 +#endif
13618 + .endm
13619 +
13620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13621 +ENTRY(pax_enter_kernel_user)
13622 + pushq %rdi
13623 + pushq %rbx
13624 +
13625 +#ifdef CONFIG_PARAVIRT
13626 + PV_SAVE_REGS(CLBR_RDI)
13627 +#endif
13628 +
13629 + GET_CR3_INTO_RDI
13630 + mov %rdi,%rbx
13631 + add $__START_KERNEL_map,%rbx
13632 + sub phys_base(%rip),%rbx
13633 +
13634 +#ifdef CONFIG_PARAVIRT
13635 + pushq %rdi
13636 + cmpl $0, pv_info+PARAVIRT_enabled
13637 + jz 1f
13638 + i = 0
13639 + .rept USER_PGD_PTRS
13640 + mov i*8(%rbx),%rsi
13641 + mov $0,%sil
13642 + lea i*8(%rbx),%rdi
13643 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13644 + i = i + 1
13645 + .endr
13646 + jmp 2f
13647 +1:
13648 +#endif
13649 +
13650 + i = 0
13651 + .rept USER_PGD_PTRS
13652 + movb $0,i*8(%rbx)
13653 + i = i + 1
13654 + .endr
13655 +
13656 +#ifdef CONFIG_PARAVIRT
13657 +2: popq %rdi
13658 +#endif
13659 + SET_RDI_INTO_CR3
13660 +
13661 +#ifdef CONFIG_PAX_KERNEXEC
13662 + GET_CR0_INTO_RDI
13663 + bts $16,%rdi
13664 + SET_RDI_INTO_CR0
13665 +#endif
13666 +
13667 +#ifdef CONFIG_PARAVIRT
13668 + PV_RESTORE_REGS(CLBR_RDI)
13669 +#endif
13670 +
13671 + popq %rbx
13672 + popq %rdi
13673 + pax_force_retaddr
13674 + retq
13675 +ENDPROC(pax_enter_kernel_user)
13676 +
13677 +ENTRY(pax_exit_kernel_user)
13678 + push %rdi
13679 +
13680 +#ifdef CONFIG_PARAVIRT
13681 + pushq %rbx
13682 + PV_SAVE_REGS(CLBR_RDI)
13683 +#endif
13684 +
13685 +#ifdef CONFIG_PAX_KERNEXEC
13686 + GET_CR0_INTO_RDI
13687 + btr $16,%rdi
13688 + SET_RDI_INTO_CR0
13689 +#endif
13690 +
13691 + GET_CR3_INTO_RDI
13692 + add $__START_KERNEL_map,%rdi
13693 + sub phys_base(%rip),%rdi
13694 +
13695 +#ifdef CONFIG_PARAVIRT
13696 + cmpl $0, pv_info+PARAVIRT_enabled
13697 + jz 1f
13698 + mov %rdi,%rbx
13699 + i = 0
13700 + .rept USER_PGD_PTRS
13701 + mov i*8(%rbx),%rsi
13702 + mov $0x67,%sil
13703 + lea i*8(%rbx),%rdi
13704 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13705 + i = i + 1
13706 + .endr
13707 + jmp 2f
13708 +1:
13709 +#endif
13710 +
13711 + i = 0
13712 + .rept USER_PGD_PTRS
13713 + movb $0x67,i*8(%rdi)
13714 + i = i + 1
13715 + .endr
13716 +
13717 +#ifdef CONFIG_PARAVIRT
13718 +2: PV_RESTORE_REGS(CLBR_RDI)
13719 + popq %rbx
13720 +#endif
13721 +
13722 + popq %rdi
13723 + pax_force_retaddr
13724 + retq
13725 +ENDPROC(pax_exit_kernel_user)
13726 +#endif
13727 +
13728 +.macro pax_erase_kstack
13729 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13730 + call pax_erase_kstack
13731 +#endif
13732 +.endm
13733 +
13734 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13735 +/*
13736 + * r11: thread_info
13737 + * rcx, rdx: can be clobbered
13738 + */
13739 +ENTRY(pax_erase_kstack)
13740 + pushq %rdi
13741 + pushq %rax
13742 + pushq %r11
13743 +
13744 + GET_THREAD_INFO(%r11)
13745 + mov TI_lowest_stack(%r11), %rdi
13746 + mov $-0xBEEF, %rax
13747 + std
13748 +
13749 +1: mov %edi, %ecx
13750 + and $THREAD_SIZE_asm - 1, %ecx
13751 + shr $3, %ecx
13752 + repne scasq
13753 + jecxz 2f
13754 +
13755 + cmp $2*8, %ecx
13756 + jc 2f
13757 +
13758 + mov $2*8, %ecx
13759 + repe scasq
13760 + jecxz 2f
13761 + jne 1b
13762 +
13763 +2: cld
13764 + mov %esp, %ecx
13765 + sub %edi, %ecx
13766 +
13767 + cmp $THREAD_SIZE_asm, %rcx
13768 + jb 3f
13769 + ud2
13770 +3:
13771 +
13772 + shr $3, %ecx
13773 + rep stosq
13774 +
13775 + mov TI_task_thread_sp0(%r11), %rdi
13776 + sub $256, %rdi
13777 + mov %rdi, TI_lowest_stack(%r11)
13778 +
13779 + popq %r11
13780 + popq %rax
13781 + popq %rdi
13782 + pax_force_retaddr
13783 + ret
13784 +ENDPROC(pax_erase_kstack)
13785 +#endif
13786
13787 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13788 #ifdef CONFIG_TRACE_IRQFLAGS
13789 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13790 .endm
13791
13792 .macro UNFAKE_STACK_FRAME
13793 - addq $8*6, %rsp
13794 - CFI_ADJUST_CFA_OFFSET -(6*8)
13795 + addq $8*6 + ARG_SKIP, %rsp
13796 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13797 .endm
13798
13799 /*
13800 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13801 movq %rsp, %rsi
13802
13803 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13804 - testl $3, CS(%rdi)
13805 + testb $3, CS(%rdi)
13806 je 1f
13807 SWAPGS
13808 /*
13809 @@ -350,9 +634,10 @@ ENTRY(save_rest)
13810 movq_cfi r15, R15+16
13811 movq %r11, 8(%rsp) /* return address */
13812 FIXUP_TOP_OF_STACK %r11, 16
13813 + pax_force_retaddr
13814 ret
13815 CFI_ENDPROC
13816 -END(save_rest)
13817 +ENDPROC(save_rest)
13818
13819 /* save complete stack frame */
13820 .pushsection .kprobes.text, "ax"
13821 @@ -381,9 +666,10 @@ ENTRY(save_paranoid)
13822 js 1f /* negative -> in kernel */
13823 SWAPGS
13824 xorl %ebx,%ebx
13825 -1: ret
13826 +1: pax_force_retaddr_bts
13827 + ret
13828 CFI_ENDPROC
13829 -END(save_paranoid)
13830 +ENDPROC(save_paranoid)
13831 .popsection
13832
13833 /*
13834 @@ -405,7 +691,7 @@ ENTRY(ret_from_fork)
13835
13836 RESTORE_REST
13837
13838 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13839 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13840 je int_ret_from_sys_call
13841
13842 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13843 @@ -415,7 +701,7 @@ ENTRY(ret_from_fork)
13844 jmp ret_from_sys_call # go to the SYSRET fastpath
13845
13846 CFI_ENDPROC
13847 -END(ret_from_fork)
13848 +ENDPROC(ret_from_fork)
13849
13850 /*
13851 * System call entry. Up to 6 arguments in registers are supported.
13852 @@ -451,7 +737,7 @@ END(ret_from_fork)
13853 ENTRY(system_call)
13854 CFI_STARTPROC simple
13855 CFI_SIGNAL_FRAME
13856 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13857 + CFI_DEF_CFA rsp,0
13858 CFI_REGISTER rip,rcx
13859 /*CFI_REGISTER rflags,r11*/
13860 SWAPGS_UNSAFE_STACK
13861 @@ -464,12 +750,13 @@ ENTRY(system_call_after_swapgs)
13862
13863 movq %rsp,PER_CPU_VAR(old_rsp)
13864 movq PER_CPU_VAR(kernel_stack),%rsp
13865 + SAVE_ARGS 8*6,0
13866 + pax_enter_kernel_user
13867 /*
13868 * No need to follow this irqs off/on section - it's straight
13869 * and short:
13870 */
13871 ENABLE_INTERRUPTS(CLBR_NONE)
13872 - SAVE_ARGS 8,0
13873 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13874 movq %rcx,RIP-ARGOFFSET(%rsp)
13875 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13876 @@ -479,7 +766,7 @@ ENTRY(system_call_after_swapgs)
13877 system_call_fastpath:
13878 cmpq $__NR_syscall_max,%rax
13879 ja badsys
13880 - movq %r10,%rcx
13881 + movq R10-ARGOFFSET(%rsp),%rcx
13882 call *sys_call_table(,%rax,8) # XXX: rip relative
13883 movq %rax,RAX-ARGOFFSET(%rsp)
13884 /*
13885 @@ -498,6 +785,8 @@ sysret_check:
13886 andl %edi,%edx
13887 jnz sysret_careful
13888 CFI_REMEMBER_STATE
13889 + pax_exit_kernel_user
13890 + pax_erase_kstack
13891 /*
13892 * sysretq will re-enable interrupts:
13893 */
13894 @@ -549,14 +838,18 @@ badsys:
13895 * jump back to the normal fast path.
13896 */
13897 auditsys:
13898 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
13899 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13900 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13901 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13902 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13903 movq %rax,%rsi /* 2nd arg: syscall number */
13904 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13905 call audit_syscall_entry
13906 +
13907 + pax_erase_kstack
13908 +
13909 LOAD_ARGS 0 /* reload call-clobbered registers */
13910 + pax_set_fptr_mask
13911 jmp system_call_fastpath
13912
13913 /*
13914 @@ -586,16 +879,20 @@ tracesys:
13915 FIXUP_TOP_OF_STACK %rdi
13916 movq %rsp,%rdi
13917 call syscall_trace_enter
13918 +
13919 + pax_erase_kstack
13920 +
13921 /*
13922 * Reload arg registers from stack in case ptrace changed them.
13923 * We don't reload %rax because syscall_trace_enter() returned
13924 * the value it wants us to use in the table lookup.
13925 */
13926 LOAD_ARGS ARGOFFSET, 1
13927 + pax_set_fptr_mask
13928 RESTORE_REST
13929 cmpq $__NR_syscall_max,%rax
13930 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13931 - movq %r10,%rcx /* fixup for C */
13932 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13933 call *sys_call_table(,%rax,8)
13934 movq %rax,RAX-ARGOFFSET(%rsp)
13935 /* Use IRET because user could have changed frame */
13936 @@ -607,7 +904,7 @@ tracesys:
13937 GLOBAL(int_ret_from_sys_call)
13938 DISABLE_INTERRUPTS(CLBR_NONE)
13939 TRACE_IRQS_OFF
13940 - testl $3,CS-ARGOFFSET(%rsp)
13941 + testb $3,CS-ARGOFFSET(%rsp)
13942 je retint_restore_args
13943 movl $_TIF_ALLWORK_MASK,%edi
13944 /* edi: mask to check */
13945 @@ -664,7 +961,7 @@ int_restore_rest:
13946 TRACE_IRQS_OFF
13947 jmp int_with_check
13948 CFI_ENDPROC
13949 -END(system_call)
13950 +ENDPROC(system_call)
13951
13952 /*
13953 * Certain special system calls that need to save a complete full stack frame.
13954 @@ -680,7 +977,7 @@ ENTRY(\label)
13955 call \func
13956 jmp ptregscall_common
13957 CFI_ENDPROC
13958 -END(\label)
13959 +ENDPROC(\label)
13960 .endm
13961
13962 PTREGSCALL stub_clone, sys_clone, %r8
13963 @@ -698,9 +995,10 @@ ENTRY(ptregscall_common)
13964 movq_cfi_restore R12+8, r12
13965 movq_cfi_restore RBP+8, rbp
13966 movq_cfi_restore RBX+8, rbx
13967 + pax_force_retaddr
13968 ret $REST_SKIP /* pop extended registers */
13969 CFI_ENDPROC
13970 -END(ptregscall_common)
13971 +ENDPROC(ptregscall_common)
13972
13973 ENTRY(stub_execve)
13974 CFI_STARTPROC
13975 @@ -715,7 +1013,7 @@ ENTRY(stub_execve)
13976 RESTORE_REST
13977 jmp int_ret_from_sys_call
13978 CFI_ENDPROC
13979 -END(stub_execve)
13980 +ENDPROC(stub_execve)
13981
13982 /*
13983 * sigreturn is special because it needs to restore all registers on return.
13984 @@ -733,7 +1031,7 @@ ENTRY(stub_rt_sigreturn)
13985 RESTORE_REST
13986 jmp int_ret_from_sys_call
13987 CFI_ENDPROC
13988 -END(stub_rt_sigreturn)
13989 +ENDPROC(stub_rt_sigreturn)
13990
13991 /*
13992 * Build the entry stubs and pointer table with some assembler magic.
13993 @@ -768,7 +1066,7 @@ vector=vector+1
13994 2: jmp common_interrupt
13995 .endr
13996 CFI_ENDPROC
13997 -END(irq_entries_start)
13998 +ENDPROC(irq_entries_start)
13999
14000 .previous
14001 END(interrupt)
14002 @@ -789,6 +1087,16 @@ END(interrupt)
14003 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
14004 SAVE_ARGS_IRQ
14005 PARTIAL_FRAME 0
14006 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14007 + testb $3, CS(%rdi)
14008 + jnz 1f
14009 + pax_enter_kernel
14010 + jmp 2f
14011 +1: pax_enter_kernel_user
14012 +2:
14013 +#else
14014 + pax_enter_kernel
14015 +#endif
14016 call \func
14017 .endm
14018
14019 @@ -820,7 +1128,7 @@ ret_from_intr:
14020
14021 exit_intr:
14022 GET_THREAD_INFO(%rcx)
14023 - testl $3,CS-ARGOFFSET(%rsp)
14024 + testb $3,CS-ARGOFFSET(%rsp)
14025 je retint_kernel
14026
14027 /* Interrupt came from user space */
14028 @@ -842,12 +1150,16 @@ retint_swapgs: /* return to user-space */
14029 * The iretq could re-enable interrupts:
14030 */
14031 DISABLE_INTERRUPTS(CLBR_ANY)
14032 + pax_exit_kernel_user
14033 + pax_erase_kstack
14034 TRACE_IRQS_IRETQ
14035 SWAPGS
14036 jmp restore_args
14037
14038 retint_restore_args: /* return to kernel space */
14039 DISABLE_INTERRUPTS(CLBR_ANY)
14040 + pax_exit_kernel
14041 + pax_force_retaddr RIP-ARGOFFSET
14042 /*
14043 * The iretq could re-enable interrupts:
14044 */
14045 @@ -936,7 +1248,7 @@ ENTRY(retint_kernel)
14046 #endif
14047
14048 CFI_ENDPROC
14049 -END(common_interrupt)
14050 +ENDPROC(common_interrupt)
14051 /*
14052 * End of kprobes section
14053 */
14054 @@ -952,7 +1264,7 @@ ENTRY(\sym)
14055 interrupt \do_sym
14056 jmp ret_from_intr
14057 CFI_ENDPROC
14058 -END(\sym)
14059 +ENDPROC(\sym)
14060 .endm
14061
14062 #ifdef CONFIG_SMP
14063 @@ -1017,12 +1329,22 @@ ENTRY(\sym)
14064 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14065 call error_entry
14066 DEFAULT_FRAME 0
14067 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14068 + testb $3, CS(%rsp)
14069 + jnz 1f
14070 + pax_enter_kernel
14071 + jmp 2f
14072 +1: pax_enter_kernel_user
14073 +2:
14074 +#else
14075 + pax_enter_kernel
14076 +#endif
14077 movq %rsp,%rdi /* pt_regs pointer */
14078 xorl %esi,%esi /* no error code */
14079 call \do_sym
14080 jmp error_exit /* %ebx: no swapgs flag */
14081 CFI_ENDPROC
14082 -END(\sym)
14083 +ENDPROC(\sym)
14084 .endm
14085
14086 .macro paranoidzeroentry sym do_sym
14087 @@ -1034,15 +1356,25 @@ ENTRY(\sym)
14088 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14089 call save_paranoid
14090 TRACE_IRQS_OFF
14091 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14092 + testb $3, CS(%rsp)
14093 + jnz 1f
14094 + pax_enter_kernel
14095 + jmp 2f
14096 +1: pax_enter_kernel_user
14097 +2:
14098 +#else
14099 + pax_enter_kernel
14100 +#endif
14101 movq %rsp,%rdi /* pt_regs pointer */
14102 xorl %esi,%esi /* no error code */
14103 call \do_sym
14104 jmp paranoid_exit /* %ebx: no swapgs flag */
14105 CFI_ENDPROC
14106 -END(\sym)
14107 +ENDPROC(\sym)
14108 .endm
14109
14110 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14111 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14112 .macro paranoidzeroentry_ist sym do_sym ist
14113 ENTRY(\sym)
14114 INTR_FRAME
14115 @@ -1052,14 +1384,30 @@ ENTRY(\sym)
14116 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14117 call save_paranoid
14118 TRACE_IRQS_OFF
14119 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14120 + testb $3, CS(%rsp)
14121 + jnz 1f
14122 + pax_enter_kernel
14123 + jmp 2f
14124 +1: pax_enter_kernel_user
14125 +2:
14126 +#else
14127 + pax_enter_kernel
14128 +#endif
14129 movq %rsp,%rdi /* pt_regs pointer */
14130 xorl %esi,%esi /* no error code */
14131 +#ifdef CONFIG_SMP
14132 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14133 + lea init_tss(%r12), %r12
14134 +#else
14135 + lea init_tss(%rip), %r12
14136 +#endif
14137 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14138 call \do_sym
14139 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14140 jmp paranoid_exit /* %ebx: no swapgs flag */
14141 CFI_ENDPROC
14142 -END(\sym)
14143 +ENDPROC(\sym)
14144 .endm
14145
14146 .macro errorentry sym do_sym
14147 @@ -1070,13 +1418,23 @@ ENTRY(\sym)
14148 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14149 call error_entry
14150 DEFAULT_FRAME 0
14151 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14152 + testb $3, CS(%rsp)
14153 + jnz 1f
14154 + pax_enter_kernel
14155 + jmp 2f
14156 +1: pax_enter_kernel_user
14157 +2:
14158 +#else
14159 + pax_enter_kernel
14160 +#endif
14161 movq %rsp,%rdi /* pt_regs pointer */
14162 movq ORIG_RAX(%rsp),%rsi /* get error code */
14163 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14164 call \do_sym
14165 jmp error_exit /* %ebx: no swapgs flag */
14166 CFI_ENDPROC
14167 -END(\sym)
14168 +ENDPROC(\sym)
14169 .endm
14170
14171 /* error code is on the stack already */
14172 @@ -1089,13 +1447,23 @@ ENTRY(\sym)
14173 call save_paranoid
14174 DEFAULT_FRAME 0
14175 TRACE_IRQS_OFF
14176 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14177 + testb $3, CS(%rsp)
14178 + jnz 1f
14179 + pax_enter_kernel
14180 + jmp 2f
14181 +1: pax_enter_kernel_user
14182 +2:
14183 +#else
14184 + pax_enter_kernel
14185 +#endif
14186 movq %rsp,%rdi /* pt_regs pointer */
14187 movq ORIG_RAX(%rsp),%rsi /* get error code */
14188 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14189 call \do_sym
14190 jmp paranoid_exit /* %ebx: no swapgs flag */
14191 CFI_ENDPROC
14192 -END(\sym)
14193 +ENDPROC(\sym)
14194 .endm
14195
14196 zeroentry divide_error do_divide_error
14197 @@ -1125,9 +1493,10 @@ gs_change:
14198 2: mfence /* workaround */
14199 SWAPGS
14200 popfq_cfi
14201 + pax_force_retaddr
14202 ret
14203 CFI_ENDPROC
14204 -END(native_load_gs_index)
14205 +ENDPROC(native_load_gs_index)
14206
14207 .section __ex_table,"a"
14208 .align 8
14209 @@ -1149,13 +1518,14 @@ ENTRY(kernel_thread_helper)
14210 * Here we are in the child and the registers are set as they were
14211 * at kernel_thread() invocation in the parent.
14212 */
14213 + pax_force_fptr %rsi
14214 call *%rsi
14215 # exit
14216 mov %eax, %edi
14217 call do_exit
14218 ud2 # padding for call trace
14219 CFI_ENDPROC
14220 -END(kernel_thread_helper)
14221 +ENDPROC(kernel_thread_helper)
14222
14223 /*
14224 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14225 @@ -1182,11 +1552,11 @@ ENTRY(kernel_execve)
14226 RESTORE_REST
14227 testq %rax,%rax
14228 je int_ret_from_sys_call
14229 - RESTORE_ARGS
14230 UNFAKE_STACK_FRAME
14231 + pax_force_retaddr
14232 ret
14233 CFI_ENDPROC
14234 -END(kernel_execve)
14235 +ENDPROC(kernel_execve)
14236
14237 /* Call softirq on interrupt stack. Interrupts are off. */
14238 ENTRY(call_softirq)
14239 @@ -1204,9 +1574,10 @@ ENTRY(call_softirq)
14240 CFI_DEF_CFA_REGISTER rsp
14241 CFI_ADJUST_CFA_OFFSET -8
14242 decl PER_CPU_VAR(irq_count)
14243 + pax_force_retaddr
14244 ret
14245 CFI_ENDPROC
14246 -END(call_softirq)
14247 +ENDPROC(call_softirq)
14248
14249 #ifdef CONFIG_XEN
14250 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14251 @@ -1244,7 +1615,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14252 decl PER_CPU_VAR(irq_count)
14253 jmp error_exit
14254 CFI_ENDPROC
14255 -END(xen_do_hypervisor_callback)
14256 +ENDPROC(xen_do_hypervisor_callback)
14257
14258 /*
14259 * Hypervisor uses this for application faults while it executes.
14260 @@ -1303,7 +1674,7 @@ ENTRY(xen_failsafe_callback)
14261 SAVE_ALL
14262 jmp error_exit
14263 CFI_ENDPROC
14264 -END(xen_failsafe_callback)
14265 +ENDPROC(xen_failsafe_callback)
14266
14267 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14268 xen_hvm_callback_vector xen_evtchn_do_upcall
14269 @@ -1352,16 +1723,31 @@ ENTRY(paranoid_exit)
14270 TRACE_IRQS_OFF
14271 testl %ebx,%ebx /* swapgs needed? */
14272 jnz paranoid_restore
14273 - testl $3,CS(%rsp)
14274 + testb $3,CS(%rsp)
14275 jnz paranoid_userspace
14276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14277 + pax_exit_kernel
14278 + TRACE_IRQS_IRETQ 0
14279 + SWAPGS_UNSAFE_STACK
14280 + RESTORE_ALL 8
14281 + pax_force_retaddr_bts
14282 + jmp irq_return
14283 +#endif
14284 paranoid_swapgs:
14285 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14286 + pax_exit_kernel_user
14287 +#else
14288 + pax_exit_kernel
14289 +#endif
14290 TRACE_IRQS_IRETQ 0
14291 SWAPGS_UNSAFE_STACK
14292 RESTORE_ALL 8
14293 jmp irq_return
14294 paranoid_restore:
14295 + pax_exit_kernel
14296 TRACE_IRQS_IRETQ 0
14297 RESTORE_ALL 8
14298 + pax_force_retaddr_bts
14299 jmp irq_return
14300 paranoid_userspace:
14301 GET_THREAD_INFO(%rcx)
14302 @@ -1390,7 +1776,7 @@ paranoid_schedule:
14303 TRACE_IRQS_OFF
14304 jmp paranoid_userspace
14305 CFI_ENDPROC
14306 -END(paranoid_exit)
14307 +ENDPROC(paranoid_exit)
14308
14309 /*
14310 * Exception entry point. This expects an error code/orig_rax on the stack.
14311 @@ -1417,12 +1803,13 @@ ENTRY(error_entry)
14312 movq_cfi r14, R14+8
14313 movq_cfi r15, R15+8
14314 xorl %ebx,%ebx
14315 - testl $3,CS+8(%rsp)
14316 + testb $3,CS+8(%rsp)
14317 je error_kernelspace
14318 error_swapgs:
14319 SWAPGS
14320 error_sti:
14321 TRACE_IRQS_OFF
14322 + pax_force_retaddr_bts
14323 ret
14324
14325 /*
14326 @@ -1449,7 +1836,7 @@ bstep_iret:
14327 movq %rcx,RIP+8(%rsp)
14328 jmp error_swapgs
14329 CFI_ENDPROC
14330 -END(error_entry)
14331 +ENDPROC(error_entry)
14332
14333
14334 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14335 @@ -1469,7 +1856,7 @@ ENTRY(error_exit)
14336 jnz retint_careful
14337 jmp retint_swapgs
14338 CFI_ENDPROC
14339 -END(error_exit)
14340 +ENDPROC(error_exit)
14341
14342
14343 /* runs on exception stack */
14344 @@ -1481,6 +1868,16 @@ ENTRY(nmi)
14345 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14346 call save_paranoid
14347 DEFAULT_FRAME 0
14348 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14349 + testb $3, CS(%rsp)
14350 + jnz 1f
14351 + pax_enter_kernel
14352 + jmp 2f
14353 +1: pax_enter_kernel_user
14354 +2:
14355 +#else
14356 + pax_enter_kernel
14357 +#endif
14358 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14359 movq %rsp,%rdi
14360 movq $-1,%rsi
14361 @@ -1491,12 +1888,28 @@ ENTRY(nmi)
14362 DISABLE_INTERRUPTS(CLBR_NONE)
14363 testl %ebx,%ebx /* swapgs needed? */
14364 jnz nmi_restore
14365 - testl $3,CS(%rsp)
14366 + testb $3,CS(%rsp)
14367 jnz nmi_userspace
14368 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14369 + pax_exit_kernel
14370 + SWAPGS_UNSAFE_STACK
14371 + RESTORE_ALL 8
14372 + pax_force_retaddr_bts
14373 + jmp irq_return
14374 +#endif
14375 nmi_swapgs:
14376 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14377 + pax_exit_kernel_user
14378 +#else
14379 + pax_exit_kernel
14380 +#endif
14381 SWAPGS_UNSAFE_STACK
14382 + RESTORE_ALL 8
14383 + jmp irq_return
14384 nmi_restore:
14385 + pax_exit_kernel
14386 RESTORE_ALL 8
14387 + pax_force_retaddr_bts
14388 jmp irq_return
14389 nmi_userspace:
14390 GET_THREAD_INFO(%rcx)
14391 @@ -1525,14 +1938,14 @@ nmi_schedule:
14392 jmp paranoid_exit
14393 CFI_ENDPROC
14394 #endif
14395 -END(nmi)
14396 +ENDPROC(nmi)
14397
14398 ENTRY(ignore_sysret)
14399 CFI_STARTPROC
14400 mov $-ENOSYS,%eax
14401 sysret
14402 CFI_ENDPROC
14403 -END(ignore_sysret)
14404 +ENDPROC(ignore_sysret)
14405
14406 /*
14407 * End of kprobes section
14408 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14409 index c9a281f..ce2f317 100644
14410 --- a/arch/x86/kernel/ftrace.c
14411 +++ b/arch/x86/kernel/ftrace.c
14412 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14413 static const void *mod_code_newcode; /* holds the text to write to the IP */
14414
14415 static unsigned nmi_wait_count;
14416 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14417 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14418
14419 int ftrace_arch_read_dyn_info(char *buf, int size)
14420 {
14421 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14422
14423 r = snprintf(buf, size, "%u %u",
14424 nmi_wait_count,
14425 - atomic_read(&nmi_update_count));
14426 + atomic_read_unchecked(&nmi_update_count));
14427 return r;
14428 }
14429
14430 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14431
14432 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14433 smp_rmb();
14434 + pax_open_kernel();
14435 ftrace_mod_code();
14436 - atomic_inc(&nmi_update_count);
14437 + pax_close_kernel();
14438 + atomic_inc_unchecked(&nmi_update_count);
14439 }
14440 /* Must have previous changes seen before executions */
14441 smp_mb();
14442 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14443 {
14444 unsigned char replaced[MCOUNT_INSN_SIZE];
14445
14446 + ip = ktla_ktva(ip);
14447 +
14448 /*
14449 * Note: Due to modules and __init, code can
14450 * disappear and change, we need to protect against faulting
14451 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14452 unsigned char old[MCOUNT_INSN_SIZE], *new;
14453 int ret;
14454
14455 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14456 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14457 new = ftrace_call_replace(ip, (unsigned long)func);
14458 ret = ftrace_modify_code(ip, old, new);
14459
14460 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14461 {
14462 unsigned char code[MCOUNT_INSN_SIZE];
14463
14464 + ip = ktla_ktva(ip);
14465 +
14466 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14467 return -EFAULT;
14468
14469 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14470 index 3bb0850..55a56f4 100644
14471 --- a/arch/x86/kernel/head32.c
14472 +++ b/arch/x86/kernel/head32.c
14473 @@ -19,6 +19,7 @@
14474 #include <asm/io_apic.h>
14475 #include <asm/bios_ebda.h>
14476 #include <asm/tlbflush.h>
14477 +#include <asm/boot.h>
14478
14479 static void __init i386_default_early_setup(void)
14480 {
14481 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14482 {
14483 memblock_init();
14484
14485 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14486 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14487
14488 #ifdef CONFIG_BLK_DEV_INITRD
14489 /* Reserve INITRD */
14490 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14491 index ce0be7c..c41476e 100644
14492 --- a/arch/x86/kernel/head_32.S
14493 +++ b/arch/x86/kernel/head_32.S
14494 @@ -25,6 +25,12 @@
14495 /* Physical address */
14496 #define pa(X) ((X) - __PAGE_OFFSET)
14497
14498 +#ifdef CONFIG_PAX_KERNEXEC
14499 +#define ta(X) (X)
14500 +#else
14501 +#define ta(X) ((X) - __PAGE_OFFSET)
14502 +#endif
14503 +
14504 /*
14505 * References to members of the new_cpu_data structure.
14506 */
14507 @@ -54,11 +60,7 @@
14508 * and small than max_low_pfn, otherwise will waste some page table entries
14509 */
14510
14511 -#if PTRS_PER_PMD > 1
14512 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14513 -#else
14514 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14515 -#endif
14516 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14517
14518 /* Number of possible pages in the lowmem region */
14519 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14520 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14521 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14522
14523 /*
14524 + * Real beginning of normal "text" segment
14525 + */
14526 +ENTRY(stext)
14527 +ENTRY(_stext)
14528 +
14529 +/*
14530 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14531 * %esi points to the real-mode code as a 32-bit pointer.
14532 * CS and DS must be 4 GB flat segments, but we don't depend on
14533 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14534 * can.
14535 */
14536 __HEAD
14537 +
14538 +#ifdef CONFIG_PAX_KERNEXEC
14539 + jmp startup_32
14540 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14541 +.fill PAGE_SIZE-5,1,0xcc
14542 +#endif
14543 +
14544 ENTRY(startup_32)
14545 movl pa(stack_start),%ecx
14546
14547 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14548 2:
14549 leal -__PAGE_OFFSET(%ecx),%esp
14550
14551 +#ifdef CONFIG_SMP
14552 + movl $pa(cpu_gdt_table),%edi
14553 + movl $__per_cpu_load,%eax
14554 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14555 + rorl $16,%eax
14556 + movb %al,__KERNEL_PERCPU + 4(%edi)
14557 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14558 + movl $__per_cpu_end - 1,%eax
14559 + subl $__per_cpu_start,%eax
14560 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14561 +#endif
14562 +
14563 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14564 + movl $NR_CPUS,%ecx
14565 + movl $pa(cpu_gdt_table),%edi
14566 +1:
14567 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14568 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14569 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14570 + addl $PAGE_SIZE_asm,%edi
14571 + loop 1b
14572 +#endif
14573 +
14574 +#ifdef CONFIG_PAX_KERNEXEC
14575 + movl $pa(boot_gdt),%edi
14576 + movl $__LOAD_PHYSICAL_ADDR,%eax
14577 + movw %ax,__BOOT_CS + 2(%edi)
14578 + rorl $16,%eax
14579 + movb %al,__BOOT_CS + 4(%edi)
14580 + movb %ah,__BOOT_CS + 7(%edi)
14581 + rorl $16,%eax
14582 +
14583 + ljmp $(__BOOT_CS),$1f
14584 +1:
14585 +
14586 + movl $NR_CPUS,%ecx
14587 + movl $pa(cpu_gdt_table),%edi
14588 + addl $__PAGE_OFFSET,%eax
14589 +1:
14590 + movw %ax,__KERNEL_CS + 2(%edi)
14591 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14592 + rorl $16,%eax
14593 + movb %al,__KERNEL_CS + 4(%edi)
14594 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14595 + movb %ah,__KERNEL_CS + 7(%edi)
14596 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14597 + rorl $16,%eax
14598 + addl $PAGE_SIZE_asm,%edi
14599 + loop 1b
14600 +#endif
14601 +
14602 /*
14603 * Clear BSS first so that there are no surprises...
14604 */
14605 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14606 movl %eax, pa(max_pfn_mapped)
14607
14608 /* Do early initialization of the fixmap area */
14609 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14610 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14611 +#ifdef CONFIG_COMPAT_VDSO
14612 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14613 +#else
14614 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14615 +#endif
14616 #else /* Not PAE */
14617
14618 page_pde_offset = (__PAGE_OFFSET >> 20);
14619 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14620 movl %eax, pa(max_pfn_mapped)
14621
14622 /* Do early initialization of the fixmap area */
14623 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14624 - movl %eax,pa(initial_page_table+0xffc)
14625 +#ifdef CONFIG_COMPAT_VDSO
14626 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14627 +#else
14628 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14629 +#endif
14630 #endif
14631
14632 #ifdef CONFIG_PARAVIRT
14633 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14634 cmpl $num_subarch_entries, %eax
14635 jae bad_subarch
14636
14637 - movl pa(subarch_entries)(,%eax,4), %eax
14638 - subl $__PAGE_OFFSET, %eax
14639 - jmp *%eax
14640 + jmp *pa(subarch_entries)(,%eax,4)
14641
14642 bad_subarch:
14643 WEAK(lguest_entry)
14644 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14645 __INITDATA
14646
14647 subarch_entries:
14648 - .long default_entry /* normal x86/PC */
14649 - .long lguest_entry /* lguest hypervisor */
14650 - .long xen_entry /* Xen hypervisor */
14651 - .long default_entry /* Moorestown MID */
14652 + .long ta(default_entry) /* normal x86/PC */
14653 + .long ta(lguest_entry) /* lguest hypervisor */
14654 + .long ta(xen_entry) /* Xen hypervisor */
14655 + .long ta(default_entry) /* Moorestown MID */
14656 num_subarch_entries = (. - subarch_entries) / 4
14657 .previous
14658 #else
14659 @@ -312,6 +382,7 @@ default_entry:
14660 orl %edx,%eax
14661 movl %eax,%cr4
14662
14663 +#ifdef CONFIG_X86_PAE
14664 testb $X86_CR4_PAE, %al # check if PAE is enabled
14665 jz 6f
14666
14667 @@ -340,6 +411,9 @@ default_entry:
14668 /* Make changes effective */
14669 wrmsr
14670
14671 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14672 +#endif
14673 +
14674 6:
14675
14676 /*
14677 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14678 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14679 movl %eax,%ss # after changing gdt.
14680
14681 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14682 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14683 movl %eax,%ds
14684 movl %eax,%es
14685
14686 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14687 */
14688 cmpb $0,ready
14689 jne 1f
14690 - movl $gdt_page,%eax
14691 + movl $cpu_gdt_table,%eax
14692 movl $stack_canary,%ecx
14693 +#ifdef CONFIG_SMP
14694 + addl $__per_cpu_load,%ecx
14695 +#endif
14696 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14697 shrl $16, %ecx
14698 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14699 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14700 1:
14701 -#endif
14702 movl $(__KERNEL_STACK_CANARY),%eax
14703 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14704 + movl $(__USER_DS),%eax
14705 +#else
14706 + xorl %eax,%eax
14707 +#endif
14708 movl %eax,%gs
14709
14710 xorl %eax,%eax # Clear LDT
14711 @@ -558,22 +639,22 @@ early_page_fault:
14712 jmp early_fault
14713
14714 early_fault:
14715 - cld
14716 #ifdef CONFIG_PRINTK
14717 + cmpl $1,%ss:early_recursion_flag
14718 + je hlt_loop
14719 + incl %ss:early_recursion_flag
14720 + cld
14721 pusha
14722 movl $(__KERNEL_DS),%eax
14723 movl %eax,%ds
14724 movl %eax,%es
14725 - cmpl $2,early_recursion_flag
14726 - je hlt_loop
14727 - incl early_recursion_flag
14728 movl %cr2,%eax
14729 pushl %eax
14730 pushl %edx /* trapno */
14731 pushl $fault_msg
14732 call printk
14733 +; call dump_stack
14734 #endif
14735 - call dump_stack
14736 hlt_loop:
14737 hlt
14738 jmp hlt_loop
14739 @@ -581,8 +662,11 @@ hlt_loop:
14740 /* This is the default interrupt "handler" :-) */
14741 ALIGN
14742 ignore_int:
14743 - cld
14744 #ifdef CONFIG_PRINTK
14745 + cmpl $2,%ss:early_recursion_flag
14746 + je hlt_loop
14747 + incl %ss:early_recursion_flag
14748 + cld
14749 pushl %eax
14750 pushl %ecx
14751 pushl %edx
14752 @@ -591,9 +675,6 @@ ignore_int:
14753 movl $(__KERNEL_DS),%eax
14754 movl %eax,%ds
14755 movl %eax,%es
14756 - cmpl $2,early_recursion_flag
14757 - je hlt_loop
14758 - incl early_recursion_flag
14759 pushl 16(%esp)
14760 pushl 24(%esp)
14761 pushl 32(%esp)
14762 @@ -622,29 +703,43 @@ ENTRY(initial_code)
14763 /*
14764 * BSS section
14765 */
14766 -__PAGE_ALIGNED_BSS
14767 - .align PAGE_SIZE
14768 #ifdef CONFIG_X86_PAE
14769 +.section .initial_pg_pmd,"a",@progbits
14770 initial_pg_pmd:
14771 .fill 1024*KPMDS,4,0
14772 #else
14773 +.section .initial_page_table,"a",@progbits
14774 ENTRY(initial_page_table)
14775 .fill 1024,4,0
14776 #endif
14777 +.section .initial_pg_fixmap,"a",@progbits
14778 initial_pg_fixmap:
14779 .fill 1024,4,0
14780 +.section .empty_zero_page,"a",@progbits
14781 ENTRY(empty_zero_page)
14782 .fill 4096,1,0
14783 +.section .swapper_pg_dir,"a",@progbits
14784 ENTRY(swapper_pg_dir)
14785 +#ifdef CONFIG_X86_PAE
14786 + .fill 4,8,0
14787 +#else
14788 .fill 1024,4,0
14789 +#endif
14790 +
14791 +/*
14792 + * The IDT has to be page-aligned to simplify the Pentium
14793 + * F0 0F bug workaround.. We have a special link segment
14794 + * for this.
14795 + */
14796 +.section .idt,"a",@progbits
14797 +ENTRY(idt_table)
14798 + .fill 256,8,0
14799
14800 /*
14801 * This starts the data section.
14802 */
14803 #ifdef CONFIG_X86_PAE
14804 -__PAGE_ALIGNED_DATA
14805 - /* Page-aligned for the benefit of paravirt? */
14806 - .align PAGE_SIZE
14807 +.section .initial_page_table,"a",@progbits
14808 ENTRY(initial_page_table)
14809 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14810 # if KPMDS == 3
14811 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14812 # error "Kernel PMDs should be 1, 2 or 3"
14813 # endif
14814 .align PAGE_SIZE /* needs to be page-sized too */
14815 +
14816 +#ifdef CONFIG_PAX_PER_CPU_PGD
14817 +ENTRY(cpu_pgd)
14818 + .rept NR_CPUS
14819 + .fill 4,8,0
14820 + .endr
14821 +#endif
14822 +
14823 #endif
14824
14825 .data
14826 .balign 4
14827 ENTRY(stack_start)
14828 - .long init_thread_union+THREAD_SIZE
14829 + .long init_thread_union+THREAD_SIZE-8
14830
14831 +ready: .byte 0
14832 +
14833 +.section .rodata,"a",@progbits
14834 early_recursion_flag:
14835 .long 0
14836
14837 -ready: .byte 0
14838 -
14839 int_msg:
14840 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14841
14842 @@ -707,7 +811,7 @@ fault_msg:
14843 .word 0 # 32 bit align gdt_desc.address
14844 boot_gdt_descr:
14845 .word __BOOT_DS+7
14846 - .long boot_gdt - __PAGE_OFFSET
14847 + .long pa(boot_gdt)
14848
14849 .word 0 # 32-bit align idt_desc.address
14850 idt_descr:
14851 @@ -718,7 +822,7 @@ idt_descr:
14852 .word 0 # 32 bit align gdt_desc.address
14853 ENTRY(early_gdt_descr)
14854 .word GDT_ENTRIES*8-1
14855 - .long gdt_page /* Overwritten for secondary CPUs */
14856 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14857
14858 /*
14859 * The boot_gdt must mirror the equivalent in setup.S and is
14860 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14861 .align L1_CACHE_BYTES
14862 ENTRY(boot_gdt)
14863 .fill GDT_ENTRY_BOOT_CS,8,0
14864 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14865 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14866 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14867 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14868 +
14869 + .align PAGE_SIZE_asm
14870 +ENTRY(cpu_gdt_table)
14871 + .rept NR_CPUS
14872 + .quad 0x0000000000000000 /* NULL descriptor */
14873 + .quad 0x0000000000000000 /* 0x0b reserved */
14874 + .quad 0x0000000000000000 /* 0x13 reserved */
14875 + .quad 0x0000000000000000 /* 0x1b reserved */
14876 +
14877 +#ifdef CONFIG_PAX_KERNEXEC
14878 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14879 +#else
14880 + .quad 0x0000000000000000 /* 0x20 unused */
14881 +#endif
14882 +
14883 + .quad 0x0000000000000000 /* 0x28 unused */
14884 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14885 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14886 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14887 + .quad 0x0000000000000000 /* 0x4b reserved */
14888 + .quad 0x0000000000000000 /* 0x53 reserved */
14889 + .quad 0x0000000000000000 /* 0x5b reserved */
14890 +
14891 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14892 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14893 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14894 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14895 +
14896 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14897 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14898 +
14899 + /*
14900 + * Segments used for calling PnP BIOS have byte granularity.
14901 + * The code segments and data segments have fixed 64k limits,
14902 + * the transfer segment sizes are set at run time.
14903 + */
14904 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14905 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14906 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14907 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14908 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14909 +
14910 + /*
14911 + * The APM segments have byte granularity and their bases
14912 + * are set at run time. All have 64k limits.
14913 + */
14914 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14915 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14916 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14917 +
14918 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14919 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14920 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14921 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14922 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14923 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14924 +
14925 + /* Be sure this is zeroed to avoid false validations in Xen */
14926 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14927 + .endr
14928 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14929 index e11e394..9aebc5d 100644
14930 --- a/arch/x86/kernel/head_64.S
14931 +++ b/arch/x86/kernel/head_64.S
14932 @@ -19,6 +19,8 @@
14933 #include <asm/cache.h>
14934 #include <asm/processor-flags.h>
14935 #include <asm/percpu.h>
14936 +#include <asm/cpufeature.h>
14937 +#include <asm/alternative-asm.h>
14938
14939 #ifdef CONFIG_PARAVIRT
14940 #include <asm/asm-offsets.h>
14941 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
14942 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14943 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14944 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14945 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14946 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14947 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
14948 +L3_VMALLOC_END = pud_index(VMALLOC_END)
14949 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14950 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14951
14952 .text
14953 __HEAD
14954 @@ -85,35 +93,23 @@ startup_64:
14955 */
14956 addq %rbp, init_level4_pgt + 0(%rip)
14957 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14958 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14959 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
14960 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14961 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14962
14963 addq %rbp, level3_ident_pgt + 0(%rip)
14964 +#ifndef CONFIG_XEN
14965 + addq %rbp, level3_ident_pgt + 8(%rip)
14966 +#endif
14967
14968 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14969 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14970 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14971 +
14972 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14973 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14974
14975 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14976 -
14977 - /* Add an Identity mapping if I am above 1G */
14978 - leaq _text(%rip), %rdi
14979 - andq $PMD_PAGE_MASK, %rdi
14980 -
14981 - movq %rdi, %rax
14982 - shrq $PUD_SHIFT, %rax
14983 - andq $(PTRS_PER_PUD - 1), %rax
14984 - jz ident_complete
14985 -
14986 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14987 - leaq level3_ident_pgt(%rip), %rbx
14988 - movq %rdx, 0(%rbx, %rax, 8)
14989 -
14990 - movq %rdi, %rax
14991 - shrq $PMD_SHIFT, %rax
14992 - andq $(PTRS_PER_PMD - 1), %rax
14993 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14994 - leaq level2_spare_pgt(%rip), %rbx
14995 - movq %rdx, 0(%rbx, %rax, 8)
14996 -ident_complete:
14997 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14998
14999 /*
15000 * Fixup the kernel text+data virtual addresses. Note that
15001 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
15002 * after the boot processor executes this code.
15003 */
15004
15005 - /* Enable PAE mode and PGE */
15006 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
15007 + /* Enable PAE mode and PSE/PGE */
15008 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15009 movq %rax, %cr4
15010
15011 /* Setup early boot stage 4 level pagetables. */
15012 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
15013 movl $MSR_EFER, %ecx
15014 rdmsr
15015 btsl $_EFER_SCE, %eax /* Enable System Call */
15016 - btl $20,%edi /* No Execute supported? */
15017 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
15018 jnc 1f
15019 btsl $_EFER_NX, %eax
15020 + leaq init_level4_pgt(%rip), %rdi
15021 +#ifndef CONFIG_EFI
15022 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15023 +#endif
15024 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15025 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15026 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15027 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15028 1: wrmsr /* Make changes effective */
15029
15030 /* Setup cr0 */
15031 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15032 * jump. In addition we need to ensure %cs is set so we make this
15033 * a far return.
15034 */
15035 + pax_set_fptr_mask
15036 movq initial_code(%rip),%rax
15037 pushq $0 # fake return address to stop unwinder
15038 pushq $__KERNEL_CS # set correct cs
15039 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15040 bad_address:
15041 jmp bad_address
15042
15043 - .section ".init.text","ax"
15044 + __INIT
15045 #ifdef CONFIG_EARLY_PRINTK
15046 .globl early_idt_handlers
15047 early_idt_handlers:
15048 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15049 #endif /* EARLY_PRINTK */
15050 1: hlt
15051 jmp 1b
15052 + .previous
15053
15054 #ifdef CONFIG_EARLY_PRINTK
15055 + __INITDATA
15056 early_recursion_flag:
15057 .long 0
15058 + .previous
15059
15060 + .section .rodata,"a",@progbits
15061 early_idt_msg:
15062 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15063 early_idt_ripmsg:
15064 .asciz "RIP %s\n"
15065 + .previous
15066 #endif /* CONFIG_EARLY_PRINTK */
15067 - .previous
15068
15069 + .section .rodata,"a",@progbits
15070 #define NEXT_PAGE(name) \
15071 .balign PAGE_SIZE; \
15072 ENTRY(name)
15073 @@ -338,7 +348,6 @@ ENTRY(name)
15074 i = i + 1 ; \
15075 .endr
15076
15077 - .data
15078 /*
15079 * This default setting generates an ident mapping at address 0x100000
15080 * and a mapping for the kernel that precisely maps virtual address
15081 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15082 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15083 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15084 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15085 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15086 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15087 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15088 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15089 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15090 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15091 .org init_level4_pgt + L4_START_KERNEL*8, 0
15092 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15093 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15094
15095 +#ifdef CONFIG_PAX_PER_CPU_PGD
15096 +NEXT_PAGE(cpu_pgd)
15097 + .rept NR_CPUS
15098 + .fill 512,8,0
15099 + .endr
15100 +#endif
15101 +
15102 NEXT_PAGE(level3_ident_pgt)
15103 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15104 +#ifdef CONFIG_XEN
15105 .fill 511,8,0
15106 +#else
15107 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15108 + .fill 510,8,0
15109 +#endif
15110 +
15111 +NEXT_PAGE(level3_vmalloc_start_pgt)
15112 + .fill 512,8,0
15113 +
15114 +NEXT_PAGE(level3_vmalloc_end_pgt)
15115 + .fill 512,8,0
15116 +
15117 +NEXT_PAGE(level3_vmemmap_pgt)
15118 + .fill L3_VMEMMAP_START,8,0
15119 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15120
15121 NEXT_PAGE(level3_kernel_pgt)
15122 .fill L3_START_KERNEL,8,0
15123 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15124 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15125 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15126
15127 +NEXT_PAGE(level2_vmemmap_pgt)
15128 + .fill 512,8,0
15129 +
15130 NEXT_PAGE(level2_fixmap_pgt)
15131 - .fill 506,8,0
15132 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15133 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15134 - .fill 5,8,0
15135 + .fill 507,8,0
15136 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15137 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15138 + .fill 4,8,0
15139
15140 -NEXT_PAGE(level1_fixmap_pgt)
15141 +NEXT_PAGE(level1_vsyscall_pgt)
15142 .fill 512,8,0
15143
15144 -NEXT_PAGE(level2_ident_pgt)
15145 - /* Since I easily can, map the first 1G.
15146 + /* Since I easily can, map the first 2G.
15147 * Don't set NX because code runs from these pages.
15148 */
15149 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15150 +NEXT_PAGE(level2_ident_pgt)
15151 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15152
15153 NEXT_PAGE(level2_kernel_pgt)
15154 /*
15155 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15156 * If you want to increase this then increase MODULES_VADDR
15157 * too.)
15158 */
15159 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15160 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15161 -
15162 -NEXT_PAGE(level2_spare_pgt)
15163 - .fill 512, 8, 0
15164 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15165
15166 #undef PMDS
15167 #undef NEXT_PAGE
15168
15169 - .data
15170 + .align PAGE_SIZE
15171 +ENTRY(cpu_gdt_table)
15172 + .rept NR_CPUS
15173 + .quad 0x0000000000000000 /* NULL descriptor */
15174 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15175 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15176 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15177 + .quad 0x00cffb000000ffff /* __USER32_CS */
15178 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15179 + .quad 0x00affb000000ffff /* __USER_CS */
15180 +
15181 +#ifdef CONFIG_PAX_KERNEXEC
15182 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15183 +#else
15184 + .quad 0x0 /* unused */
15185 +#endif
15186 +
15187 + .quad 0,0 /* TSS */
15188 + .quad 0,0 /* LDT */
15189 + .quad 0,0,0 /* three TLS descriptors */
15190 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15191 + /* asm/segment.h:GDT_ENTRIES must match this */
15192 +
15193 + /* zero the remaining page */
15194 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15195 + .endr
15196 +
15197 .align 16
15198 .globl early_gdt_descr
15199 early_gdt_descr:
15200 .word GDT_ENTRIES*8-1
15201 early_gdt_descr_base:
15202 - .quad INIT_PER_CPU_VAR(gdt_page)
15203 + .quad cpu_gdt_table
15204
15205 ENTRY(phys_base)
15206 /* This must match the first entry in level2_kernel_pgt */
15207 .quad 0x0000000000000000
15208
15209 #include "../../x86/xen/xen-head.S"
15210 -
15211 - .section .bss, "aw", @nobits
15212 +
15213 + .section .rodata,"a",@progbits
15214 .align L1_CACHE_BYTES
15215 ENTRY(idt_table)
15216 - .skip IDT_ENTRIES * 16
15217 + .fill 512,8,0
15218
15219 __PAGE_ALIGNED_BSS
15220 .align PAGE_SIZE
15221 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15222 index 9c3bd4a..e1d9b35 100644
15223 --- a/arch/x86/kernel/i386_ksyms_32.c
15224 +++ b/arch/x86/kernel/i386_ksyms_32.c
15225 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15226 EXPORT_SYMBOL(cmpxchg8b_emu);
15227 #endif
15228
15229 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15230 +
15231 /* Networking helper routines. */
15232 EXPORT_SYMBOL(csum_partial_copy_generic);
15233 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15234 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15235
15236 EXPORT_SYMBOL(__get_user_1);
15237 EXPORT_SYMBOL(__get_user_2);
15238 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15239
15240 EXPORT_SYMBOL(csum_partial);
15241 EXPORT_SYMBOL(empty_zero_page);
15242 +
15243 +#ifdef CONFIG_PAX_KERNEXEC
15244 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15245 +#endif
15246 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15247 index 6104852..6114160 100644
15248 --- a/arch/x86/kernel/i8259.c
15249 +++ b/arch/x86/kernel/i8259.c
15250 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15251 "spurious 8259A interrupt: IRQ%d.\n", irq);
15252 spurious_irq_mask |= irqmask;
15253 }
15254 - atomic_inc(&irq_err_count);
15255 + atomic_inc_unchecked(&irq_err_count);
15256 /*
15257 * Theoretically we do not have to handle this IRQ,
15258 * but in Linux this does not cause problems and is
15259 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15260 index 43e9ccf..44ccf6f 100644
15261 --- a/arch/x86/kernel/init_task.c
15262 +++ b/arch/x86/kernel/init_task.c
15263 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15264 * way process stacks are handled. This is done by having a special
15265 * "init_task" linker map entry..
15266 */
15267 -union thread_union init_thread_union __init_task_data =
15268 - { INIT_THREAD_INFO(init_task) };
15269 +union thread_union init_thread_union __init_task_data;
15270
15271 /*
15272 * Initial task structure.
15273 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15274 * section. Since TSS's are completely CPU-local, we want them
15275 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15276 */
15277 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15278 -
15279 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15280 +EXPORT_SYMBOL(init_tss);
15281 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15282 index 8c96897..be66bfa 100644
15283 --- a/arch/x86/kernel/ioport.c
15284 +++ b/arch/x86/kernel/ioport.c
15285 @@ -6,6 +6,7 @@
15286 #include <linux/sched.h>
15287 #include <linux/kernel.h>
15288 #include <linux/capability.h>
15289 +#include <linux/security.h>
15290 #include <linux/errno.h>
15291 #include <linux/types.h>
15292 #include <linux/ioport.h>
15293 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15294
15295 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15296 return -EINVAL;
15297 +#ifdef CONFIG_GRKERNSEC_IO
15298 + if (turn_on && grsec_disable_privio) {
15299 + gr_handle_ioperm();
15300 + return -EPERM;
15301 + }
15302 +#endif
15303 if (turn_on && !capable(CAP_SYS_RAWIO))
15304 return -EPERM;
15305
15306 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15307 * because the ->io_bitmap_max value must match the bitmap
15308 * contents:
15309 */
15310 - tss = &per_cpu(init_tss, get_cpu());
15311 + tss = init_tss + get_cpu();
15312
15313 if (turn_on)
15314 bitmap_clear(t->io_bitmap_ptr, from, num);
15315 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15316 return -EINVAL;
15317 /* Trying to gain more privileges? */
15318 if (level > old) {
15319 +#ifdef CONFIG_GRKERNSEC_IO
15320 + if (grsec_disable_privio) {
15321 + gr_handle_iopl();
15322 + return -EPERM;
15323 + }
15324 +#endif
15325 if (!capable(CAP_SYS_RAWIO))
15326 return -EPERM;
15327 }
15328 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15329 index 6c0802e..bea25ae 100644
15330 --- a/arch/x86/kernel/irq.c
15331 +++ b/arch/x86/kernel/irq.c
15332 @@ -17,7 +17,7 @@
15333 #include <asm/mce.h>
15334 #include <asm/hw_irq.h>
15335
15336 -atomic_t irq_err_count;
15337 +atomic_unchecked_t irq_err_count;
15338
15339 /* Function pointer for generic interrupt vector handling */
15340 void (*x86_platform_ipi_callback)(void) = NULL;
15341 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15342 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15343 seq_printf(p, " Machine check polls\n");
15344 #endif
15345 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15346 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15347 #if defined(CONFIG_X86_IO_APIC)
15348 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15349 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15350 #endif
15351 return 0;
15352 }
15353 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15354
15355 u64 arch_irq_stat(void)
15356 {
15357 - u64 sum = atomic_read(&irq_err_count);
15358 + u64 sum = atomic_read_unchecked(&irq_err_count);
15359
15360 #ifdef CONFIG_X86_IO_APIC
15361 - sum += atomic_read(&irq_mis_count);
15362 + sum += atomic_read_unchecked(&irq_mis_count);
15363 #endif
15364 return sum;
15365 }
15366 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15367 index 7209070..cbcd71a 100644
15368 --- a/arch/x86/kernel/irq_32.c
15369 +++ b/arch/x86/kernel/irq_32.c
15370 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15371 __asm__ __volatile__("andl %%esp,%0" :
15372 "=r" (sp) : "0" (THREAD_SIZE - 1));
15373
15374 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15375 + return sp < STACK_WARN;
15376 }
15377
15378 static void print_stack_overflow(void)
15379 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15380 * per-CPU IRQ handling contexts (thread information and stack)
15381 */
15382 union irq_ctx {
15383 - struct thread_info tinfo;
15384 - u32 stack[THREAD_SIZE/sizeof(u32)];
15385 + unsigned long previous_esp;
15386 + u32 stack[THREAD_SIZE/sizeof(u32)];
15387 } __attribute__((aligned(THREAD_SIZE)));
15388
15389 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15390 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15391 static inline int
15392 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15393 {
15394 - union irq_ctx *curctx, *irqctx;
15395 + union irq_ctx *irqctx;
15396 u32 *isp, arg1, arg2;
15397
15398 - curctx = (union irq_ctx *) current_thread_info();
15399 irqctx = __this_cpu_read(hardirq_ctx);
15400
15401 /*
15402 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15403 * handler) we can't do that and just have to keep using the
15404 * current stack (which is the irq stack already after all)
15405 */
15406 - if (unlikely(curctx == irqctx))
15407 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15408 return 0;
15409
15410 /* build the stack frame on the IRQ stack */
15411 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15412 - irqctx->tinfo.task = curctx->tinfo.task;
15413 - irqctx->tinfo.previous_esp = current_stack_pointer;
15414 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15415 + irqctx->previous_esp = current_stack_pointer;
15416
15417 - /*
15418 - * Copy the softirq bits in preempt_count so that the
15419 - * softirq checks work in the hardirq context.
15420 - */
15421 - irqctx->tinfo.preempt_count =
15422 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15423 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15425 + __set_fs(MAKE_MM_SEG(0));
15426 +#endif
15427
15428 if (unlikely(overflow))
15429 call_on_stack(print_stack_overflow, isp);
15430 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15431 : "0" (irq), "1" (desc), "2" (isp),
15432 "D" (desc->handle_irq)
15433 : "memory", "cc", "ecx");
15434 +
15435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15436 + __set_fs(current_thread_info()->addr_limit);
15437 +#endif
15438 +
15439 return 1;
15440 }
15441
15442 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15443 */
15444 void __cpuinit irq_ctx_init(int cpu)
15445 {
15446 - union irq_ctx *irqctx;
15447 -
15448 if (per_cpu(hardirq_ctx, cpu))
15449 return;
15450
15451 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15452 - THREAD_FLAGS,
15453 - THREAD_ORDER));
15454 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15455 - irqctx->tinfo.cpu = cpu;
15456 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15457 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15458 -
15459 - per_cpu(hardirq_ctx, cpu) = irqctx;
15460 -
15461 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15462 - THREAD_FLAGS,
15463 - THREAD_ORDER));
15464 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15465 - irqctx->tinfo.cpu = cpu;
15466 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15467 -
15468 - per_cpu(softirq_ctx, cpu) = irqctx;
15469 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15470 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15471
15472 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15473 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15474 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15475 asmlinkage void do_softirq(void)
15476 {
15477 unsigned long flags;
15478 - struct thread_info *curctx;
15479 union irq_ctx *irqctx;
15480 u32 *isp;
15481
15482 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15483 local_irq_save(flags);
15484
15485 if (local_softirq_pending()) {
15486 - curctx = current_thread_info();
15487 irqctx = __this_cpu_read(softirq_ctx);
15488 - irqctx->tinfo.task = curctx->task;
15489 - irqctx->tinfo.previous_esp = current_stack_pointer;
15490 + irqctx->previous_esp = current_stack_pointer;
15491
15492 /* build the stack frame on the softirq stack */
15493 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15494 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15495 +
15496 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15497 + __set_fs(MAKE_MM_SEG(0));
15498 +#endif
15499
15500 call_on_stack(__do_softirq, isp);
15501 +
15502 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15503 + __set_fs(current_thread_info()->addr_limit);
15504 +#endif
15505 +
15506 /*
15507 * Shouldn't happen, we returned above if in_interrupt():
15508 */
15509 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15510 index 00354d4..187ae44 100644
15511 --- a/arch/x86/kernel/kgdb.c
15512 +++ b/arch/x86/kernel/kgdb.c
15513 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15514 #ifdef CONFIG_X86_32
15515 switch (regno) {
15516 case GDB_SS:
15517 - if (!user_mode_vm(regs))
15518 + if (!user_mode(regs))
15519 *(unsigned long *)mem = __KERNEL_DS;
15520 break;
15521 case GDB_SP:
15522 - if (!user_mode_vm(regs))
15523 + if (!user_mode(regs))
15524 *(unsigned long *)mem = kernel_stack_pointer(regs);
15525 break;
15526 case GDB_GS:
15527 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15528 case 'k':
15529 /* clear the trace bit */
15530 linux_regs->flags &= ~X86_EFLAGS_TF;
15531 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15532 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15533
15534 /* set the trace bit if we're stepping */
15535 if (remcomInBuffer[0] == 's') {
15536 linux_regs->flags |= X86_EFLAGS_TF;
15537 - atomic_set(&kgdb_cpu_doing_single_step,
15538 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15539 raw_smp_processor_id());
15540 }
15541
15542 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15543 return NOTIFY_DONE;
15544
15545 case DIE_DEBUG:
15546 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15547 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15548 if (user_mode(regs))
15549 return single_step_cont(regs, args);
15550 break;
15551 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15552 index 794bc95..c6e29e9 100644
15553 --- a/arch/x86/kernel/kprobes.c
15554 +++ b/arch/x86/kernel/kprobes.c
15555 @@ -117,8 +117,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15556 } __attribute__((packed)) *insn;
15557
15558 insn = (struct __arch_relative_insn *)from;
15559 +
15560 + pax_open_kernel();
15561 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15562 insn->op = op;
15563 + pax_close_kernel();
15564 }
15565
15566 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15567 @@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15568 kprobe_opcode_t opcode;
15569 kprobe_opcode_t *orig_opcodes = opcodes;
15570
15571 - if (search_exception_tables((unsigned long)opcodes))
15572 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15573 return 0; /* Page fault may occur on this address. */
15574
15575 retry:
15576 @@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15577 }
15578 }
15579 insn_get_length(&insn);
15580 + pax_open_kernel();
15581 memcpy(dest, insn.kaddr, insn.length);
15582 + pax_close_kernel();
15583
15584 #ifdef CONFIG_X86_64
15585 if (insn_rip_relative(&insn)) {
15586 @@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15587 (u8 *) dest;
15588 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15589 disp = (u8 *) dest + insn_offset_displacement(&insn);
15590 + pax_open_kernel();
15591 *(s32 *) disp = (s32) newdisp;
15592 + pax_close_kernel();
15593 }
15594 #endif
15595 return insn.length;
15596 @@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15597 */
15598 __copy_instruction(p->ainsn.insn, p->addr, 0);
15599
15600 - if (can_boost(p->addr))
15601 + if (can_boost(ktla_ktva(p->addr)))
15602 p->ainsn.boostable = 0;
15603 else
15604 p->ainsn.boostable = -1;
15605
15606 - p->opcode = *p->addr;
15607 + p->opcode = *(ktla_ktva(p->addr));
15608 }
15609
15610 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15611 @@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15612 * nor set current_kprobe, because it doesn't use single
15613 * stepping.
15614 */
15615 - regs->ip = (unsigned long)p->ainsn.insn;
15616 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15617 preempt_enable_no_resched();
15618 return;
15619 }
15620 @@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15621 if (p->opcode == BREAKPOINT_INSTRUCTION)
15622 regs->ip = (unsigned long)p->addr;
15623 else
15624 - regs->ip = (unsigned long)p->ainsn.insn;
15625 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15626 }
15627
15628 /*
15629 @@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15630 setup_singlestep(p, regs, kcb, 0);
15631 return 1;
15632 }
15633 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15634 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15635 /*
15636 * The breakpoint instruction was removed right
15637 * after we hit it. Another cpu has removed
15638 @@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15639 " movq %rax, 152(%rsp)\n"
15640 RESTORE_REGS_STRING
15641 " popfq\n"
15642 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15643 + " btsq $63,(%rsp)\n"
15644 +#endif
15645 #else
15646 " pushf\n"
15647 SAVE_REGS_STRING
15648 @@ -819,7 +829,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15649 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15650 {
15651 unsigned long *tos = stack_addr(regs);
15652 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15653 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15654 unsigned long orig_ip = (unsigned long)p->addr;
15655 kprobe_opcode_t *insn = p->ainsn.insn;
15656
15657 @@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15658 struct die_args *args = data;
15659 int ret = NOTIFY_DONE;
15660
15661 - if (args->regs && user_mode_vm(args->regs))
15662 + if (args->regs && user_mode(args->regs))
15663 return ret;
15664
15665 switch (val) {
15666 @@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15667 * Verify if the address gap is in 2GB range, because this uses
15668 * a relative jump.
15669 */
15670 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15671 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15672 if (abs(rel) > 0x7fffffff)
15673 return -ERANGE;
15674
15675 @@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15676 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15677
15678 /* Set probe function call */
15679 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15680 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15681
15682 /* Set returning jmp instruction at the tail of out-of-line buffer */
15683 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15684 - (u8 *)op->kp.addr + op->optinsn.size);
15685 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15686
15687 flush_icache_range((unsigned long) buf,
15688 (unsigned long) buf + TMPL_END_IDX +
15689 @@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15690 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15691
15692 /* Backup instructions which will be replaced by jump address */
15693 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15694 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15695 RELATIVE_ADDR_SIZE);
15696
15697 insn_buf[0] = RELATIVEJUMP_OPCODE;
15698 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15699 index a9c2116..a52d4fc 100644
15700 --- a/arch/x86/kernel/kvm.c
15701 +++ b/arch/x86/kernel/kvm.c
15702 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15703 pv_mmu_ops.set_pud = kvm_set_pud;
15704 #if PAGETABLE_LEVELS == 4
15705 pv_mmu_ops.set_pgd = kvm_set_pgd;
15706 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15707 #endif
15708 #endif
15709 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15710 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15711 index ea69726..604d066 100644
15712 --- a/arch/x86/kernel/ldt.c
15713 +++ b/arch/x86/kernel/ldt.c
15714 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15715 if (reload) {
15716 #ifdef CONFIG_SMP
15717 preempt_disable();
15718 - load_LDT(pc);
15719 + load_LDT_nolock(pc);
15720 if (!cpumask_equal(mm_cpumask(current->mm),
15721 cpumask_of(smp_processor_id())))
15722 smp_call_function(flush_ldt, current->mm, 1);
15723 preempt_enable();
15724 #else
15725 - load_LDT(pc);
15726 + load_LDT_nolock(pc);
15727 #endif
15728 }
15729 if (oldsize) {
15730 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15731 return err;
15732
15733 for (i = 0; i < old->size; i++)
15734 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15735 + write_ldt_entry(new->ldt, i, old->ldt + i);
15736 return 0;
15737 }
15738
15739 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15740 retval = copy_ldt(&mm->context, &old_mm->context);
15741 mutex_unlock(&old_mm->context.lock);
15742 }
15743 +
15744 + if (tsk == current) {
15745 + mm->context.vdso = 0;
15746 +
15747 +#ifdef CONFIG_X86_32
15748 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15749 + mm->context.user_cs_base = 0UL;
15750 + mm->context.user_cs_limit = ~0UL;
15751 +
15752 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15753 + cpus_clear(mm->context.cpu_user_cs_mask);
15754 +#endif
15755 +
15756 +#endif
15757 +#endif
15758 +
15759 + }
15760 +
15761 return retval;
15762 }
15763
15764 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15765 }
15766 }
15767
15768 +#ifdef CONFIG_PAX_SEGMEXEC
15769 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15770 + error = -EINVAL;
15771 + goto out_unlock;
15772 + }
15773 +#endif
15774 +
15775 fill_ldt(&ldt, &ldt_info);
15776 if (oldmode)
15777 ldt.avl = 0;
15778 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15779 index a3fa43b..8966f4c 100644
15780 --- a/arch/x86/kernel/machine_kexec_32.c
15781 +++ b/arch/x86/kernel/machine_kexec_32.c
15782 @@ -27,7 +27,7 @@
15783 #include <asm/cacheflush.h>
15784 #include <asm/debugreg.h>
15785
15786 -static void set_idt(void *newidt, __u16 limit)
15787 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15788 {
15789 struct desc_ptr curidt;
15790
15791 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15792 }
15793
15794
15795 -static void set_gdt(void *newgdt, __u16 limit)
15796 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15797 {
15798 struct desc_ptr curgdt;
15799
15800 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15801 }
15802
15803 control_page = page_address(image->control_code_page);
15804 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15805 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15806
15807 relocate_kernel_ptr = control_page;
15808 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15809 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15810 index 1a1b606..5c89b55 100644
15811 --- a/arch/x86/kernel/microcode_intel.c
15812 +++ b/arch/x86/kernel/microcode_intel.c
15813 @@ -440,13 +440,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15814
15815 static int get_ucode_user(void *to, const void *from, size_t n)
15816 {
15817 - return copy_from_user(to, from, n);
15818 + return copy_from_user(to, (const void __force_user *)from, n);
15819 }
15820
15821 static enum ucode_state
15822 request_microcode_user(int cpu, const void __user *buf, size_t size)
15823 {
15824 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15825 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15826 }
15827
15828 static void microcode_fini_cpu(int cpu)
15829 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15830 index 925179f..85bec6c 100644
15831 --- a/arch/x86/kernel/module.c
15832 +++ b/arch/x86/kernel/module.c
15833 @@ -36,15 +36,60 @@
15834 #define DEBUGP(fmt...)
15835 #endif
15836
15837 -void *module_alloc(unsigned long size)
15838 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15839 {
15840 if (PAGE_ALIGN(size) > MODULES_LEN)
15841 return NULL;
15842 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15843 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15844 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15845 -1, __builtin_return_address(0));
15846 }
15847
15848 +void *module_alloc(unsigned long size)
15849 +{
15850 +
15851 +#ifdef CONFIG_PAX_KERNEXEC
15852 + return __module_alloc(size, PAGE_KERNEL);
15853 +#else
15854 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15855 +#endif
15856 +
15857 +}
15858 +
15859 +#ifdef CONFIG_PAX_KERNEXEC
15860 +#ifdef CONFIG_X86_32
15861 +void *module_alloc_exec(unsigned long size)
15862 +{
15863 + struct vm_struct *area;
15864 +
15865 + if (size == 0)
15866 + return NULL;
15867 +
15868 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15869 + return area ? area->addr : NULL;
15870 +}
15871 +EXPORT_SYMBOL(module_alloc_exec);
15872 +
15873 +void module_free_exec(struct module *mod, void *module_region)
15874 +{
15875 + vunmap(module_region);
15876 +}
15877 +EXPORT_SYMBOL(module_free_exec);
15878 +#else
15879 +void module_free_exec(struct module *mod, void *module_region)
15880 +{
15881 + module_free(mod, module_region);
15882 +}
15883 +EXPORT_SYMBOL(module_free_exec);
15884 +
15885 +void *module_alloc_exec(unsigned long size)
15886 +{
15887 + return __module_alloc(size, PAGE_KERNEL_RX);
15888 +}
15889 +EXPORT_SYMBOL(module_alloc_exec);
15890 +#endif
15891 +#endif
15892 +
15893 #ifdef CONFIG_X86_32
15894 int apply_relocate(Elf32_Shdr *sechdrs,
15895 const char *strtab,
15896 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15897 unsigned int i;
15898 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15899 Elf32_Sym *sym;
15900 - uint32_t *location;
15901 + uint32_t *plocation, location;
15902
15903 DEBUGP("Applying relocate section %u to %u\n", relsec,
15904 sechdrs[relsec].sh_info);
15905 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15906 /* This is where to make the change */
15907 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15908 - + rel[i].r_offset;
15909 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15910 + location = (uint32_t)plocation;
15911 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15912 + plocation = ktla_ktva((void *)plocation);
15913 /* This is the symbol it is referring to. Note that all
15914 undefined symbols have been resolved. */
15915 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15916 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15917 switch (ELF32_R_TYPE(rel[i].r_info)) {
15918 case R_386_32:
15919 /* We add the value into the location given */
15920 - *location += sym->st_value;
15921 + pax_open_kernel();
15922 + *plocation += sym->st_value;
15923 + pax_close_kernel();
15924 break;
15925 case R_386_PC32:
15926 /* Add the value, subtract its postition */
15927 - *location += sym->st_value - (uint32_t)location;
15928 + pax_open_kernel();
15929 + *plocation += sym->st_value - location;
15930 + pax_close_kernel();
15931 break;
15932 default:
15933 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15934 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
15935 case R_X86_64_NONE:
15936 break;
15937 case R_X86_64_64:
15938 + pax_open_kernel();
15939 *(u64 *)loc = val;
15940 + pax_close_kernel();
15941 break;
15942 case R_X86_64_32:
15943 + pax_open_kernel();
15944 *(u32 *)loc = val;
15945 + pax_close_kernel();
15946 if (val != *(u32 *)loc)
15947 goto overflow;
15948 break;
15949 case R_X86_64_32S:
15950 + pax_open_kernel();
15951 *(s32 *)loc = val;
15952 + pax_close_kernel();
15953 if ((s64)val != *(s32 *)loc)
15954 goto overflow;
15955 break;
15956 case R_X86_64_PC32:
15957 val -= (u64)loc;
15958 + pax_open_kernel();
15959 *(u32 *)loc = val;
15960 + pax_close_kernel();
15961 +
15962 #if 0
15963 if ((s64)val != *(s32 *)loc)
15964 goto overflow;
15965 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
15966 index 676b8c7..870ba04 100644
15967 --- a/arch/x86/kernel/paravirt-spinlocks.c
15968 +++ b/arch/x86/kernel/paravirt-spinlocks.c
15969 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
15970 arch_spin_lock(lock);
15971 }
15972
15973 -struct pv_lock_ops pv_lock_ops = {
15974 +struct pv_lock_ops pv_lock_ops __read_only = {
15975 #ifdef CONFIG_SMP
15976 .spin_is_locked = __ticket_spin_is_locked,
15977 .spin_is_contended = __ticket_spin_is_contended,
15978 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
15979 index d90272e..2d54e8e 100644
15980 --- a/arch/x86/kernel/paravirt.c
15981 +++ b/arch/x86/kernel/paravirt.c
15982 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15983 {
15984 return x;
15985 }
15986 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15987 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15988 +#endif
15989
15990 void __init default_banner(void)
15991 {
15992 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 type)
15993 .pv_lock_ops = pv_lock_ops,
15994 #endif
15995 };
15996 +
15997 + pax_track_stack();
15998 +
15999 return *((void **)&tmpl + type);
16000 }
16001
16002 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
16003 if (opfunc == NULL)
16004 /* If there's no function, patch it with a ud2a (BUG) */
16005 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
16006 - else if (opfunc == _paravirt_nop)
16007 + else if (opfunc == (void *)_paravirt_nop)
16008 /* If the operation is a nop, then nop the callsite */
16009 ret = paravirt_patch_nop();
16010
16011 /* identity functions just return their single argument */
16012 - else if (opfunc == _paravirt_ident_32)
16013 + else if (opfunc == (void *)_paravirt_ident_32)
16014 ret = paravirt_patch_ident_32(insnbuf, len);
16015 - else if (opfunc == _paravirt_ident_64)
16016 + else if (opfunc == (void *)_paravirt_ident_64)
16017 ret = paravirt_patch_ident_64(insnbuf, len);
16018 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16019 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16020 + ret = paravirt_patch_ident_64(insnbuf, len);
16021 +#endif
16022
16023 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16024 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16025 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16026 if (insn_len > len || start == NULL)
16027 insn_len = len;
16028 else
16029 - memcpy(insnbuf, start, insn_len);
16030 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16031
16032 return insn_len;
16033 }
16034 @@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
16035 preempt_enable();
16036 }
16037
16038 -struct pv_info pv_info = {
16039 +struct pv_info pv_info __read_only = {
16040 .name = "bare hardware",
16041 .paravirt_enabled = 0,
16042 .kernel_rpl = 0,
16043 @@ -313,16 +323,16 @@ struct pv_info pv_info = {
16044 #endif
16045 };
16046
16047 -struct pv_init_ops pv_init_ops = {
16048 +struct pv_init_ops pv_init_ops __read_only = {
16049 .patch = native_patch,
16050 };
16051
16052 -struct pv_time_ops pv_time_ops = {
16053 +struct pv_time_ops pv_time_ops __read_only = {
16054 .sched_clock = native_sched_clock,
16055 .steal_clock = native_steal_clock,
16056 };
16057
16058 -struct pv_irq_ops pv_irq_ops = {
16059 +struct pv_irq_ops pv_irq_ops __read_only = {
16060 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16061 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16062 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16063 @@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
16064 #endif
16065 };
16066
16067 -struct pv_cpu_ops pv_cpu_ops = {
16068 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16069 .cpuid = native_cpuid,
16070 .get_debugreg = native_get_debugreg,
16071 .set_debugreg = native_set_debugreg,
16072 @@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16073 .end_context_switch = paravirt_nop,
16074 };
16075
16076 -struct pv_apic_ops pv_apic_ops = {
16077 +struct pv_apic_ops pv_apic_ops __read_only = {
16078 #ifdef CONFIG_X86_LOCAL_APIC
16079 .startup_ipi_hook = paravirt_nop,
16080 #endif
16081 };
16082
16083 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16084 +#ifdef CONFIG_X86_32
16085 +#ifdef CONFIG_X86_PAE
16086 +/* 64-bit pagetable entries */
16087 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16088 +#else
16089 /* 32-bit pagetable entries */
16090 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16091 +#endif
16092 #else
16093 /* 64-bit pagetable entries */
16094 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16095 #endif
16096
16097 -struct pv_mmu_ops pv_mmu_ops = {
16098 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16099
16100 .read_cr2 = native_read_cr2,
16101 .write_cr2 = native_write_cr2,
16102 @@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16103 .make_pud = PTE_IDENT,
16104
16105 .set_pgd = native_set_pgd,
16106 + .set_pgd_batched = native_set_pgd_batched,
16107 #endif
16108 #endif /* PAGETABLE_LEVELS >= 3 */
16109
16110 @@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16111 },
16112
16113 .set_fixmap = native_set_fixmap,
16114 +
16115 +#ifdef CONFIG_PAX_KERNEXEC
16116 + .pax_open_kernel = native_pax_open_kernel,
16117 + .pax_close_kernel = native_pax_close_kernel,
16118 +#endif
16119 +
16120 };
16121
16122 EXPORT_SYMBOL_GPL(pv_time_ops);
16123 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16124 index 35ccf75..67e7d4d 100644
16125 --- a/arch/x86/kernel/pci-iommu_table.c
16126 +++ b/arch/x86/kernel/pci-iommu_table.c
16127 @@ -2,7 +2,7 @@
16128 #include <asm/iommu_table.h>
16129 #include <linux/string.h>
16130 #include <linux/kallsyms.h>
16131 -
16132 +#include <linux/sched.h>
16133
16134 #define DEBUG 1
16135
16136 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
16137 {
16138 struct iommu_table_entry *p, *q, *x;
16139
16140 + pax_track_stack();
16141 +
16142 /* Simple cyclic dependency checker. */
16143 for (p = start; p < finish; p++) {
16144 q = find_dependents_of(start, finish, p);
16145 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16146 index 30eb651..37fa2d7 100644
16147 --- a/arch/x86/kernel/process.c
16148 +++ b/arch/x86/kernel/process.c
16149 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16150
16151 void free_thread_info(struct thread_info *ti)
16152 {
16153 - free_thread_xstate(ti->task);
16154 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16155 }
16156
16157 +static struct kmem_cache *task_struct_cachep;
16158 +
16159 void arch_task_cache_init(void)
16160 {
16161 - task_xstate_cachep =
16162 - kmem_cache_create("task_xstate", xstate_size,
16163 + /* create a slab on which task_structs can be allocated */
16164 + task_struct_cachep =
16165 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16166 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16167 +
16168 + task_xstate_cachep =
16169 + kmem_cache_create("task_xstate", xstate_size,
16170 __alignof__(union thread_xstate),
16171 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16172 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16173 +}
16174 +
16175 +struct task_struct *alloc_task_struct_node(int node)
16176 +{
16177 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16178 +}
16179 +
16180 +void free_task_struct(struct task_struct *task)
16181 +{
16182 + free_thread_xstate(task);
16183 + kmem_cache_free(task_struct_cachep, task);
16184 }
16185
16186 /*
16187 @@ -70,7 +87,7 @@ void exit_thread(void)
16188 unsigned long *bp = t->io_bitmap_ptr;
16189
16190 if (bp) {
16191 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16192 + struct tss_struct *tss = init_tss + get_cpu();
16193
16194 t->io_bitmap_ptr = NULL;
16195 clear_thread_flag(TIF_IO_BITMAP);
16196 @@ -106,7 +123,7 @@ void show_regs_common(void)
16197
16198 printk(KERN_CONT "\n");
16199 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16200 - current->pid, current->comm, print_tainted(),
16201 + task_pid_nr(current), current->comm, print_tainted(),
16202 init_utsname()->release,
16203 (int)strcspn(init_utsname()->version, " "),
16204 init_utsname()->version);
16205 @@ -120,6 +137,9 @@ void flush_thread(void)
16206 {
16207 struct task_struct *tsk = current;
16208
16209 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16210 + loadsegment(gs, 0);
16211 +#endif
16212 flush_ptrace_hw_breakpoint(tsk);
16213 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16214 /*
16215 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16216 regs.di = (unsigned long) arg;
16217
16218 #ifdef CONFIG_X86_32
16219 - regs.ds = __USER_DS;
16220 - regs.es = __USER_DS;
16221 + regs.ds = __KERNEL_DS;
16222 + regs.es = __KERNEL_DS;
16223 regs.fs = __KERNEL_PERCPU;
16224 - regs.gs = __KERNEL_STACK_CANARY;
16225 + savesegment(gs, regs.gs);
16226 #else
16227 regs.ss = __KERNEL_DS;
16228 #endif
16229 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
16230
16231 return ret;
16232 }
16233 -void stop_this_cpu(void *dummy)
16234 +__noreturn void stop_this_cpu(void *dummy)
16235 {
16236 local_irq_disable();
16237 /*
16238 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
16239 }
16240 early_param("idle", idle_setup);
16241
16242 -unsigned long arch_align_stack(unsigned long sp)
16243 +#ifdef CONFIG_PAX_RANDKSTACK
16244 +void pax_randomize_kstack(struct pt_regs *regs)
16245 {
16246 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16247 - sp -= get_random_int() % 8192;
16248 - return sp & ~0xf;
16249 -}
16250 + struct thread_struct *thread = &current->thread;
16251 + unsigned long time;
16252
16253 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16254 -{
16255 - unsigned long range_end = mm->brk + 0x02000000;
16256 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16257 -}
16258 + if (!randomize_va_space)
16259 + return;
16260 +
16261 + if (v8086_mode(regs))
16262 + return;
16263
16264 + rdtscl(time);
16265 +
16266 + /* P4 seems to return a 0 LSB, ignore it */
16267 +#ifdef CONFIG_MPENTIUM4
16268 + time &= 0x3EUL;
16269 + time <<= 2;
16270 +#elif defined(CONFIG_X86_64)
16271 + time &= 0xFUL;
16272 + time <<= 4;
16273 +#else
16274 + time &= 0x1FUL;
16275 + time <<= 3;
16276 +#endif
16277 +
16278 + thread->sp0 ^= time;
16279 + load_sp0(init_tss + smp_processor_id(), thread);
16280 +
16281 +#ifdef CONFIG_X86_64
16282 + percpu_write(kernel_stack, thread->sp0);
16283 +#endif
16284 +}
16285 +#endif
16286 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16287 index 7a3b651..5a946f6 100644
16288 --- a/arch/x86/kernel/process_32.c
16289 +++ b/arch/x86/kernel/process_32.c
16290 @@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16291 unsigned long thread_saved_pc(struct task_struct *tsk)
16292 {
16293 return ((unsigned long *)tsk->thread.sp)[3];
16294 +//XXX return tsk->thread.eip;
16295 }
16296
16297 #ifndef CONFIG_SMP
16298 @@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, int all)
16299 unsigned long sp;
16300 unsigned short ss, gs;
16301
16302 - if (user_mode_vm(regs)) {
16303 + if (user_mode(regs)) {
16304 sp = regs->sp;
16305 ss = regs->ss & 0xffff;
16306 - gs = get_user_gs(regs);
16307 } else {
16308 sp = kernel_stack_pointer(regs);
16309 savesegment(ss, ss);
16310 - savesegment(gs, gs);
16311 }
16312 + gs = get_user_gs(regs);
16313
16314 show_regs_common();
16315
16316 @@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16317 struct task_struct *tsk;
16318 int err;
16319
16320 - childregs = task_pt_regs(p);
16321 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16322 *childregs = *regs;
16323 childregs->ax = 0;
16324 childregs->sp = sp;
16325
16326 p->thread.sp = (unsigned long) childregs;
16327 p->thread.sp0 = (unsigned long) (childregs+1);
16328 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16329
16330 p->thread.ip = (unsigned long) ret_from_fork;
16331
16332 @@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16333 struct thread_struct *prev = &prev_p->thread,
16334 *next = &next_p->thread;
16335 int cpu = smp_processor_id();
16336 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16337 + struct tss_struct *tss = init_tss + cpu;
16338 bool preload_fpu;
16339
16340 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16341 @@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16342 */
16343 lazy_save_gs(prev->gs);
16344
16345 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16346 + __set_fs(task_thread_info(next_p)->addr_limit);
16347 +#endif
16348 +
16349 /*
16350 * Load the per-thread Thread-Local Storage descriptor.
16351 */
16352 @@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16353 */
16354 arch_end_context_switch(next_p);
16355
16356 + percpu_write(current_task, next_p);
16357 + percpu_write(current_tinfo, &next_p->tinfo);
16358 +
16359 if (preload_fpu)
16360 __math_state_restore();
16361
16362 @@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16363 if (prev->gs | next->gs)
16364 lazy_load_gs(next->gs);
16365
16366 - percpu_write(current_task, next_p);
16367 -
16368 return prev_p;
16369 }
16370
16371 @@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_struct *p)
16372 } while (count++ < 16);
16373 return 0;
16374 }
16375 -
16376 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16377 index f693e44..3c979b2 100644
16378 --- a/arch/x86/kernel/process_64.c
16379 +++ b/arch/x86/kernel/process_64.c
16380 @@ -88,7 +88,7 @@ static void __exit_idle(void)
16381 void exit_idle(void)
16382 {
16383 /* idle loop has pid 0 */
16384 - if (current->pid)
16385 + if (task_pid_nr(current))
16386 return;
16387 __exit_idle();
16388 }
16389 @@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16390 struct pt_regs *childregs;
16391 struct task_struct *me = current;
16392
16393 - childregs = ((struct pt_regs *)
16394 - (THREAD_SIZE + task_stack_page(p))) - 1;
16395 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16396 *childregs = *regs;
16397
16398 childregs->ax = 0;
16399 @@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16400 p->thread.sp = (unsigned long) childregs;
16401 p->thread.sp0 = (unsigned long) (childregs+1);
16402 p->thread.usersp = me->thread.usersp;
16403 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16404
16405 set_tsk_thread_flag(p, TIF_FORK);
16406
16407 @@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16408 struct thread_struct *prev = &prev_p->thread;
16409 struct thread_struct *next = &next_p->thread;
16410 int cpu = smp_processor_id();
16411 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16412 + struct tss_struct *tss = init_tss + cpu;
16413 unsigned fsindex, gsindex;
16414 bool preload_fpu;
16415
16416 @@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16417 prev->usersp = percpu_read(old_rsp);
16418 percpu_write(old_rsp, next->usersp);
16419 percpu_write(current_task, next_p);
16420 + percpu_write(current_tinfo, &next_p->tinfo);
16421
16422 - percpu_write(kernel_stack,
16423 - (unsigned long)task_stack_page(next_p) +
16424 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16425 + percpu_write(kernel_stack, next->sp0);
16426
16427 /*
16428 * Now maybe reload the debug registers and handle I/O bitmaps
16429 @@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_struct *p)
16430 if (!p || p == current || p->state == TASK_RUNNING)
16431 return 0;
16432 stack = (unsigned long)task_stack_page(p);
16433 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16434 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16435 return 0;
16436 fp = *(u64 *)(p->thread.sp);
16437 do {
16438 - if (fp < (unsigned long)stack ||
16439 - fp >= (unsigned long)stack+THREAD_SIZE)
16440 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16441 return 0;
16442 ip = *(u64 *)(fp+8);
16443 if (!in_sched_functions(ip))
16444 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16445 index 8252879..d3219e0 100644
16446 --- a/arch/x86/kernel/ptrace.c
16447 +++ b/arch/x86/kernel/ptrace.c
16448 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16449 unsigned long addr, unsigned long data)
16450 {
16451 int ret;
16452 - unsigned long __user *datap = (unsigned long __user *)data;
16453 + unsigned long __user *datap = (__force unsigned long __user *)data;
16454
16455 switch (request) {
16456 /* read the word at location addr in the USER area. */
16457 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16458 if ((int) addr < 0)
16459 return -EIO;
16460 ret = do_get_thread_area(child, addr,
16461 - (struct user_desc __user *)data);
16462 + (__force struct user_desc __user *) data);
16463 break;
16464
16465 case PTRACE_SET_THREAD_AREA:
16466 if ((int) addr < 0)
16467 return -EIO;
16468 ret = do_set_thread_area(child, addr,
16469 - (struct user_desc __user *)data, 0);
16470 + (__force struct user_desc __user *) data, 0);
16471 break;
16472 #endif
16473
16474 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16475 memset(info, 0, sizeof(*info));
16476 info->si_signo = SIGTRAP;
16477 info->si_code = si_code;
16478 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16479 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16480 }
16481
16482 void user_single_step_siginfo(struct task_struct *tsk,
16483 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16484 index 42eb330..139955c 100644
16485 --- a/arch/x86/kernel/pvclock.c
16486 +++ b/arch/x86/kernel/pvclock.c
16487 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16488 return pv_tsc_khz;
16489 }
16490
16491 -static atomic64_t last_value = ATOMIC64_INIT(0);
16492 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16493
16494 void pvclock_resume(void)
16495 {
16496 - atomic64_set(&last_value, 0);
16497 + atomic64_set_unchecked(&last_value, 0);
16498 }
16499
16500 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16501 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16502 * updating at the same time, and one of them could be slightly behind,
16503 * making the assumption that last_value always go forward fail to hold.
16504 */
16505 - last = atomic64_read(&last_value);
16506 + last = atomic64_read_unchecked(&last_value);
16507 do {
16508 if (ret < last)
16509 return last;
16510 - last = atomic64_cmpxchg(&last_value, last, ret);
16511 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16512 } while (unlikely(last != ret));
16513
16514 return ret;
16515 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16516 index d4a705f..ef8f1a9 100644
16517 --- a/arch/x86/kernel/reboot.c
16518 +++ b/arch/x86/kernel/reboot.c
16519 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16520 EXPORT_SYMBOL(pm_power_off);
16521
16522 static const struct desc_ptr no_idt = {};
16523 -static int reboot_mode;
16524 +static unsigned short reboot_mode;
16525 enum reboot_type reboot_type = BOOT_ACPI;
16526 int reboot_force;
16527
16528 @@ -324,13 +324,17 @@ core_initcall(reboot_init);
16529 extern const unsigned char machine_real_restart_asm[];
16530 extern const u64 machine_real_restart_gdt[3];
16531
16532 -void machine_real_restart(unsigned int type)
16533 +__noreturn void machine_real_restart(unsigned int type)
16534 {
16535 void *restart_va;
16536 unsigned long restart_pa;
16537 - void (*restart_lowmem)(unsigned int);
16538 + void (* __noreturn restart_lowmem)(unsigned int);
16539 u64 *lowmem_gdt;
16540
16541 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16542 + struct desc_struct *gdt;
16543 +#endif
16544 +
16545 local_irq_disable();
16546
16547 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16548 @@ -356,14 +360,14 @@ void machine_real_restart(unsigned int type)
16549 boot)". This seems like a fairly standard thing that gets set by
16550 REBOOT.COM programs, and the previous reset routine did this
16551 too. */
16552 - *((unsigned short *)0x472) = reboot_mode;
16553 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16554
16555 /* Patch the GDT in the low memory trampoline */
16556 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16557
16558 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16559 restart_pa = virt_to_phys(restart_va);
16560 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16561 + restart_lowmem = (void *)restart_pa;
16562
16563 /* GDT[0]: GDT self-pointer */
16564 lowmem_gdt[0] =
16565 @@ -374,7 +378,33 @@ void machine_real_restart(unsigned int type)
16566 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16567
16568 /* Jump to the identity-mapped low memory code */
16569 +
16570 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16571 + gdt = get_cpu_gdt_table(smp_processor_id());
16572 + pax_open_kernel();
16573 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16574 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16575 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16576 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16577 +#endif
16578 +#ifdef CONFIG_PAX_KERNEXEC
16579 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16580 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16581 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16582 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16583 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16584 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16585 +#endif
16586 + pax_close_kernel();
16587 +#endif
16588 +
16589 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16590 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16591 + unreachable();
16592 +#else
16593 restart_lowmem(type);
16594 +#endif
16595 +
16596 }
16597 #ifdef CONFIG_APM_MODULE
16598 EXPORT_SYMBOL(machine_real_restart);
16599 @@ -532,7 +562,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16600 * try to force a triple fault and then cycle between hitting the keyboard
16601 * controller and doing that
16602 */
16603 -static void native_machine_emergency_restart(void)
16604 +__noreturn static void native_machine_emergency_restart(void)
16605 {
16606 int i;
16607 int attempt = 0;
16608 @@ -656,13 +686,13 @@ void native_machine_shutdown(void)
16609 #endif
16610 }
16611
16612 -static void __machine_emergency_restart(int emergency)
16613 +static __noreturn void __machine_emergency_restart(int emergency)
16614 {
16615 reboot_emergency = emergency;
16616 machine_ops.emergency_restart();
16617 }
16618
16619 -static void native_machine_restart(char *__unused)
16620 +static __noreturn void native_machine_restart(char *__unused)
16621 {
16622 printk("machine restart\n");
16623
16624 @@ -671,7 +701,7 @@ static void native_machine_restart(char *__unused)
16625 __machine_emergency_restart(0);
16626 }
16627
16628 -static void native_machine_halt(void)
16629 +static __noreturn void native_machine_halt(void)
16630 {
16631 /* stop other cpus and apics */
16632 machine_shutdown();
16633 @@ -682,7 +712,7 @@ static void native_machine_halt(void)
16634 stop_this_cpu(NULL);
16635 }
16636
16637 -static void native_machine_power_off(void)
16638 +__noreturn static void native_machine_power_off(void)
16639 {
16640 if (pm_power_off) {
16641 if (!reboot_force)
16642 @@ -691,6 +721,7 @@ static void native_machine_power_off(void)
16643 }
16644 /* a fallback in case there is no PM info available */
16645 tboot_shutdown(TB_SHUTDOWN_HALT);
16646 + unreachable();
16647 }
16648
16649 struct machine_ops machine_ops = {
16650 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16651 index 7a6f3b3..bed145d7 100644
16652 --- a/arch/x86/kernel/relocate_kernel_64.S
16653 +++ b/arch/x86/kernel/relocate_kernel_64.S
16654 @@ -11,6 +11,7 @@
16655 #include <asm/kexec.h>
16656 #include <asm/processor-flags.h>
16657 #include <asm/pgtable_types.h>
16658 +#include <asm/alternative-asm.h>
16659
16660 /*
16661 * Must be relocatable PIC code callable as a C function
16662 @@ -160,13 +161,14 @@ identity_mapped:
16663 xorq %rbp, %rbp
16664 xorq %r8, %r8
16665 xorq %r9, %r9
16666 - xorq %r10, %r9
16667 + xorq %r10, %r10
16668 xorq %r11, %r11
16669 xorq %r12, %r12
16670 xorq %r13, %r13
16671 xorq %r14, %r14
16672 xorq %r15, %r15
16673
16674 + pax_force_retaddr 0, 1
16675 ret
16676
16677 1:
16678 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16679 index afaf384..1a101fe 100644
16680 --- a/arch/x86/kernel/setup.c
16681 +++ b/arch/x86/kernel/setup.c
16682 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16683
16684 switch (data->type) {
16685 case SETUP_E820_EXT:
16686 - parse_e820_ext(data);
16687 + parse_e820_ext((struct setup_data __force_kernel *)data);
16688 break;
16689 case SETUP_DTB:
16690 add_dtb(pa_data);
16691 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16692 * area (640->1Mb) as ram even though it is not.
16693 * take them out.
16694 */
16695 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16696 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16697 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16698 }
16699
16700 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16701
16702 if (!boot_params.hdr.root_flags)
16703 root_mountflags &= ~MS_RDONLY;
16704 - init_mm.start_code = (unsigned long) _text;
16705 - init_mm.end_code = (unsigned long) _etext;
16706 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16707 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16708 init_mm.end_data = (unsigned long) _edata;
16709 init_mm.brk = _brk_end;
16710
16711 - code_resource.start = virt_to_phys(_text);
16712 - code_resource.end = virt_to_phys(_etext)-1;
16713 - data_resource.start = virt_to_phys(_etext);
16714 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16715 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16716 + data_resource.start = virt_to_phys(_sdata);
16717 data_resource.end = virt_to_phys(_edata)-1;
16718 bss_resource.start = virt_to_phys(&__bss_start);
16719 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16720 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16721 index 71f4727..16dc9f7 100644
16722 --- a/arch/x86/kernel/setup_percpu.c
16723 +++ b/arch/x86/kernel/setup_percpu.c
16724 @@ -21,19 +21,17 @@
16725 #include <asm/cpu.h>
16726 #include <asm/stackprotector.h>
16727
16728 -DEFINE_PER_CPU(int, cpu_number);
16729 +#ifdef CONFIG_SMP
16730 +DEFINE_PER_CPU(unsigned int, cpu_number);
16731 EXPORT_PER_CPU_SYMBOL(cpu_number);
16732 +#endif
16733
16734 -#ifdef CONFIG_X86_64
16735 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16736 -#else
16737 -#define BOOT_PERCPU_OFFSET 0
16738 -#endif
16739
16740 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16741 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16742
16743 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16744 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16745 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16746 };
16747 EXPORT_SYMBOL(__per_cpu_offset);
16748 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16749 {
16750 #ifdef CONFIG_X86_32
16751 struct desc_struct gdt;
16752 + unsigned long base = per_cpu_offset(cpu);
16753
16754 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16755 - 0x2 | DESCTYPE_S, 0x8);
16756 - gdt.s = 1;
16757 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16758 + 0x83 | DESCTYPE_S, 0xC);
16759 write_gdt_entry(get_cpu_gdt_table(cpu),
16760 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16761 #endif
16762 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16763 /* alrighty, percpu areas up and running */
16764 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16765 for_each_possible_cpu(cpu) {
16766 +#ifdef CONFIG_CC_STACKPROTECTOR
16767 +#ifdef CONFIG_X86_32
16768 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16769 +#endif
16770 +#endif
16771 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16772 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16773 per_cpu(cpu_number, cpu) = cpu;
16774 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16775 */
16776 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16777 #endif
16778 +#ifdef CONFIG_CC_STACKPROTECTOR
16779 +#ifdef CONFIG_X86_32
16780 + if (!cpu)
16781 + per_cpu(stack_canary.canary, cpu) = canary;
16782 +#endif
16783 +#endif
16784 /*
16785 * Up to this point, the boot CPU has been using .init.data
16786 * area. Reload any changed state for the boot CPU.
16787 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16788 index 54ddaeb2..a6aa4d2 100644
16789 --- a/arch/x86/kernel/signal.c
16790 +++ b/arch/x86/kernel/signal.c
16791 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16792 * Align the stack pointer according to the i386 ABI,
16793 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16794 */
16795 - sp = ((sp + 4) & -16ul) - 4;
16796 + sp = ((sp - 12) & -16ul) - 4;
16797 #else /* !CONFIG_X86_32 */
16798 sp = round_down(sp, 16) - 8;
16799 #endif
16800 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16801 * Return an always-bogus address instead so we will die with SIGSEGV.
16802 */
16803 if (onsigstack && !likely(on_sig_stack(sp)))
16804 - return (void __user *)-1L;
16805 + return (__force void __user *)-1L;
16806
16807 /* save i387 state */
16808 if (used_math() && save_i387_xstate(*fpstate) < 0)
16809 - return (void __user *)-1L;
16810 + return (__force void __user *)-1L;
16811
16812 return (void __user *)sp;
16813 }
16814 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16815 }
16816
16817 if (current->mm->context.vdso)
16818 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16819 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16820 else
16821 - restorer = &frame->retcode;
16822 + restorer = (void __user *)&frame->retcode;
16823 if (ka->sa.sa_flags & SA_RESTORER)
16824 restorer = ka->sa.sa_restorer;
16825
16826 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16827 * reasons and because gdb uses it as a signature to notice
16828 * signal handler stack frames.
16829 */
16830 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16831 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16832
16833 if (err)
16834 return -EFAULT;
16835 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16836 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16837
16838 /* Set up to return from userspace. */
16839 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16840 + if (current->mm->context.vdso)
16841 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16842 + else
16843 + restorer = (void __user *)&frame->retcode;
16844 if (ka->sa.sa_flags & SA_RESTORER)
16845 restorer = ka->sa.sa_restorer;
16846 put_user_ex(restorer, &frame->pretcode);
16847 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16848 * reasons and because gdb uses it as a signature to notice
16849 * signal handler stack frames.
16850 */
16851 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16852 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16853 } put_user_catch(err);
16854
16855 if (err)
16856 @@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *regs)
16857 siginfo_t info;
16858 int signr;
16859
16860 + pax_track_stack();
16861 +
16862 /*
16863 * We want the common case to go fast, which is why we may in certain
16864 * cases get here from kernel mode. Just return without doing anything
16865 @@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *regs)
16866 * X86_32: vm86 regs switched out by assembly code before reaching
16867 * here, so testing against kernel CS suffices.
16868 */
16869 - if (!user_mode(regs))
16870 + if (!user_mode_novm(regs))
16871 return;
16872
16873 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16874 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16875 index 9f548cb..caf76f7 100644
16876 --- a/arch/x86/kernel/smpboot.c
16877 +++ b/arch/x86/kernel/smpboot.c
16878 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16879 set_idle_for_cpu(cpu, c_idle.idle);
16880 do_rest:
16881 per_cpu(current_task, cpu) = c_idle.idle;
16882 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16883 #ifdef CONFIG_X86_32
16884 /* Stack for startup_32 can be just as for start_secondary onwards */
16885 irq_ctx_init(cpu);
16886 #else
16887 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16888 initial_gs = per_cpu_offset(cpu);
16889 - per_cpu(kernel_stack, cpu) =
16890 - (unsigned long)task_stack_page(c_idle.idle) -
16891 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16892 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16893 #endif
16894 +
16895 + pax_open_kernel();
16896 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16897 + pax_close_kernel();
16898 +
16899 initial_code = (unsigned long)start_secondary;
16900 stack_start = c_idle.idle->thread.sp;
16901
16902 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16903
16904 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16905
16906 +#ifdef CONFIG_PAX_PER_CPU_PGD
16907 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16908 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16909 + KERNEL_PGD_PTRS);
16910 +#endif
16911 +
16912 err = do_boot_cpu(apicid, cpu);
16913 if (err) {
16914 pr_debug("do_boot_cpu failed %d\n", err);
16915 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16916 index c346d11..d43b163 100644
16917 --- a/arch/x86/kernel/step.c
16918 +++ b/arch/x86/kernel/step.c
16919 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16920 struct desc_struct *desc;
16921 unsigned long base;
16922
16923 - seg &= ~7UL;
16924 + seg >>= 3;
16925
16926 mutex_lock(&child->mm->context.lock);
16927 - if (unlikely((seg >> 3) >= child->mm->context.size))
16928 + if (unlikely(seg >= child->mm->context.size))
16929 addr = -1L; /* bogus selector, access would fault */
16930 else {
16931 desc = child->mm->context.ldt + seg;
16932 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16933 addr += base;
16934 }
16935 mutex_unlock(&child->mm->context.lock);
16936 - }
16937 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16938 + addr = ktla_ktva(addr);
16939
16940 return addr;
16941 }
16942 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
16943 unsigned char opcode[15];
16944 unsigned long addr = convert_ip_to_linear(child, regs);
16945
16946 + if (addr == -EINVAL)
16947 + return 0;
16948 +
16949 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16950 for (i = 0; i < copied; i++) {
16951 switch (opcode[i]) {
16952 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
16953 index 0b0cb5f..db6b9ed 100644
16954 --- a/arch/x86/kernel/sys_i386_32.c
16955 +++ b/arch/x86/kernel/sys_i386_32.c
16956 @@ -24,17 +24,224 @@
16957
16958 #include <asm/syscalls.h>
16959
16960 -/*
16961 - * Do a system call from kernel instead of calling sys_execve so we
16962 - * end up with proper pt_regs.
16963 - */
16964 -int kernel_execve(const char *filename,
16965 - const char *const argv[],
16966 - const char *const envp[])
16967 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16968 {
16969 - long __res;
16970 - asm volatile ("int $0x80"
16971 - : "=a" (__res)
16972 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
16973 - return __res;
16974 + unsigned long pax_task_size = TASK_SIZE;
16975 +
16976 +#ifdef CONFIG_PAX_SEGMEXEC
16977 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16978 + pax_task_size = SEGMEXEC_TASK_SIZE;
16979 +#endif
16980 +
16981 + if (len > pax_task_size || addr > pax_task_size - len)
16982 + return -EINVAL;
16983 +
16984 + return 0;
16985 +}
16986 +
16987 +unsigned long
16988 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16989 + unsigned long len, unsigned long pgoff, unsigned long flags)
16990 +{
16991 + struct mm_struct *mm = current->mm;
16992 + struct vm_area_struct *vma;
16993 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16994 +
16995 +#ifdef CONFIG_PAX_SEGMEXEC
16996 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16997 + pax_task_size = SEGMEXEC_TASK_SIZE;
16998 +#endif
16999 +
17000 + pax_task_size -= PAGE_SIZE;
17001 +
17002 + if (len > pax_task_size)
17003 + return -ENOMEM;
17004 +
17005 + if (flags & MAP_FIXED)
17006 + return addr;
17007 +
17008 +#ifdef CONFIG_PAX_RANDMMAP
17009 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17010 +#endif
17011 +
17012 + if (addr) {
17013 + addr = PAGE_ALIGN(addr);
17014 + if (pax_task_size - len >= addr) {
17015 + vma = find_vma(mm, addr);
17016 + if (check_heap_stack_gap(vma, addr, len))
17017 + return addr;
17018 + }
17019 + }
17020 + if (len > mm->cached_hole_size) {
17021 + start_addr = addr = mm->free_area_cache;
17022 + } else {
17023 + start_addr = addr = mm->mmap_base;
17024 + mm->cached_hole_size = 0;
17025 + }
17026 +
17027 +#ifdef CONFIG_PAX_PAGEEXEC
17028 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17029 + start_addr = 0x00110000UL;
17030 +
17031 +#ifdef CONFIG_PAX_RANDMMAP
17032 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17033 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17034 +#endif
17035 +
17036 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17037 + start_addr = addr = mm->mmap_base;
17038 + else
17039 + addr = start_addr;
17040 + }
17041 +#endif
17042 +
17043 +full_search:
17044 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17045 + /* At this point: (!vma || addr < vma->vm_end). */
17046 + if (pax_task_size - len < addr) {
17047 + /*
17048 + * Start a new search - just in case we missed
17049 + * some holes.
17050 + */
17051 + if (start_addr != mm->mmap_base) {
17052 + start_addr = addr = mm->mmap_base;
17053 + mm->cached_hole_size = 0;
17054 + goto full_search;
17055 + }
17056 + return -ENOMEM;
17057 + }
17058 + if (check_heap_stack_gap(vma, addr, len))
17059 + break;
17060 + if (addr + mm->cached_hole_size < vma->vm_start)
17061 + mm->cached_hole_size = vma->vm_start - addr;
17062 + addr = vma->vm_end;
17063 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17064 + start_addr = addr = mm->mmap_base;
17065 + mm->cached_hole_size = 0;
17066 + goto full_search;
17067 + }
17068 + }
17069 +
17070 + /*
17071 + * Remember the place where we stopped the search:
17072 + */
17073 + mm->free_area_cache = addr + len;
17074 + return addr;
17075 +}
17076 +
17077 +unsigned long
17078 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17079 + const unsigned long len, const unsigned long pgoff,
17080 + const unsigned long flags)
17081 +{
17082 + struct vm_area_struct *vma;
17083 + struct mm_struct *mm = current->mm;
17084 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17085 +
17086 +#ifdef CONFIG_PAX_SEGMEXEC
17087 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17088 + pax_task_size = SEGMEXEC_TASK_SIZE;
17089 +#endif
17090 +
17091 + pax_task_size -= PAGE_SIZE;
17092 +
17093 + /* requested length too big for entire address space */
17094 + if (len > pax_task_size)
17095 + return -ENOMEM;
17096 +
17097 + if (flags & MAP_FIXED)
17098 + return addr;
17099 +
17100 +#ifdef CONFIG_PAX_PAGEEXEC
17101 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17102 + goto bottomup;
17103 +#endif
17104 +
17105 +#ifdef CONFIG_PAX_RANDMMAP
17106 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17107 +#endif
17108 +
17109 + /* requesting a specific address */
17110 + if (addr) {
17111 + addr = PAGE_ALIGN(addr);
17112 + if (pax_task_size - len >= addr) {
17113 + vma = find_vma(mm, addr);
17114 + if (check_heap_stack_gap(vma, addr, len))
17115 + return addr;
17116 + }
17117 + }
17118 +
17119 + /* check if free_area_cache is useful for us */
17120 + if (len <= mm->cached_hole_size) {
17121 + mm->cached_hole_size = 0;
17122 + mm->free_area_cache = mm->mmap_base;
17123 + }
17124 +
17125 + /* either no address requested or can't fit in requested address hole */
17126 + addr = mm->free_area_cache;
17127 +
17128 + /* make sure it can fit in the remaining address space */
17129 + if (addr > len) {
17130 + vma = find_vma(mm, addr-len);
17131 + if (check_heap_stack_gap(vma, addr - len, len))
17132 + /* remember the address as a hint for next time */
17133 + return (mm->free_area_cache = addr-len);
17134 + }
17135 +
17136 + if (mm->mmap_base < len)
17137 + goto bottomup;
17138 +
17139 + addr = mm->mmap_base-len;
17140 +
17141 + do {
17142 + /*
17143 + * Lookup failure means no vma is above this address,
17144 + * else if new region fits below vma->vm_start,
17145 + * return with success:
17146 + */
17147 + vma = find_vma(mm, addr);
17148 + if (check_heap_stack_gap(vma, addr, len))
17149 + /* remember the address as a hint for next time */
17150 + return (mm->free_area_cache = addr);
17151 +
17152 + /* remember the largest hole we saw so far */
17153 + if (addr + mm->cached_hole_size < vma->vm_start)
17154 + mm->cached_hole_size = vma->vm_start - addr;
17155 +
17156 + /* try just below the current vma->vm_start */
17157 + addr = skip_heap_stack_gap(vma, len);
17158 + } while (!IS_ERR_VALUE(addr));
17159 +
17160 +bottomup:
17161 + /*
17162 + * A failed mmap() very likely causes application failure,
17163 + * so fall back to the bottom-up function here. This scenario
17164 + * can happen with large stack limits and large mmap()
17165 + * allocations.
17166 + */
17167 +
17168 +#ifdef CONFIG_PAX_SEGMEXEC
17169 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17170 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17171 + else
17172 +#endif
17173 +
17174 + mm->mmap_base = TASK_UNMAPPED_BASE;
17175 +
17176 +#ifdef CONFIG_PAX_RANDMMAP
17177 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17178 + mm->mmap_base += mm->delta_mmap;
17179 +#endif
17180 +
17181 + mm->free_area_cache = mm->mmap_base;
17182 + mm->cached_hole_size = ~0UL;
17183 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17184 + /*
17185 + * Restore the topdown base:
17186 + */
17187 + mm->mmap_base = base;
17188 + mm->free_area_cache = base;
17189 + mm->cached_hole_size = ~0UL;
17190 +
17191 + return addr;
17192 }
17193 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17194 index ff14a50..35626c3 100644
17195 --- a/arch/x86/kernel/sys_x86_64.c
17196 +++ b/arch/x86/kernel/sys_x86_64.c
17197 @@ -32,8 +32,8 @@ out:
17198 return error;
17199 }
17200
17201 -static void find_start_end(unsigned long flags, unsigned long *begin,
17202 - unsigned long *end)
17203 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17204 + unsigned long *begin, unsigned long *end)
17205 {
17206 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17207 unsigned long new_begin;
17208 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17209 *begin = new_begin;
17210 }
17211 } else {
17212 - *begin = TASK_UNMAPPED_BASE;
17213 + *begin = mm->mmap_base;
17214 *end = TASK_SIZE;
17215 }
17216 }
17217 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17218 if (flags & MAP_FIXED)
17219 return addr;
17220
17221 - find_start_end(flags, &begin, &end);
17222 + find_start_end(mm, flags, &begin, &end);
17223
17224 if (len > end)
17225 return -ENOMEM;
17226
17227 +#ifdef CONFIG_PAX_RANDMMAP
17228 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17229 +#endif
17230 +
17231 if (addr) {
17232 addr = PAGE_ALIGN(addr);
17233 vma = find_vma(mm, addr);
17234 - if (end - len >= addr &&
17235 - (!vma || addr + len <= vma->vm_start))
17236 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17237 return addr;
17238 }
17239 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17240 @@ -106,7 +109,7 @@ full_search:
17241 }
17242 return -ENOMEM;
17243 }
17244 - if (!vma || addr + len <= vma->vm_start) {
17245 + if (check_heap_stack_gap(vma, addr, len)) {
17246 /*
17247 * Remember the place where we stopped the search:
17248 */
17249 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17250 {
17251 struct vm_area_struct *vma;
17252 struct mm_struct *mm = current->mm;
17253 - unsigned long addr = addr0;
17254 + unsigned long base = mm->mmap_base, addr = addr0;
17255
17256 /* requested length too big for entire address space */
17257 if (len > TASK_SIZE)
17258 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17259 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17260 goto bottomup;
17261
17262 +#ifdef CONFIG_PAX_RANDMMAP
17263 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17264 +#endif
17265 +
17266 /* requesting a specific address */
17267 if (addr) {
17268 addr = PAGE_ALIGN(addr);
17269 - vma = find_vma(mm, addr);
17270 - if (TASK_SIZE - len >= addr &&
17271 - (!vma || addr + len <= vma->vm_start))
17272 - return addr;
17273 + if (TASK_SIZE - len >= addr) {
17274 + vma = find_vma(mm, addr);
17275 + if (check_heap_stack_gap(vma, addr, len))
17276 + return addr;
17277 + }
17278 }
17279
17280 /* check if free_area_cache is useful for us */
17281 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17282 /* make sure it can fit in the remaining address space */
17283 if (addr > len) {
17284 vma = find_vma(mm, addr-len);
17285 - if (!vma || addr <= vma->vm_start)
17286 + if (check_heap_stack_gap(vma, addr - len, len))
17287 /* remember the address as a hint for next time */
17288 return mm->free_area_cache = addr-len;
17289 }
17290 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17291 * return with success:
17292 */
17293 vma = find_vma(mm, addr);
17294 - if (!vma || addr+len <= vma->vm_start)
17295 + if (check_heap_stack_gap(vma, addr, len))
17296 /* remember the address as a hint for next time */
17297 return mm->free_area_cache = addr;
17298
17299 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17300 mm->cached_hole_size = vma->vm_start - addr;
17301
17302 /* try just below the current vma->vm_start */
17303 - addr = vma->vm_start-len;
17304 - } while (len < vma->vm_start);
17305 + addr = skip_heap_stack_gap(vma, len);
17306 + } while (!IS_ERR_VALUE(addr));
17307
17308 bottomup:
17309 /*
17310 @@ -198,13 +206,21 @@ bottomup:
17311 * can happen with large stack limits and large mmap()
17312 * allocations.
17313 */
17314 + mm->mmap_base = TASK_UNMAPPED_BASE;
17315 +
17316 +#ifdef CONFIG_PAX_RANDMMAP
17317 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17318 + mm->mmap_base += mm->delta_mmap;
17319 +#endif
17320 +
17321 + mm->free_area_cache = mm->mmap_base;
17322 mm->cached_hole_size = ~0UL;
17323 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17324 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17325 /*
17326 * Restore the topdown base:
17327 */
17328 - mm->free_area_cache = mm->mmap_base;
17329 + mm->mmap_base = base;
17330 + mm->free_area_cache = base;
17331 mm->cached_hole_size = ~0UL;
17332
17333 return addr;
17334 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17335 index bc19be3..0f5fbf7 100644
17336 --- a/arch/x86/kernel/syscall_table_32.S
17337 +++ b/arch/x86/kernel/syscall_table_32.S
17338 @@ -1,3 +1,4 @@
17339 +.section .rodata,"a",@progbits
17340 ENTRY(sys_call_table)
17341 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17342 .long sys_exit
17343 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17344 index e07a2fc..db0369d 100644
17345 --- a/arch/x86/kernel/tboot.c
17346 +++ b/arch/x86/kernel/tboot.c
17347 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
17348
17349 void tboot_shutdown(u32 shutdown_type)
17350 {
17351 - void (*shutdown)(void);
17352 + void (* __noreturn shutdown)(void);
17353
17354 if (!tboot_enabled())
17355 return;
17356 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
17357
17358 switch_to_tboot_pt();
17359
17360 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17361 + shutdown = (void *)tboot->shutdown_entry;
17362 shutdown();
17363
17364 /* should not reach here */
17365 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17366 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17367 }
17368
17369 -static atomic_t ap_wfs_count;
17370 +static atomic_unchecked_t ap_wfs_count;
17371
17372 static int tboot_wait_for_aps(int num_aps)
17373 {
17374 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17375 {
17376 switch (action) {
17377 case CPU_DYING:
17378 - atomic_inc(&ap_wfs_count);
17379 + atomic_inc_unchecked(&ap_wfs_count);
17380 if (num_online_cpus() == 1)
17381 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17382 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17383 return NOTIFY_BAD;
17384 break;
17385 }
17386 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
17387
17388 tboot_create_trampoline();
17389
17390 - atomic_set(&ap_wfs_count, 0);
17391 + atomic_set_unchecked(&ap_wfs_count, 0);
17392 register_hotcpu_notifier(&tboot_cpu_notifier);
17393 return 0;
17394 }
17395 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17396 index 5a64d05..804587b 100644
17397 --- a/arch/x86/kernel/time.c
17398 +++ b/arch/x86/kernel/time.c
17399 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17400 {
17401 unsigned long pc = instruction_pointer(regs);
17402
17403 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17404 + if (!user_mode(regs) && in_lock_functions(pc)) {
17405 #ifdef CONFIG_FRAME_POINTER
17406 - return *(unsigned long *)(regs->bp + sizeof(long));
17407 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17408 #else
17409 unsigned long *sp =
17410 (unsigned long *)kernel_stack_pointer(regs);
17411 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17412 * or above a saved flags. Eflags has bits 22-31 zero,
17413 * kernel addresses don't.
17414 */
17415 +
17416 +#ifdef CONFIG_PAX_KERNEXEC
17417 + return ktla_ktva(sp[0]);
17418 +#else
17419 if (sp[0] >> 22)
17420 return sp[0];
17421 if (sp[1] >> 22)
17422 return sp[1];
17423 #endif
17424 +
17425 +#endif
17426 }
17427 return pc;
17428 }
17429 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17430 index 6bb7b85..dd853e1 100644
17431 --- a/arch/x86/kernel/tls.c
17432 +++ b/arch/x86/kernel/tls.c
17433 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17434 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17435 return -EINVAL;
17436
17437 +#ifdef CONFIG_PAX_SEGMEXEC
17438 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17439 + return -EINVAL;
17440 +#endif
17441 +
17442 set_tls_desc(p, idx, &info, 1);
17443
17444 return 0;
17445 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17446 index 451c0a7..e57f551 100644
17447 --- a/arch/x86/kernel/trampoline_32.S
17448 +++ b/arch/x86/kernel/trampoline_32.S
17449 @@ -32,6 +32,12 @@
17450 #include <asm/segment.h>
17451 #include <asm/page_types.h>
17452
17453 +#ifdef CONFIG_PAX_KERNEXEC
17454 +#define ta(X) (X)
17455 +#else
17456 +#define ta(X) ((X) - __PAGE_OFFSET)
17457 +#endif
17458 +
17459 #ifdef CONFIG_SMP
17460
17461 .section ".x86_trampoline","a"
17462 @@ -62,7 +68,7 @@ r_base = .
17463 inc %ax # protected mode (PE) bit
17464 lmsw %ax # into protected mode
17465 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17466 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17467 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17468
17469 # These need to be in the same 64K segment as the above;
17470 # hence we don't use the boot_gdt_descr defined in head.S
17471 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17472 index 09ff517..df19fbff 100644
17473 --- a/arch/x86/kernel/trampoline_64.S
17474 +++ b/arch/x86/kernel/trampoline_64.S
17475 @@ -90,7 +90,7 @@ startup_32:
17476 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17477 movl %eax, %ds
17478
17479 - movl $X86_CR4_PAE, %eax
17480 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17481 movl %eax, %cr4 # Enable PAE mode
17482
17483 # Setup trampoline 4 level pagetables
17484 @@ -138,7 +138,7 @@ tidt:
17485 # so the kernel can live anywhere
17486 .balign 4
17487 tgdt:
17488 - .short tgdt_end - tgdt # gdt limit
17489 + .short tgdt_end - tgdt - 1 # gdt limit
17490 .long tgdt - r_base
17491 .short 0
17492 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17493 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17494 index 6913369..7e7dff6 100644
17495 --- a/arch/x86/kernel/traps.c
17496 +++ b/arch/x86/kernel/traps.c
17497 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17498
17499 /* Do we ignore FPU interrupts ? */
17500 char ignore_fpu_irq;
17501 -
17502 -/*
17503 - * The IDT has to be page-aligned to simplify the Pentium
17504 - * F0 0F bug workaround.
17505 - */
17506 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17507 #endif
17508
17509 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17510 @@ -117,13 +111,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17511 }
17512
17513 static void __kprobes
17514 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17515 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17516 long error_code, siginfo_t *info)
17517 {
17518 struct task_struct *tsk = current;
17519
17520 #ifdef CONFIG_X86_32
17521 - if (regs->flags & X86_VM_MASK) {
17522 + if (v8086_mode(regs)) {
17523 /*
17524 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17525 * On nmi (interrupt 2), do_trap should not be called.
17526 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17527 }
17528 #endif
17529
17530 - if (!user_mode(regs))
17531 + if (!user_mode_novm(regs))
17532 goto kernel_trap;
17533
17534 #ifdef CONFIG_X86_32
17535 @@ -157,7 +151,7 @@ trap_signal:
17536 printk_ratelimit()) {
17537 printk(KERN_INFO
17538 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17539 - tsk->comm, tsk->pid, str,
17540 + tsk->comm, task_pid_nr(tsk), str,
17541 regs->ip, regs->sp, error_code);
17542 print_vma_addr(" in ", regs->ip);
17543 printk("\n");
17544 @@ -174,8 +168,20 @@ kernel_trap:
17545 if (!fixup_exception(regs)) {
17546 tsk->thread.error_code = error_code;
17547 tsk->thread.trap_no = trapnr;
17548 +
17549 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17550 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17551 + str = "PAX: suspicious stack segment fault";
17552 +#endif
17553 +
17554 die(str, regs, error_code);
17555 }
17556 +
17557 +#ifdef CONFIG_PAX_REFCOUNT
17558 + if (trapnr == 4)
17559 + pax_report_refcount_overflow(regs);
17560 +#endif
17561 +
17562 return;
17563
17564 #ifdef CONFIG_X86_32
17565 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17566 conditional_sti(regs);
17567
17568 #ifdef CONFIG_X86_32
17569 - if (regs->flags & X86_VM_MASK)
17570 + if (v8086_mode(regs))
17571 goto gp_in_vm86;
17572 #endif
17573
17574 tsk = current;
17575 - if (!user_mode(regs))
17576 + if (!user_mode_novm(regs))
17577 goto gp_in_kernel;
17578
17579 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17580 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17581 + struct mm_struct *mm = tsk->mm;
17582 + unsigned long limit;
17583 +
17584 + down_write(&mm->mmap_sem);
17585 + limit = mm->context.user_cs_limit;
17586 + if (limit < TASK_SIZE) {
17587 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17588 + up_write(&mm->mmap_sem);
17589 + return;
17590 + }
17591 + up_write(&mm->mmap_sem);
17592 + }
17593 +#endif
17594 +
17595 tsk->thread.error_code = error_code;
17596 tsk->thread.trap_no = 13;
17597
17598 @@ -304,6 +326,13 @@ gp_in_kernel:
17599 if (notify_die(DIE_GPF, "general protection fault", regs,
17600 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17601 return;
17602 +
17603 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17604 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17605 + die("PAX: suspicious general protection fault", regs, error_code);
17606 + else
17607 +#endif
17608 +
17609 die("general protection fault", regs, error_code);
17610 }
17611
17612 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
17613 dotraplinkage notrace __kprobes void
17614 do_nmi(struct pt_regs *regs, long error_code)
17615 {
17616 +
17617 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17618 + if (!user_mode(regs)) {
17619 + unsigned long cs = regs->cs & 0xFFFF;
17620 + unsigned long ip = ktva_ktla(regs->ip);
17621 +
17622 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17623 + regs->ip = ip;
17624 + }
17625 +#endif
17626 +
17627 nmi_enter();
17628
17629 inc_irq_stat(__nmi_count);
17630 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17631 /* It's safe to allow irq's after DR6 has been saved */
17632 preempt_conditional_sti(regs);
17633
17634 - if (regs->flags & X86_VM_MASK) {
17635 + if (v8086_mode(regs)) {
17636 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17637 error_code, 1);
17638 preempt_conditional_cli(regs);
17639 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17640 * We already checked v86 mode above, so we can check for kernel mode
17641 * by just checking the CPL of CS.
17642 */
17643 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17644 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17645 tsk->thread.debugreg6 &= ~DR_STEP;
17646 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17647 regs->flags &= ~X86_EFLAGS_TF;
17648 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17649 return;
17650 conditional_sti(regs);
17651
17652 - if (!user_mode_vm(regs))
17653 + if (!user_mode(regs))
17654 {
17655 if (!fixup_exception(regs)) {
17656 task->thread.error_code = error_code;
17657 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17658 void __math_state_restore(void)
17659 {
17660 struct thread_info *thread = current_thread_info();
17661 - struct task_struct *tsk = thread->task;
17662 + struct task_struct *tsk = current;
17663
17664 /*
17665 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17666 @@ -750,8 +790,7 @@ void __math_state_restore(void)
17667 */
17668 asmlinkage void math_state_restore(void)
17669 {
17670 - struct thread_info *thread = current_thread_info();
17671 - struct task_struct *tsk = thread->task;
17672 + struct task_struct *tsk = current;
17673
17674 if (!tsk_used_math(tsk)) {
17675 local_irq_enable();
17676 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17677 index b9242ba..50c5edd 100644
17678 --- a/arch/x86/kernel/verify_cpu.S
17679 +++ b/arch/x86/kernel/verify_cpu.S
17680 @@ -20,6 +20,7 @@
17681 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17682 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17683 * arch/x86/kernel/head_32.S: processor startup
17684 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17685 *
17686 * verify_cpu, returns the status of longmode and SSE in register %eax.
17687 * 0: Success 1: Failure
17688 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17689 index 863f875..4307295 100644
17690 --- a/arch/x86/kernel/vm86_32.c
17691 +++ b/arch/x86/kernel/vm86_32.c
17692 @@ -41,6 +41,7 @@
17693 #include <linux/ptrace.h>
17694 #include <linux/audit.h>
17695 #include <linux/stddef.h>
17696 +#include <linux/grsecurity.h>
17697
17698 #include <asm/uaccess.h>
17699 #include <asm/io.h>
17700 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17701 do_exit(SIGSEGV);
17702 }
17703
17704 - tss = &per_cpu(init_tss, get_cpu());
17705 + tss = init_tss + get_cpu();
17706 current->thread.sp0 = current->thread.saved_sp0;
17707 current->thread.sysenter_cs = __KERNEL_CS;
17708 load_sp0(tss, &current->thread);
17709 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17710 struct task_struct *tsk;
17711 int tmp, ret = -EPERM;
17712
17713 +#ifdef CONFIG_GRKERNSEC_VM86
17714 + if (!capable(CAP_SYS_RAWIO)) {
17715 + gr_handle_vm86();
17716 + goto out;
17717 + }
17718 +#endif
17719 +
17720 tsk = current;
17721 if (tsk->thread.saved_sp0)
17722 goto out;
17723 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17724 int tmp, ret;
17725 struct vm86plus_struct __user *v86;
17726
17727 +#ifdef CONFIG_GRKERNSEC_VM86
17728 + if (!capable(CAP_SYS_RAWIO)) {
17729 + gr_handle_vm86();
17730 + ret = -EPERM;
17731 + goto out;
17732 + }
17733 +#endif
17734 +
17735 tsk = current;
17736 switch (cmd) {
17737 case VM86_REQUEST_IRQ:
17738 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17739 tsk->thread.saved_fs = info->regs32->fs;
17740 tsk->thread.saved_gs = get_user_gs(info->regs32);
17741
17742 - tss = &per_cpu(init_tss, get_cpu());
17743 + tss = init_tss + get_cpu();
17744 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17745 if (cpu_has_sep)
17746 tsk->thread.sysenter_cs = 0;
17747 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17748 goto cannot_handle;
17749 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17750 goto cannot_handle;
17751 - intr_ptr = (unsigned long __user *) (i << 2);
17752 + intr_ptr = (__force unsigned long __user *) (i << 2);
17753 if (get_user(segoffs, intr_ptr))
17754 goto cannot_handle;
17755 if ((segoffs >> 16) == BIOSSEG)
17756 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17757 index 0f703f1..9e15f64 100644
17758 --- a/arch/x86/kernel/vmlinux.lds.S
17759 +++ b/arch/x86/kernel/vmlinux.lds.S
17760 @@ -26,6 +26,13 @@
17761 #include <asm/page_types.h>
17762 #include <asm/cache.h>
17763 #include <asm/boot.h>
17764 +#include <asm/segment.h>
17765 +
17766 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17767 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17768 +#else
17769 +#define __KERNEL_TEXT_OFFSET 0
17770 +#endif
17771
17772 #undef i386 /* in case the preprocessor is a 32bit one */
17773
17774 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17775
17776 PHDRS {
17777 text PT_LOAD FLAGS(5); /* R_E */
17778 +#ifdef CONFIG_X86_32
17779 + module PT_LOAD FLAGS(5); /* R_E */
17780 +#endif
17781 +#ifdef CONFIG_XEN
17782 + rodata PT_LOAD FLAGS(5); /* R_E */
17783 +#else
17784 + rodata PT_LOAD FLAGS(4); /* R__ */
17785 +#endif
17786 data PT_LOAD FLAGS(6); /* RW_ */
17787 -#ifdef CONFIG_X86_64
17788 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17789 #ifdef CONFIG_SMP
17790 percpu PT_LOAD FLAGS(6); /* RW_ */
17791 #endif
17792 + text.init PT_LOAD FLAGS(5); /* R_E */
17793 + text.exit PT_LOAD FLAGS(5); /* R_E */
17794 init PT_LOAD FLAGS(7); /* RWE */
17795 -#endif
17796 note PT_NOTE FLAGS(0); /* ___ */
17797 }
17798
17799 SECTIONS
17800 {
17801 #ifdef CONFIG_X86_32
17802 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17803 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17804 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17805 #else
17806 - . = __START_KERNEL;
17807 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17808 + . = __START_KERNEL;
17809 #endif
17810
17811 /* Text and read-only data */
17812 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17813 - _text = .;
17814 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17815 /* bootstrapping code */
17816 +#ifdef CONFIG_X86_32
17817 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17818 +#else
17819 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17820 +#endif
17821 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17822 + _text = .;
17823 HEAD_TEXT
17824 #ifdef CONFIG_X86_32
17825 . = ALIGN(PAGE_SIZE);
17826 @@ -108,13 +128,47 @@ SECTIONS
17827 IRQENTRY_TEXT
17828 *(.fixup)
17829 *(.gnu.warning)
17830 - /* End of text section */
17831 - _etext = .;
17832 } :text = 0x9090
17833
17834 - NOTES :text :note
17835 + . += __KERNEL_TEXT_OFFSET;
17836
17837 - EXCEPTION_TABLE(16) :text = 0x9090
17838 +#ifdef CONFIG_X86_32
17839 + . = ALIGN(PAGE_SIZE);
17840 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17841 +
17842 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17843 + MODULES_EXEC_VADDR = .;
17844 + BYTE(0)
17845 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17846 + . = ALIGN(HPAGE_SIZE);
17847 + MODULES_EXEC_END = . - 1;
17848 +#endif
17849 +
17850 + } :module
17851 +#endif
17852 +
17853 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17854 + /* End of text section */
17855 + _etext = . - __KERNEL_TEXT_OFFSET;
17856 + }
17857 +
17858 +#ifdef CONFIG_X86_32
17859 + . = ALIGN(PAGE_SIZE);
17860 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17861 + *(.idt)
17862 + . = ALIGN(PAGE_SIZE);
17863 + *(.empty_zero_page)
17864 + *(.initial_pg_fixmap)
17865 + *(.initial_pg_pmd)
17866 + *(.initial_page_table)
17867 + *(.swapper_pg_dir)
17868 + } :rodata
17869 +#endif
17870 +
17871 + . = ALIGN(PAGE_SIZE);
17872 + NOTES :rodata :note
17873 +
17874 + EXCEPTION_TABLE(16) :rodata
17875
17876 #if defined(CONFIG_DEBUG_RODATA)
17877 /* .text should occupy whole number of pages */
17878 @@ -126,16 +180,20 @@ SECTIONS
17879
17880 /* Data */
17881 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17882 +
17883 +#ifdef CONFIG_PAX_KERNEXEC
17884 + . = ALIGN(HPAGE_SIZE);
17885 +#else
17886 + . = ALIGN(PAGE_SIZE);
17887 +#endif
17888 +
17889 /* Start of data section */
17890 _sdata = .;
17891
17892 /* init_task */
17893 INIT_TASK_DATA(THREAD_SIZE)
17894
17895 -#ifdef CONFIG_X86_32
17896 - /* 32 bit has nosave before _edata */
17897 NOSAVE_DATA
17898 -#endif
17899
17900 PAGE_ALIGNED_DATA(PAGE_SIZE)
17901
17902 @@ -176,12 +234,19 @@ SECTIONS
17903 #endif /* CONFIG_X86_64 */
17904
17905 /* Init code and data - will be freed after init */
17906 - . = ALIGN(PAGE_SIZE);
17907 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17908 + BYTE(0)
17909 +
17910 +#ifdef CONFIG_PAX_KERNEXEC
17911 + . = ALIGN(HPAGE_SIZE);
17912 +#else
17913 + . = ALIGN(PAGE_SIZE);
17914 +#endif
17915 +
17916 __init_begin = .; /* paired with __init_end */
17917 - }
17918 + } :init.begin
17919
17920 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17921 +#ifdef CONFIG_SMP
17922 /*
17923 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17924 * output PHDR, so the next output section - .init.text - should
17925 @@ -190,12 +255,27 @@ SECTIONS
17926 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17927 #endif
17928
17929 - INIT_TEXT_SECTION(PAGE_SIZE)
17930 -#ifdef CONFIG_X86_64
17931 - :init
17932 -#endif
17933 + . = ALIGN(PAGE_SIZE);
17934 + init_begin = .;
17935 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17936 + VMLINUX_SYMBOL(_sinittext) = .;
17937 + INIT_TEXT
17938 + VMLINUX_SYMBOL(_einittext) = .;
17939 + . = ALIGN(PAGE_SIZE);
17940 + } :text.init
17941
17942 - INIT_DATA_SECTION(16)
17943 + /*
17944 + * .exit.text is discard at runtime, not link time, to deal with
17945 + * references from .altinstructions and .eh_frame
17946 + */
17947 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17948 + EXIT_TEXT
17949 + . = ALIGN(16);
17950 + } :text.exit
17951 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17952 +
17953 + . = ALIGN(PAGE_SIZE);
17954 + INIT_DATA_SECTION(16) :init
17955
17956 /*
17957 * Code and data for a variety of lowlevel trampolines, to be
17958 @@ -269,19 +349,12 @@ SECTIONS
17959 }
17960
17961 . = ALIGN(8);
17962 - /*
17963 - * .exit.text is discard at runtime, not link time, to deal with
17964 - * references from .altinstructions and .eh_frame
17965 - */
17966 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17967 - EXIT_TEXT
17968 - }
17969
17970 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17971 EXIT_DATA
17972 }
17973
17974 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17975 +#ifndef CONFIG_SMP
17976 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
17977 #endif
17978
17979 @@ -300,16 +373,10 @@ SECTIONS
17980 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
17981 __smp_locks = .;
17982 *(.smp_locks)
17983 - . = ALIGN(PAGE_SIZE);
17984 __smp_locks_end = .;
17985 + . = ALIGN(PAGE_SIZE);
17986 }
17987
17988 -#ifdef CONFIG_X86_64
17989 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17990 - NOSAVE_DATA
17991 - }
17992 -#endif
17993 -
17994 /* BSS */
17995 . = ALIGN(PAGE_SIZE);
17996 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17997 @@ -325,6 +392,7 @@ SECTIONS
17998 __brk_base = .;
17999 . += 64 * 1024; /* 64k alignment slop space */
18000 *(.brk_reservation) /* areas brk users have reserved */
18001 + . = ALIGN(HPAGE_SIZE);
18002 __brk_limit = .;
18003 }
18004
18005 @@ -351,13 +419,12 @@ SECTIONS
18006 * for the boot processor.
18007 */
18008 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
18009 -INIT_PER_CPU(gdt_page);
18010 INIT_PER_CPU(irq_stack_union);
18011
18012 /*
18013 * Build-time check on the image size:
18014 */
18015 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18016 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18017 "kernel image bigger than KERNEL_IMAGE_SIZE");
18018
18019 #ifdef CONFIG_SMP
18020 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18021 index b56c65de..561a55b 100644
18022 --- a/arch/x86/kernel/vsyscall_64.c
18023 +++ b/arch/x86/kernel/vsyscall_64.c
18024 @@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18025 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18026 };
18027
18028 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18029 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18030
18031 static int __init vsyscall_setup(char *str)
18032 {
18033 if (str) {
18034 if (!strcmp("emulate", str))
18035 vsyscall_mode = EMULATE;
18036 - else if (!strcmp("native", str))
18037 - vsyscall_mode = NATIVE;
18038 else if (!strcmp("none", str))
18039 vsyscall_mode = NONE;
18040 else
18041 @@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18042
18043 tsk = current;
18044 if (seccomp_mode(&tsk->seccomp))
18045 - do_exit(SIGKILL);
18046 + do_group_exit(SIGKILL);
18047
18048 switch (vsyscall_nr) {
18049 case 0:
18050 @@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18051 return true;
18052
18053 sigsegv:
18054 - force_sig(SIGSEGV, current);
18055 - return true;
18056 + do_group_exit(SIGKILL);
18057 }
18058
18059 /*
18060 @@ -273,10 +270,7 @@ void __init map_vsyscall(void)
18061 extern char __vvar_page;
18062 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18063
18064 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18065 - vsyscall_mode == NATIVE
18066 - ? PAGE_KERNEL_VSYSCALL
18067 - : PAGE_KERNEL_VVAR);
18068 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18069 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18070 (unsigned long)VSYSCALL_START);
18071
18072 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18073 index 9796c2f..f686fbf 100644
18074 --- a/arch/x86/kernel/x8664_ksyms_64.c
18075 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18076 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18077 EXPORT_SYMBOL(copy_user_generic_string);
18078 EXPORT_SYMBOL(copy_user_generic_unrolled);
18079 EXPORT_SYMBOL(__copy_user_nocache);
18080 -EXPORT_SYMBOL(_copy_from_user);
18081 -EXPORT_SYMBOL(_copy_to_user);
18082
18083 EXPORT_SYMBOL(copy_page);
18084 EXPORT_SYMBOL(clear_page);
18085 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18086 index a391134..d0b63b6e 100644
18087 --- a/arch/x86/kernel/xsave.c
18088 +++ b/arch/x86/kernel/xsave.c
18089 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18090 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18091 return -EINVAL;
18092
18093 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18094 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18095 fx_sw_user->extended_size -
18096 FP_XSTATE_MAGIC2_SIZE));
18097 if (err)
18098 @@ -267,7 +267,7 @@ fx_only:
18099 * the other extended state.
18100 */
18101 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18102 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18103 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18104 }
18105
18106 /*
18107 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18108 if (use_xsave())
18109 err = restore_user_xstate(buf);
18110 else
18111 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18112 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18113 buf);
18114 if (unlikely(err)) {
18115 /*
18116 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18117 index 8b4cc5f..f086b5b 100644
18118 --- a/arch/x86/kvm/emulate.c
18119 +++ b/arch/x86/kvm/emulate.c
18120 @@ -96,7 +96,7 @@
18121 #define Src2ImmByte (2<<29)
18122 #define Src2One (3<<29)
18123 #define Src2Imm (4<<29)
18124 -#define Src2Mask (7<<29)
18125 +#define Src2Mask (7U<<29)
18126
18127 #define X2(x...) x, x
18128 #define X3(x...) X2(x), x
18129 @@ -207,6 +207,7 @@ struct gprefix {
18130
18131 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
18132 do { \
18133 + unsigned long _tmp; \
18134 __asm__ __volatile__ ( \
18135 _PRE_EFLAGS("0", "4", "2") \
18136 _op _suffix " %"_x"3,%1; " \
18137 @@ -220,8 +221,6 @@ struct gprefix {
18138 /* Raw emulation: instruction has two explicit operands. */
18139 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18140 do { \
18141 - unsigned long _tmp; \
18142 - \
18143 switch ((_dst).bytes) { \
18144 case 2: \
18145 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
18146 @@ -237,7 +236,6 @@ struct gprefix {
18147
18148 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18149 do { \
18150 - unsigned long _tmp; \
18151 switch ((_dst).bytes) { \
18152 case 1: \
18153 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
18154 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18155 index 57dcbd4..79aba9b 100644
18156 --- a/arch/x86/kvm/lapic.c
18157 +++ b/arch/x86/kvm/lapic.c
18158 @@ -53,7 +53,7 @@
18159 #define APIC_BUS_CYCLE_NS 1
18160
18161 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18162 -#define apic_debug(fmt, arg...)
18163 +#define apic_debug(fmt, arg...) do {} while (0)
18164
18165 #define APIC_LVT_NUM 6
18166 /* 14 is the version for Xeon and Pentium 8.4.8*/
18167 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18168 index 8e8da79..13bc641 100644
18169 --- a/arch/x86/kvm/mmu.c
18170 +++ b/arch/x86/kvm/mmu.c
18171 @@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18172
18173 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18174
18175 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18176 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18177
18178 /*
18179 * Assume that the pte write on a page table of the same type
18180 @@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18181 }
18182
18183 spin_lock(&vcpu->kvm->mmu_lock);
18184 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18185 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18186 gentry = 0;
18187 kvm_mmu_free_some_pages(vcpu);
18188 ++vcpu->kvm->stat.mmu_pte_write;
18189 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18190 index 507e2b8..fc55f89 100644
18191 --- a/arch/x86/kvm/paging_tmpl.h
18192 +++ b/arch/x86/kvm/paging_tmpl.h
18193 @@ -197,7 +197,7 @@ retry_walk:
18194 if (unlikely(kvm_is_error_hva(host_addr)))
18195 goto error;
18196
18197 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18198 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18199 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18200 goto error;
18201
18202 @@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
18203 unsigned long mmu_seq;
18204 bool map_writable;
18205
18206 + pax_track_stack();
18207 +
18208 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18209
18210 if (unlikely(error_code & PFERR_RSVD_MASK))
18211 @@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18212 if (need_flush)
18213 kvm_flush_remote_tlbs(vcpu->kvm);
18214
18215 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18216 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18217
18218 spin_unlock(&vcpu->kvm->mmu_lock);
18219
18220 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18221 index 475d1c9..33658ff 100644
18222 --- a/arch/x86/kvm/svm.c
18223 +++ b/arch/x86/kvm/svm.c
18224 @@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18225 int cpu = raw_smp_processor_id();
18226
18227 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18228 +
18229 + pax_open_kernel();
18230 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18231 + pax_close_kernel();
18232 +
18233 load_TR_desc();
18234 }
18235
18236 @@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18237 #endif
18238 #endif
18239
18240 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18241 + __set_fs(current_thread_info()->addr_limit);
18242 +#endif
18243 +
18244 reload_tss(vcpu);
18245
18246 local_irq_disable();
18247 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18248 index e65a158..656dc24 100644
18249 --- a/arch/x86/kvm/vmx.c
18250 +++ b/arch/x86/kvm/vmx.c
18251 @@ -1251,7 +1251,11 @@ static void reload_tss(void)
18252 struct desc_struct *descs;
18253
18254 descs = (void *)gdt->address;
18255 +
18256 + pax_open_kernel();
18257 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18258 + pax_close_kernel();
18259 +
18260 load_TR_desc();
18261 }
18262
18263 @@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
18264 if (!cpu_has_vmx_flexpriority())
18265 flexpriority_enabled = 0;
18266
18267 - if (!cpu_has_vmx_tpr_shadow())
18268 - kvm_x86_ops->update_cr8_intercept = NULL;
18269 + if (!cpu_has_vmx_tpr_shadow()) {
18270 + pax_open_kernel();
18271 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18272 + pax_close_kernel();
18273 + }
18274
18275 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18276 kvm_disable_largepages();
18277 @@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(void)
18278 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18279
18280 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18281 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18282 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18283
18284 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18285 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18286 @@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18287 "jmp .Lkvm_vmx_return \n\t"
18288 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18289 ".Lkvm_vmx_return: "
18290 +
18291 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18292 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18293 + ".Lkvm_vmx_return2: "
18294 +#endif
18295 +
18296 /* Save guest registers, load host registers, keep flags */
18297 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18298 "pop %0 \n\t"
18299 @@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18300 #endif
18301 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18302 [wordsize]"i"(sizeof(ulong))
18303 +
18304 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18305 + ,[cs]"i"(__KERNEL_CS)
18306 +#endif
18307 +
18308 : "cc", "memory"
18309 , R"ax", R"bx", R"di", R"si"
18310 #ifdef CONFIG_X86_64
18311 @@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18312 }
18313 }
18314
18315 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18316 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18317 +
18318 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18319 + loadsegment(fs, __KERNEL_PERCPU);
18320 +#endif
18321 +
18322 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18323 + __set_fs(current_thread_info()->addr_limit);
18324 +#endif
18325 +
18326 vmx->loaded_vmcs->launched = 1;
18327
18328 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18329 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18330 index 84a28ea..9326501 100644
18331 --- a/arch/x86/kvm/x86.c
18332 +++ b/arch/x86/kvm/x86.c
18333 @@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18334 {
18335 struct kvm *kvm = vcpu->kvm;
18336 int lm = is_long_mode(vcpu);
18337 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18338 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18339 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18340 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18341 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18342 : kvm->arch.xen_hvm_config.blob_size_32;
18343 u32 page_num = data & ~PAGE_MASK;
18344 @@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18345 if (n < msr_list.nmsrs)
18346 goto out;
18347 r = -EFAULT;
18348 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18349 + goto out;
18350 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18351 num_msrs_to_save * sizeof(u32)))
18352 goto out;
18353 @@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18354 struct kvm_cpuid2 *cpuid,
18355 struct kvm_cpuid_entry2 __user *entries)
18356 {
18357 - int r;
18358 + int r, i;
18359
18360 r = -E2BIG;
18361 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18362 goto out;
18363 r = -EFAULT;
18364 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18365 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18366 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18367 goto out;
18368 + for (i = 0; i < cpuid->nent; ++i) {
18369 + struct kvm_cpuid_entry2 cpuid_entry;
18370 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18371 + goto out;
18372 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18373 + }
18374 vcpu->arch.cpuid_nent = cpuid->nent;
18375 kvm_apic_set_version(vcpu);
18376 kvm_x86_ops->cpuid_update(vcpu);
18377 @@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18378 struct kvm_cpuid2 *cpuid,
18379 struct kvm_cpuid_entry2 __user *entries)
18380 {
18381 - int r;
18382 + int r, i;
18383
18384 r = -E2BIG;
18385 if (cpuid->nent < vcpu->arch.cpuid_nent)
18386 goto out;
18387 r = -EFAULT;
18388 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18389 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18390 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18391 goto out;
18392 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18393 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18394 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18395 + goto out;
18396 + }
18397 return 0;
18398
18399 out:
18400 @@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18401 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18402 struct kvm_interrupt *irq)
18403 {
18404 - if (irq->irq < 0 || irq->irq >= 256)
18405 + if (irq->irq >= 256)
18406 return -EINVAL;
18407 if (irqchip_in_kernel(vcpu->kvm))
18408 return -ENXIO;
18409 @@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
18410 kvm_mmu_set_mmio_spte_mask(mask);
18411 }
18412
18413 -int kvm_arch_init(void *opaque)
18414 +int kvm_arch_init(const void *opaque)
18415 {
18416 int r;
18417 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18418 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18419 index 13ee258..b9632f6 100644
18420 --- a/arch/x86/lguest/boot.c
18421 +++ b/arch/x86/lguest/boot.c
18422 @@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18423 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18424 * Launcher to reboot us.
18425 */
18426 -static void lguest_restart(char *reason)
18427 +static __noreturn void lguest_restart(char *reason)
18428 {
18429 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18430 + BUG();
18431 }
18432
18433 /*G:050
18434 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18435 index 042f682..c92afb6 100644
18436 --- a/arch/x86/lib/atomic64_32.c
18437 +++ b/arch/x86/lib/atomic64_32.c
18438 @@ -8,18 +8,30 @@
18439
18440 long long atomic64_read_cx8(long long, const atomic64_t *v);
18441 EXPORT_SYMBOL(atomic64_read_cx8);
18442 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18443 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18444 long long atomic64_set_cx8(long long, const atomic64_t *v);
18445 EXPORT_SYMBOL(atomic64_set_cx8);
18446 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18447 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18448 long long atomic64_xchg_cx8(long long, unsigned high);
18449 EXPORT_SYMBOL(atomic64_xchg_cx8);
18450 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18451 EXPORT_SYMBOL(atomic64_add_return_cx8);
18452 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18453 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18454 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18455 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18456 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18457 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18458 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18459 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18460 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18461 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18462 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18463 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18464 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18465 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18466 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18467 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18468 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18469 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18470 #ifndef CONFIG_X86_CMPXCHG64
18471 long long atomic64_read_386(long long, const atomic64_t *v);
18472 EXPORT_SYMBOL(atomic64_read_386);
18473 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18474 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18475 long long atomic64_set_386(long long, const atomic64_t *v);
18476 EXPORT_SYMBOL(atomic64_set_386);
18477 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18478 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18479 long long atomic64_xchg_386(long long, unsigned high);
18480 EXPORT_SYMBOL(atomic64_xchg_386);
18481 long long atomic64_add_return_386(long long a, atomic64_t *v);
18482 EXPORT_SYMBOL(atomic64_add_return_386);
18483 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18484 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18485 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18486 EXPORT_SYMBOL(atomic64_sub_return_386);
18487 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18488 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18489 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18490 EXPORT_SYMBOL(atomic64_inc_return_386);
18491 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18492 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18493 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18494 EXPORT_SYMBOL(atomic64_dec_return_386);
18495 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18496 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18497 long long atomic64_add_386(long long a, atomic64_t *v);
18498 EXPORT_SYMBOL(atomic64_add_386);
18499 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18500 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18501 long long atomic64_sub_386(long long a, atomic64_t *v);
18502 EXPORT_SYMBOL(atomic64_sub_386);
18503 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18504 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18505 long long atomic64_inc_386(long long a, atomic64_t *v);
18506 EXPORT_SYMBOL(atomic64_inc_386);
18507 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18508 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18509 long long atomic64_dec_386(long long a, atomic64_t *v);
18510 EXPORT_SYMBOL(atomic64_dec_386);
18511 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18512 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18513 long long atomic64_dec_if_positive_386(atomic64_t *v);
18514 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18515 int atomic64_inc_not_zero_386(atomic64_t *v);
18516 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18517 index e8e7e0d..56fd1b0 100644
18518 --- a/arch/x86/lib/atomic64_386_32.S
18519 +++ b/arch/x86/lib/atomic64_386_32.S
18520 @@ -48,6 +48,10 @@ BEGIN(read)
18521 movl (v), %eax
18522 movl 4(v), %edx
18523 RET_ENDP
18524 +BEGIN(read_unchecked)
18525 + movl (v), %eax
18526 + movl 4(v), %edx
18527 +RET_ENDP
18528 #undef v
18529
18530 #define v %esi
18531 @@ -55,6 +59,10 @@ BEGIN(set)
18532 movl %ebx, (v)
18533 movl %ecx, 4(v)
18534 RET_ENDP
18535 +BEGIN(set_unchecked)
18536 + movl %ebx, (v)
18537 + movl %ecx, 4(v)
18538 +RET_ENDP
18539 #undef v
18540
18541 #define v %esi
18542 @@ -70,6 +78,20 @@ RET_ENDP
18543 BEGIN(add)
18544 addl %eax, (v)
18545 adcl %edx, 4(v)
18546 +
18547 +#ifdef CONFIG_PAX_REFCOUNT
18548 + jno 0f
18549 + subl %eax, (v)
18550 + sbbl %edx, 4(v)
18551 + int $4
18552 +0:
18553 + _ASM_EXTABLE(0b, 0b)
18554 +#endif
18555 +
18556 +RET_ENDP
18557 +BEGIN(add_unchecked)
18558 + addl %eax, (v)
18559 + adcl %edx, 4(v)
18560 RET_ENDP
18561 #undef v
18562
18563 @@ -77,6 +99,24 @@ RET_ENDP
18564 BEGIN(add_return)
18565 addl (v), %eax
18566 adcl 4(v), %edx
18567 +
18568 +#ifdef CONFIG_PAX_REFCOUNT
18569 + into
18570 +1234:
18571 + _ASM_EXTABLE(1234b, 2f)
18572 +#endif
18573 +
18574 + movl %eax, (v)
18575 + movl %edx, 4(v)
18576 +
18577 +#ifdef CONFIG_PAX_REFCOUNT
18578 +2:
18579 +#endif
18580 +
18581 +RET_ENDP
18582 +BEGIN(add_return_unchecked)
18583 + addl (v), %eax
18584 + adcl 4(v), %edx
18585 movl %eax, (v)
18586 movl %edx, 4(v)
18587 RET_ENDP
18588 @@ -86,6 +126,20 @@ RET_ENDP
18589 BEGIN(sub)
18590 subl %eax, (v)
18591 sbbl %edx, 4(v)
18592 +
18593 +#ifdef CONFIG_PAX_REFCOUNT
18594 + jno 0f
18595 + addl %eax, (v)
18596 + adcl %edx, 4(v)
18597 + int $4
18598 +0:
18599 + _ASM_EXTABLE(0b, 0b)
18600 +#endif
18601 +
18602 +RET_ENDP
18603 +BEGIN(sub_unchecked)
18604 + subl %eax, (v)
18605 + sbbl %edx, 4(v)
18606 RET_ENDP
18607 #undef v
18608
18609 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18610 sbbl $0, %edx
18611 addl (v), %eax
18612 adcl 4(v), %edx
18613 +
18614 +#ifdef CONFIG_PAX_REFCOUNT
18615 + into
18616 +1234:
18617 + _ASM_EXTABLE(1234b, 2f)
18618 +#endif
18619 +
18620 + movl %eax, (v)
18621 + movl %edx, 4(v)
18622 +
18623 +#ifdef CONFIG_PAX_REFCOUNT
18624 +2:
18625 +#endif
18626 +
18627 +RET_ENDP
18628 +BEGIN(sub_return_unchecked)
18629 + negl %edx
18630 + negl %eax
18631 + sbbl $0, %edx
18632 + addl (v), %eax
18633 + adcl 4(v), %edx
18634 movl %eax, (v)
18635 movl %edx, 4(v)
18636 RET_ENDP
18637 @@ -105,6 +180,20 @@ RET_ENDP
18638 BEGIN(inc)
18639 addl $1, (v)
18640 adcl $0, 4(v)
18641 +
18642 +#ifdef CONFIG_PAX_REFCOUNT
18643 + jno 0f
18644 + subl $1, (v)
18645 + sbbl $0, 4(v)
18646 + int $4
18647 +0:
18648 + _ASM_EXTABLE(0b, 0b)
18649 +#endif
18650 +
18651 +RET_ENDP
18652 +BEGIN(inc_unchecked)
18653 + addl $1, (v)
18654 + adcl $0, 4(v)
18655 RET_ENDP
18656 #undef v
18657
18658 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18659 movl 4(v), %edx
18660 addl $1, %eax
18661 adcl $0, %edx
18662 +
18663 +#ifdef CONFIG_PAX_REFCOUNT
18664 + into
18665 +1234:
18666 + _ASM_EXTABLE(1234b, 2f)
18667 +#endif
18668 +
18669 + movl %eax, (v)
18670 + movl %edx, 4(v)
18671 +
18672 +#ifdef CONFIG_PAX_REFCOUNT
18673 +2:
18674 +#endif
18675 +
18676 +RET_ENDP
18677 +BEGIN(inc_return_unchecked)
18678 + movl (v), %eax
18679 + movl 4(v), %edx
18680 + addl $1, %eax
18681 + adcl $0, %edx
18682 movl %eax, (v)
18683 movl %edx, 4(v)
18684 RET_ENDP
18685 @@ -123,6 +232,20 @@ RET_ENDP
18686 BEGIN(dec)
18687 subl $1, (v)
18688 sbbl $0, 4(v)
18689 +
18690 +#ifdef CONFIG_PAX_REFCOUNT
18691 + jno 0f
18692 + addl $1, (v)
18693 + adcl $0, 4(v)
18694 + int $4
18695 +0:
18696 + _ASM_EXTABLE(0b, 0b)
18697 +#endif
18698 +
18699 +RET_ENDP
18700 +BEGIN(dec_unchecked)
18701 + subl $1, (v)
18702 + sbbl $0, 4(v)
18703 RET_ENDP
18704 #undef v
18705
18706 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18707 movl 4(v), %edx
18708 subl $1, %eax
18709 sbbl $0, %edx
18710 +
18711 +#ifdef CONFIG_PAX_REFCOUNT
18712 + into
18713 +1234:
18714 + _ASM_EXTABLE(1234b, 2f)
18715 +#endif
18716 +
18717 + movl %eax, (v)
18718 + movl %edx, 4(v)
18719 +
18720 +#ifdef CONFIG_PAX_REFCOUNT
18721 +2:
18722 +#endif
18723 +
18724 +RET_ENDP
18725 +BEGIN(dec_return_unchecked)
18726 + movl (v), %eax
18727 + movl 4(v), %edx
18728 + subl $1, %eax
18729 + sbbl $0, %edx
18730 movl %eax, (v)
18731 movl %edx, 4(v)
18732 RET_ENDP
18733 @@ -143,6 +286,13 @@ BEGIN(add_unless)
18734 adcl %edx, %edi
18735 addl (v), %eax
18736 adcl 4(v), %edx
18737 +
18738 +#ifdef CONFIG_PAX_REFCOUNT
18739 + into
18740 +1234:
18741 + _ASM_EXTABLE(1234b, 2f)
18742 +#endif
18743 +
18744 cmpl %eax, %esi
18745 je 3f
18746 1:
18747 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18748 1:
18749 addl $1, %eax
18750 adcl $0, %edx
18751 +
18752 +#ifdef CONFIG_PAX_REFCOUNT
18753 + into
18754 +1234:
18755 + _ASM_EXTABLE(1234b, 2f)
18756 +#endif
18757 +
18758 movl %eax, (v)
18759 movl %edx, 4(v)
18760 movl $1, %eax
18761 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18762 movl 4(v), %edx
18763 subl $1, %eax
18764 sbbl $0, %edx
18765 +
18766 +#ifdef CONFIG_PAX_REFCOUNT
18767 + into
18768 +1234:
18769 + _ASM_EXTABLE(1234b, 1f)
18770 +#endif
18771 +
18772 js 1f
18773 movl %eax, (v)
18774 movl %edx, 4(v)
18775 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18776 index 391a083..d658e9f 100644
18777 --- a/arch/x86/lib/atomic64_cx8_32.S
18778 +++ b/arch/x86/lib/atomic64_cx8_32.S
18779 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18780 CFI_STARTPROC
18781
18782 read64 %ecx
18783 + pax_force_retaddr
18784 ret
18785 CFI_ENDPROC
18786 ENDPROC(atomic64_read_cx8)
18787
18788 +ENTRY(atomic64_read_unchecked_cx8)
18789 + CFI_STARTPROC
18790 +
18791 + read64 %ecx
18792 + pax_force_retaddr
18793 + ret
18794 + CFI_ENDPROC
18795 +ENDPROC(atomic64_read_unchecked_cx8)
18796 +
18797 ENTRY(atomic64_set_cx8)
18798 CFI_STARTPROC
18799
18800 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18801 cmpxchg8b (%esi)
18802 jne 1b
18803
18804 + pax_force_retaddr
18805 ret
18806 CFI_ENDPROC
18807 ENDPROC(atomic64_set_cx8)
18808
18809 +ENTRY(atomic64_set_unchecked_cx8)
18810 + CFI_STARTPROC
18811 +
18812 +1:
18813 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
18814 + * are atomic on 586 and newer */
18815 + cmpxchg8b (%esi)
18816 + jne 1b
18817 +
18818 + pax_force_retaddr
18819 + ret
18820 + CFI_ENDPROC
18821 +ENDPROC(atomic64_set_unchecked_cx8)
18822 +
18823 ENTRY(atomic64_xchg_cx8)
18824 CFI_STARTPROC
18825
18826 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18827 cmpxchg8b (%esi)
18828 jne 1b
18829
18830 + pax_force_retaddr
18831 ret
18832 CFI_ENDPROC
18833 ENDPROC(atomic64_xchg_cx8)
18834
18835 -.macro addsub_return func ins insc
18836 -ENTRY(atomic64_\func\()_return_cx8)
18837 +.macro addsub_return func ins insc unchecked=""
18838 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18839 CFI_STARTPROC
18840 SAVE ebp
18841 SAVE ebx
18842 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18843 movl %edx, %ecx
18844 \ins\()l %esi, %ebx
18845 \insc\()l %edi, %ecx
18846 +
18847 +.ifb \unchecked
18848 +#ifdef CONFIG_PAX_REFCOUNT
18849 + into
18850 +2:
18851 + _ASM_EXTABLE(2b, 3f)
18852 +#endif
18853 +.endif
18854 +
18855 LOCK_PREFIX
18856 cmpxchg8b (%ebp)
18857 jne 1b
18858 -
18859 -10:
18860 movl %ebx, %eax
18861 movl %ecx, %edx
18862 +
18863 +.ifb \unchecked
18864 +#ifdef CONFIG_PAX_REFCOUNT
18865 +3:
18866 +#endif
18867 +.endif
18868 +
18869 RESTORE edi
18870 RESTORE esi
18871 RESTORE ebx
18872 RESTORE ebp
18873 + pax_force_retaddr
18874 ret
18875 CFI_ENDPROC
18876 -ENDPROC(atomic64_\func\()_return_cx8)
18877 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18878 .endm
18879
18880 addsub_return add add adc
18881 addsub_return sub sub sbb
18882 +addsub_return add add adc _unchecked
18883 +addsub_return sub sub sbb _unchecked
18884
18885 -.macro incdec_return func ins insc
18886 -ENTRY(atomic64_\func\()_return_cx8)
18887 +.macro incdec_return func ins insc unchecked
18888 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18889 CFI_STARTPROC
18890 SAVE ebx
18891
18892 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18893 movl %edx, %ecx
18894 \ins\()l $1, %ebx
18895 \insc\()l $0, %ecx
18896 +
18897 +.ifb \unchecked
18898 +#ifdef CONFIG_PAX_REFCOUNT
18899 + into
18900 +2:
18901 + _ASM_EXTABLE(2b, 3f)
18902 +#endif
18903 +.endif
18904 +
18905 LOCK_PREFIX
18906 cmpxchg8b (%esi)
18907 jne 1b
18908
18909 -10:
18910 movl %ebx, %eax
18911 movl %ecx, %edx
18912 +
18913 +.ifb \unchecked
18914 +#ifdef CONFIG_PAX_REFCOUNT
18915 +3:
18916 +#endif
18917 +.endif
18918 +
18919 RESTORE ebx
18920 + pax_force_retaddr
18921 ret
18922 CFI_ENDPROC
18923 -ENDPROC(atomic64_\func\()_return_cx8)
18924 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18925 .endm
18926
18927 incdec_return inc add adc
18928 incdec_return dec sub sbb
18929 +incdec_return inc add adc _unchecked
18930 +incdec_return dec sub sbb _unchecked
18931
18932 ENTRY(atomic64_dec_if_positive_cx8)
18933 CFI_STARTPROC
18934 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18935 movl %edx, %ecx
18936 subl $1, %ebx
18937 sbb $0, %ecx
18938 +
18939 +#ifdef CONFIG_PAX_REFCOUNT
18940 + into
18941 +1234:
18942 + _ASM_EXTABLE(1234b, 2f)
18943 +#endif
18944 +
18945 js 2f
18946 LOCK_PREFIX
18947 cmpxchg8b (%esi)
18948 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18949 movl %ebx, %eax
18950 movl %ecx, %edx
18951 RESTORE ebx
18952 + pax_force_retaddr
18953 ret
18954 CFI_ENDPROC
18955 ENDPROC(atomic64_dec_if_positive_cx8)
18956 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18957 movl %edx, %ecx
18958 addl %esi, %ebx
18959 adcl %edi, %ecx
18960 +
18961 +#ifdef CONFIG_PAX_REFCOUNT
18962 + into
18963 +1234:
18964 + _ASM_EXTABLE(1234b, 3f)
18965 +#endif
18966 +
18967 LOCK_PREFIX
18968 cmpxchg8b (%ebp)
18969 jne 1b
18970 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
18971 CFI_ADJUST_CFA_OFFSET -8
18972 RESTORE ebx
18973 RESTORE ebp
18974 + pax_force_retaddr
18975 ret
18976 4:
18977 cmpl %edx, 4(%esp)
18978 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
18979 movl %edx, %ecx
18980 addl $1, %ebx
18981 adcl $0, %ecx
18982 +
18983 +#ifdef CONFIG_PAX_REFCOUNT
18984 + into
18985 +1234:
18986 + _ASM_EXTABLE(1234b, 3f)
18987 +#endif
18988 +
18989 LOCK_PREFIX
18990 cmpxchg8b (%esi)
18991 jne 1b
18992 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
18993 movl $1, %eax
18994 3:
18995 RESTORE ebx
18996 + pax_force_retaddr
18997 ret
18998 4:
18999 testl %edx, %edx
19000 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
19001 index 78d16a5..fbcf666 100644
19002 --- a/arch/x86/lib/checksum_32.S
19003 +++ b/arch/x86/lib/checksum_32.S
19004 @@ -28,7 +28,8 @@
19005 #include <linux/linkage.h>
19006 #include <asm/dwarf2.h>
19007 #include <asm/errno.h>
19008 -
19009 +#include <asm/segment.h>
19010 +
19011 /*
19012 * computes a partial checksum, e.g. for TCP/UDP fragments
19013 */
19014 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
19015
19016 #define ARGBASE 16
19017 #define FP 12
19018 -
19019 -ENTRY(csum_partial_copy_generic)
19020 +
19021 +ENTRY(csum_partial_copy_generic_to_user)
19022 CFI_STARTPROC
19023 +
19024 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19025 + pushl_cfi %gs
19026 + popl_cfi %es
19027 + jmp csum_partial_copy_generic
19028 +#endif
19029 +
19030 +ENTRY(csum_partial_copy_generic_from_user)
19031 +
19032 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19033 + pushl_cfi %gs
19034 + popl_cfi %ds
19035 +#endif
19036 +
19037 +ENTRY(csum_partial_copy_generic)
19038 subl $4,%esp
19039 CFI_ADJUST_CFA_OFFSET 4
19040 pushl_cfi %edi
19041 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19042 jmp 4f
19043 SRC(1: movw (%esi), %bx )
19044 addl $2, %esi
19045 -DST( movw %bx, (%edi) )
19046 +DST( movw %bx, %es:(%edi) )
19047 addl $2, %edi
19048 addw %bx, %ax
19049 adcl $0, %eax
19050 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19051 SRC(1: movl (%esi), %ebx )
19052 SRC( movl 4(%esi), %edx )
19053 adcl %ebx, %eax
19054 -DST( movl %ebx, (%edi) )
19055 +DST( movl %ebx, %es:(%edi) )
19056 adcl %edx, %eax
19057 -DST( movl %edx, 4(%edi) )
19058 +DST( movl %edx, %es:4(%edi) )
19059
19060 SRC( movl 8(%esi), %ebx )
19061 SRC( movl 12(%esi), %edx )
19062 adcl %ebx, %eax
19063 -DST( movl %ebx, 8(%edi) )
19064 +DST( movl %ebx, %es:8(%edi) )
19065 adcl %edx, %eax
19066 -DST( movl %edx, 12(%edi) )
19067 +DST( movl %edx, %es:12(%edi) )
19068
19069 SRC( movl 16(%esi), %ebx )
19070 SRC( movl 20(%esi), %edx )
19071 adcl %ebx, %eax
19072 -DST( movl %ebx, 16(%edi) )
19073 +DST( movl %ebx, %es:16(%edi) )
19074 adcl %edx, %eax
19075 -DST( movl %edx, 20(%edi) )
19076 +DST( movl %edx, %es:20(%edi) )
19077
19078 SRC( movl 24(%esi), %ebx )
19079 SRC( movl 28(%esi), %edx )
19080 adcl %ebx, %eax
19081 -DST( movl %ebx, 24(%edi) )
19082 +DST( movl %ebx, %es:24(%edi) )
19083 adcl %edx, %eax
19084 -DST( movl %edx, 28(%edi) )
19085 +DST( movl %edx, %es:28(%edi) )
19086
19087 lea 32(%esi), %esi
19088 lea 32(%edi), %edi
19089 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19090 shrl $2, %edx # This clears CF
19091 SRC(3: movl (%esi), %ebx )
19092 adcl %ebx, %eax
19093 -DST( movl %ebx, (%edi) )
19094 +DST( movl %ebx, %es:(%edi) )
19095 lea 4(%esi), %esi
19096 lea 4(%edi), %edi
19097 dec %edx
19098 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19099 jb 5f
19100 SRC( movw (%esi), %cx )
19101 leal 2(%esi), %esi
19102 -DST( movw %cx, (%edi) )
19103 +DST( movw %cx, %es:(%edi) )
19104 leal 2(%edi), %edi
19105 je 6f
19106 shll $16,%ecx
19107 SRC(5: movb (%esi), %cl )
19108 -DST( movb %cl, (%edi) )
19109 +DST( movb %cl, %es:(%edi) )
19110 6: addl %ecx, %eax
19111 adcl $0, %eax
19112 7:
19113 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19114
19115 6001:
19116 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19117 - movl $-EFAULT, (%ebx)
19118 + movl $-EFAULT, %ss:(%ebx)
19119
19120 # zero the complete destination - computing the rest
19121 # is too much work
19122 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19123
19124 6002:
19125 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19126 - movl $-EFAULT,(%ebx)
19127 + movl $-EFAULT,%ss:(%ebx)
19128 jmp 5000b
19129
19130 .previous
19131
19132 + pushl_cfi %ss
19133 + popl_cfi %ds
19134 + pushl_cfi %ss
19135 + popl_cfi %es
19136 popl_cfi %ebx
19137 CFI_RESTORE ebx
19138 popl_cfi %esi
19139 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19140 popl_cfi %ecx # equivalent to addl $4,%esp
19141 ret
19142 CFI_ENDPROC
19143 -ENDPROC(csum_partial_copy_generic)
19144 +ENDPROC(csum_partial_copy_generic_to_user)
19145
19146 #else
19147
19148 /* Version for PentiumII/PPro */
19149
19150 #define ROUND1(x) \
19151 + nop; nop; nop; \
19152 SRC(movl x(%esi), %ebx ) ; \
19153 addl %ebx, %eax ; \
19154 - DST(movl %ebx, x(%edi) ) ;
19155 + DST(movl %ebx, %es:x(%edi)) ;
19156
19157 #define ROUND(x) \
19158 + nop; nop; nop; \
19159 SRC(movl x(%esi), %ebx ) ; \
19160 adcl %ebx, %eax ; \
19161 - DST(movl %ebx, x(%edi) ) ;
19162 + DST(movl %ebx, %es:x(%edi)) ;
19163
19164 #define ARGBASE 12
19165 -
19166 -ENTRY(csum_partial_copy_generic)
19167 +
19168 +ENTRY(csum_partial_copy_generic_to_user)
19169 CFI_STARTPROC
19170 +
19171 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19172 + pushl_cfi %gs
19173 + popl_cfi %es
19174 + jmp csum_partial_copy_generic
19175 +#endif
19176 +
19177 +ENTRY(csum_partial_copy_generic_from_user)
19178 +
19179 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19180 + pushl_cfi %gs
19181 + popl_cfi %ds
19182 +#endif
19183 +
19184 +ENTRY(csum_partial_copy_generic)
19185 pushl_cfi %ebx
19186 CFI_REL_OFFSET ebx, 0
19187 pushl_cfi %edi
19188 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19189 subl %ebx, %edi
19190 lea -1(%esi),%edx
19191 andl $-32,%edx
19192 - lea 3f(%ebx,%ebx), %ebx
19193 + lea 3f(%ebx,%ebx,2), %ebx
19194 testl %esi, %esi
19195 jmp *%ebx
19196 1: addl $64,%esi
19197 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19198 jb 5f
19199 SRC( movw (%esi), %dx )
19200 leal 2(%esi), %esi
19201 -DST( movw %dx, (%edi) )
19202 +DST( movw %dx, %es:(%edi) )
19203 leal 2(%edi), %edi
19204 je 6f
19205 shll $16,%edx
19206 5:
19207 SRC( movb (%esi), %dl )
19208 -DST( movb %dl, (%edi) )
19209 +DST( movb %dl, %es:(%edi) )
19210 6: addl %edx, %eax
19211 adcl $0, %eax
19212 7:
19213 .section .fixup, "ax"
19214 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19215 - movl $-EFAULT, (%ebx)
19216 + movl $-EFAULT, %ss:(%ebx)
19217 # zero the complete destination (computing the rest is too much work)
19218 movl ARGBASE+8(%esp),%edi # dst
19219 movl ARGBASE+12(%esp),%ecx # len
19220 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19221 rep; stosb
19222 jmp 7b
19223 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19224 - movl $-EFAULT, (%ebx)
19225 + movl $-EFAULT, %ss:(%ebx)
19226 jmp 7b
19227 .previous
19228
19229 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19230 + pushl_cfi %ss
19231 + popl_cfi %ds
19232 + pushl_cfi %ss
19233 + popl_cfi %es
19234 +#endif
19235 +
19236 popl_cfi %esi
19237 CFI_RESTORE esi
19238 popl_cfi %edi
19239 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19240 CFI_RESTORE ebx
19241 ret
19242 CFI_ENDPROC
19243 -ENDPROC(csum_partial_copy_generic)
19244 +ENDPROC(csum_partial_copy_generic_to_user)
19245
19246 #undef ROUND
19247 #undef ROUND1
19248 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19249 index f2145cf..cea889d 100644
19250 --- a/arch/x86/lib/clear_page_64.S
19251 +++ b/arch/x86/lib/clear_page_64.S
19252 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19253 movl $4096/8,%ecx
19254 xorl %eax,%eax
19255 rep stosq
19256 + pax_force_retaddr
19257 ret
19258 CFI_ENDPROC
19259 ENDPROC(clear_page_c)
19260 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19261 movl $4096,%ecx
19262 xorl %eax,%eax
19263 rep stosb
19264 + pax_force_retaddr
19265 ret
19266 CFI_ENDPROC
19267 ENDPROC(clear_page_c_e)
19268 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19269 leaq 64(%rdi),%rdi
19270 jnz .Lloop
19271 nop
19272 + pax_force_retaddr
19273 ret
19274 CFI_ENDPROC
19275 .Lclear_page_end:
19276 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19277
19278 #include <asm/cpufeature.h>
19279
19280 - .section .altinstr_replacement,"ax"
19281 + .section .altinstr_replacement,"a"
19282 1: .byte 0xeb /* jmp <disp8> */
19283 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19284 2: .byte 0xeb /* jmp <disp8> */
19285 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19286 index 1e572c5..2a162cd 100644
19287 --- a/arch/x86/lib/cmpxchg16b_emu.S
19288 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19289 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19290
19291 popf
19292 mov $1, %al
19293 + pax_force_retaddr
19294 ret
19295
19296 not_same:
19297 popf
19298 xor %al,%al
19299 + pax_force_retaddr
19300 ret
19301
19302 CFI_ENDPROC
19303 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19304 index 01c805b..dccb07f 100644
19305 --- a/arch/x86/lib/copy_page_64.S
19306 +++ b/arch/x86/lib/copy_page_64.S
19307 @@ -9,6 +9,7 @@ copy_page_c:
19308 CFI_STARTPROC
19309 movl $4096/8,%ecx
19310 rep movsq
19311 + pax_force_retaddr
19312 ret
19313 CFI_ENDPROC
19314 ENDPROC(copy_page_c)
19315 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19316 movq 16 (%rsi), %rdx
19317 movq 24 (%rsi), %r8
19318 movq 32 (%rsi), %r9
19319 - movq 40 (%rsi), %r10
19320 + movq 40 (%rsi), %r13
19321 movq 48 (%rsi), %r11
19322 movq 56 (%rsi), %r12
19323
19324 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19325 movq %rdx, 16 (%rdi)
19326 movq %r8, 24 (%rdi)
19327 movq %r9, 32 (%rdi)
19328 - movq %r10, 40 (%rdi)
19329 + movq %r13, 40 (%rdi)
19330 movq %r11, 48 (%rdi)
19331 movq %r12, 56 (%rdi)
19332
19333 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19334 movq 16 (%rsi), %rdx
19335 movq 24 (%rsi), %r8
19336 movq 32 (%rsi), %r9
19337 - movq 40 (%rsi), %r10
19338 + movq 40 (%rsi), %r13
19339 movq 48 (%rsi), %r11
19340 movq 56 (%rsi), %r12
19341
19342 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19343 movq %rdx, 16 (%rdi)
19344 movq %r8, 24 (%rdi)
19345 movq %r9, 32 (%rdi)
19346 - movq %r10, 40 (%rdi)
19347 + movq %r13, 40 (%rdi)
19348 movq %r11, 48 (%rdi)
19349 movq %r12, 56 (%rdi)
19350
19351 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19352 CFI_RESTORE r13
19353 addq $3*8,%rsp
19354 CFI_ADJUST_CFA_OFFSET -3*8
19355 + pax_force_retaddr
19356 ret
19357 .Lcopy_page_end:
19358 CFI_ENDPROC
19359 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19360
19361 #include <asm/cpufeature.h>
19362
19363 - .section .altinstr_replacement,"ax"
19364 + .section .altinstr_replacement,"a"
19365 1: .byte 0xeb /* jmp <disp8> */
19366 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19367 2:
19368 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19369 index 0248402..821c786 100644
19370 --- a/arch/x86/lib/copy_user_64.S
19371 +++ b/arch/x86/lib/copy_user_64.S
19372 @@ -16,6 +16,7 @@
19373 #include <asm/thread_info.h>
19374 #include <asm/cpufeature.h>
19375 #include <asm/alternative-asm.h>
19376 +#include <asm/pgtable.h>
19377
19378 /*
19379 * By placing feature2 after feature1 in altinstructions section, we logically
19380 @@ -29,7 +30,7 @@
19381 .byte 0xe9 /* 32bit jump */
19382 .long \orig-1f /* by default jump to orig */
19383 1:
19384 - .section .altinstr_replacement,"ax"
19385 + .section .altinstr_replacement,"a"
19386 2: .byte 0xe9 /* near jump with 32bit immediate */
19387 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19388 3: .byte 0xe9 /* near jump with 32bit immediate */
19389 @@ -71,47 +72,20 @@
19390 #endif
19391 .endm
19392
19393 -/* Standard copy_to_user with segment limit checking */
19394 -ENTRY(_copy_to_user)
19395 - CFI_STARTPROC
19396 - GET_THREAD_INFO(%rax)
19397 - movq %rdi,%rcx
19398 - addq %rdx,%rcx
19399 - jc bad_to_user
19400 - cmpq TI_addr_limit(%rax),%rcx
19401 - ja bad_to_user
19402 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19403 - copy_user_generic_unrolled,copy_user_generic_string, \
19404 - copy_user_enhanced_fast_string
19405 - CFI_ENDPROC
19406 -ENDPROC(_copy_to_user)
19407 -
19408 -/* Standard copy_from_user with segment limit checking */
19409 -ENTRY(_copy_from_user)
19410 - CFI_STARTPROC
19411 - GET_THREAD_INFO(%rax)
19412 - movq %rsi,%rcx
19413 - addq %rdx,%rcx
19414 - jc bad_from_user
19415 - cmpq TI_addr_limit(%rax),%rcx
19416 - ja bad_from_user
19417 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19418 - copy_user_generic_unrolled,copy_user_generic_string, \
19419 - copy_user_enhanced_fast_string
19420 - CFI_ENDPROC
19421 -ENDPROC(_copy_from_user)
19422 -
19423 .section .fixup,"ax"
19424 /* must zero dest */
19425 ENTRY(bad_from_user)
19426 bad_from_user:
19427 CFI_STARTPROC
19428 + testl %edx,%edx
19429 + js bad_to_user
19430 movl %edx,%ecx
19431 xorl %eax,%eax
19432 rep
19433 stosb
19434 bad_to_user:
19435 movl %edx,%eax
19436 + pax_force_retaddr
19437 ret
19438 CFI_ENDPROC
19439 ENDPROC(bad_from_user)
19440 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19441 jz 17f
19442 1: movq (%rsi),%r8
19443 2: movq 1*8(%rsi),%r9
19444 -3: movq 2*8(%rsi),%r10
19445 +3: movq 2*8(%rsi),%rax
19446 4: movq 3*8(%rsi),%r11
19447 5: movq %r8,(%rdi)
19448 6: movq %r9,1*8(%rdi)
19449 -7: movq %r10,2*8(%rdi)
19450 +7: movq %rax,2*8(%rdi)
19451 8: movq %r11,3*8(%rdi)
19452 9: movq 4*8(%rsi),%r8
19453 10: movq 5*8(%rsi),%r9
19454 -11: movq 6*8(%rsi),%r10
19455 +11: movq 6*8(%rsi),%rax
19456 12: movq 7*8(%rsi),%r11
19457 13: movq %r8,4*8(%rdi)
19458 14: movq %r9,5*8(%rdi)
19459 -15: movq %r10,6*8(%rdi)
19460 +15: movq %rax,6*8(%rdi)
19461 16: movq %r11,7*8(%rdi)
19462 leaq 64(%rsi),%rsi
19463 leaq 64(%rdi),%rdi
19464 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19465 decl %ecx
19466 jnz 21b
19467 23: xor %eax,%eax
19468 + pax_force_retaddr
19469 ret
19470
19471 .section .fixup,"ax"
19472 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19473 3: rep
19474 movsb
19475 4: xorl %eax,%eax
19476 + pax_force_retaddr
19477 ret
19478
19479 .section .fixup,"ax"
19480 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19481 1: rep
19482 movsb
19483 2: xorl %eax,%eax
19484 + pax_force_retaddr
19485 ret
19486
19487 .section .fixup,"ax"
19488 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19489 index cb0c112..e3a6895 100644
19490 --- a/arch/x86/lib/copy_user_nocache_64.S
19491 +++ b/arch/x86/lib/copy_user_nocache_64.S
19492 @@ -8,12 +8,14 @@
19493
19494 #include <linux/linkage.h>
19495 #include <asm/dwarf2.h>
19496 +#include <asm/alternative-asm.h>
19497
19498 #define FIX_ALIGNMENT 1
19499
19500 #include <asm/current.h>
19501 #include <asm/asm-offsets.h>
19502 #include <asm/thread_info.h>
19503 +#include <asm/pgtable.h>
19504
19505 .macro ALIGN_DESTINATION
19506 #ifdef FIX_ALIGNMENT
19507 @@ -50,6 +52,15 @@
19508 */
19509 ENTRY(__copy_user_nocache)
19510 CFI_STARTPROC
19511 +
19512 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19513 + mov $PAX_USER_SHADOW_BASE,%rcx
19514 + cmp %rcx,%rsi
19515 + jae 1f
19516 + add %rcx,%rsi
19517 +1:
19518 +#endif
19519 +
19520 cmpl $8,%edx
19521 jb 20f /* less then 8 bytes, go to byte copy loop */
19522 ALIGN_DESTINATION
19523 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19524 jz 17f
19525 1: movq (%rsi),%r8
19526 2: movq 1*8(%rsi),%r9
19527 -3: movq 2*8(%rsi),%r10
19528 +3: movq 2*8(%rsi),%rax
19529 4: movq 3*8(%rsi),%r11
19530 5: movnti %r8,(%rdi)
19531 6: movnti %r9,1*8(%rdi)
19532 -7: movnti %r10,2*8(%rdi)
19533 +7: movnti %rax,2*8(%rdi)
19534 8: movnti %r11,3*8(%rdi)
19535 9: movq 4*8(%rsi),%r8
19536 10: movq 5*8(%rsi),%r9
19537 -11: movq 6*8(%rsi),%r10
19538 +11: movq 6*8(%rsi),%rax
19539 12: movq 7*8(%rsi),%r11
19540 13: movnti %r8,4*8(%rdi)
19541 14: movnti %r9,5*8(%rdi)
19542 -15: movnti %r10,6*8(%rdi)
19543 +15: movnti %rax,6*8(%rdi)
19544 16: movnti %r11,7*8(%rdi)
19545 leaq 64(%rsi),%rsi
19546 leaq 64(%rdi),%rdi
19547 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19548 jnz 21b
19549 23: xorl %eax,%eax
19550 sfence
19551 + pax_force_retaddr
19552 ret
19553
19554 .section .fixup,"ax"
19555 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19556 index fb903b7..c92b7f7 100644
19557 --- a/arch/x86/lib/csum-copy_64.S
19558 +++ b/arch/x86/lib/csum-copy_64.S
19559 @@ -8,6 +8,7 @@
19560 #include <linux/linkage.h>
19561 #include <asm/dwarf2.h>
19562 #include <asm/errno.h>
19563 +#include <asm/alternative-asm.h>
19564
19565 /*
19566 * Checksum copy with exception handling.
19567 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19568 CFI_RESTORE rbp
19569 addq $7*8, %rsp
19570 CFI_ADJUST_CFA_OFFSET -7*8
19571 + pax_force_retaddr 0, 1
19572 ret
19573 CFI_RESTORE_STATE
19574
19575 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19576 index 459b58a..9570bc7 100644
19577 --- a/arch/x86/lib/csum-wrappers_64.c
19578 +++ b/arch/x86/lib/csum-wrappers_64.c
19579 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19580 len -= 2;
19581 }
19582 }
19583 - isum = csum_partial_copy_generic((__force const void *)src,
19584 +
19585 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19586 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19587 + src += PAX_USER_SHADOW_BASE;
19588 +#endif
19589 +
19590 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19591 dst, len, isum, errp, NULL);
19592 if (unlikely(*errp))
19593 goto out_err;
19594 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19595 }
19596
19597 *errp = 0;
19598 - return csum_partial_copy_generic(src, (void __force *)dst,
19599 +
19600 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19601 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19602 + dst += PAX_USER_SHADOW_BASE;
19603 +#endif
19604 +
19605 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19606 len, isum, NULL, errp);
19607 }
19608 EXPORT_SYMBOL(csum_partial_copy_to_user);
19609 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19610 index 51f1504..ddac4c1 100644
19611 --- a/arch/x86/lib/getuser.S
19612 +++ b/arch/x86/lib/getuser.S
19613 @@ -33,15 +33,38 @@
19614 #include <asm/asm-offsets.h>
19615 #include <asm/thread_info.h>
19616 #include <asm/asm.h>
19617 +#include <asm/segment.h>
19618 +#include <asm/pgtable.h>
19619 +#include <asm/alternative-asm.h>
19620 +
19621 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19622 +#define __copyuser_seg gs;
19623 +#else
19624 +#define __copyuser_seg
19625 +#endif
19626
19627 .text
19628 ENTRY(__get_user_1)
19629 CFI_STARTPROC
19630 +
19631 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19632 GET_THREAD_INFO(%_ASM_DX)
19633 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19634 jae bad_get_user
19635 -1: movzb (%_ASM_AX),%edx
19636 +
19637 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19638 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19639 + cmp %_ASM_DX,%_ASM_AX
19640 + jae 1234f
19641 + add %_ASM_DX,%_ASM_AX
19642 +1234:
19643 +#endif
19644 +
19645 +#endif
19646 +
19647 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19648 xor %eax,%eax
19649 + pax_force_retaddr
19650 ret
19651 CFI_ENDPROC
19652 ENDPROC(__get_user_1)
19653 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19654 ENTRY(__get_user_2)
19655 CFI_STARTPROC
19656 add $1,%_ASM_AX
19657 +
19658 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19659 jc bad_get_user
19660 GET_THREAD_INFO(%_ASM_DX)
19661 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19662 jae bad_get_user
19663 -2: movzwl -1(%_ASM_AX),%edx
19664 +
19665 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19666 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19667 + cmp %_ASM_DX,%_ASM_AX
19668 + jae 1234f
19669 + add %_ASM_DX,%_ASM_AX
19670 +1234:
19671 +#endif
19672 +
19673 +#endif
19674 +
19675 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19676 xor %eax,%eax
19677 + pax_force_retaddr
19678 ret
19679 CFI_ENDPROC
19680 ENDPROC(__get_user_2)
19681 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19682 ENTRY(__get_user_4)
19683 CFI_STARTPROC
19684 add $3,%_ASM_AX
19685 +
19686 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19687 jc bad_get_user
19688 GET_THREAD_INFO(%_ASM_DX)
19689 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19690 jae bad_get_user
19691 -3: mov -3(%_ASM_AX),%edx
19692 +
19693 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19694 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19695 + cmp %_ASM_DX,%_ASM_AX
19696 + jae 1234f
19697 + add %_ASM_DX,%_ASM_AX
19698 +1234:
19699 +#endif
19700 +
19701 +#endif
19702 +
19703 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19704 xor %eax,%eax
19705 + pax_force_retaddr
19706 ret
19707 CFI_ENDPROC
19708 ENDPROC(__get_user_4)
19709 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19710 GET_THREAD_INFO(%_ASM_DX)
19711 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19712 jae bad_get_user
19713 +
19714 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19715 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19716 + cmp %_ASM_DX,%_ASM_AX
19717 + jae 1234f
19718 + add %_ASM_DX,%_ASM_AX
19719 +1234:
19720 +#endif
19721 +
19722 4: movq -7(%_ASM_AX),%_ASM_DX
19723 xor %eax,%eax
19724 + pax_force_retaddr
19725 ret
19726 CFI_ENDPROC
19727 ENDPROC(__get_user_8)
19728 @@ -91,6 +152,7 @@ bad_get_user:
19729 CFI_STARTPROC
19730 xor %edx,%edx
19731 mov $(-EFAULT),%_ASM_AX
19732 + pax_force_retaddr
19733 ret
19734 CFI_ENDPROC
19735 END(bad_get_user)
19736 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19737 index 9f33b98..dfc7678 100644
19738 --- a/arch/x86/lib/insn.c
19739 +++ b/arch/x86/lib/insn.c
19740 @@ -21,6 +21,11 @@
19741 #include <linux/string.h>
19742 #include <asm/inat.h>
19743 #include <asm/insn.h>
19744 +#ifdef __KERNEL__
19745 +#include <asm/pgtable_types.h>
19746 +#else
19747 +#define ktla_ktva(addr) addr
19748 +#endif
19749
19750 #define get_next(t, insn) \
19751 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
19752 @@ -40,8 +45,8 @@
19753 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19754 {
19755 memset(insn, 0, sizeof(*insn));
19756 - insn->kaddr = kaddr;
19757 - insn->next_byte = kaddr;
19758 + insn->kaddr = ktla_ktva(kaddr);
19759 + insn->next_byte = ktla_ktva(kaddr);
19760 insn->x86_64 = x86_64 ? 1 : 0;
19761 insn->opnd_bytes = 4;
19762 if (x86_64)
19763 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19764 index 05a95e7..326f2fa 100644
19765 --- a/arch/x86/lib/iomap_copy_64.S
19766 +++ b/arch/x86/lib/iomap_copy_64.S
19767 @@ -17,6 +17,7 @@
19768
19769 #include <linux/linkage.h>
19770 #include <asm/dwarf2.h>
19771 +#include <asm/alternative-asm.h>
19772
19773 /*
19774 * override generic version in lib/iomap_copy.c
19775 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19776 CFI_STARTPROC
19777 movl %edx,%ecx
19778 rep movsd
19779 + pax_force_retaddr
19780 ret
19781 CFI_ENDPROC
19782 ENDPROC(__iowrite32_copy)
19783 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19784 index efbf2a0..8893637 100644
19785 --- a/arch/x86/lib/memcpy_64.S
19786 +++ b/arch/x86/lib/memcpy_64.S
19787 @@ -34,6 +34,7 @@
19788 rep movsq
19789 movl %edx, %ecx
19790 rep movsb
19791 + pax_force_retaddr
19792 ret
19793 .Lmemcpy_e:
19794 .previous
19795 @@ -51,6 +52,7 @@
19796
19797 movl %edx, %ecx
19798 rep movsb
19799 + pax_force_retaddr
19800 ret
19801 .Lmemcpy_e_e:
19802 .previous
19803 @@ -81,13 +83,13 @@ ENTRY(memcpy)
19804 */
19805 movq 0*8(%rsi), %r8
19806 movq 1*8(%rsi), %r9
19807 - movq 2*8(%rsi), %r10
19808 + movq 2*8(%rsi), %rcx
19809 movq 3*8(%rsi), %r11
19810 leaq 4*8(%rsi), %rsi
19811
19812 movq %r8, 0*8(%rdi)
19813 movq %r9, 1*8(%rdi)
19814 - movq %r10, 2*8(%rdi)
19815 + movq %rcx, 2*8(%rdi)
19816 movq %r11, 3*8(%rdi)
19817 leaq 4*8(%rdi), %rdi
19818 jae .Lcopy_forward_loop
19819 @@ -110,12 +112,12 @@ ENTRY(memcpy)
19820 subq $0x20, %rdx
19821 movq -1*8(%rsi), %r8
19822 movq -2*8(%rsi), %r9
19823 - movq -3*8(%rsi), %r10
19824 + movq -3*8(%rsi), %rcx
19825 movq -4*8(%rsi), %r11
19826 leaq -4*8(%rsi), %rsi
19827 movq %r8, -1*8(%rdi)
19828 movq %r9, -2*8(%rdi)
19829 - movq %r10, -3*8(%rdi)
19830 + movq %rcx, -3*8(%rdi)
19831 movq %r11, -4*8(%rdi)
19832 leaq -4*8(%rdi), %rdi
19833 jae .Lcopy_backward_loop
19834 @@ -135,12 +137,13 @@ ENTRY(memcpy)
19835 */
19836 movq 0*8(%rsi), %r8
19837 movq 1*8(%rsi), %r9
19838 - movq -2*8(%rsi, %rdx), %r10
19839 + movq -2*8(%rsi, %rdx), %rcx
19840 movq -1*8(%rsi, %rdx), %r11
19841 movq %r8, 0*8(%rdi)
19842 movq %r9, 1*8(%rdi)
19843 - movq %r10, -2*8(%rdi, %rdx)
19844 + movq %rcx, -2*8(%rdi, %rdx)
19845 movq %r11, -1*8(%rdi, %rdx)
19846 + pax_force_retaddr
19847 retq
19848 .p2align 4
19849 .Lless_16bytes:
19850 @@ -153,6 +156,7 @@ ENTRY(memcpy)
19851 movq -1*8(%rsi, %rdx), %r9
19852 movq %r8, 0*8(%rdi)
19853 movq %r9, -1*8(%rdi, %rdx)
19854 + pax_force_retaddr
19855 retq
19856 .p2align 4
19857 .Lless_8bytes:
19858 @@ -166,6 +170,7 @@ ENTRY(memcpy)
19859 movl -4(%rsi, %rdx), %r8d
19860 movl %ecx, (%rdi)
19861 movl %r8d, -4(%rdi, %rdx)
19862 + pax_force_retaddr
19863 retq
19864 .p2align 4
19865 .Lless_3bytes:
19866 @@ -183,6 +188,7 @@ ENTRY(memcpy)
19867 jnz .Lloop_1
19868
19869 .Lend:
19870 + pax_force_retaddr
19871 retq
19872 CFI_ENDPROC
19873 ENDPROC(memcpy)
19874 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19875 index ee16461..c39c199 100644
19876 --- a/arch/x86/lib/memmove_64.S
19877 +++ b/arch/x86/lib/memmove_64.S
19878 @@ -61,13 +61,13 @@ ENTRY(memmove)
19879 5:
19880 sub $0x20, %rdx
19881 movq 0*8(%rsi), %r11
19882 - movq 1*8(%rsi), %r10
19883 + movq 1*8(%rsi), %rcx
19884 movq 2*8(%rsi), %r9
19885 movq 3*8(%rsi), %r8
19886 leaq 4*8(%rsi), %rsi
19887
19888 movq %r11, 0*8(%rdi)
19889 - movq %r10, 1*8(%rdi)
19890 + movq %rcx, 1*8(%rdi)
19891 movq %r9, 2*8(%rdi)
19892 movq %r8, 3*8(%rdi)
19893 leaq 4*8(%rdi), %rdi
19894 @@ -81,10 +81,10 @@ ENTRY(memmove)
19895 4:
19896 movq %rdx, %rcx
19897 movq -8(%rsi, %rdx), %r11
19898 - lea -8(%rdi, %rdx), %r10
19899 + lea -8(%rdi, %rdx), %r9
19900 shrq $3, %rcx
19901 rep movsq
19902 - movq %r11, (%r10)
19903 + movq %r11, (%r9)
19904 jmp 13f
19905 .Lmemmove_end_forward:
19906
19907 @@ -95,14 +95,14 @@ ENTRY(memmove)
19908 7:
19909 movq %rdx, %rcx
19910 movq (%rsi), %r11
19911 - movq %rdi, %r10
19912 + movq %rdi, %r9
19913 leaq -8(%rsi, %rdx), %rsi
19914 leaq -8(%rdi, %rdx), %rdi
19915 shrq $3, %rcx
19916 std
19917 rep movsq
19918 cld
19919 - movq %r11, (%r10)
19920 + movq %r11, (%r9)
19921 jmp 13f
19922
19923 /*
19924 @@ -127,13 +127,13 @@ ENTRY(memmove)
19925 8:
19926 subq $0x20, %rdx
19927 movq -1*8(%rsi), %r11
19928 - movq -2*8(%rsi), %r10
19929 + movq -2*8(%rsi), %rcx
19930 movq -3*8(%rsi), %r9
19931 movq -4*8(%rsi), %r8
19932 leaq -4*8(%rsi), %rsi
19933
19934 movq %r11, -1*8(%rdi)
19935 - movq %r10, -2*8(%rdi)
19936 + movq %rcx, -2*8(%rdi)
19937 movq %r9, -3*8(%rdi)
19938 movq %r8, -4*8(%rdi)
19939 leaq -4*8(%rdi), %rdi
19940 @@ -151,11 +151,11 @@ ENTRY(memmove)
19941 * Move data from 16 bytes to 31 bytes.
19942 */
19943 movq 0*8(%rsi), %r11
19944 - movq 1*8(%rsi), %r10
19945 + movq 1*8(%rsi), %rcx
19946 movq -2*8(%rsi, %rdx), %r9
19947 movq -1*8(%rsi, %rdx), %r8
19948 movq %r11, 0*8(%rdi)
19949 - movq %r10, 1*8(%rdi)
19950 + movq %rcx, 1*8(%rdi)
19951 movq %r9, -2*8(%rdi, %rdx)
19952 movq %r8, -1*8(%rdi, %rdx)
19953 jmp 13f
19954 @@ -167,9 +167,9 @@ ENTRY(memmove)
19955 * Move data from 8 bytes to 15 bytes.
19956 */
19957 movq 0*8(%rsi), %r11
19958 - movq -1*8(%rsi, %rdx), %r10
19959 + movq -1*8(%rsi, %rdx), %r9
19960 movq %r11, 0*8(%rdi)
19961 - movq %r10, -1*8(%rdi, %rdx)
19962 + movq %r9, -1*8(%rdi, %rdx)
19963 jmp 13f
19964 10:
19965 cmpq $4, %rdx
19966 @@ -178,9 +178,9 @@ ENTRY(memmove)
19967 * Move data from 4 bytes to 7 bytes.
19968 */
19969 movl (%rsi), %r11d
19970 - movl -4(%rsi, %rdx), %r10d
19971 + movl -4(%rsi, %rdx), %r9d
19972 movl %r11d, (%rdi)
19973 - movl %r10d, -4(%rdi, %rdx)
19974 + movl %r9d, -4(%rdi, %rdx)
19975 jmp 13f
19976 11:
19977 cmp $2, %rdx
19978 @@ -189,9 +189,9 @@ ENTRY(memmove)
19979 * Move data from 2 bytes to 3 bytes.
19980 */
19981 movw (%rsi), %r11w
19982 - movw -2(%rsi, %rdx), %r10w
19983 + movw -2(%rsi, %rdx), %r9w
19984 movw %r11w, (%rdi)
19985 - movw %r10w, -2(%rdi, %rdx)
19986 + movw %r9w, -2(%rdi, %rdx)
19987 jmp 13f
19988 12:
19989 cmp $1, %rdx
19990 @@ -202,6 +202,7 @@ ENTRY(memmove)
19991 movb (%rsi), %r11b
19992 movb %r11b, (%rdi)
19993 13:
19994 + pax_force_retaddr
19995 retq
19996 CFI_ENDPROC
19997
19998 @@ -210,6 +211,7 @@ ENTRY(memmove)
19999 /* Forward moving data. */
20000 movq %rdx, %rcx
20001 rep movsb
20002 + pax_force_retaddr
20003 retq
20004 .Lmemmove_end_forward_efs:
20005 .previous
20006 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
20007 index 79bd454..dff325a 100644
20008 --- a/arch/x86/lib/memset_64.S
20009 +++ b/arch/x86/lib/memset_64.S
20010 @@ -31,6 +31,7 @@
20011 movl %r8d,%ecx
20012 rep stosb
20013 movq %r9,%rax
20014 + pax_force_retaddr
20015 ret
20016 .Lmemset_e:
20017 .previous
20018 @@ -53,6 +54,7 @@
20019 movl %edx,%ecx
20020 rep stosb
20021 movq %r9,%rax
20022 + pax_force_retaddr
20023 ret
20024 .Lmemset_e_e:
20025 .previous
20026 @@ -60,13 +62,13 @@
20027 ENTRY(memset)
20028 ENTRY(__memset)
20029 CFI_STARTPROC
20030 - movq %rdi,%r10
20031 movq %rdx,%r11
20032
20033 /* expand byte value */
20034 movzbl %sil,%ecx
20035 movabs $0x0101010101010101,%rax
20036 mul %rcx /* with rax, clobbers rdx */
20037 + movq %rdi,%rdx
20038
20039 /* align dst */
20040 movl %edi,%r9d
20041 @@ -120,7 +122,8 @@ ENTRY(__memset)
20042 jnz .Lloop_1
20043
20044 .Lende:
20045 - movq %r10,%rax
20046 + movq %rdx,%rax
20047 + pax_force_retaddr
20048 ret
20049
20050 CFI_RESTORE_STATE
20051 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20052 index c9f2d9b..e7fd2c0 100644
20053 --- a/arch/x86/lib/mmx_32.c
20054 +++ b/arch/x86/lib/mmx_32.c
20055 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20056 {
20057 void *p;
20058 int i;
20059 + unsigned long cr0;
20060
20061 if (unlikely(in_interrupt()))
20062 return __memcpy(to, from, len);
20063 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20064 kernel_fpu_begin();
20065
20066 __asm__ __volatile__ (
20067 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20068 - " prefetch 64(%0)\n"
20069 - " prefetch 128(%0)\n"
20070 - " prefetch 192(%0)\n"
20071 - " prefetch 256(%0)\n"
20072 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20073 + " prefetch 64(%1)\n"
20074 + " prefetch 128(%1)\n"
20075 + " prefetch 192(%1)\n"
20076 + " prefetch 256(%1)\n"
20077 "2: \n"
20078 ".section .fixup, \"ax\"\n"
20079 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20080 + "3: \n"
20081 +
20082 +#ifdef CONFIG_PAX_KERNEXEC
20083 + " movl %%cr0, %0\n"
20084 + " movl %0, %%eax\n"
20085 + " andl $0xFFFEFFFF, %%eax\n"
20086 + " movl %%eax, %%cr0\n"
20087 +#endif
20088 +
20089 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20090 +
20091 +#ifdef CONFIG_PAX_KERNEXEC
20092 + " movl %0, %%cr0\n"
20093 +#endif
20094 +
20095 " jmp 2b\n"
20096 ".previous\n"
20097 _ASM_EXTABLE(1b, 3b)
20098 - : : "r" (from));
20099 + : "=&r" (cr0) : "r" (from) : "ax");
20100
20101 for ( ; i > 5; i--) {
20102 __asm__ __volatile__ (
20103 - "1: prefetch 320(%0)\n"
20104 - "2: movq (%0), %%mm0\n"
20105 - " movq 8(%0), %%mm1\n"
20106 - " movq 16(%0), %%mm2\n"
20107 - " movq 24(%0), %%mm3\n"
20108 - " movq %%mm0, (%1)\n"
20109 - " movq %%mm1, 8(%1)\n"
20110 - " movq %%mm2, 16(%1)\n"
20111 - " movq %%mm3, 24(%1)\n"
20112 - " movq 32(%0), %%mm0\n"
20113 - " movq 40(%0), %%mm1\n"
20114 - " movq 48(%0), %%mm2\n"
20115 - " movq 56(%0), %%mm3\n"
20116 - " movq %%mm0, 32(%1)\n"
20117 - " movq %%mm1, 40(%1)\n"
20118 - " movq %%mm2, 48(%1)\n"
20119 - " movq %%mm3, 56(%1)\n"
20120 + "1: prefetch 320(%1)\n"
20121 + "2: movq (%1), %%mm0\n"
20122 + " movq 8(%1), %%mm1\n"
20123 + " movq 16(%1), %%mm2\n"
20124 + " movq 24(%1), %%mm3\n"
20125 + " movq %%mm0, (%2)\n"
20126 + " movq %%mm1, 8(%2)\n"
20127 + " movq %%mm2, 16(%2)\n"
20128 + " movq %%mm3, 24(%2)\n"
20129 + " movq 32(%1), %%mm0\n"
20130 + " movq 40(%1), %%mm1\n"
20131 + " movq 48(%1), %%mm2\n"
20132 + " movq 56(%1), %%mm3\n"
20133 + " movq %%mm0, 32(%2)\n"
20134 + " movq %%mm1, 40(%2)\n"
20135 + " movq %%mm2, 48(%2)\n"
20136 + " movq %%mm3, 56(%2)\n"
20137 ".section .fixup, \"ax\"\n"
20138 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20139 + "3:\n"
20140 +
20141 +#ifdef CONFIG_PAX_KERNEXEC
20142 + " movl %%cr0, %0\n"
20143 + " movl %0, %%eax\n"
20144 + " andl $0xFFFEFFFF, %%eax\n"
20145 + " movl %%eax, %%cr0\n"
20146 +#endif
20147 +
20148 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20149 +
20150 +#ifdef CONFIG_PAX_KERNEXEC
20151 + " movl %0, %%cr0\n"
20152 +#endif
20153 +
20154 " jmp 2b\n"
20155 ".previous\n"
20156 _ASM_EXTABLE(1b, 3b)
20157 - : : "r" (from), "r" (to) : "memory");
20158 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20159
20160 from += 64;
20161 to += 64;
20162 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20163 static void fast_copy_page(void *to, void *from)
20164 {
20165 int i;
20166 + unsigned long cr0;
20167
20168 kernel_fpu_begin();
20169
20170 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20171 * but that is for later. -AV
20172 */
20173 __asm__ __volatile__(
20174 - "1: prefetch (%0)\n"
20175 - " prefetch 64(%0)\n"
20176 - " prefetch 128(%0)\n"
20177 - " prefetch 192(%0)\n"
20178 - " prefetch 256(%0)\n"
20179 + "1: prefetch (%1)\n"
20180 + " prefetch 64(%1)\n"
20181 + " prefetch 128(%1)\n"
20182 + " prefetch 192(%1)\n"
20183 + " prefetch 256(%1)\n"
20184 "2: \n"
20185 ".section .fixup, \"ax\"\n"
20186 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20187 + "3: \n"
20188 +
20189 +#ifdef CONFIG_PAX_KERNEXEC
20190 + " movl %%cr0, %0\n"
20191 + " movl %0, %%eax\n"
20192 + " andl $0xFFFEFFFF, %%eax\n"
20193 + " movl %%eax, %%cr0\n"
20194 +#endif
20195 +
20196 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20197 +
20198 +#ifdef CONFIG_PAX_KERNEXEC
20199 + " movl %0, %%cr0\n"
20200 +#endif
20201 +
20202 " jmp 2b\n"
20203 ".previous\n"
20204 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20205 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20206
20207 for (i = 0; i < (4096-320)/64; i++) {
20208 __asm__ __volatile__ (
20209 - "1: prefetch 320(%0)\n"
20210 - "2: movq (%0), %%mm0\n"
20211 - " movntq %%mm0, (%1)\n"
20212 - " movq 8(%0), %%mm1\n"
20213 - " movntq %%mm1, 8(%1)\n"
20214 - " movq 16(%0), %%mm2\n"
20215 - " movntq %%mm2, 16(%1)\n"
20216 - " movq 24(%0), %%mm3\n"
20217 - " movntq %%mm3, 24(%1)\n"
20218 - " movq 32(%0), %%mm4\n"
20219 - " movntq %%mm4, 32(%1)\n"
20220 - " movq 40(%0), %%mm5\n"
20221 - " movntq %%mm5, 40(%1)\n"
20222 - " movq 48(%0), %%mm6\n"
20223 - " movntq %%mm6, 48(%1)\n"
20224 - " movq 56(%0), %%mm7\n"
20225 - " movntq %%mm7, 56(%1)\n"
20226 + "1: prefetch 320(%1)\n"
20227 + "2: movq (%1), %%mm0\n"
20228 + " movntq %%mm0, (%2)\n"
20229 + " movq 8(%1), %%mm1\n"
20230 + " movntq %%mm1, 8(%2)\n"
20231 + " movq 16(%1), %%mm2\n"
20232 + " movntq %%mm2, 16(%2)\n"
20233 + " movq 24(%1), %%mm3\n"
20234 + " movntq %%mm3, 24(%2)\n"
20235 + " movq 32(%1), %%mm4\n"
20236 + " movntq %%mm4, 32(%2)\n"
20237 + " movq 40(%1), %%mm5\n"
20238 + " movntq %%mm5, 40(%2)\n"
20239 + " movq 48(%1), %%mm6\n"
20240 + " movntq %%mm6, 48(%2)\n"
20241 + " movq 56(%1), %%mm7\n"
20242 + " movntq %%mm7, 56(%2)\n"
20243 ".section .fixup, \"ax\"\n"
20244 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20245 + "3:\n"
20246 +
20247 +#ifdef CONFIG_PAX_KERNEXEC
20248 + " movl %%cr0, %0\n"
20249 + " movl %0, %%eax\n"
20250 + " andl $0xFFFEFFFF, %%eax\n"
20251 + " movl %%eax, %%cr0\n"
20252 +#endif
20253 +
20254 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20255 +
20256 +#ifdef CONFIG_PAX_KERNEXEC
20257 + " movl %0, %%cr0\n"
20258 +#endif
20259 +
20260 " jmp 2b\n"
20261 ".previous\n"
20262 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20263 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20264
20265 from += 64;
20266 to += 64;
20267 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20268 static void fast_copy_page(void *to, void *from)
20269 {
20270 int i;
20271 + unsigned long cr0;
20272
20273 kernel_fpu_begin();
20274
20275 __asm__ __volatile__ (
20276 - "1: prefetch (%0)\n"
20277 - " prefetch 64(%0)\n"
20278 - " prefetch 128(%0)\n"
20279 - " prefetch 192(%0)\n"
20280 - " prefetch 256(%0)\n"
20281 + "1: prefetch (%1)\n"
20282 + " prefetch 64(%1)\n"
20283 + " prefetch 128(%1)\n"
20284 + " prefetch 192(%1)\n"
20285 + " prefetch 256(%1)\n"
20286 "2: \n"
20287 ".section .fixup, \"ax\"\n"
20288 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20289 + "3: \n"
20290 +
20291 +#ifdef CONFIG_PAX_KERNEXEC
20292 + " movl %%cr0, %0\n"
20293 + " movl %0, %%eax\n"
20294 + " andl $0xFFFEFFFF, %%eax\n"
20295 + " movl %%eax, %%cr0\n"
20296 +#endif
20297 +
20298 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20299 +
20300 +#ifdef CONFIG_PAX_KERNEXEC
20301 + " movl %0, %%cr0\n"
20302 +#endif
20303 +
20304 " jmp 2b\n"
20305 ".previous\n"
20306 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20307 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20308
20309 for (i = 0; i < 4096/64; i++) {
20310 __asm__ __volatile__ (
20311 - "1: prefetch 320(%0)\n"
20312 - "2: movq (%0), %%mm0\n"
20313 - " movq 8(%0), %%mm1\n"
20314 - " movq 16(%0), %%mm2\n"
20315 - " movq 24(%0), %%mm3\n"
20316 - " movq %%mm0, (%1)\n"
20317 - " movq %%mm1, 8(%1)\n"
20318 - " movq %%mm2, 16(%1)\n"
20319 - " movq %%mm3, 24(%1)\n"
20320 - " movq 32(%0), %%mm0\n"
20321 - " movq 40(%0), %%mm1\n"
20322 - " movq 48(%0), %%mm2\n"
20323 - " movq 56(%0), %%mm3\n"
20324 - " movq %%mm0, 32(%1)\n"
20325 - " movq %%mm1, 40(%1)\n"
20326 - " movq %%mm2, 48(%1)\n"
20327 - " movq %%mm3, 56(%1)\n"
20328 + "1: prefetch 320(%1)\n"
20329 + "2: movq (%1), %%mm0\n"
20330 + " movq 8(%1), %%mm1\n"
20331 + " movq 16(%1), %%mm2\n"
20332 + " movq 24(%1), %%mm3\n"
20333 + " movq %%mm0, (%2)\n"
20334 + " movq %%mm1, 8(%2)\n"
20335 + " movq %%mm2, 16(%2)\n"
20336 + " movq %%mm3, 24(%2)\n"
20337 + " movq 32(%1), %%mm0\n"
20338 + " movq 40(%1), %%mm1\n"
20339 + " movq 48(%1), %%mm2\n"
20340 + " movq 56(%1), %%mm3\n"
20341 + " movq %%mm0, 32(%2)\n"
20342 + " movq %%mm1, 40(%2)\n"
20343 + " movq %%mm2, 48(%2)\n"
20344 + " movq %%mm3, 56(%2)\n"
20345 ".section .fixup, \"ax\"\n"
20346 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20347 + "3:\n"
20348 +
20349 +#ifdef CONFIG_PAX_KERNEXEC
20350 + " movl %%cr0, %0\n"
20351 + " movl %0, %%eax\n"
20352 + " andl $0xFFFEFFFF, %%eax\n"
20353 + " movl %%eax, %%cr0\n"
20354 +#endif
20355 +
20356 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20357 +
20358 +#ifdef CONFIG_PAX_KERNEXEC
20359 + " movl %0, %%cr0\n"
20360 +#endif
20361 +
20362 " jmp 2b\n"
20363 ".previous\n"
20364 _ASM_EXTABLE(1b, 3b)
20365 - : : "r" (from), "r" (to) : "memory");
20366 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20367
20368 from += 64;
20369 to += 64;
20370 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20371 index 69fa106..adda88b 100644
20372 --- a/arch/x86/lib/msr-reg.S
20373 +++ b/arch/x86/lib/msr-reg.S
20374 @@ -3,6 +3,7 @@
20375 #include <asm/dwarf2.h>
20376 #include <asm/asm.h>
20377 #include <asm/msr.h>
20378 +#include <asm/alternative-asm.h>
20379
20380 #ifdef CONFIG_X86_64
20381 /*
20382 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20383 CFI_STARTPROC
20384 pushq_cfi %rbx
20385 pushq_cfi %rbp
20386 - movq %rdi, %r10 /* Save pointer */
20387 + movq %rdi, %r9 /* Save pointer */
20388 xorl %r11d, %r11d /* Return value */
20389 movl (%rdi), %eax
20390 movl 4(%rdi), %ecx
20391 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20392 movl 28(%rdi), %edi
20393 CFI_REMEMBER_STATE
20394 1: \op
20395 -2: movl %eax, (%r10)
20396 +2: movl %eax, (%r9)
20397 movl %r11d, %eax /* Return value */
20398 - movl %ecx, 4(%r10)
20399 - movl %edx, 8(%r10)
20400 - movl %ebx, 12(%r10)
20401 - movl %ebp, 20(%r10)
20402 - movl %esi, 24(%r10)
20403 - movl %edi, 28(%r10)
20404 + movl %ecx, 4(%r9)
20405 + movl %edx, 8(%r9)
20406 + movl %ebx, 12(%r9)
20407 + movl %ebp, 20(%r9)
20408 + movl %esi, 24(%r9)
20409 + movl %edi, 28(%r9)
20410 popq_cfi %rbp
20411 popq_cfi %rbx
20412 + pax_force_retaddr
20413 ret
20414 3:
20415 CFI_RESTORE_STATE
20416 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20417 index 36b0d15..d381858 100644
20418 --- a/arch/x86/lib/putuser.S
20419 +++ b/arch/x86/lib/putuser.S
20420 @@ -15,7 +15,9 @@
20421 #include <asm/thread_info.h>
20422 #include <asm/errno.h>
20423 #include <asm/asm.h>
20424 -
20425 +#include <asm/segment.h>
20426 +#include <asm/pgtable.h>
20427 +#include <asm/alternative-asm.h>
20428
20429 /*
20430 * __put_user_X
20431 @@ -29,52 +31,119 @@
20432 * as they get called from within inline assembly.
20433 */
20434
20435 -#define ENTER CFI_STARTPROC ; \
20436 - GET_THREAD_INFO(%_ASM_BX)
20437 -#define EXIT ret ; \
20438 +#define ENTER CFI_STARTPROC
20439 +#define EXIT pax_force_retaddr; ret ; \
20440 CFI_ENDPROC
20441
20442 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20443 +#define _DEST %_ASM_CX,%_ASM_BX
20444 +#else
20445 +#define _DEST %_ASM_CX
20446 +#endif
20447 +
20448 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20449 +#define __copyuser_seg gs;
20450 +#else
20451 +#define __copyuser_seg
20452 +#endif
20453 +
20454 .text
20455 ENTRY(__put_user_1)
20456 ENTER
20457 +
20458 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20459 + GET_THREAD_INFO(%_ASM_BX)
20460 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20461 jae bad_put_user
20462 -1: movb %al,(%_ASM_CX)
20463 +
20464 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20465 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20466 + cmp %_ASM_BX,%_ASM_CX
20467 + jb 1234f
20468 + xor %ebx,%ebx
20469 +1234:
20470 +#endif
20471 +
20472 +#endif
20473 +
20474 +1: __copyuser_seg movb %al,(_DEST)
20475 xor %eax,%eax
20476 EXIT
20477 ENDPROC(__put_user_1)
20478
20479 ENTRY(__put_user_2)
20480 ENTER
20481 +
20482 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20483 + GET_THREAD_INFO(%_ASM_BX)
20484 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20485 sub $1,%_ASM_BX
20486 cmp %_ASM_BX,%_ASM_CX
20487 jae bad_put_user
20488 -2: movw %ax,(%_ASM_CX)
20489 +
20490 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20491 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20492 + cmp %_ASM_BX,%_ASM_CX
20493 + jb 1234f
20494 + xor %ebx,%ebx
20495 +1234:
20496 +#endif
20497 +
20498 +#endif
20499 +
20500 +2: __copyuser_seg movw %ax,(_DEST)
20501 xor %eax,%eax
20502 EXIT
20503 ENDPROC(__put_user_2)
20504
20505 ENTRY(__put_user_4)
20506 ENTER
20507 +
20508 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20509 + GET_THREAD_INFO(%_ASM_BX)
20510 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20511 sub $3,%_ASM_BX
20512 cmp %_ASM_BX,%_ASM_CX
20513 jae bad_put_user
20514 -3: movl %eax,(%_ASM_CX)
20515 +
20516 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20517 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20518 + cmp %_ASM_BX,%_ASM_CX
20519 + jb 1234f
20520 + xor %ebx,%ebx
20521 +1234:
20522 +#endif
20523 +
20524 +#endif
20525 +
20526 +3: __copyuser_seg movl %eax,(_DEST)
20527 xor %eax,%eax
20528 EXIT
20529 ENDPROC(__put_user_4)
20530
20531 ENTRY(__put_user_8)
20532 ENTER
20533 +
20534 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20535 + GET_THREAD_INFO(%_ASM_BX)
20536 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20537 sub $7,%_ASM_BX
20538 cmp %_ASM_BX,%_ASM_CX
20539 jae bad_put_user
20540 -4: mov %_ASM_AX,(%_ASM_CX)
20541 +
20542 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20543 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20544 + cmp %_ASM_BX,%_ASM_CX
20545 + jb 1234f
20546 + xor %ebx,%ebx
20547 +1234:
20548 +#endif
20549 +
20550 +#endif
20551 +
20552 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20553 #ifdef CONFIG_X86_32
20554 -5: movl %edx,4(%_ASM_CX)
20555 +5: __copyuser_seg movl %edx,4(_DEST)
20556 #endif
20557 xor %eax,%eax
20558 EXIT
20559 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20560 index 1cad221..de671ee 100644
20561 --- a/arch/x86/lib/rwlock.S
20562 +++ b/arch/x86/lib/rwlock.S
20563 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20564 FRAME
20565 0: LOCK_PREFIX
20566 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20567 +
20568 +#ifdef CONFIG_PAX_REFCOUNT
20569 + jno 1234f
20570 + LOCK_PREFIX
20571 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20572 + int $4
20573 +1234:
20574 + _ASM_EXTABLE(1234b, 1234b)
20575 +#endif
20576 +
20577 1: rep; nop
20578 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20579 jne 1b
20580 LOCK_PREFIX
20581 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20582 +
20583 +#ifdef CONFIG_PAX_REFCOUNT
20584 + jno 1234f
20585 + LOCK_PREFIX
20586 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20587 + int $4
20588 +1234:
20589 + _ASM_EXTABLE(1234b, 1234b)
20590 +#endif
20591 +
20592 jnz 0b
20593 ENDFRAME
20594 + pax_force_retaddr
20595 ret
20596 CFI_ENDPROC
20597 END(__write_lock_failed)
20598 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20599 FRAME
20600 0: LOCK_PREFIX
20601 READ_LOCK_SIZE(inc) (%__lock_ptr)
20602 +
20603 +#ifdef CONFIG_PAX_REFCOUNT
20604 + jno 1234f
20605 + LOCK_PREFIX
20606 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20607 + int $4
20608 +1234:
20609 + _ASM_EXTABLE(1234b, 1234b)
20610 +#endif
20611 +
20612 1: rep; nop
20613 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20614 js 1b
20615 LOCK_PREFIX
20616 READ_LOCK_SIZE(dec) (%__lock_ptr)
20617 +
20618 +#ifdef CONFIG_PAX_REFCOUNT
20619 + jno 1234f
20620 + LOCK_PREFIX
20621 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20622 + int $4
20623 +1234:
20624 + _ASM_EXTABLE(1234b, 1234b)
20625 +#endif
20626 +
20627 js 0b
20628 ENDFRAME
20629 + pax_force_retaddr
20630 ret
20631 CFI_ENDPROC
20632 END(__read_lock_failed)
20633 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20634 index 5dff5f0..cadebf4 100644
20635 --- a/arch/x86/lib/rwsem.S
20636 +++ b/arch/x86/lib/rwsem.S
20637 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20638 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20639 CFI_RESTORE __ASM_REG(dx)
20640 restore_common_regs
20641 + pax_force_retaddr
20642 ret
20643 CFI_ENDPROC
20644 ENDPROC(call_rwsem_down_read_failed)
20645 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20646 movq %rax,%rdi
20647 call rwsem_down_write_failed
20648 restore_common_regs
20649 + pax_force_retaddr
20650 ret
20651 CFI_ENDPROC
20652 ENDPROC(call_rwsem_down_write_failed)
20653 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20654 movq %rax,%rdi
20655 call rwsem_wake
20656 restore_common_regs
20657 -1: ret
20658 +1: pax_force_retaddr
20659 + ret
20660 CFI_ENDPROC
20661 ENDPROC(call_rwsem_wake)
20662
20663 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20664 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20665 CFI_RESTORE __ASM_REG(dx)
20666 restore_common_regs
20667 + pax_force_retaddr
20668 ret
20669 CFI_ENDPROC
20670 ENDPROC(call_rwsem_downgrade_wake)
20671 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20672 index a63efd6..ccecad8 100644
20673 --- a/arch/x86/lib/thunk_64.S
20674 +++ b/arch/x86/lib/thunk_64.S
20675 @@ -8,6 +8,7 @@
20676 #include <linux/linkage.h>
20677 #include <asm/dwarf2.h>
20678 #include <asm/calling.h>
20679 +#include <asm/alternative-asm.h>
20680
20681 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20682 .macro THUNK name, func, put_ret_addr_in_rdi=0
20683 @@ -41,5 +42,6 @@
20684 SAVE_ARGS
20685 restore:
20686 RESTORE_ARGS
20687 + pax_force_retaddr
20688 ret
20689 CFI_ENDPROC
20690 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20691 index e218d5d..35679b4 100644
20692 --- a/arch/x86/lib/usercopy_32.c
20693 +++ b/arch/x86/lib/usercopy_32.c
20694 @@ -43,7 +43,7 @@ do { \
20695 __asm__ __volatile__( \
20696 " testl %1,%1\n" \
20697 " jz 2f\n" \
20698 - "0: lodsb\n" \
20699 + "0: "__copyuser_seg"lodsb\n" \
20700 " stosb\n" \
20701 " testb %%al,%%al\n" \
20702 " jz 1f\n" \
20703 @@ -128,10 +128,12 @@ do { \
20704 int __d0; \
20705 might_fault(); \
20706 __asm__ __volatile__( \
20707 + __COPYUSER_SET_ES \
20708 "0: rep; stosl\n" \
20709 " movl %2,%0\n" \
20710 "1: rep; stosb\n" \
20711 "2:\n" \
20712 + __COPYUSER_RESTORE_ES \
20713 ".section .fixup,\"ax\"\n" \
20714 "3: lea 0(%2,%0,4),%0\n" \
20715 " jmp 2b\n" \
20716 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20717 might_fault();
20718
20719 __asm__ __volatile__(
20720 + __COPYUSER_SET_ES
20721 " testl %0, %0\n"
20722 " jz 3f\n"
20723 " andl %0,%%ecx\n"
20724 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20725 " subl %%ecx,%0\n"
20726 " addl %0,%%eax\n"
20727 "1:\n"
20728 + __COPYUSER_RESTORE_ES
20729 ".section .fixup,\"ax\"\n"
20730 "2: xorl %%eax,%%eax\n"
20731 " jmp 1b\n"
20732 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20733
20734 #ifdef CONFIG_X86_INTEL_USERCOPY
20735 static unsigned long
20736 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20737 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20738 {
20739 int d0, d1;
20740 __asm__ __volatile__(
20741 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20742 " .align 2,0x90\n"
20743 "3: movl 0(%4), %%eax\n"
20744 "4: movl 4(%4), %%edx\n"
20745 - "5: movl %%eax, 0(%3)\n"
20746 - "6: movl %%edx, 4(%3)\n"
20747 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20748 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20749 "7: movl 8(%4), %%eax\n"
20750 "8: movl 12(%4),%%edx\n"
20751 - "9: movl %%eax, 8(%3)\n"
20752 - "10: movl %%edx, 12(%3)\n"
20753 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20754 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20755 "11: movl 16(%4), %%eax\n"
20756 "12: movl 20(%4), %%edx\n"
20757 - "13: movl %%eax, 16(%3)\n"
20758 - "14: movl %%edx, 20(%3)\n"
20759 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20760 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20761 "15: movl 24(%4), %%eax\n"
20762 "16: movl 28(%4), %%edx\n"
20763 - "17: movl %%eax, 24(%3)\n"
20764 - "18: movl %%edx, 28(%3)\n"
20765 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20766 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20767 "19: movl 32(%4), %%eax\n"
20768 "20: movl 36(%4), %%edx\n"
20769 - "21: movl %%eax, 32(%3)\n"
20770 - "22: movl %%edx, 36(%3)\n"
20771 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20772 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20773 "23: movl 40(%4), %%eax\n"
20774 "24: movl 44(%4), %%edx\n"
20775 - "25: movl %%eax, 40(%3)\n"
20776 - "26: movl %%edx, 44(%3)\n"
20777 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20778 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20779 "27: movl 48(%4), %%eax\n"
20780 "28: movl 52(%4), %%edx\n"
20781 - "29: movl %%eax, 48(%3)\n"
20782 - "30: movl %%edx, 52(%3)\n"
20783 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20784 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20785 "31: movl 56(%4), %%eax\n"
20786 "32: movl 60(%4), %%edx\n"
20787 - "33: movl %%eax, 56(%3)\n"
20788 - "34: movl %%edx, 60(%3)\n"
20789 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20790 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20791 " addl $-64, %0\n"
20792 " addl $64, %4\n"
20793 " addl $64, %3\n"
20794 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20795 " shrl $2, %0\n"
20796 " andl $3, %%eax\n"
20797 " cld\n"
20798 + __COPYUSER_SET_ES
20799 "99: rep; movsl\n"
20800 "36: movl %%eax, %0\n"
20801 "37: rep; movsb\n"
20802 "100:\n"
20803 + __COPYUSER_RESTORE_ES
20804 + ".section .fixup,\"ax\"\n"
20805 + "101: lea 0(%%eax,%0,4),%0\n"
20806 + " jmp 100b\n"
20807 + ".previous\n"
20808 + ".section __ex_table,\"a\"\n"
20809 + " .align 4\n"
20810 + " .long 1b,100b\n"
20811 + " .long 2b,100b\n"
20812 + " .long 3b,100b\n"
20813 + " .long 4b,100b\n"
20814 + " .long 5b,100b\n"
20815 + " .long 6b,100b\n"
20816 + " .long 7b,100b\n"
20817 + " .long 8b,100b\n"
20818 + " .long 9b,100b\n"
20819 + " .long 10b,100b\n"
20820 + " .long 11b,100b\n"
20821 + " .long 12b,100b\n"
20822 + " .long 13b,100b\n"
20823 + " .long 14b,100b\n"
20824 + " .long 15b,100b\n"
20825 + " .long 16b,100b\n"
20826 + " .long 17b,100b\n"
20827 + " .long 18b,100b\n"
20828 + " .long 19b,100b\n"
20829 + " .long 20b,100b\n"
20830 + " .long 21b,100b\n"
20831 + " .long 22b,100b\n"
20832 + " .long 23b,100b\n"
20833 + " .long 24b,100b\n"
20834 + " .long 25b,100b\n"
20835 + " .long 26b,100b\n"
20836 + " .long 27b,100b\n"
20837 + " .long 28b,100b\n"
20838 + " .long 29b,100b\n"
20839 + " .long 30b,100b\n"
20840 + " .long 31b,100b\n"
20841 + " .long 32b,100b\n"
20842 + " .long 33b,100b\n"
20843 + " .long 34b,100b\n"
20844 + " .long 35b,100b\n"
20845 + " .long 36b,100b\n"
20846 + " .long 37b,100b\n"
20847 + " .long 99b,101b\n"
20848 + ".previous"
20849 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20850 + : "1"(to), "2"(from), "0"(size)
20851 + : "eax", "edx", "memory");
20852 + return size;
20853 +}
20854 +
20855 +static unsigned long
20856 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20857 +{
20858 + int d0, d1;
20859 + __asm__ __volatile__(
20860 + " .align 2,0x90\n"
20861 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20862 + " cmpl $67, %0\n"
20863 + " jbe 3f\n"
20864 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20865 + " .align 2,0x90\n"
20866 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20867 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20868 + "5: movl %%eax, 0(%3)\n"
20869 + "6: movl %%edx, 4(%3)\n"
20870 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20871 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20872 + "9: movl %%eax, 8(%3)\n"
20873 + "10: movl %%edx, 12(%3)\n"
20874 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20875 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20876 + "13: movl %%eax, 16(%3)\n"
20877 + "14: movl %%edx, 20(%3)\n"
20878 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20879 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20880 + "17: movl %%eax, 24(%3)\n"
20881 + "18: movl %%edx, 28(%3)\n"
20882 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20883 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20884 + "21: movl %%eax, 32(%3)\n"
20885 + "22: movl %%edx, 36(%3)\n"
20886 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20887 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20888 + "25: movl %%eax, 40(%3)\n"
20889 + "26: movl %%edx, 44(%3)\n"
20890 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20891 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20892 + "29: movl %%eax, 48(%3)\n"
20893 + "30: movl %%edx, 52(%3)\n"
20894 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20895 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20896 + "33: movl %%eax, 56(%3)\n"
20897 + "34: movl %%edx, 60(%3)\n"
20898 + " addl $-64, %0\n"
20899 + " addl $64, %4\n"
20900 + " addl $64, %3\n"
20901 + " cmpl $63, %0\n"
20902 + " ja 1b\n"
20903 + "35: movl %0, %%eax\n"
20904 + " shrl $2, %0\n"
20905 + " andl $3, %%eax\n"
20906 + " cld\n"
20907 + "99: rep; "__copyuser_seg" movsl\n"
20908 + "36: movl %%eax, %0\n"
20909 + "37: rep; "__copyuser_seg" movsb\n"
20910 + "100:\n"
20911 ".section .fixup,\"ax\"\n"
20912 "101: lea 0(%%eax,%0,4),%0\n"
20913 " jmp 100b\n"
20914 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20915 int d0, d1;
20916 __asm__ __volatile__(
20917 " .align 2,0x90\n"
20918 - "0: movl 32(%4), %%eax\n"
20919 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20920 " cmpl $67, %0\n"
20921 " jbe 2f\n"
20922 - "1: movl 64(%4), %%eax\n"
20923 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20924 " .align 2,0x90\n"
20925 - "2: movl 0(%4), %%eax\n"
20926 - "21: movl 4(%4), %%edx\n"
20927 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20928 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20929 " movl %%eax, 0(%3)\n"
20930 " movl %%edx, 4(%3)\n"
20931 - "3: movl 8(%4), %%eax\n"
20932 - "31: movl 12(%4),%%edx\n"
20933 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20934 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20935 " movl %%eax, 8(%3)\n"
20936 " movl %%edx, 12(%3)\n"
20937 - "4: movl 16(%4), %%eax\n"
20938 - "41: movl 20(%4), %%edx\n"
20939 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20940 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20941 " movl %%eax, 16(%3)\n"
20942 " movl %%edx, 20(%3)\n"
20943 - "10: movl 24(%4), %%eax\n"
20944 - "51: movl 28(%4), %%edx\n"
20945 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20946 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20947 " movl %%eax, 24(%3)\n"
20948 " movl %%edx, 28(%3)\n"
20949 - "11: movl 32(%4), %%eax\n"
20950 - "61: movl 36(%4), %%edx\n"
20951 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20952 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20953 " movl %%eax, 32(%3)\n"
20954 " movl %%edx, 36(%3)\n"
20955 - "12: movl 40(%4), %%eax\n"
20956 - "71: movl 44(%4), %%edx\n"
20957 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20958 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20959 " movl %%eax, 40(%3)\n"
20960 " movl %%edx, 44(%3)\n"
20961 - "13: movl 48(%4), %%eax\n"
20962 - "81: movl 52(%4), %%edx\n"
20963 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20964 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20965 " movl %%eax, 48(%3)\n"
20966 " movl %%edx, 52(%3)\n"
20967 - "14: movl 56(%4), %%eax\n"
20968 - "91: movl 60(%4), %%edx\n"
20969 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20970 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20971 " movl %%eax, 56(%3)\n"
20972 " movl %%edx, 60(%3)\n"
20973 " addl $-64, %0\n"
20974 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20975 " shrl $2, %0\n"
20976 " andl $3, %%eax\n"
20977 " cld\n"
20978 - "6: rep; movsl\n"
20979 + "6: rep; "__copyuser_seg" movsl\n"
20980 " movl %%eax,%0\n"
20981 - "7: rep; movsb\n"
20982 + "7: rep; "__copyuser_seg" movsb\n"
20983 "8:\n"
20984 ".section .fixup,\"ax\"\n"
20985 "9: lea 0(%%eax,%0,4),%0\n"
20986 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
20987
20988 __asm__ __volatile__(
20989 " .align 2,0x90\n"
20990 - "0: movl 32(%4), %%eax\n"
20991 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20992 " cmpl $67, %0\n"
20993 " jbe 2f\n"
20994 - "1: movl 64(%4), %%eax\n"
20995 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20996 " .align 2,0x90\n"
20997 - "2: movl 0(%4), %%eax\n"
20998 - "21: movl 4(%4), %%edx\n"
20999 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21000 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21001 " movnti %%eax, 0(%3)\n"
21002 " movnti %%edx, 4(%3)\n"
21003 - "3: movl 8(%4), %%eax\n"
21004 - "31: movl 12(%4),%%edx\n"
21005 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21006 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21007 " movnti %%eax, 8(%3)\n"
21008 " movnti %%edx, 12(%3)\n"
21009 - "4: movl 16(%4), %%eax\n"
21010 - "41: movl 20(%4), %%edx\n"
21011 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21012 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21013 " movnti %%eax, 16(%3)\n"
21014 " movnti %%edx, 20(%3)\n"
21015 - "10: movl 24(%4), %%eax\n"
21016 - "51: movl 28(%4), %%edx\n"
21017 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21018 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21019 " movnti %%eax, 24(%3)\n"
21020 " movnti %%edx, 28(%3)\n"
21021 - "11: movl 32(%4), %%eax\n"
21022 - "61: movl 36(%4), %%edx\n"
21023 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21024 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21025 " movnti %%eax, 32(%3)\n"
21026 " movnti %%edx, 36(%3)\n"
21027 - "12: movl 40(%4), %%eax\n"
21028 - "71: movl 44(%4), %%edx\n"
21029 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21030 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21031 " movnti %%eax, 40(%3)\n"
21032 " movnti %%edx, 44(%3)\n"
21033 - "13: movl 48(%4), %%eax\n"
21034 - "81: movl 52(%4), %%edx\n"
21035 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21036 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21037 " movnti %%eax, 48(%3)\n"
21038 " movnti %%edx, 52(%3)\n"
21039 - "14: movl 56(%4), %%eax\n"
21040 - "91: movl 60(%4), %%edx\n"
21041 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21042 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21043 " movnti %%eax, 56(%3)\n"
21044 " movnti %%edx, 60(%3)\n"
21045 " addl $-64, %0\n"
21046 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21047 " shrl $2, %0\n"
21048 " andl $3, %%eax\n"
21049 " cld\n"
21050 - "6: rep; movsl\n"
21051 + "6: rep; "__copyuser_seg" movsl\n"
21052 " movl %%eax,%0\n"
21053 - "7: rep; movsb\n"
21054 + "7: rep; "__copyuser_seg" movsb\n"
21055 "8:\n"
21056 ".section .fixup,\"ax\"\n"
21057 "9: lea 0(%%eax,%0,4),%0\n"
21058 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21059
21060 __asm__ __volatile__(
21061 " .align 2,0x90\n"
21062 - "0: movl 32(%4), %%eax\n"
21063 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21064 " cmpl $67, %0\n"
21065 " jbe 2f\n"
21066 - "1: movl 64(%4), %%eax\n"
21067 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21068 " .align 2,0x90\n"
21069 - "2: movl 0(%4), %%eax\n"
21070 - "21: movl 4(%4), %%edx\n"
21071 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21072 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21073 " movnti %%eax, 0(%3)\n"
21074 " movnti %%edx, 4(%3)\n"
21075 - "3: movl 8(%4), %%eax\n"
21076 - "31: movl 12(%4),%%edx\n"
21077 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21078 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21079 " movnti %%eax, 8(%3)\n"
21080 " movnti %%edx, 12(%3)\n"
21081 - "4: movl 16(%4), %%eax\n"
21082 - "41: movl 20(%4), %%edx\n"
21083 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21084 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21085 " movnti %%eax, 16(%3)\n"
21086 " movnti %%edx, 20(%3)\n"
21087 - "10: movl 24(%4), %%eax\n"
21088 - "51: movl 28(%4), %%edx\n"
21089 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21090 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21091 " movnti %%eax, 24(%3)\n"
21092 " movnti %%edx, 28(%3)\n"
21093 - "11: movl 32(%4), %%eax\n"
21094 - "61: movl 36(%4), %%edx\n"
21095 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21096 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21097 " movnti %%eax, 32(%3)\n"
21098 " movnti %%edx, 36(%3)\n"
21099 - "12: movl 40(%4), %%eax\n"
21100 - "71: movl 44(%4), %%edx\n"
21101 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21102 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21103 " movnti %%eax, 40(%3)\n"
21104 " movnti %%edx, 44(%3)\n"
21105 - "13: movl 48(%4), %%eax\n"
21106 - "81: movl 52(%4), %%edx\n"
21107 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21108 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21109 " movnti %%eax, 48(%3)\n"
21110 " movnti %%edx, 52(%3)\n"
21111 - "14: movl 56(%4), %%eax\n"
21112 - "91: movl 60(%4), %%edx\n"
21113 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21114 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21115 " movnti %%eax, 56(%3)\n"
21116 " movnti %%edx, 60(%3)\n"
21117 " addl $-64, %0\n"
21118 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21119 " shrl $2, %0\n"
21120 " andl $3, %%eax\n"
21121 " cld\n"
21122 - "6: rep; movsl\n"
21123 + "6: rep; "__copyuser_seg" movsl\n"
21124 " movl %%eax,%0\n"
21125 - "7: rep; movsb\n"
21126 + "7: rep; "__copyuser_seg" movsb\n"
21127 "8:\n"
21128 ".section .fixup,\"ax\"\n"
21129 "9: lea 0(%%eax,%0,4),%0\n"
21130 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21131 */
21132 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21133 unsigned long size);
21134 -unsigned long __copy_user_intel(void __user *to, const void *from,
21135 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21136 + unsigned long size);
21137 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21138 unsigned long size);
21139 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21140 const void __user *from, unsigned long size);
21141 #endif /* CONFIG_X86_INTEL_USERCOPY */
21142
21143 /* Generic arbitrary sized copy. */
21144 -#define __copy_user(to, from, size) \
21145 +#define __copy_user(to, from, size, prefix, set, restore) \
21146 do { \
21147 int __d0, __d1, __d2; \
21148 __asm__ __volatile__( \
21149 + set \
21150 " cmp $7,%0\n" \
21151 " jbe 1f\n" \
21152 " movl %1,%0\n" \
21153 " negl %0\n" \
21154 " andl $7,%0\n" \
21155 " subl %0,%3\n" \
21156 - "4: rep; movsb\n" \
21157 + "4: rep; "prefix"movsb\n" \
21158 " movl %3,%0\n" \
21159 " shrl $2,%0\n" \
21160 " andl $3,%3\n" \
21161 " .align 2,0x90\n" \
21162 - "0: rep; movsl\n" \
21163 + "0: rep; "prefix"movsl\n" \
21164 " movl %3,%0\n" \
21165 - "1: rep; movsb\n" \
21166 + "1: rep; "prefix"movsb\n" \
21167 "2:\n" \
21168 + restore \
21169 ".section .fixup,\"ax\"\n" \
21170 "5: addl %3,%0\n" \
21171 " jmp 2b\n" \
21172 @@ -682,14 +799,14 @@ do { \
21173 " negl %0\n" \
21174 " andl $7,%0\n" \
21175 " subl %0,%3\n" \
21176 - "4: rep; movsb\n" \
21177 + "4: rep; "__copyuser_seg"movsb\n" \
21178 " movl %3,%0\n" \
21179 " shrl $2,%0\n" \
21180 " andl $3,%3\n" \
21181 " .align 2,0x90\n" \
21182 - "0: rep; movsl\n" \
21183 + "0: rep; "__copyuser_seg"movsl\n" \
21184 " movl %3,%0\n" \
21185 - "1: rep; movsb\n" \
21186 + "1: rep; "__copyuser_seg"movsb\n" \
21187 "2:\n" \
21188 ".section .fixup,\"ax\"\n" \
21189 "5: addl %3,%0\n" \
21190 @@ -775,9 +892,9 @@ survive:
21191 }
21192 #endif
21193 if (movsl_is_ok(to, from, n))
21194 - __copy_user(to, from, n);
21195 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21196 else
21197 - n = __copy_user_intel(to, from, n);
21198 + n = __generic_copy_to_user_intel(to, from, n);
21199 return n;
21200 }
21201 EXPORT_SYMBOL(__copy_to_user_ll);
21202 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21203 unsigned long n)
21204 {
21205 if (movsl_is_ok(to, from, n))
21206 - __copy_user(to, from, n);
21207 + __copy_user(to, from, n, __copyuser_seg, "", "");
21208 else
21209 - n = __copy_user_intel((void __user *)to,
21210 - (const void *)from, n);
21211 + n = __generic_copy_from_user_intel(to, from, n);
21212 return n;
21213 }
21214 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21215 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21216 if (n > 64 && cpu_has_xmm2)
21217 n = __copy_user_intel_nocache(to, from, n);
21218 else
21219 - __copy_user(to, from, n);
21220 + __copy_user(to, from, n, __copyuser_seg, "", "");
21221 #else
21222 - __copy_user(to, from, n);
21223 + __copy_user(to, from, n, __copyuser_seg, "", "");
21224 #endif
21225 return n;
21226 }
21227 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21228
21229 -/**
21230 - * copy_to_user: - Copy a block of data into user space.
21231 - * @to: Destination address, in user space.
21232 - * @from: Source address, in kernel space.
21233 - * @n: Number of bytes to copy.
21234 - *
21235 - * Context: User context only. This function may sleep.
21236 - *
21237 - * Copy data from kernel space to user space.
21238 - *
21239 - * Returns number of bytes that could not be copied.
21240 - * On success, this will be zero.
21241 - */
21242 -unsigned long
21243 -copy_to_user(void __user *to, const void *from, unsigned long n)
21244 -{
21245 - if (access_ok(VERIFY_WRITE, to, n))
21246 - n = __copy_to_user(to, from, n);
21247 - return n;
21248 -}
21249 -EXPORT_SYMBOL(copy_to_user);
21250 -
21251 -/**
21252 - * copy_from_user: - Copy a block of data from user space.
21253 - * @to: Destination address, in kernel space.
21254 - * @from: Source address, in user space.
21255 - * @n: Number of bytes to copy.
21256 - *
21257 - * Context: User context only. This function may sleep.
21258 - *
21259 - * Copy data from user space to kernel space.
21260 - *
21261 - * Returns number of bytes that could not be copied.
21262 - * On success, this will be zero.
21263 - *
21264 - * If some data could not be copied, this function will pad the copied
21265 - * data to the requested size using zero bytes.
21266 - */
21267 -unsigned long
21268 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21269 -{
21270 - if (access_ok(VERIFY_READ, from, n))
21271 - n = __copy_from_user(to, from, n);
21272 - else
21273 - memset(to, 0, n);
21274 - return n;
21275 -}
21276 -EXPORT_SYMBOL(_copy_from_user);
21277 -
21278 void copy_from_user_overflow(void)
21279 {
21280 WARN(1, "Buffer overflow detected!\n");
21281 }
21282 EXPORT_SYMBOL(copy_from_user_overflow);
21283 +
21284 +void copy_to_user_overflow(void)
21285 +{
21286 + WARN(1, "Buffer overflow detected!\n");
21287 +}
21288 +EXPORT_SYMBOL(copy_to_user_overflow);
21289 +
21290 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21291 +void __set_fs(mm_segment_t x)
21292 +{
21293 + switch (x.seg) {
21294 + case 0:
21295 + loadsegment(gs, 0);
21296 + break;
21297 + case TASK_SIZE_MAX:
21298 + loadsegment(gs, __USER_DS);
21299 + break;
21300 + case -1UL:
21301 + loadsegment(gs, __KERNEL_DS);
21302 + break;
21303 + default:
21304 + BUG();
21305 + }
21306 + return;
21307 +}
21308 +EXPORT_SYMBOL(__set_fs);
21309 +
21310 +void set_fs(mm_segment_t x)
21311 +{
21312 + current_thread_info()->addr_limit = x;
21313 + __set_fs(x);
21314 +}
21315 +EXPORT_SYMBOL(set_fs);
21316 +#endif
21317 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21318 index b7c2849..8633ad8 100644
21319 --- a/arch/x86/lib/usercopy_64.c
21320 +++ b/arch/x86/lib/usercopy_64.c
21321 @@ -42,6 +42,12 @@ long
21322 __strncpy_from_user(char *dst, const char __user *src, long count)
21323 {
21324 long res;
21325 +
21326 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21327 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21328 + src += PAX_USER_SHADOW_BASE;
21329 +#endif
21330 +
21331 __do_strncpy_from_user(dst, src, count, res);
21332 return res;
21333 }
21334 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21335 {
21336 long __d0;
21337 might_fault();
21338 +
21339 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21340 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21341 + addr += PAX_USER_SHADOW_BASE;
21342 +#endif
21343 +
21344 /* no memory constraint because it doesn't change any memory gcc knows
21345 about */
21346 asm volatile(
21347 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21348 }
21349 EXPORT_SYMBOL(strlen_user);
21350
21351 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21352 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21353 {
21354 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21355 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21356 - }
21357 - return len;
21358 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21359 +
21360 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21361 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21362 + to += PAX_USER_SHADOW_BASE;
21363 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21364 + from += PAX_USER_SHADOW_BASE;
21365 +#endif
21366 +
21367 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21368 + }
21369 + return len;
21370 }
21371 EXPORT_SYMBOL(copy_in_user);
21372
21373 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21374 * it is not necessary to optimize tail handling.
21375 */
21376 unsigned long
21377 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21378 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21379 {
21380 char c;
21381 unsigned zero_len;
21382 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21383 index d0474ad..36e9257 100644
21384 --- a/arch/x86/mm/extable.c
21385 +++ b/arch/x86/mm/extable.c
21386 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21387 const struct exception_table_entry *fixup;
21388
21389 #ifdef CONFIG_PNPBIOS
21390 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21391 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21392 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21393 extern u32 pnp_bios_is_utter_crap;
21394 pnp_bios_is_utter_crap = 1;
21395 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21396 index 0d17c8c..4f4764f 100644
21397 --- a/arch/x86/mm/fault.c
21398 +++ b/arch/x86/mm/fault.c
21399 @@ -13,11 +13,18 @@
21400 #include <linux/perf_event.h> /* perf_sw_event */
21401 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21402 #include <linux/prefetch.h> /* prefetchw */
21403 +#include <linux/unistd.h>
21404 +#include <linux/compiler.h>
21405
21406 #include <asm/traps.h> /* dotraplinkage, ... */
21407 #include <asm/pgalloc.h> /* pgd_*(), ... */
21408 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21409 #include <asm/vsyscall.h>
21410 +#include <asm/tlbflush.h>
21411 +
21412 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21413 +#include <asm/stacktrace.h>
21414 +#endif
21415
21416 /*
21417 * Page fault error code bits:
21418 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21419 int ret = 0;
21420
21421 /* kprobe_running() needs smp_processor_id() */
21422 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21423 + if (kprobes_built_in() && !user_mode(regs)) {
21424 preempt_disable();
21425 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21426 ret = 1;
21427 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21428 return !instr_lo || (instr_lo>>1) == 1;
21429 case 0x00:
21430 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21431 - if (probe_kernel_address(instr, opcode))
21432 + if (user_mode(regs)) {
21433 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21434 + return 0;
21435 + } else if (probe_kernel_address(instr, opcode))
21436 return 0;
21437
21438 *prefetch = (instr_lo == 0xF) &&
21439 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21440 while (instr < max_instr) {
21441 unsigned char opcode;
21442
21443 - if (probe_kernel_address(instr, opcode))
21444 + if (user_mode(regs)) {
21445 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21446 + break;
21447 + } else if (probe_kernel_address(instr, opcode))
21448 break;
21449
21450 instr++;
21451 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21452 force_sig_info(si_signo, &info, tsk);
21453 }
21454
21455 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21456 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21457 +#endif
21458 +
21459 +#ifdef CONFIG_PAX_EMUTRAMP
21460 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21461 +#endif
21462 +
21463 +#ifdef CONFIG_PAX_PAGEEXEC
21464 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21465 +{
21466 + pgd_t *pgd;
21467 + pud_t *pud;
21468 + pmd_t *pmd;
21469 +
21470 + pgd = pgd_offset(mm, address);
21471 + if (!pgd_present(*pgd))
21472 + return NULL;
21473 + pud = pud_offset(pgd, address);
21474 + if (!pud_present(*pud))
21475 + return NULL;
21476 + pmd = pmd_offset(pud, address);
21477 + if (!pmd_present(*pmd))
21478 + return NULL;
21479 + return pmd;
21480 +}
21481 +#endif
21482 +
21483 DEFINE_SPINLOCK(pgd_lock);
21484 LIST_HEAD(pgd_list);
21485
21486 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21487 for (address = VMALLOC_START & PMD_MASK;
21488 address >= TASK_SIZE && address < FIXADDR_TOP;
21489 address += PMD_SIZE) {
21490 +
21491 +#ifdef CONFIG_PAX_PER_CPU_PGD
21492 + unsigned long cpu;
21493 +#else
21494 struct page *page;
21495 +#endif
21496
21497 spin_lock(&pgd_lock);
21498 +
21499 +#ifdef CONFIG_PAX_PER_CPU_PGD
21500 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21501 + pgd_t *pgd = get_cpu_pgd(cpu);
21502 + pmd_t *ret;
21503 +#else
21504 list_for_each_entry(page, &pgd_list, lru) {
21505 + pgd_t *pgd = page_address(page);
21506 spinlock_t *pgt_lock;
21507 pmd_t *ret;
21508
21509 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21510 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21511
21512 spin_lock(pgt_lock);
21513 - ret = vmalloc_sync_one(page_address(page), address);
21514 +#endif
21515 +
21516 + ret = vmalloc_sync_one(pgd, address);
21517 +
21518 +#ifndef CONFIG_PAX_PER_CPU_PGD
21519 spin_unlock(pgt_lock);
21520 +#endif
21521
21522 if (!ret)
21523 break;
21524 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21525 * an interrupt in the middle of a task switch..
21526 */
21527 pgd_paddr = read_cr3();
21528 +
21529 +#ifdef CONFIG_PAX_PER_CPU_PGD
21530 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21531 +#endif
21532 +
21533 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21534 if (!pmd_k)
21535 return -1;
21536 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21537 * happen within a race in page table update. In the later
21538 * case just flush:
21539 */
21540 +
21541 +#ifdef CONFIG_PAX_PER_CPU_PGD
21542 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21543 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21544 +#else
21545 pgd = pgd_offset(current->active_mm, address);
21546 +#endif
21547 +
21548 pgd_ref = pgd_offset_k(address);
21549 if (pgd_none(*pgd_ref))
21550 return -1;
21551 @@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21552 static int is_errata100(struct pt_regs *regs, unsigned long address)
21553 {
21554 #ifdef CONFIG_X86_64
21555 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21556 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21557 return 1;
21558 #endif
21559 return 0;
21560 @@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21561 }
21562
21563 static const char nx_warning[] = KERN_CRIT
21564 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21565 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21566
21567 static void
21568 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21569 @@ -570,15 +640,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21570 if (!oops_may_print())
21571 return;
21572
21573 - if (error_code & PF_INSTR) {
21574 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21575 unsigned int level;
21576
21577 pte_t *pte = lookup_address(address, &level);
21578
21579 if (pte && pte_present(*pte) && !pte_exec(*pte))
21580 - printk(nx_warning, current_uid());
21581 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21582 }
21583
21584 +#ifdef CONFIG_PAX_KERNEXEC
21585 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21586 + if (current->signal->curr_ip)
21587 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21588 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21589 + else
21590 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21591 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21592 + }
21593 +#endif
21594 +
21595 printk(KERN_ALERT "BUG: unable to handle kernel ");
21596 if (address < PAGE_SIZE)
21597 printk(KERN_CONT "NULL pointer dereference");
21598 @@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21599 }
21600 #endif
21601
21602 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21603 + if (pax_is_fetch_fault(regs, error_code, address)) {
21604 +
21605 +#ifdef CONFIG_PAX_EMUTRAMP
21606 + switch (pax_handle_fetch_fault(regs)) {
21607 + case 2:
21608 + return;
21609 + }
21610 +#endif
21611 +
21612 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21613 + do_group_exit(SIGKILL);
21614 + }
21615 +#endif
21616 +
21617 if (unlikely(show_unhandled_signals))
21618 show_signal_msg(regs, error_code, address, tsk);
21619
21620 @@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21621 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21622 printk(KERN_ERR
21623 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21624 - tsk->comm, tsk->pid, address);
21625 + tsk->comm, task_pid_nr(tsk), address);
21626 code = BUS_MCEERR_AR;
21627 }
21628 #endif
21629 @@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21630 return 1;
21631 }
21632
21633 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21634 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21635 +{
21636 + pte_t *pte;
21637 + pmd_t *pmd;
21638 + spinlock_t *ptl;
21639 + unsigned char pte_mask;
21640 +
21641 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21642 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21643 + return 0;
21644 +
21645 + /* PaX: it's our fault, let's handle it if we can */
21646 +
21647 + /* PaX: take a look at read faults before acquiring any locks */
21648 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21649 + /* instruction fetch attempt from a protected page in user mode */
21650 + up_read(&mm->mmap_sem);
21651 +
21652 +#ifdef CONFIG_PAX_EMUTRAMP
21653 + switch (pax_handle_fetch_fault(regs)) {
21654 + case 2:
21655 + return 1;
21656 + }
21657 +#endif
21658 +
21659 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21660 + do_group_exit(SIGKILL);
21661 + }
21662 +
21663 + pmd = pax_get_pmd(mm, address);
21664 + if (unlikely(!pmd))
21665 + return 0;
21666 +
21667 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21668 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21669 + pte_unmap_unlock(pte, ptl);
21670 + return 0;
21671 + }
21672 +
21673 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21674 + /* write attempt to a protected page in user mode */
21675 + pte_unmap_unlock(pte, ptl);
21676 + return 0;
21677 + }
21678 +
21679 +#ifdef CONFIG_SMP
21680 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21681 +#else
21682 + if (likely(address > get_limit(regs->cs)))
21683 +#endif
21684 + {
21685 + set_pte(pte, pte_mkread(*pte));
21686 + __flush_tlb_one(address);
21687 + pte_unmap_unlock(pte, ptl);
21688 + up_read(&mm->mmap_sem);
21689 + return 1;
21690 + }
21691 +
21692 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21693 +
21694 + /*
21695 + * PaX: fill DTLB with user rights and retry
21696 + */
21697 + __asm__ __volatile__ (
21698 + "orb %2,(%1)\n"
21699 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21700 +/*
21701 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21702 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21703 + * page fault when examined during a TLB load attempt. this is true not only
21704 + * for PTEs holding a non-present entry but also present entries that will
21705 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21706 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21707 + * for our target pages since their PTEs are simply not in the TLBs at all.
21708 +
21709 + * the best thing in omitting it is that we gain around 15-20% speed in the
21710 + * fast path of the page fault handler and can get rid of tracing since we
21711 + * can no longer flush unintended entries.
21712 + */
21713 + "invlpg (%0)\n"
21714 +#endif
21715 + __copyuser_seg"testb $0,(%0)\n"
21716 + "xorb %3,(%1)\n"
21717 + :
21718 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21719 + : "memory", "cc");
21720 + pte_unmap_unlock(pte, ptl);
21721 + up_read(&mm->mmap_sem);
21722 + return 1;
21723 +}
21724 +#endif
21725 +
21726 /*
21727 * Handle a spurious fault caused by a stale TLB entry.
21728 *
21729 @@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
21730 static inline int
21731 access_error(unsigned long error_code, struct vm_area_struct *vma)
21732 {
21733 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21734 + return 1;
21735 +
21736 if (error_code & PF_WRITE) {
21737 /* write, present and write, not present: */
21738 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21739 @@ -989,18 +1181,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21740 {
21741 struct vm_area_struct *vma;
21742 struct task_struct *tsk;
21743 - unsigned long address;
21744 struct mm_struct *mm;
21745 int fault;
21746 int write = error_code & PF_WRITE;
21747 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21748 (write ? FAULT_FLAG_WRITE : 0);
21749
21750 - tsk = current;
21751 - mm = tsk->mm;
21752 -
21753 /* Get the faulting address: */
21754 - address = read_cr2();
21755 + unsigned long address = read_cr2();
21756 +
21757 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21758 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21759 + if (!search_exception_tables(regs->ip)) {
21760 + bad_area_nosemaphore(regs, error_code, address);
21761 + return;
21762 + }
21763 + if (address < PAX_USER_SHADOW_BASE) {
21764 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21765 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21766 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21767 + } else
21768 + address -= PAX_USER_SHADOW_BASE;
21769 + }
21770 +#endif
21771 +
21772 + tsk = current;
21773 + mm = tsk->mm;
21774
21775 /*
21776 * Detect and handle instructions that would cause a page fault for
21777 @@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21778 * User-mode registers count as a user access even for any
21779 * potential system fault or CPU buglet:
21780 */
21781 - if (user_mode_vm(regs)) {
21782 + if (user_mode(regs)) {
21783 local_irq_enable();
21784 error_code |= PF_USER;
21785 } else {
21786 @@ -1116,6 +1322,11 @@ retry:
21787 might_sleep();
21788 }
21789
21790 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21791 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21792 + return;
21793 +#endif
21794 +
21795 vma = find_vma(mm, address);
21796 if (unlikely(!vma)) {
21797 bad_area(regs, error_code, address);
21798 @@ -1127,18 +1338,24 @@ retry:
21799 bad_area(regs, error_code, address);
21800 return;
21801 }
21802 - if (error_code & PF_USER) {
21803 - /*
21804 - * Accessing the stack below %sp is always a bug.
21805 - * The large cushion allows instructions like enter
21806 - * and pusha to work. ("enter $65535, $31" pushes
21807 - * 32 pointers and then decrements %sp by 65535.)
21808 - */
21809 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21810 - bad_area(regs, error_code, address);
21811 - return;
21812 - }
21813 + /*
21814 + * Accessing the stack below %sp is always a bug.
21815 + * The large cushion allows instructions like enter
21816 + * and pusha to work. ("enter $65535, $31" pushes
21817 + * 32 pointers and then decrements %sp by 65535.)
21818 + */
21819 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21820 + bad_area(regs, error_code, address);
21821 + return;
21822 }
21823 +
21824 +#ifdef CONFIG_PAX_SEGMEXEC
21825 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21826 + bad_area(regs, error_code, address);
21827 + return;
21828 + }
21829 +#endif
21830 +
21831 if (unlikely(expand_stack(vma, address))) {
21832 bad_area(regs, error_code, address);
21833 return;
21834 @@ -1193,3 +1410,240 @@ good_area:
21835
21836 up_read(&mm->mmap_sem);
21837 }
21838 +
21839 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21840 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21841 +{
21842 + struct mm_struct *mm = current->mm;
21843 + unsigned long ip = regs->ip;
21844 +
21845 + if (v8086_mode(regs))
21846 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21847 +
21848 +#ifdef CONFIG_PAX_PAGEEXEC
21849 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21850 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21851 + return true;
21852 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21853 + return true;
21854 + return false;
21855 + }
21856 +#endif
21857 +
21858 +#ifdef CONFIG_PAX_SEGMEXEC
21859 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21860 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21861 + return true;
21862 + return false;
21863 + }
21864 +#endif
21865 +
21866 + return false;
21867 +}
21868 +#endif
21869 +
21870 +#ifdef CONFIG_PAX_EMUTRAMP
21871 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21872 +{
21873 + int err;
21874 +
21875 + do { /* PaX: gcc trampoline emulation #1 */
21876 + unsigned char mov1, mov2;
21877 + unsigned short jmp;
21878 + unsigned int addr1, addr2;
21879 +
21880 +#ifdef CONFIG_X86_64
21881 + if ((regs->ip + 11) >> 32)
21882 + break;
21883 +#endif
21884 +
21885 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21886 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21887 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21888 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21889 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21890 +
21891 + if (err)
21892 + break;
21893 +
21894 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21895 + regs->cx = addr1;
21896 + regs->ax = addr2;
21897 + regs->ip = addr2;
21898 + return 2;
21899 + }
21900 + } while (0);
21901 +
21902 + do { /* PaX: gcc trampoline emulation #2 */
21903 + unsigned char mov, jmp;
21904 + unsigned int addr1, addr2;
21905 +
21906 +#ifdef CONFIG_X86_64
21907 + if ((regs->ip + 9) >> 32)
21908 + break;
21909 +#endif
21910 +
21911 + err = get_user(mov, (unsigned char __user *)regs->ip);
21912 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21913 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21914 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21915 +
21916 + if (err)
21917 + break;
21918 +
21919 + if (mov == 0xB9 && jmp == 0xE9) {
21920 + regs->cx = addr1;
21921 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21922 + return 2;
21923 + }
21924 + } while (0);
21925 +
21926 + return 1; /* PaX in action */
21927 +}
21928 +
21929 +#ifdef CONFIG_X86_64
21930 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21931 +{
21932 + int err;
21933 +
21934 + do { /* PaX: gcc trampoline emulation #1 */
21935 + unsigned short mov1, mov2, jmp1;
21936 + unsigned char jmp2;
21937 + unsigned int addr1;
21938 + unsigned long addr2;
21939 +
21940 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21941 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21942 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21943 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21944 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21945 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21946 +
21947 + if (err)
21948 + break;
21949 +
21950 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21951 + regs->r11 = addr1;
21952 + regs->r10 = addr2;
21953 + regs->ip = addr1;
21954 + return 2;
21955 + }
21956 + } while (0);
21957 +
21958 + do { /* PaX: gcc trampoline emulation #2 */
21959 + unsigned short mov1, mov2, jmp1;
21960 + unsigned char jmp2;
21961 + unsigned long addr1, addr2;
21962 +
21963 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21964 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21965 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21966 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21967 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21968 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21969 +
21970 + if (err)
21971 + break;
21972 +
21973 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21974 + regs->r11 = addr1;
21975 + regs->r10 = addr2;
21976 + regs->ip = addr1;
21977 + return 2;
21978 + }
21979 + } while (0);
21980 +
21981 + return 1; /* PaX in action */
21982 +}
21983 +#endif
21984 +
21985 +/*
21986 + * PaX: decide what to do with offenders (regs->ip = fault address)
21987 + *
21988 + * returns 1 when task should be killed
21989 + * 2 when gcc trampoline was detected
21990 + */
21991 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21992 +{
21993 + if (v8086_mode(regs))
21994 + return 1;
21995 +
21996 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21997 + return 1;
21998 +
21999 +#ifdef CONFIG_X86_32
22000 + return pax_handle_fetch_fault_32(regs);
22001 +#else
22002 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
22003 + return pax_handle_fetch_fault_32(regs);
22004 + else
22005 + return pax_handle_fetch_fault_64(regs);
22006 +#endif
22007 +}
22008 +#endif
22009 +
22010 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22011 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
22012 +{
22013 + long i;
22014 +
22015 + printk(KERN_ERR "PAX: bytes at PC: ");
22016 + for (i = 0; i < 20; i++) {
22017 + unsigned char c;
22018 + if (get_user(c, (unsigned char __force_user *)pc+i))
22019 + printk(KERN_CONT "?? ");
22020 + else
22021 + printk(KERN_CONT "%02x ", c);
22022 + }
22023 + printk("\n");
22024 +
22025 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22026 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22027 + unsigned long c;
22028 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22029 +#ifdef CONFIG_X86_32
22030 + printk(KERN_CONT "???????? ");
22031 +#else
22032 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22033 + printk(KERN_CONT "???????? ???????? ");
22034 + else
22035 + printk(KERN_CONT "???????????????? ");
22036 +#endif
22037 + } else {
22038 +#ifdef CONFIG_X86_64
22039 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22040 + printk(KERN_CONT "%08x ", (unsigned int)c);
22041 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22042 + } else
22043 +#endif
22044 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22045 + }
22046 + }
22047 + printk("\n");
22048 +}
22049 +#endif
22050 +
22051 +/**
22052 + * probe_kernel_write(): safely attempt to write to a location
22053 + * @dst: address to write to
22054 + * @src: pointer to the data that shall be written
22055 + * @size: size of the data chunk
22056 + *
22057 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22058 + * happens, handle that and return -EFAULT.
22059 + */
22060 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22061 +{
22062 + long ret;
22063 + mm_segment_t old_fs = get_fs();
22064 +
22065 + set_fs(KERNEL_DS);
22066 + pagefault_disable();
22067 + pax_open_kernel();
22068 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22069 + pax_close_kernel();
22070 + pagefault_enable();
22071 + set_fs(old_fs);
22072 +
22073 + return ret ? -EFAULT : 0;
22074 +}
22075 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22076 index dd74e46..7d26398 100644
22077 --- a/arch/x86/mm/gup.c
22078 +++ b/arch/x86/mm/gup.c
22079 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22080 addr = start;
22081 len = (unsigned long) nr_pages << PAGE_SHIFT;
22082 end = start + len;
22083 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22084 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22085 (void __user *)start, len)))
22086 return 0;
22087
22088 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22089 index f4f29b1..5cac4fb 100644
22090 --- a/arch/x86/mm/highmem_32.c
22091 +++ b/arch/x86/mm/highmem_32.c
22092 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22093 idx = type + KM_TYPE_NR*smp_processor_id();
22094 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22095 BUG_ON(!pte_none(*(kmap_pte-idx)));
22096 +
22097 + pax_open_kernel();
22098 set_pte(kmap_pte-idx, mk_pte(page, prot));
22099 + pax_close_kernel();
22100 +
22101 arch_flush_lazy_mmu_mode();
22102
22103 return (void *)vaddr;
22104 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22105 index f581a18..29efd37 100644
22106 --- a/arch/x86/mm/hugetlbpage.c
22107 +++ b/arch/x86/mm/hugetlbpage.c
22108 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22109 struct hstate *h = hstate_file(file);
22110 struct mm_struct *mm = current->mm;
22111 struct vm_area_struct *vma;
22112 - unsigned long start_addr;
22113 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22114 +
22115 +#ifdef CONFIG_PAX_SEGMEXEC
22116 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22117 + pax_task_size = SEGMEXEC_TASK_SIZE;
22118 +#endif
22119 +
22120 + pax_task_size -= PAGE_SIZE;
22121
22122 if (len > mm->cached_hole_size) {
22123 - start_addr = mm->free_area_cache;
22124 + start_addr = mm->free_area_cache;
22125 } else {
22126 - start_addr = TASK_UNMAPPED_BASE;
22127 - mm->cached_hole_size = 0;
22128 + start_addr = mm->mmap_base;
22129 + mm->cached_hole_size = 0;
22130 }
22131
22132 full_search:
22133 @@ -280,26 +287,27 @@ full_search:
22134
22135 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22136 /* At this point: (!vma || addr < vma->vm_end). */
22137 - if (TASK_SIZE - len < addr) {
22138 + if (pax_task_size - len < addr) {
22139 /*
22140 * Start a new search - just in case we missed
22141 * some holes.
22142 */
22143 - if (start_addr != TASK_UNMAPPED_BASE) {
22144 - start_addr = TASK_UNMAPPED_BASE;
22145 + if (start_addr != mm->mmap_base) {
22146 + start_addr = mm->mmap_base;
22147 mm->cached_hole_size = 0;
22148 goto full_search;
22149 }
22150 return -ENOMEM;
22151 }
22152 - if (!vma || addr + len <= vma->vm_start) {
22153 - mm->free_area_cache = addr + len;
22154 - return addr;
22155 - }
22156 + if (check_heap_stack_gap(vma, addr, len))
22157 + break;
22158 if (addr + mm->cached_hole_size < vma->vm_start)
22159 mm->cached_hole_size = vma->vm_start - addr;
22160 addr = ALIGN(vma->vm_end, huge_page_size(h));
22161 }
22162 +
22163 + mm->free_area_cache = addr + len;
22164 + return addr;
22165 }
22166
22167 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22168 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22169 {
22170 struct hstate *h = hstate_file(file);
22171 struct mm_struct *mm = current->mm;
22172 - struct vm_area_struct *vma, *prev_vma;
22173 - unsigned long base = mm->mmap_base, addr = addr0;
22174 + struct vm_area_struct *vma;
22175 + unsigned long base = mm->mmap_base, addr;
22176 unsigned long largest_hole = mm->cached_hole_size;
22177 - int first_time = 1;
22178
22179 /* don't allow allocations above current base */
22180 if (mm->free_area_cache > base)
22181 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22182 largest_hole = 0;
22183 mm->free_area_cache = base;
22184 }
22185 -try_again:
22186 +
22187 /* make sure it can fit in the remaining address space */
22188 if (mm->free_area_cache < len)
22189 goto fail;
22190
22191 /* either no address requested or can't fit in requested address hole */
22192 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22193 + addr = (mm->free_area_cache - len);
22194 do {
22195 + addr &= huge_page_mask(h);
22196 + vma = find_vma(mm, addr);
22197 /*
22198 * Lookup failure means no vma is above this address,
22199 * i.e. return with success:
22200 - */
22201 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22202 - return addr;
22203 -
22204 - /*
22205 * new region fits between prev_vma->vm_end and
22206 * vma->vm_start, use it:
22207 */
22208 - if (addr + len <= vma->vm_start &&
22209 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22210 + if (check_heap_stack_gap(vma, addr, len)) {
22211 /* remember the address as a hint for next time */
22212 - mm->cached_hole_size = largest_hole;
22213 - return (mm->free_area_cache = addr);
22214 - } else {
22215 - /* pull free_area_cache down to the first hole */
22216 - if (mm->free_area_cache == vma->vm_end) {
22217 - mm->free_area_cache = vma->vm_start;
22218 - mm->cached_hole_size = largest_hole;
22219 - }
22220 + mm->cached_hole_size = largest_hole;
22221 + return (mm->free_area_cache = addr);
22222 + }
22223 + /* pull free_area_cache down to the first hole */
22224 + if (mm->free_area_cache == vma->vm_end) {
22225 + mm->free_area_cache = vma->vm_start;
22226 + mm->cached_hole_size = largest_hole;
22227 }
22228
22229 /* remember the largest hole we saw so far */
22230 if (addr + largest_hole < vma->vm_start)
22231 - largest_hole = vma->vm_start - addr;
22232 + largest_hole = vma->vm_start - addr;
22233
22234 /* try just below the current vma->vm_start */
22235 - addr = (vma->vm_start - len) & huge_page_mask(h);
22236 - } while (len <= vma->vm_start);
22237 + addr = skip_heap_stack_gap(vma, len);
22238 + } while (!IS_ERR_VALUE(addr));
22239
22240 fail:
22241 /*
22242 - * if hint left us with no space for the requested
22243 - * mapping then try again:
22244 - */
22245 - if (first_time) {
22246 - mm->free_area_cache = base;
22247 - largest_hole = 0;
22248 - first_time = 0;
22249 - goto try_again;
22250 - }
22251 - /*
22252 * A failed mmap() very likely causes application failure,
22253 * so fall back to the bottom-up function here. This scenario
22254 * can happen with large stack limits and large mmap()
22255 * allocations.
22256 */
22257 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22258 +
22259 +#ifdef CONFIG_PAX_SEGMEXEC
22260 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22261 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22262 + else
22263 +#endif
22264 +
22265 + mm->mmap_base = TASK_UNMAPPED_BASE;
22266 +
22267 +#ifdef CONFIG_PAX_RANDMMAP
22268 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22269 + mm->mmap_base += mm->delta_mmap;
22270 +#endif
22271 +
22272 + mm->free_area_cache = mm->mmap_base;
22273 mm->cached_hole_size = ~0UL;
22274 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22275 len, pgoff, flags);
22276 @@ -386,6 +392,7 @@ fail:
22277 /*
22278 * Restore the topdown base:
22279 */
22280 + mm->mmap_base = base;
22281 mm->free_area_cache = base;
22282 mm->cached_hole_size = ~0UL;
22283
22284 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22285 struct hstate *h = hstate_file(file);
22286 struct mm_struct *mm = current->mm;
22287 struct vm_area_struct *vma;
22288 + unsigned long pax_task_size = TASK_SIZE;
22289
22290 if (len & ~huge_page_mask(h))
22291 return -EINVAL;
22292 - if (len > TASK_SIZE)
22293 +
22294 +#ifdef CONFIG_PAX_SEGMEXEC
22295 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22296 + pax_task_size = SEGMEXEC_TASK_SIZE;
22297 +#endif
22298 +
22299 + pax_task_size -= PAGE_SIZE;
22300 +
22301 + if (len > pax_task_size)
22302 return -ENOMEM;
22303
22304 if (flags & MAP_FIXED) {
22305 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22306 if (addr) {
22307 addr = ALIGN(addr, huge_page_size(h));
22308 vma = find_vma(mm, addr);
22309 - if (TASK_SIZE - len >= addr &&
22310 - (!vma || addr + len <= vma->vm_start))
22311 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22312 return addr;
22313 }
22314 if (mm->get_unmapped_area == arch_get_unmapped_area)
22315 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22316 index 87488b9..7129f32 100644
22317 --- a/arch/x86/mm/init.c
22318 +++ b/arch/x86/mm/init.c
22319 @@ -31,7 +31,7 @@ int direct_gbpages
22320 static void __init find_early_table_space(unsigned long end, int use_pse,
22321 int use_gbpages)
22322 {
22323 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22324 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22325 phys_addr_t base;
22326
22327 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22328 @@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22329 */
22330 int devmem_is_allowed(unsigned long pagenr)
22331 {
22332 +#ifdef CONFIG_GRKERNSEC_KMEM
22333 + /* allow BDA */
22334 + if (!pagenr)
22335 + return 1;
22336 + /* allow EBDA */
22337 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22338 + return 1;
22339 +#else
22340 + if (!pagenr)
22341 + return 1;
22342 +#ifdef CONFIG_VM86
22343 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22344 + return 1;
22345 +#endif
22346 +#endif
22347 +
22348 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22349 + return 1;
22350 +#ifdef CONFIG_GRKERNSEC_KMEM
22351 + /* throw out everything else below 1MB */
22352 if (pagenr <= 256)
22353 - return 1;
22354 + return 0;
22355 +#endif
22356 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22357 return 0;
22358 if (!page_is_ram(pagenr))
22359 @@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22360
22361 void free_initmem(void)
22362 {
22363 +
22364 +#ifdef CONFIG_PAX_KERNEXEC
22365 +#ifdef CONFIG_X86_32
22366 + /* PaX: limit KERNEL_CS to actual size */
22367 + unsigned long addr, limit;
22368 + struct desc_struct d;
22369 + int cpu;
22370 +
22371 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22372 + limit = (limit - 1UL) >> PAGE_SHIFT;
22373 +
22374 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22375 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22376 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22377 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22378 + }
22379 +
22380 + /* PaX: make KERNEL_CS read-only */
22381 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22382 + if (!paravirt_enabled())
22383 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22384 +/*
22385 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22386 + pgd = pgd_offset_k(addr);
22387 + pud = pud_offset(pgd, addr);
22388 + pmd = pmd_offset(pud, addr);
22389 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22390 + }
22391 +*/
22392 +#ifdef CONFIG_X86_PAE
22393 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22394 +/*
22395 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22396 + pgd = pgd_offset_k(addr);
22397 + pud = pud_offset(pgd, addr);
22398 + pmd = pmd_offset(pud, addr);
22399 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22400 + }
22401 +*/
22402 +#endif
22403 +
22404 +#ifdef CONFIG_MODULES
22405 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22406 +#endif
22407 +
22408 +#else
22409 + pgd_t *pgd;
22410 + pud_t *pud;
22411 + pmd_t *pmd;
22412 + unsigned long addr, end;
22413 +
22414 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22415 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22416 + pgd = pgd_offset_k(addr);
22417 + pud = pud_offset(pgd, addr);
22418 + pmd = pmd_offset(pud, addr);
22419 + if (!pmd_present(*pmd))
22420 + continue;
22421 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22422 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22423 + else
22424 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22425 + }
22426 +
22427 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22428 + end = addr + KERNEL_IMAGE_SIZE;
22429 + for (; addr < end; addr += PMD_SIZE) {
22430 + pgd = pgd_offset_k(addr);
22431 + pud = pud_offset(pgd, addr);
22432 + pmd = pmd_offset(pud, addr);
22433 + if (!pmd_present(*pmd))
22434 + continue;
22435 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22436 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22437 + }
22438 +#endif
22439 +
22440 + flush_tlb_all();
22441 +#endif
22442 +
22443 free_init_pages("unused kernel memory",
22444 (unsigned long)(&__init_begin),
22445 (unsigned long)(&__init_end));
22446 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22447 index 29f7c6d..b46b35b 100644
22448 --- a/arch/x86/mm/init_32.c
22449 +++ b/arch/x86/mm/init_32.c
22450 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22451 }
22452
22453 /*
22454 - * Creates a middle page table and puts a pointer to it in the
22455 - * given global directory entry. This only returns the gd entry
22456 - * in non-PAE compilation mode, since the middle layer is folded.
22457 - */
22458 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22459 -{
22460 - pud_t *pud;
22461 - pmd_t *pmd_table;
22462 -
22463 -#ifdef CONFIG_X86_PAE
22464 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22465 - if (after_bootmem)
22466 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22467 - else
22468 - pmd_table = (pmd_t *)alloc_low_page();
22469 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22470 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22471 - pud = pud_offset(pgd, 0);
22472 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22473 -
22474 - return pmd_table;
22475 - }
22476 -#endif
22477 - pud = pud_offset(pgd, 0);
22478 - pmd_table = pmd_offset(pud, 0);
22479 -
22480 - return pmd_table;
22481 -}
22482 -
22483 -/*
22484 * Create a page table and place a pointer to it in a middle page
22485 * directory entry:
22486 */
22487 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22488 page_table = (pte_t *)alloc_low_page();
22489
22490 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22491 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22492 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22493 +#else
22494 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22495 +#endif
22496 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22497 }
22498
22499 return pte_offset_kernel(pmd, 0);
22500 }
22501
22502 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22503 +{
22504 + pud_t *pud;
22505 + pmd_t *pmd_table;
22506 +
22507 + pud = pud_offset(pgd, 0);
22508 + pmd_table = pmd_offset(pud, 0);
22509 +
22510 + return pmd_table;
22511 +}
22512 +
22513 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22514 {
22515 int pgd_idx = pgd_index(vaddr);
22516 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22517 int pgd_idx, pmd_idx;
22518 unsigned long vaddr;
22519 pgd_t *pgd;
22520 + pud_t *pud;
22521 pmd_t *pmd;
22522 pte_t *pte = NULL;
22523
22524 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22525 pgd = pgd_base + pgd_idx;
22526
22527 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22528 - pmd = one_md_table_init(pgd);
22529 - pmd = pmd + pmd_index(vaddr);
22530 + pud = pud_offset(pgd, vaddr);
22531 + pmd = pmd_offset(pud, vaddr);
22532 +
22533 +#ifdef CONFIG_X86_PAE
22534 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22535 +#endif
22536 +
22537 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22538 pmd++, pmd_idx++) {
22539 pte = page_table_kmap_check(one_page_table_init(pmd),
22540 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22541 }
22542 }
22543
22544 -static inline int is_kernel_text(unsigned long addr)
22545 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22546 {
22547 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22548 - return 1;
22549 - return 0;
22550 + if ((start > ktla_ktva((unsigned long)_etext) ||
22551 + end <= ktla_ktva((unsigned long)_stext)) &&
22552 + (start > ktla_ktva((unsigned long)_einittext) ||
22553 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22554 +
22555 +#ifdef CONFIG_ACPI_SLEEP
22556 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22557 +#endif
22558 +
22559 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22560 + return 0;
22561 + return 1;
22562 }
22563
22564 /*
22565 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22566 unsigned long last_map_addr = end;
22567 unsigned long start_pfn, end_pfn;
22568 pgd_t *pgd_base = swapper_pg_dir;
22569 - int pgd_idx, pmd_idx, pte_ofs;
22570 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22571 unsigned long pfn;
22572 pgd_t *pgd;
22573 + pud_t *pud;
22574 pmd_t *pmd;
22575 pte_t *pte;
22576 unsigned pages_2m, pages_4k;
22577 @@ -281,8 +282,13 @@ repeat:
22578 pfn = start_pfn;
22579 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22580 pgd = pgd_base + pgd_idx;
22581 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22582 - pmd = one_md_table_init(pgd);
22583 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22584 + pud = pud_offset(pgd, 0);
22585 + pmd = pmd_offset(pud, 0);
22586 +
22587 +#ifdef CONFIG_X86_PAE
22588 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22589 +#endif
22590
22591 if (pfn >= end_pfn)
22592 continue;
22593 @@ -294,14 +300,13 @@ repeat:
22594 #endif
22595 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22596 pmd++, pmd_idx++) {
22597 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22598 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22599
22600 /*
22601 * Map with big pages if possible, otherwise
22602 * create normal page tables:
22603 */
22604 if (use_pse) {
22605 - unsigned int addr2;
22606 pgprot_t prot = PAGE_KERNEL_LARGE;
22607 /*
22608 * first pass will use the same initial
22609 @@ -311,11 +316,7 @@ repeat:
22610 __pgprot(PTE_IDENT_ATTR |
22611 _PAGE_PSE);
22612
22613 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22614 - PAGE_OFFSET + PAGE_SIZE-1;
22615 -
22616 - if (is_kernel_text(addr) ||
22617 - is_kernel_text(addr2))
22618 + if (is_kernel_text(address, address + PMD_SIZE))
22619 prot = PAGE_KERNEL_LARGE_EXEC;
22620
22621 pages_2m++;
22622 @@ -332,7 +333,7 @@ repeat:
22623 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22624 pte += pte_ofs;
22625 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22626 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22627 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22628 pgprot_t prot = PAGE_KERNEL;
22629 /*
22630 * first pass will use the same initial
22631 @@ -340,7 +341,7 @@ repeat:
22632 */
22633 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22634
22635 - if (is_kernel_text(addr))
22636 + if (is_kernel_text(address, address + PAGE_SIZE))
22637 prot = PAGE_KERNEL_EXEC;
22638
22639 pages_4k++;
22640 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22641
22642 pud = pud_offset(pgd, va);
22643 pmd = pmd_offset(pud, va);
22644 - if (!pmd_present(*pmd))
22645 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22646 break;
22647
22648 pte = pte_offset_kernel(pmd, va);
22649 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22650
22651 static void __init pagetable_init(void)
22652 {
22653 - pgd_t *pgd_base = swapper_pg_dir;
22654 -
22655 - permanent_kmaps_init(pgd_base);
22656 + permanent_kmaps_init(swapper_pg_dir);
22657 }
22658
22659 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22660 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22661 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22662
22663 /* user-defined highmem size */
22664 @@ -757,6 +756,12 @@ void __init mem_init(void)
22665
22666 pci_iommu_alloc();
22667
22668 +#ifdef CONFIG_PAX_PER_CPU_PGD
22669 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22670 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22671 + KERNEL_PGD_PTRS);
22672 +#endif
22673 +
22674 #ifdef CONFIG_FLATMEM
22675 BUG_ON(!mem_map);
22676 #endif
22677 @@ -774,7 +779,7 @@ void __init mem_init(void)
22678 set_highmem_pages_init();
22679
22680 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22681 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22682 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22683 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22684
22685 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22686 @@ -815,10 +820,10 @@ void __init mem_init(void)
22687 ((unsigned long)&__init_end -
22688 (unsigned long)&__init_begin) >> 10,
22689
22690 - (unsigned long)&_etext, (unsigned long)&_edata,
22691 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22692 + (unsigned long)&_sdata, (unsigned long)&_edata,
22693 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22694
22695 - (unsigned long)&_text, (unsigned long)&_etext,
22696 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22697 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22698
22699 /*
22700 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22701 if (!kernel_set_to_readonly)
22702 return;
22703
22704 + start = ktla_ktva(start);
22705 pr_debug("Set kernel text: %lx - %lx for read write\n",
22706 start, start+size);
22707
22708 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22709 if (!kernel_set_to_readonly)
22710 return;
22711
22712 + start = ktla_ktva(start);
22713 pr_debug("Set kernel text: %lx - %lx for read only\n",
22714 start, start+size);
22715
22716 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22717 unsigned long start = PFN_ALIGN(_text);
22718 unsigned long size = PFN_ALIGN(_etext) - start;
22719
22720 + start = ktla_ktva(start);
22721 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22722 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22723 size >> 10);
22724 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22725 index bbaaa00..16dffad 100644
22726 --- a/arch/x86/mm/init_64.c
22727 +++ b/arch/x86/mm/init_64.c
22728 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22729 * around without checking the pgd every time.
22730 */
22731
22732 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22733 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22734 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22735
22736 int force_personality32;
22737 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22738
22739 for (address = start; address <= end; address += PGDIR_SIZE) {
22740 const pgd_t *pgd_ref = pgd_offset_k(address);
22741 +
22742 +#ifdef CONFIG_PAX_PER_CPU_PGD
22743 + unsigned long cpu;
22744 +#else
22745 struct page *page;
22746 +#endif
22747
22748 if (pgd_none(*pgd_ref))
22749 continue;
22750
22751 spin_lock(&pgd_lock);
22752 +
22753 +#ifdef CONFIG_PAX_PER_CPU_PGD
22754 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22755 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
22756 +#else
22757 list_for_each_entry(page, &pgd_list, lru) {
22758 pgd_t *pgd;
22759 spinlock_t *pgt_lock;
22760 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22761 /* the pgt_lock only for Xen */
22762 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22763 spin_lock(pgt_lock);
22764 +#endif
22765
22766 if (pgd_none(*pgd))
22767 set_pgd(pgd, *pgd_ref);
22768 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22769 BUG_ON(pgd_page_vaddr(*pgd)
22770 != pgd_page_vaddr(*pgd_ref));
22771
22772 +#ifndef CONFIG_PAX_PER_CPU_PGD
22773 spin_unlock(pgt_lock);
22774 +#endif
22775 +
22776 }
22777 spin_unlock(&pgd_lock);
22778 }
22779 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22780 pmd = fill_pmd(pud, vaddr);
22781 pte = fill_pte(pmd, vaddr);
22782
22783 + pax_open_kernel();
22784 set_pte(pte, new_pte);
22785 + pax_close_kernel();
22786
22787 /*
22788 * It's enough to flush this one mapping.
22789 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22790 pgd = pgd_offset_k((unsigned long)__va(phys));
22791 if (pgd_none(*pgd)) {
22792 pud = (pud_t *) spp_getpage();
22793 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22794 - _PAGE_USER));
22795 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22796 }
22797 pud = pud_offset(pgd, (unsigned long)__va(phys));
22798 if (pud_none(*pud)) {
22799 pmd = (pmd_t *) spp_getpage();
22800 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22801 - _PAGE_USER));
22802 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22803 }
22804 pmd = pmd_offset(pud, phys);
22805 BUG_ON(!pmd_none(*pmd));
22806 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22807 if (pfn >= pgt_buf_top)
22808 panic("alloc_low_page: ran out of memory");
22809
22810 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22811 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22812 clear_page(adr);
22813 *phys = pfn * PAGE_SIZE;
22814 return adr;
22815 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22816
22817 phys = __pa(virt);
22818 left = phys & (PAGE_SIZE - 1);
22819 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22820 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22821 adr = (void *)(((unsigned long)adr) | left);
22822
22823 return adr;
22824 @@ -693,6 +707,12 @@ void __init mem_init(void)
22825
22826 pci_iommu_alloc();
22827
22828 +#ifdef CONFIG_PAX_PER_CPU_PGD
22829 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22830 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22831 + KERNEL_PGD_PTRS);
22832 +#endif
22833 +
22834 /* clear_bss() already clear the empty_zero_page */
22835
22836 reservedpages = 0;
22837 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22838 static struct vm_area_struct gate_vma = {
22839 .vm_start = VSYSCALL_START,
22840 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22841 - .vm_page_prot = PAGE_READONLY_EXEC,
22842 - .vm_flags = VM_READ | VM_EXEC
22843 + .vm_page_prot = PAGE_READONLY,
22844 + .vm_flags = VM_READ
22845 };
22846
22847 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22848 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22849
22850 const char *arch_vma_name(struct vm_area_struct *vma)
22851 {
22852 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22853 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22854 return "[vdso]";
22855 if (vma == &gate_vma)
22856 return "[vsyscall]";
22857 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22858 index 7b179b4..6bd1777 100644
22859 --- a/arch/x86/mm/iomap_32.c
22860 +++ b/arch/x86/mm/iomap_32.c
22861 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22862 type = kmap_atomic_idx_push();
22863 idx = type + KM_TYPE_NR * smp_processor_id();
22864 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22865 +
22866 + pax_open_kernel();
22867 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22868 + pax_close_kernel();
22869 +
22870 arch_flush_lazy_mmu_mode();
22871
22872 return (void *)vaddr;
22873 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22874 index be1ef57..9680edc 100644
22875 --- a/arch/x86/mm/ioremap.c
22876 +++ b/arch/x86/mm/ioremap.c
22877 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22878 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22879 int is_ram = page_is_ram(pfn);
22880
22881 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22882 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22883 return NULL;
22884 WARN_ON_ONCE(is_ram);
22885 }
22886 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22887 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22888
22889 static __initdata int after_paging_init;
22890 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22891 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22892
22893 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22894 {
22895 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22896 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22897
22898 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22899 - memset(bm_pte, 0, sizeof(bm_pte));
22900 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22901 + pmd_populate_user(&init_mm, pmd, bm_pte);
22902
22903 /*
22904 * The boot-ioremap range spans multiple pmds, for which
22905 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22906 index d87dd6d..bf3fa66 100644
22907 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
22908 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
22909 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
22910 * memory (e.g. tracked pages)? For now, we need this to avoid
22911 * invoking kmemcheck for PnP BIOS calls.
22912 */
22913 - if (regs->flags & X86_VM_MASK)
22914 + if (v8086_mode(regs))
22915 return false;
22916 - if (regs->cs != __KERNEL_CS)
22917 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22918 return false;
22919
22920 pte = kmemcheck_pte_lookup(address);
22921 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
22922 index 1dab519..60a7e5f 100644
22923 --- a/arch/x86/mm/mmap.c
22924 +++ b/arch/x86/mm/mmap.c
22925 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
22926 * Leave an at least ~128 MB hole with possible stack randomization.
22927 */
22928 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22929 -#define MAX_GAP (TASK_SIZE/6*5)
22930 +#define MAX_GAP (pax_task_size/6*5)
22931
22932 /*
22933 * True on X86_32 or when emulating IA32 on X86_64
22934 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22935 return rnd << PAGE_SHIFT;
22936 }
22937
22938 -static unsigned long mmap_base(void)
22939 +static unsigned long mmap_base(struct mm_struct *mm)
22940 {
22941 unsigned long gap = rlimit(RLIMIT_STACK);
22942 + unsigned long pax_task_size = TASK_SIZE;
22943 +
22944 +#ifdef CONFIG_PAX_SEGMEXEC
22945 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22946 + pax_task_size = SEGMEXEC_TASK_SIZE;
22947 +#endif
22948
22949 if (gap < MIN_GAP)
22950 gap = MIN_GAP;
22951 else if (gap > MAX_GAP)
22952 gap = MAX_GAP;
22953
22954 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22955 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22956 }
22957
22958 /*
22959 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22960 * does, but not when emulating X86_32
22961 */
22962 -static unsigned long mmap_legacy_base(void)
22963 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22964 {
22965 - if (mmap_is_ia32())
22966 + if (mmap_is_ia32()) {
22967 +
22968 +#ifdef CONFIG_PAX_SEGMEXEC
22969 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22970 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22971 + else
22972 +#endif
22973 +
22974 return TASK_UNMAPPED_BASE;
22975 - else
22976 + } else
22977 return TASK_UNMAPPED_BASE + mmap_rnd();
22978 }
22979
22980 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
22981 void arch_pick_mmap_layout(struct mm_struct *mm)
22982 {
22983 if (mmap_is_legacy()) {
22984 - mm->mmap_base = mmap_legacy_base();
22985 + mm->mmap_base = mmap_legacy_base(mm);
22986 +
22987 +#ifdef CONFIG_PAX_RANDMMAP
22988 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22989 + mm->mmap_base += mm->delta_mmap;
22990 +#endif
22991 +
22992 mm->get_unmapped_area = arch_get_unmapped_area;
22993 mm->unmap_area = arch_unmap_area;
22994 } else {
22995 - mm->mmap_base = mmap_base();
22996 + mm->mmap_base = mmap_base(mm);
22997 +
22998 +#ifdef CONFIG_PAX_RANDMMAP
22999 + if (mm->pax_flags & MF_PAX_RANDMMAP)
23000 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
23001 +#endif
23002 +
23003 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
23004 mm->unmap_area = arch_unmap_area_topdown;
23005 }
23006 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
23007 index 67421f3..8d6b107 100644
23008 --- a/arch/x86/mm/mmio-mod.c
23009 +++ b/arch/x86/mm/mmio-mod.c
23010 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
23011 break;
23012 default:
23013 {
23014 - unsigned char *ip = (unsigned char *)instptr;
23015 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
23016 my_trace->opcode = MMIO_UNKNOWN_OP;
23017 my_trace->width = 0;
23018 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
23019 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
23020 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23021 void __iomem *addr)
23022 {
23023 - static atomic_t next_id;
23024 + static atomic_unchecked_t next_id;
23025 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23026 /* These are page-unaligned. */
23027 struct mmiotrace_map map = {
23028 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23029 .private = trace
23030 },
23031 .phys = offset,
23032 - .id = atomic_inc_return(&next_id)
23033 + .id = atomic_inc_return_unchecked(&next_id)
23034 };
23035 map.map_id = trace->id;
23036
23037 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23038 index b008656..773eac2 100644
23039 --- a/arch/x86/mm/pageattr-test.c
23040 +++ b/arch/x86/mm/pageattr-test.c
23041 @@ -36,7 +36,7 @@ enum {
23042
23043 static int pte_testbit(pte_t pte)
23044 {
23045 - return pte_flags(pte) & _PAGE_UNUSED1;
23046 + return pte_flags(pte) & _PAGE_CPA_TEST;
23047 }
23048
23049 struct split_state {
23050 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23051 index f9e5267..6f6e27f 100644
23052 --- a/arch/x86/mm/pageattr.c
23053 +++ b/arch/x86/mm/pageattr.c
23054 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23055 */
23056 #ifdef CONFIG_PCI_BIOS
23057 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23058 - pgprot_val(forbidden) |= _PAGE_NX;
23059 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23060 #endif
23061
23062 /*
23063 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23064 * Does not cover __inittext since that is gone later on. On
23065 * 64bit we do not enforce !NX on the low mapping
23066 */
23067 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23068 - pgprot_val(forbidden) |= _PAGE_NX;
23069 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23070 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23071
23072 +#ifdef CONFIG_DEBUG_RODATA
23073 /*
23074 * The .rodata section needs to be read-only. Using the pfn
23075 * catches all aliases.
23076 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23077 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23078 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23079 pgprot_val(forbidden) |= _PAGE_RW;
23080 +#endif
23081
23082 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23083 /*
23084 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23085 }
23086 #endif
23087
23088 +#ifdef CONFIG_PAX_KERNEXEC
23089 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23090 + pgprot_val(forbidden) |= _PAGE_RW;
23091 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23092 + }
23093 +#endif
23094 +
23095 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23096
23097 return prot;
23098 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23099 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23100 {
23101 /* change init_mm */
23102 + pax_open_kernel();
23103 set_pte_atomic(kpte, pte);
23104 +
23105 #ifdef CONFIG_X86_32
23106 if (!SHARED_KERNEL_PMD) {
23107 +
23108 +#ifdef CONFIG_PAX_PER_CPU_PGD
23109 + unsigned long cpu;
23110 +#else
23111 struct page *page;
23112 +#endif
23113
23114 +#ifdef CONFIG_PAX_PER_CPU_PGD
23115 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23116 + pgd_t *pgd = get_cpu_pgd(cpu);
23117 +#else
23118 list_for_each_entry(page, &pgd_list, lru) {
23119 - pgd_t *pgd;
23120 + pgd_t *pgd = (pgd_t *)page_address(page);
23121 +#endif
23122 +
23123 pud_t *pud;
23124 pmd_t *pmd;
23125
23126 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23127 + pgd += pgd_index(address);
23128 pud = pud_offset(pgd, address);
23129 pmd = pmd_offset(pud, address);
23130 set_pte_atomic((pte_t *)pmd, pte);
23131 }
23132 }
23133 #endif
23134 + pax_close_kernel();
23135 }
23136
23137 static int
23138 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23139 index f6ff57b..481690f 100644
23140 --- a/arch/x86/mm/pat.c
23141 +++ b/arch/x86/mm/pat.c
23142 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23143
23144 if (!entry) {
23145 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23146 - current->comm, current->pid, start, end);
23147 + current->comm, task_pid_nr(current), start, end);
23148 return -EINVAL;
23149 }
23150
23151 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23152 while (cursor < to) {
23153 if (!devmem_is_allowed(pfn)) {
23154 printk(KERN_INFO
23155 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23156 - current->comm, from, to);
23157 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23158 + current->comm, from, to, cursor);
23159 return 0;
23160 }
23161 cursor += PAGE_SIZE;
23162 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23163 printk(KERN_INFO
23164 "%s:%d ioremap_change_attr failed %s "
23165 "for %Lx-%Lx\n",
23166 - current->comm, current->pid,
23167 + current->comm, task_pid_nr(current),
23168 cattr_name(flags),
23169 base, (unsigned long long)(base + size));
23170 return -EINVAL;
23171 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23172 if (want_flags != flags) {
23173 printk(KERN_WARNING
23174 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23175 - current->comm, current->pid,
23176 + current->comm, task_pid_nr(current),
23177 cattr_name(want_flags),
23178 (unsigned long long)paddr,
23179 (unsigned long long)(paddr + size),
23180 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23181 free_memtype(paddr, paddr + size);
23182 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23183 " for %Lx-%Lx, got %s\n",
23184 - current->comm, current->pid,
23185 + current->comm, task_pid_nr(current),
23186 cattr_name(want_flags),
23187 (unsigned long long)paddr,
23188 (unsigned long long)(paddr + size),
23189 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23190 index 9f0614d..92ae64a 100644
23191 --- a/arch/x86/mm/pf_in.c
23192 +++ b/arch/x86/mm/pf_in.c
23193 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23194 int i;
23195 enum reason_type rv = OTHERS;
23196
23197 - p = (unsigned char *)ins_addr;
23198 + p = (unsigned char *)ktla_ktva(ins_addr);
23199 p += skip_prefix(p, &prf);
23200 p += get_opcode(p, &opcode);
23201
23202 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23203 struct prefix_bits prf;
23204 int i;
23205
23206 - p = (unsigned char *)ins_addr;
23207 + p = (unsigned char *)ktla_ktva(ins_addr);
23208 p += skip_prefix(p, &prf);
23209 p += get_opcode(p, &opcode);
23210
23211 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23212 struct prefix_bits prf;
23213 int i;
23214
23215 - p = (unsigned char *)ins_addr;
23216 + p = (unsigned char *)ktla_ktva(ins_addr);
23217 p += skip_prefix(p, &prf);
23218 p += get_opcode(p, &opcode);
23219
23220 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23221 struct prefix_bits prf;
23222 int i;
23223
23224 - p = (unsigned char *)ins_addr;
23225 + p = (unsigned char *)ktla_ktva(ins_addr);
23226 p += skip_prefix(p, &prf);
23227 p += get_opcode(p, &opcode);
23228 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23229 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23230 struct prefix_bits prf;
23231 int i;
23232
23233 - p = (unsigned char *)ins_addr;
23234 + p = (unsigned char *)ktla_ktva(ins_addr);
23235 p += skip_prefix(p, &prf);
23236 p += get_opcode(p, &opcode);
23237 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23238 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23239 index 8573b83..6372501 100644
23240 --- a/arch/x86/mm/pgtable.c
23241 +++ b/arch/x86/mm/pgtable.c
23242 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23243 list_del(&page->lru);
23244 }
23245
23246 -#define UNSHARED_PTRS_PER_PGD \
23247 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23248 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23249 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23250
23251 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23252 +{
23253 + while (count--)
23254 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23255 +}
23256 +#endif
23257
23258 +#ifdef CONFIG_PAX_PER_CPU_PGD
23259 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23260 +{
23261 + while (count--)
23262 +
23263 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23264 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23265 +#else
23266 + *dst++ = *src++;
23267 +#endif
23268 +
23269 +}
23270 +#endif
23271 +
23272 +#ifdef CONFIG_X86_64
23273 +#define pxd_t pud_t
23274 +#define pyd_t pgd_t
23275 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23276 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23277 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23278 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23279 +#define PYD_SIZE PGDIR_SIZE
23280 +#else
23281 +#define pxd_t pmd_t
23282 +#define pyd_t pud_t
23283 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23284 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23285 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23286 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
23287 +#define PYD_SIZE PUD_SIZE
23288 +#endif
23289 +
23290 +#ifdef CONFIG_PAX_PER_CPU_PGD
23291 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23292 +static inline void pgd_dtor(pgd_t *pgd) {}
23293 +#else
23294 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23295 {
23296 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23297 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23298 pgd_list_del(pgd);
23299 spin_unlock(&pgd_lock);
23300 }
23301 +#endif
23302
23303 /*
23304 * List of all pgd's needed for non-PAE so it can invalidate entries
23305 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23306 * -- wli
23307 */
23308
23309 -#ifdef CONFIG_X86_PAE
23310 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23311 /*
23312 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23313 * updating the top-level pagetable entries to guarantee the
23314 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23315 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23316 * and initialize the kernel pmds here.
23317 */
23318 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23319 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23320
23321 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23322 {
23323 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23324 */
23325 flush_tlb_mm(mm);
23326 }
23327 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23328 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23329 #else /* !CONFIG_X86_PAE */
23330
23331 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23332 -#define PREALLOCATED_PMDS 0
23333 +#define PREALLOCATED_PXDS 0
23334
23335 #endif /* CONFIG_X86_PAE */
23336
23337 -static void free_pmds(pmd_t *pmds[])
23338 +static void free_pxds(pxd_t *pxds[])
23339 {
23340 int i;
23341
23342 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23343 - if (pmds[i])
23344 - free_page((unsigned long)pmds[i]);
23345 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23346 + if (pxds[i])
23347 + free_page((unsigned long)pxds[i]);
23348 }
23349
23350 -static int preallocate_pmds(pmd_t *pmds[])
23351 +static int preallocate_pxds(pxd_t *pxds[])
23352 {
23353 int i;
23354 bool failed = false;
23355
23356 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23357 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23358 - if (pmd == NULL)
23359 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23360 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23361 + if (pxd == NULL)
23362 failed = true;
23363 - pmds[i] = pmd;
23364 + pxds[i] = pxd;
23365 }
23366
23367 if (failed) {
23368 - free_pmds(pmds);
23369 + free_pxds(pxds);
23370 return -ENOMEM;
23371 }
23372
23373 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23374 * preallocate which never got a corresponding vma will need to be
23375 * freed manually.
23376 */
23377 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23378 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23379 {
23380 int i;
23381
23382 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23383 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23384 pgd_t pgd = pgdp[i];
23385
23386 if (pgd_val(pgd) != 0) {
23387 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23388 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23389
23390 - pgdp[i] = native_make_pgd(0);
23391 + set_pgd(pgdp + i, native_make_pgd(0));
23392
23393 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23394 - pmd_free(mm, pmd);
23395 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23396 + pxd_free(mm, pxd);
23397 }
23398 }
23399 }
23400
23401 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23402 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23403 {
23404 - pud_t *pud;
23405 + pyd_t *pyd;
23406 unsigned long addr;
23407 int i;
23408
23409 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23410 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23411 return;
23412
23413 - pud = pud_offset(pgd, 0);
23414 +#ifdef CONFIG_X86_64
23415 + pyd = pyd_offset(mm, 0L);
23416 +#else
23417 + pyd = pyd_offset(pgd, 0L);
23418 +#endif
23419
23420 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23421 - i++, pud++, addr += PUD_SIZE) {
23422 - pmd_t *pmd = pmds[i];
23423 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23424 + i++, pyd++, addr += PYD_SIZE) {
23425 + pxd_t *pxd = pxds[i];
23426
23427 if (i >= KERNEL_PGD_BOUNDARY)
23428 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23429 - sizeof(pmd_t) * PTRS_PER_PMD);
23430 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23431 + sizeof(pxd_t) * PTRS_PER_PMD);
23432
23433 - pud_populate(mm, pud, pmd);
23434 + pyd_populate(mm, pyd, pxd);
23435 }
23436 }
23437
23438 pgd_t *pgd_alloc(struct mm_struct *mm)
23439 {
23440 pgd_t *pgd;
23441 - pmd_t *pmds[PREALLOCATED_PMDS];
23442 + pxd_t *pxds[PREALLOCATED_PXDS];
23443
23444 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23445
23446 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23447
23448 mm->pgd = pgd;
23449
23450 - if (preallocate_pmds(pmds) != 0)
23451 + if (preallocate_pxds(pxds) != 0)
23452 goto out_free_pgd;
23453
23454 if (paravirt_pgd_alloc(mm) != 0)
23455 - goto out_free_pmds;
23456 + goto out_free_pxds;
23457
23458 /*
23459 * Make sure that pre-populating the pmds is atomic with
23460 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23461 spin_lock(&pgd_lock);
23462
23463 pgd_ctor(mm, pgd);
23464 - pgd_prepopulate_pmd(mm, pgd, pmds);
23465 + pgd_prepopulate_pxd(mm, pgd, pxds);
23466
23467 spin_unlock(&pgd_lock);
23468
23469 return pgd;
23470
23471 -out_free_pmds:
23472 - free_pmds(pmds);
23473 +out_free_pxds:
23474 + free_pxds(pxds);
23475 out_free_pgd:
23476 free_page((unsigned long)pgd);
23477 out:
23478 @@ -295,7 +344,7 @@ out:
23479
23480 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23481 {
23482 - pgd_mop_up_pmds(mm, pgd);
23483 + pgd_mop_up_pxds(mm, pgd);
23484 pgd_dtor(pgd);
23485 paravirt_pgd_free(mm, pgd);
23486 free_page((unsigned long)pgd);
23487 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23488 index cac7184..09a39fa 100644
23489 --- a/arch/x86/mm/pgtable_32.c
23490 +++ b/arch/x86/mm/pgtable_32.c
23491 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23492 return;
23493 }
23494 pte = pte_offset_kernel(pmd, vaddr);
23495 +
23496 + pax_open_kernel();
23497 if (pte_val(pteval))
23498 set_pte_at(&init_mm, vaddr, pte, pteval);
23499 else
23500 pte_clear(&init_mm, vaddr, pte);
23501 + pax_close_kernel();
23502
23503 /*
23504 * It's enough to flush this one mapping.
23505 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23506 index 410531d..0f16030 100644
23507 --- a/arch/x86/mm/setup_nx.c
23508 +++ b/arch/x86/mm/setup_nx.c
23509 @@ -5,8 +5,10 @@
23510 #include <asm/pgtable.h>
23511 #include <asm/proto.h>
23512
23513 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23514 static int disable_nx __cpuinitdata;
23515
23516 +#ifndef CONFIG_PAX_PAGEEXEC
23517 /*
23518 * noexec = on|off
23519 *
23520 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23521 return 0;
23522 }
23523 early_param("noexec", noexec_setup);
23524 +#endif
23525 +
23526 +#endif
23527
23528 void __cpuinit x86_configure_nx(void)
23529 {
23530 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23531 if (cpu_has_nx && !disable_nx)
23532 __supported_pte_mask |= _PAGE_NX;
23533 else
23534 +#endif
23535 __supported_pte_mask &= ~_PAGE_NX;
23536 }
23537
23538 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23539 index d6c0418..06a0ad5 100644
23540 --- a/arch/x86/mm/tlb.c
23541 +++ b/arch/x86/mm/tlb.c
23542 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23543 BUG();
23544 cpumask_clear_cpu(cpu,
23545 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23546 +
23547 +#ifndef CONFIG_PAX_PER_CPU_PGD
23548 load_cr3(swapper_pg_dir);
23549 +#endif
23550 +
23551 }
23552 EXPORT_SYMBOL_GPL(leave_mm);
23553
23554 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23555 index 6687022..ceabcfa 100644
23556 --- a/arch/x86/net/bpf_jit.S
23557 +++ b/arch/x86/net/bpf_jit.S
23558 @@ -9,6 +9,7 @@
23559 */
23560 #include <linux/linkage.h>
23561 #include <asm/dwarf2.h>
23562 +#include <asm/alternative-asm.h>
23563
23564 /*
23565 * Calling convention :
23566 @@ -35,6 +36,7 @@ sk_load_word:
23567 jle bpf_slow_path_word
23568 mov (SKBDATA,%rsi),%eax
23569 bswap %eax /* ntohl() */
23570 + pax_force_retaddr
23571 ret
23572
23573
23574 @@ -53,6 +55,7 @@ sk_load_half:
23575 jle bpf_slow_path_half
23576 movzwl (SKBDATA,%rsi),%eax
23577 rol $8,%ax # ntohs()
23578 + pax_force_retaddr
23579 ret
23580
23581 sk_load_byte_ind:
23582 @@ -66,6 +69,7 @@ sk_load_byte:
23583 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23584 jle bpf_slow_path_byte
23585 movzbl (SKBDATA,%rsi),%eax
23586 + pax_force_retaddr
23587 ret
23588
23589 /**
23590 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23591 movzbl (SKBDATA,%rsi),%ebx
23592 and $15,%bl
23593 shl $2,%bl
23594 + pax_force_retaddr
23595 ret
23596 CFI_ENDPROC
23597 ENDPROC(sk_load_byte_msh)
23598 @@ -91,6 +96,7 @@ bpf_error:
23599 xor %eax,%eax
23600 mov -8(%rbp),%rbx
23601 leaveq
23602 + pax_force_retaddr
23603 ret
23604
23605 /* rsi contains offset and can be scratched */
23606 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23607 js bpf_error
23608 mov -12(%rbp),%eax
23609 bswap %eax
23610 + pax_force_retaddr
23611 ret
23612
23613 bpf_slow_path_half:
23614 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23615 mov -12(%rbp),%ax
23616 rol $8,%ax
23617 movzwl %ax,%eax
23618 + pax_force_retaddr
23619 ret
23620
23621 bpf_slow_path_byte:
23622 bpf_slow_path_common(1)
23623 js bpf_error
23624 movzbl -12(%rbp),%eax
23625 + pax_force_retaddr
23626 ret
23627
23628 bpf_slow_path_byte_msh:
23629 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23630 and $15,%al
23631 shl $2,%al
23632 xchg %eax,%ebx
23633 + pax_force_retaddr
23634 ret
23635 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23636 index 7b65f75..63097f6 100644
23637 --- a/arch/x86/net/bpf_jit_comp.c
23638 +++ b/arch/x86/net/bpf_jit_comp.c
23639 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23640 set_fs(old_fs);
23641 }
23642
23643 +struct bpf_jit_work {
23644 + struct work_struct work;
23645 + void *image;
23646 +};
23647
23648 void bpf_jit_compile(struct sk_filter *fp)
23649 {
23650 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23651 if (addrs == NULL)
23652 return;
23653
23654 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23655 + if (!fp->work)
23656 + goto out;
23657 +
23658 /* Before first pass, make a rough estimation of addrs[]
23659 * each bpf instruction is translated to less than 64 bytes
23660 */
23661 @@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23662 if (image) {
23663 if (unlikely(proglen + ilen > oldproglen)) {
23664 pr_err("bpb_jit_compile fatal error\n");
23665 - kfree(addrs);
23666 - module_free(NULL, image);
23667 - return;
23668 + module_free_exec(NULL, image);
23669 + goto out;
23670 }
23671 + pax_open_kernel();
23672 memcpy(image + proglen, temp, ilen);
23673 + pax_close_kernel();
23674 }
23675 proglen += ilen;
23676 addrs[i] = proglen;
23677 @@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23678 break;
23679 }
23680 if (proglen == oldproglen) {
23681 - image = module_alloc(max_t(unsigned int,
23682 + image = module_alloc_exec(max_t(unsigned int,
23683 proglen,
23684 sizeof(struct work_struct)));
23685 if (!image)
23686 @@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23687 fp->bpf_func = (void *)image;
23688 }
23689 out:
23690 + kfree(fp->work);
23691 kfree(addrs);
23692 return;
23693 }
23694
23695 static void jit_free_defer(struct work_struct *arg)
23696 {
23697 - module_free(NULL, arg);
23698 + module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23699 + kfree(arg);
23700 }
23701
23702 /* run from softirq, we must use a work_struct to call
23703 - * module_free() from process context
23704 + * module_free_exec() from process context
23705 */
23706 void bpf_jit_free(struct sk_filter *fp)
23707 {
23708 if (fp->bpf_func != sk_run_filter) {
23709 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
23710 + struct work_struct *work = &fp->work->work;
23711
23712 INIT_WORK(work, jit_free_defer);
23713 + fp->work->image = fp->bpf_func;
23714 schedule_work(work);
23715 }
23716 }
23717 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23718 index bff89df..377758a 100644
23719 --- a/arch/x86/oprofile/backtrace.c
23720 +++ b/arch/x86/oprofile/backtrace.c
23721 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23722 struct stack_frame_ia32 *fp;
23723 unsigned long bytes;
23724
23725 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23726 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23727 if (bytes != sizeof(bufhead))
23728 return NULL;
23729
23730 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23731 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23732
23733 oprofile_add_trace(bufhead[0].return_address);
23734
23735 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23736 struct stack_frame bufhead[2];
23737 unsigned long bytes;
23738
23739 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23740 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23741 if (bytes != sizeof(bufhead))
23742 return NULL;
23743
23744 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23745 {
23746 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23747
23748 - if (!user_mode_vm(regs)) {
23749 + if (!user_mode(regs)) {
23750 unsigned long stack = kernel_stack_pointer(regs);
23751 if (depth)
23752 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23753 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23754 index cb29191..036766d 100644
23755 --- a/arch/x86/pci/mrst.c
23756 +++ b/arch/x86/pci/mrst.c
23757 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23758 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23759 pci_mmcfg_late_init();
23760 pcibios_enable_irq = mrst_pci_irq_enable;
23761 - pci_root_ops = pci_mrst_ops;
23762 + pax_open_kernel();
23763 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23764 + pax_close_kernel();
23765 /* Continue with standard init */
23766 return 1;
23767 }
23768 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23769 index f685535..2b76a81 100644
23770 --- a/arch/x86/pci/pcbios.c
23771 +++ b/arch/x86/pci/pcbios.c
23772 @@ -79,50 +79,93 @@ union bios32 {
23773 static struct {
23774 unsigned long address;
23775 unsigned short segment;
23776 -} bios32_indirect = { 0, __KERNEL_CS };
23777 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23778
23779 /*
23780 * Returns the entry point for the given service, NULL on error
23781 */
23782
23783 -static unsigned long bios32_service(unsigned long service)
23784 +static unsigned long __devinit bios32_service(unsigned long service)
23785 {
23786 unsigned char return_code; /* %al */
23787 unsigned long address; /* %ebx */
23788 unsigned long length; /* %ecx */
23789 unsigned long entry; /* %edx */
23790 unsigned long flags;
23791 + struct desc_struct d, *gdt;
23792
23793 local_irq_save(flags);
23794 - __asm__("lcall *(%%edi); cld"
23795 +
23796 + gdt = get_cpu_gdt_table(smp_processor_id());
23797 +
23798 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23799 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23800 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23801 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23802 +
23803 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23804 : "=a" (return_code),
23805 "=b" (address),
23806 "=c" (length),
23807 "=d" (entry)
23808 : "0" (service),
23809 "1" (0),
23810 - "D" (&bios32_indirect));
23811 + "D" (&bios32_indirect),
23812 + "r"(__PCIBIOS_DS)
23813 + : "memory");
23814 +
23815 + pax_open_kernel();
23816 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23817 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23818 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23819 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23820 + pax_close_kernel();
23821 +
23822 local_irq_restore(flags);
23823
23824 switch (return_code) {
23825 - case 0:
23826 - return address + entry;
23827 - case 0x80: /* Not present */
23828 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23829 - return 0;
23830 - default: /* Shouldn't happen */
23831 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23832 - service, return_code);
23833 + case 0: {
23834 + int cpu;
23835 + unsigned char flags;
23836 +
23837 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23838 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23839 + printk(KERN_WARNING "bios32_service: not valid\n");
23840 return 0;
23841 + }
23842 + address = address + PAGE_OFFSET;
23843 + length += 16UL; /* some BIOSs underreport this... */
23844 + flags = 4;
23845 + if (length >= 64*1024*1024) {
23846 + length >>= PAGE_SHIFT;
23847 + flags |= 8;
23848 + }
23849 +
23850 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23851 + gdt = get_cpu_gdt_table(cpu);
23852 + pack_descriptor(&d, address, length, 0x9b, flags);
23853 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23854 + pack_descriptor(&d, address, length, 0x93, flags);
23855 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23856 + }
23857 + return entry;
23858 + }
23859 + case 0x80: /* Not present */
23860 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23861 + return 0;
23862 + default: /* Shouldn't happen */
23863 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23864 + service, return_code);
23865 + return 0;
23866 }
23867 }
23868
23869 static struct {
23870 unsigned long address;
23871 unsigned short segment;
23872 -} pci_indirect = { 0, __KERNEL_CS };
23873 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23874
23875 -static int pci_bios_present;
23876 +static int pci_bios_present __read_only;
23877
23878 static int __devinit check_pcibios(void)
23879 {
23880 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23881 unsigned long flags, pcibios_entry;
23882
23883 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23884 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23885 + pci_indirect.address = pcibios_entry;
23886
23887 local_irq_save(flags);
23888 - __asm__(
23889 - "lcall *(%%edi); cld\n\t"
23890 + __asm__("movw %w6, %%ds\n\t"
23891 + "lcall *%%ss:(%%edi); cld\n\t"
23892 + "push %%ss\n\t"
23893 + "pop %%ds\n\t"
23894 "jc 1f\n\t"
23895 "xor %%ah, %%ah\n"
23896 "1:"
23897 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23898 "=b" (ebx),
23899 "=c" (ecx)
23900 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23901 - "D" (&pci_indirect)
23902 + "D" (&pci_indirect),
23903 + "r" (__PCIBIOS_DS)
23904 : "memory");
23905 local_irq_restore(flags);
23906
23907 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23908
23909 switch (len) {
23910 case 1:
23911 - __asm__("lcall *(%%esi); cld\n\t"
23912 + __asm__("movw %w6, %%ds\n\t"
23913 + "lcall *%%ss:(%%esi); cld\n\t"
23914 + "push %%ss\n\t"
23915 + "pop %%ds\n\t"
23916 "jc 1f\n\t"
23917 "xor %%ah, %%ah\n"
23918 "1:"
23919 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23920 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23921 "b" (bx),
23922 "D" ((long)reg),
23923 - "S" (&pci_indirect));
23924 + "S" (&pci_indirect),
23925 + "r" (__PCIBIOS_DS));
23926 /*
23927 * Zero-extend the result beyond 8 bits, do not trust the
23928 * BIOS having done it:
23929 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23930 *value &= 0xff;
23931 break;
23932 case 2:
23933 - __asm__("lcall *(%%esi); cld\n\t"
23934 + __asm__("movw %w6, %%ds\n\t"
23935 + "lcall *%%ss:(%%esi); cld\n\t"
23936 + "push %%ss\n\t"
23937 + "pop %%ds\n\t"
23938 "jc 1f\n\t"
23939 "xor %%ah, %%ah\n"
23940 "1:"
23941 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23942 : "1" (PCIBIOS_READ_CONFIG_WORD),
23943 "b" (bx),
23944 "D" ((long)reg),
23945 - "S" (&pci_indirect));
23946 + "S" (&pci_indirect),
23947 + "r" (__PCIBIOS_DS));
23948 /*
23949 * Zero-extend the result beyond 16 bits, do not trust the
23950 * BIOS having done it:
23951 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23952 *value &= 0xffff;
23953 break;
23954 case 4:
23955 - __asm__("lcall *(%%esi); cld\n\t"
23956 + __asm__("movw %w6, %%ds\n\t"
23957 + "lcall *%%ss:(%%esi); cld\n\t"
23958 + "push %%ss\n\t"
23959 + "pop %%ds\n\t"
23960 "jc 1f\n\t"
23961 "xor %%ah, %%ah\n"
23962 "1:"
23963 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23964 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23965 "b" (bx),
23966 "D" ((long)reg),
23967 - "S" (&pci_indirect));
23968 + "S" (&pci_indirect),
23969 + "r" (__PCIBIOS_DS));
23970 break;
23971 }
23972
23973 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23974
23975 switch (len) {
23976 case 1:
23977 - __asm__("lcall *(%%esi); cld\n\t"
23978 + __asm__("movw %w6, %%ds\n\t"
23979 + "lcall *%%ss:(%%esi); cld\n\t"
23980 + "push %%ss\n\t"
23981 + "pop %%ds\n\t"
23982 "jc 1f\n\t"
23983 "xor %%ah, %%ah\n"
23984 "1:"
23985 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23986 "c" (value),
23987 "b" (bx),
23988 "D" ((long)reg),
23989 - "S" (&pci_indirect));
23990 + "S" (&pci_indirect),
23991 + "r" (__PCIBIOS_DS));
23992 break;
23993 case 2:
23994 - __asm__("lcall *(%%esi); cld\n\t"
23995 + __asm__("movw %w6, %%ds\n\t"
23996 + "lcall *%%ss:(%%esi); cld\n\t"
23997 + "push %%ss\n\t"
23998 + "pop %%ds\n\t"
23999 "jc 1f\n\t"
24000 "xor %%ah, %%ah\n"
24001 "1:"
24002 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24003 "c" (value),
24004 "b" (bx),
24005 "D" ((long)reg),
24006 - "S" (&pci_indirect));
24007 + "S" (&pci_indirect),
24008 + "r" (__PCIBIOS_DS));
24009 break;
24010 case 4:
24011 - __asm__("lcall *(%%esi); cld\n\t"
24012 + __asm__("movw %w6, %%ds\n\t"
24013 + "lcall *%%ss:(%%esi); cld\n\t"
24014 + "push %%ss\n\t"
24015 + "pop %%ds\n\t"
24016 "jc 1f\n\t"
24017 "xor %%ah, %%ah\n"
24018 "1:"
24019 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
24020 "c" (value),
24021 "b" (bx),
24022 "D" ((long)reg),
24023 - "S" (&pci_indirect));
24024 + "S" (&pci_indirect),
24025 + "r" (__PCIBIOS_DS));
24026 break;
24027 }
24028
24029 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24030
24031 DBG("PCI: Fetching IRQ routing table... ");
24032 __asm__("push %%es\n\t"
24033 + "movw %w8, %%ds\n\t"
24034 "push %%ds\n\t"
24035 "pop %%es\n\t"
24036 - "lcall *(%%esi); cld\n\t"
24037 + "lcall *%%ss:(%%esi); cld\n\t"
24038 "pop %%es\n\t"
24039 + "push %%ss\n\t"
24040 + "pop %%ds\n"
24041 "jc 1f\n\t"
24042 "xor %%ah, %%ah\n"
24043 "1:"
24044 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24045 "1" (0),
24046 "D" ((long) &opt),
24047 "S" (&pci_indirect),
24048 - "m" (opt)
24049 + "m" (opt),
24050 + "r" (__PCIBIOS_DS)
24051 : "memory");
24052 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24053 if (ret & 0xff00)
24054 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24055 {
24056 int ret;
24057
24058 - __asm__("lcall *(%%esi); cld\n\t"
24059 + __asm__("movw %w5, %%ds\n\t"
24060 + "lcall *%%ss:(%%esi); cld\n\t"
24061 + "push %%ss\n\t"
24062 + "pop %%ds\n"
24063 "jc 1f\n\t"
24064 "xor %%ah, %%ah\n"
24065 "1:"
24066 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24067 : "0" (PCIBIOS_SET_PCI_HW_INT),
24068 "b" ((dev->bus->number << 8) | dev->devfn),
24069 "c" ((irq << 8) | (pin + 10)),
24070 - "S" (&pci_indirect));
24071 + "S" (&pci_indirect),
24072 + "r" (__PCIBIOS_DS));
24073 return !(ret & 0xff00);
24074 }
24075 EXPORT_SYMBOL(pcibios_set_irq_routing);
24076 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24077 index 5cab48e..b025f9b 100644
24078 --- a/arch/x86/platform/efi/efi_32.c
24079 +++ b/arch/x86/platform/efi/efi_32.c
24080 @@ -38,70 +38,56 @@
24081 */
24082
24083 static unsigned long efi_rt_eflags;
24084 -static pgd_t efi_bak_pg_dir_pointer[2];
24085 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
24086
24087 -void efi_call_phys_prelog(void)
24088 +void __init efi_call_phys_prelog(void)
24089 {
24090 - unsigned long cr4;
24091 - unsigned long temp;
24092 struct desc_ptr gdt_descr;
24093
24094 +#ifdef CONFIG_PAX_KERNEXEC
24095 + struct desc_struct d;
24096 +#endif
24097 +
24098 local_irq_save(efi_rt_eflags);
24099
24100 - /*
24101 - * If I don't have PAE, I should just duplicate two entries in page
24102 - * directory. If I have PAE, I just need to duplicate one entry in
24103 - * page directory.
24104 - */
24105 - cr4 = read_cr4_safe();
24106 -
24107 - if (cr4 & X86_CR4_PAE) {
24108 - efi_bak_pg_dir_pointer[0].pgd =
24109 - swapper_pg_dir[pgd_index(0)].pgd;
24110 - swapper_pg_dir[0].pgd =
24111 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24112 - } else {
24113 - efi_bak_pg_dir_pointer[0].pgd =
24114 - swapper_pg_dir[pgd_index(0)].pgd;
24115 - efi_bak_pg_dir_pointer[1].pgd =
24116 - swapper_pg_dir[pgd_index(0x400000)].pgd;
24117 - swapper_pg_dir[pgd_index(0)].pgd =
24118 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24119 - temp = PAGE_OFFSET + 0x400000;
24120 - swapper_pg_dir[pgd_index(0x400000)].pgd =
24121 - swapper_pg_dir[pgd_index(temp)].pgd;
24122 - }
24123 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
24124 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24125 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
24126
24127 /*
24128 * After the lock is released, the original page table is restored.
24129 */
24130 __flush_tlb_all();
24131
24132 +#ifdef CONFIG_PAX_KERNEXEC
24133 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24134 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24135 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24136 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24137 +#endif
24138 +
24139 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24140 gdt_descr.size = GDT_SIZE - 1;
24141 load_gdt(&gdt_descr);
24142 }
24143
24144 -void efi_call_phys_epilog(void)
24145 +void __init efi_call_phys_epilog(void)
24146 {
24147 - unsigned long cr4;
24148 struct desc_ptr gdt_descr;
24149
24150 +#ifdef CONFIG_PAX_KERNEXEC
24151 + struct desc_struct d;
24152 +
24153 + memset(&d, 0, sizeof d);
24154 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24155 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24156 +#endif
24157 +
24158 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24159 gdt_descr.size = GDT_SIZE - 1;
24160 load_gdt(&gdt_descr);
24161
24162 - cr4 = read_cr4_safe();
24163 -
24164 - if (cr4 & X86_CR4_PAE) {
24165 - swapper_pg_dir[pgd_index(0)].pgd =
24166 - efi_bak_pg_dir_pointer[0].pgd;
24167 - } else {
24168 - swapper_pg_dir[pgd_index(0)].pgd =
24169 - efi_bak_pg_dir_pointer[0].pgd;
24170 - swapper_pg_dir[pgd_index(0x400000)].pgd =
24171 - efi_bak_pg_dir_pointer[1].pgd;
24172 - }
24173 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
24174
24175 /*
24176 * After the lock is released, the original page table is restored.
24177 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24178 index fbe66e6..c5c0dd2 100644
24179 --- a/arch/x86/platform/efi/efi_stub_32.S
24180 +++ b/arch/x86/platform/efi/efi_stub_32.S
24181 @@ -6,7 +6,9 @@
24182 */
24183
24184 #include <linux/linkage.h>
24185 +#include <linux/init.h>
24186 #include <asm/page_types.h>
24187 +#include <asm/segment.h>
24188
24189 /*
24190 * efi_call_phys(void *, ...) is a function with variable parameters.
24191 @@ -20,7 +22,7 @@
24192 * service functions will comply with gcc calling convention, too.
24193 */
24194
24195 -.text
24196 +__INIT
24197 ENTRY(efi_call_phys)
24198 /*
24199 * 0. The function can only be called in Linux kernel. So CS has been
24200 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24201 * The mapping of lower virtual memory has been created in prelog and
24202 * epilog.
24203 */
24204 - movl $1f, %edx
24205 - subl $__PAGE_OFFSET, %edx
24206 - jmp *%edx
24207 + movl $(__KERNEXEC_EFI_DS), %edx
24208 + mov %edx, %ds
24209 + mov %edx, %es
24210 + mov %edx, %ss
24211 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24212 1:
24213
24214 /*
24215 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24216 * parameter 2, ..., param n. To make things easy, we save the return
24217 * address of efi_call_phys in a global variable.
24218 */
24219 - popl %edx
24220 - movl %edx, saved_return_addr
24221 - /* get the function pointer into ECX*/
24222 - popl %ecx
24223 - movl %ecx, efi_rt_function_ptr
24224 - movl $2f, %edx
24225 - subl $__PAGE_OFFSET, %edx
24226 - pushl %edx
24227 + popl (saved_return_addr)
24228 + popl (efi_rt_function_ptr)
24229
24230 /*
24231 * 3. Clear PG bit in %CR0.
24232 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24233 /*
24234 * 5. Call the physical function.
24235 */
24236 - jmp *%ecx
24237 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24238
24239 -2:
24240 /*
24241 * 6. After EFI runtime service returns, control will return to
24242 * following instruction. We'd better readjust stack pointer first.
24243 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24244 movl %cr0, %edx
24245 orl $0x80000000, %edx
24246 movl %edx, %cr0
24247 - jmp 1f
24248 -1:
24249 +
24250 /*
24251 * 8. Now restore the virtual mode from flat mode by
24252 * adding EIP with PAGE_OFFSET.
24253 */
24254 - movl $1f, %edx
24255 - jmp *%edx
24256 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24257 1:
24258 + movl $(__KERNEL_DS), %edx
24259 + mov %edx, %ds
24260 + mov %edx, %es
24261 + mov %edx, %ss
24262
24263 /*
24264 * 9. Balance the stack. And because EAX contain the return value,
24265 * we'd better not clobber it.
24266 */
24267 - leal efi_rt_function_ptr, %edx
24268 - movl (%edx), %ecx
24269 - pushl %ecx
24270 + pushl (efi_rt_function_ptr)
24271
24272 /*
24273 - * 10. Push the saved return address onto the stack and return.
24274 + * 10. Return to the saved return address.
24275 */
24276 - leal saved_return_addr, %edx
24277 - movl (%edx), %ecx
24278 - pushl %ecx
24279 - ret
24280 + jmpl *(saved_return_addr)
24281 ENDPROC(efi_call_phys)
24282 .previous
24283
24284 -.data
24285 +__INITDATA
24286 saved_return_addr:
24287 .long 0
24288 efi_rt_function_ptr:
24289 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24290 index 4c07cca..2c8427d 100644
24291 --- a/arch/x86/platform/efi/efi_stub_64.S
24292 +++ b/arch/x86/platform/efi/efi_stub_64.S
24293 @@ -7,6 +7,7 @@
24294 */
24295
24296 #include <linux/linkage.h>
24297 +#include <asm/alternative-asm.h>
24298
24299 #define SAVE_XMM \
24300 mov %rsp, %rax; \
24301 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24302 call *%rdi
24303 addq $32, %rsp
24304 RESTORE_XMM
24305 + pax_force_retaddr 0, 1
24306 ret
24307 ENDPROC(efi_call0)
24308
24309 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24310 call *%rdi
24311 addq $32, %rsp
24312 RESTORE_XMM
24313 + pax_force_retaddr 0, 1
24314 ret
24315 ENDPROC(efi_call1)
24316
24317 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24318 call *%rdi
24319 addq $32, %rsp
24320 RESTORE_XMM
24321 + pax_force_retaddr 0, 1
24322 ret
24323 ENDPROC(efi_call2)
24324
24325 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24326 call *%rdi
24327 addq $32, %rsp
24328 RESTORE_XMM
24329 + pax_force_retaddr 0, 1
24330 ret
24331 ENDPROC(efi_call3)
24332
24333 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24334 call *%rdi
24335 addq $32, %rsp
24336 RESTORE_XMM
24337 + pax_force_retaddr 0, 1
24338 ret
24339 ENDPROC(efi_call4)
24340
24341 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24342 call *%rdi
24343 addq $48, %rsp
24344 RESTORE_XMM
24345 + pax_force_retaddr 0, 1
24346 ret
24347 ENDPROC(efi_call5)
24348
24349 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24350 call *%rdi
24351 addq $48, %rsp
24352 RESTORE_XMM
24353 + pax_force_retaddr 0, 1
24354 ret
24355 ENDPROC(efi_call6)
24356 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24357 index fe73276..70fe25a 100644
24358 --- a/arch/x86/platform/mrst/mrst.c
24359 +++ b/arch/x86/platform/mrst/mrst.c
24360 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
24361 }
24362
24363 /* Reboot and power off are handled by the SCU on a MID device */
24364 -static void mrst_power_off(void)
24365 +static __noreturn void mrst_power_off(void)
24366 {
24367 intel_scu_ipc_simple_command(0xf1, 1);
24368 + BUG();
24369 }
24370
24371 -static void mrst_reboot(void)
24372 +static __noreturn void mrst_reboot(void)
24373 {
24374 intel_scu_ipc_simple_command(0xf1, 0);
24375 + BUG();
24376 }
24377
24378 /*
24379 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
24380 index 5b55219..b326540 100644
24381 --- a/arch/x86/platform/uv/tlb_uv.c
24382 +++ b/arch/x86/platform/uv/tlb_uv.c
24383 @@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
24384 struct bau_control *smaster = bcp->socket_master;
24385 struct reset_args reset_args;
24386
24387 + pax_track_stack();
24388 +
24389 reset_args.sender = sender;
24390 cpus_clear(*mask);
24391 /* find a single cpu for each uvhub in this distribution mask */
24392 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24393 index 87bb35e..eff2da8 100644
24394 --- a/arch/x86/power/cpu.c
24395 +++ b/arch/x86/power/cpu.c
24396 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
24397 static void fix_processor_context(void)
24398 {
24399 int cpu = smp_processor_id();
24400 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24401 + struct tss_struct *t = init_tss + cpu;
24402
24403 set_tss_desc(cpu, t); /*
24404 * This just modifies memory; should not be
24405 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
24406 */
24407
24408 #ifdef CONFIG_X86_64
24409 + pax_open_kernel();
24410 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24411 + pax_close_kernel();
24412
24413 syscall_init(); /* This sets MSR_*STAR and related */
24414 #endif
24415 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24416 index 5d17950..2253fc9 100644
24417 --- a/arch/x86/vdso/Makefile
24418 +++ b/arch/x86/vdso/Makefile
24419 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24420 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24421 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24422
24423 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24424 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24425 GCOV_PROFILE := n
24426
24427 #
24428 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24429 index 468d591..8e80a0a 100644
24430 --- a/arch/x86/vdso/vdso32-setup.c
24431 +++ b/arch/x86/vdso/vdso32-setup.c
24432 @@ -25,6 +25,7 @@
24433 #include <asm/tlbflush.h>
24434 #include <asm/vdso.h>
24435 #include <asm/proto.h>
24436 +#include <asm/mman.h>
24437
24438 enum {
24439 VDSO_DISABLED = 0,
24440 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24441 void enable_sep_cpu(void)
24442 {
24443 int cpu = get_cpu();
24444 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24445 + struct tss_struct *tss = init_tss + cpu;
24446
24447 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24448 put_cpu();
24449 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24450 gate_vma.vm_start = FIXADDR_USER_START;
24451 gate_vma.vm_end = FIXADDR_USER_END;
24452 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24453 - gate_vma.vm_page_prot = __P101;
24454 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24455 /*
24456 * Make sure the vDSO gets into every core dump.
24457 * Dumping its contents makes post-mortem fully interpretable later
24458 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24459 if (compat)
24460 addr = VDSO_HIGH_BASE;
24461 else {
24462 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24463 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24464 if (IS_ERR_VALUE(addr)) {
24465 ret = addr;
24466 goto up_fail;
24467 }
24468 }
24469
24470 - current->mm->context.vdso = (void *)addr;
24471 + current->mm->context.vdso = addr;
24472
24473 if (compat_uses_vma || !compat) {
24474 /*
24475 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24476 }
24477
24478 current_thread_info()->sysenter_return =
24479 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24480 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24481
24482 up_fail:
24483 if (ret)
24484 - current->mm->context.vdso = NULL;
24485 + current->mm->context.vdso = 0;
24486
24487 up_write(&mm->mmap_sem);
24488
24489 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24490
24491 const char *arch_vma_name(struct vm_area_struct *vma)
24492 {
24493 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24494 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24495 return "[vdso]";
24496 +
24497 +#ifdef CONFIG_PAX_SEGMEXEC
24498 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24499 + return "[vdso]";
24500 +#endif
24501 +
24502 return NULL;
24503 }
24504
24505 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24506 * Check to see if the corresponding task was created in compat vdso
24507 * mode.
24508 */
24509 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24510 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24511 return &gate_vma;
24512 return NULL;
24513 }
24514 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24515 index 316fbca..4638633 100644
24516 --- a/arch/x86/vdso/vma.c
24517 +++ b/arch/x86/vdso/vma.c
24518 @@ -16,8 +16,6 @@
24519 #include <asm/vdso.h>
24520 #include <asm/page.h>
24521
24522 -unsigned int __read_mostly vdso_enabled = 1;
24523 -
24524 extern char vdso_start[], vdso_end[];
24525 extern unsigned short vdso_sync_cpuid;
24526
24527 @@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24528 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24529 {
24530 struct mm_struct *mm = current->mm;
24531 - unsigned long addr;
24532 + unsigned long addr = 0;
24533 int ret;
24534
24535 - if (!vdso_enabled)
24536 - return 0;
24537 -
24538 down_write(&mm->mmap_sem);
24539 +
24540 +#ifdef CONFIG_PAX_RANDMMAP
24541 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24542 +#endif
24543 +
24544 addr = vdso_addr(mm->start_stack, vdso_size);
24545 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24546 if (IS_ERR_VALUE(addr)) {
24547 @@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24548 goto up_fail;
24549 }
24550
24551 - current->mm->context.vdso = (void *)addr;
24552 + mm->context.vdso = addr;
24553
24554 ret = install_special_mapping(mm, addr, vdso_size,
24555 VM_READ|VM_EXEC|
24556 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24557 VM_ALWAYSDUMP,
24558 vdso_pages);
24559 - if (ret) {
24560 - current->mm->context.vdso = NULL;
24561 - goto up_fail;
24562 - }
24563 +
24564 + if (ret)
24565 + mm->context.vdso = 0;
24566
24567 up_fail:
24568 up_write(&mm->mmap_sem);
24569 return ret;
24570 }
24571 -
24572 -static __init int vdso_setup(char *s)
24573 -{
24574 - vdso_enabled = simple_strtoul(s, NULL, 0);
24575 - return 0;
24576 -}
24577 -__setup("vdso=", vdso_setup);
24578 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24579 index 46c8069..6330d3c 100644
24580 --- a/arch/x86/xen/enlighten.c
24581 +++ b/arch/x86/xen/enlighten.c
24582 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24583
24584 struct shared_info xen_dummy_shared_info;
24585
24586 -void *xen_initial_gdt;
24587 -
24588 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24589 __read_mostly int xen_have_vector_callback;
24590 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24591 @@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24592 #endif
24593 };
24594
24595 -static void xen_reboot(int reason)
24596 +static __noreturn void xen_reboot(int reason)
24597 {
24598 struct sched_shutdown r = { .reason = reason };
24599
24600 @@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
24601 BUG();
24602 }
24603
24604 -static void xen_restart(char *msg)
24605 +static __noreturn void xen_restart(char *msg)
24606 {
24607 xen_reboot(SHUTDOWN_reboot);
24608 }
24609
24610 -static void xen_emergency_restart(void)
24611 +static __noreturn void xen_emergency_restart(void)
24612 {
24613 xen_reboot(SHUTDOWN_reboot);
24614 }
24615
24616 -static void xen_machine_halt(void)
24617 +static __noreturn void xen_machine_halt(void)
24618 {
24619 xen_reboot(SHUTDOWN_poweroff);
24620 }
24621 @@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(void)
24622 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24623
24624 /* Work out if we support NX */
24625 - x86_configure_nx();
24626 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24627 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24628 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24629 + unsigned l, h;
24630 +
24631 + __supported_pte_mask |= _PAGE_NX;
24632 + rdmsr(MSR_EFER, l, h);
24633 + l |= EFER_NX;
24634 + wrmsr(MSR_EFER, l, h);
24635 + }
24636 +#endif
24637
24638 xen_setup_features();
24639
24640 @@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(void)
24641
24642 machine_ops = xen_machine_ops;
24643
24644 - /*
24645 - * The only reliable way to retain the initial address of the
24646 - * percpu gdt_page is to remember it here, so we can go and
24647 - * mark it RW later, when the initial percpu area is freed.
24648 - */
24649 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24650 -
24651 xen_smp_init();
24652
24653 #ifdef CONFIG_ACPI_NUMA
24654 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24655 index 3dd53f9..5aa5df3 100644
24656 --- a/arch/x86/xen/mmu.c
24657 +++ b/arch/x86/xen/mmu.c
24658 @@ -1768,6 +1768,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24659 convert_pfn_mfn(init_level4_pgt);
24660 convert_pfn_mfn(level3_ident_pgt);
24661 convert_pfn_mfn(level3_kernel_pgt);
24662 + convert_pfn_mfn(level3_vmalloc_start_pgt);
24663 + convert_pfn_mfn(level3_vmalloc_end_pgt);
24664 + convert_pfn_mfn(level3_vmemmap_pgt);
24665
24666 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24667 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24668 @@ -1786,7 +1789,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24669 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24670 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24671 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24672 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
24673 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
24674 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24675 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24676 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24677 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24678 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24679
24680 @@ -2000,6 +2007,7 @@ static void __init xen_post_allocator_init(void)
24681 pv_mmu_ops.set_pud = xen_set_pud;
24682 #if PAGETABLE_LEVELS == 4
24683 pv_mmu_ops.set_pgd = xen_set_pgd;
24684 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24685 #endif
24686
24687 /* This will work as long as patching hasn't happened yet
24688 @@ -2081,6 +2089,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24689 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24690 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24691 .set_pgd = xen_set_pgd_hyper,
24692 + .set_pgd_batched = xen_set_pgd_hyper,
24693
24694 .alloc_pud = xen_alloc_pmd_init,
24695 .release_pud = xen_release_pmd_init,
24696 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24697 index 041d4fe..7666b7e 100644
24698 --- a/arch/x86/xen/smp.c
24699 +++ b/arch/x86/xen/smp.c
24700 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24701 {
24702 BUG_ON(smp_processor_id() != 0);
24703 native_smp_prepare_boot_cpu();
24704 -
24705 - /* We've switched to the "real" per-cpu gdt, so make sure the
24706 - old memory can be recycled */
24707 - make_lowmem_page_readwrite(xen_initial_gdt);
24708 -
24709 xen_filter_cpu_maps();
24710 xen_setup_vcpu_info_placement();
24711 }
24712 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24713 gdt = get_cpu_gdt_table(cpu);
24714
24715 ctxt->flags = VGCF_IN_KERNEL;
24716 - ctxt->user_regs.ds = __USER_DS;
24717 - ctxt->user_regs.es = __USER_DS;
24718 + ctxt->user_regs.ds = __KERNEL_DS;
24719 + ctxt->user_regs.es = __KERNEL_DS;
24720 ctxt->user_regs.ss = __KERNEL_DS;
24721 #ifdef CONFIG_X86_32
24722 ctxt->user_regs.fs = __KERNEL_PERCPU;
24723 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24724 + savesegment(gs, ctxt->user_regs.gs);
24725 #else
24726 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24727 #endif
24728 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24729 int rc;
24730
24731 per_cpu(current_task, cpu) = idle;
24732 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
24733 #ifdef CONFIG_X86_32
24734 irq_ctx_init(cpu);
24735 #else
24736 clear_tsk_thread_flag(idle, TIF_FORK);
24737 - per_cpu(kernel_stack, cpu) =
24738 - (unsigned long)task_stack_page(idle) -
24739 - KERNEL_STACK_OFFSET + THREAD_SIZE;
24740 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24741 #endif
24742 xen_setup_runstate_info(cpu);
24743 xen_setup_timer(cpu);
24744 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24745 index b040b0e..8cc4fe0 100644
24746 --- a/arch/x86/xen/xen-asm_32.S
24747 +++ b/arch/x86/xen/xen-asm_32.S
24748 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
24749 ESP_OFFSET=4 # bytes pushed onto stack
24750
24751 /*
24752 - * Store vcpu_info pointer for easy access. Do it this way to
24753 - * avoid having to reload %fs
24754 + * Store vcpu_info pointer for easy access.
24755 */
24756 #ifdef CONFIG_SMP
24757 - GET_THREAD_INFO(%eax)
24758 - movl TI_cpu(%eax), %eax
24759 - movl __per_cpu_offset(,%eax,4), %eax
24760 - mov xen_vcpu(%eax), %eax
24761 + push %fs
24762 + mov $(__KERNEL_PERCPU), %eax
24763 + mov %eax, %fs
24764 + mov PER_CPU_VAR(xen_vcpu), %eax
24765 + pop %fs
24766 #else
24767 movl xen_vcpu, %eax
24768 #endif
24769 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24770 index aaa7291..3f77960 100644
24771 --- a/arch/x86/xen/xen-head.S
24772 +++ b/arch/x86/xen/xen-head.S
24773 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24774 #ifdef CONFIG_X86_32
24775 mov %esi,xen_start_info
24776 mov $init_thread_union+THREAD_SIZE,%esp
24777 +#ifdef CONFIG_SMP
24778 + movl $cpu_gdt_table,%edi
24779 + movl $__per_cpu_load,%eax
24780 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24781 + rorl $16,%eax
24782 + movb %al,__KERNEL_PERCPU + 4(%edi)
24783 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24784 + movl $__per_cpu_end - 1,%eax
24785 + subl $__per_cpu_start,%eax
24786 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24787 +#endif
24788 #else
24789 mov %rsi,xen_start_info
24790 mov $init_thread_union+THREAD_SIZE,%rsp
24791 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24792 index b095739..8c17bcd 100644
24793 --- a/arch/x86/xen/xen-ops.h
24794 +++ b/arch/x86/xen/xen-ops.h
24795 @@ -10,8 +10,6 @@
24796 extern const char xen_hypervisor_callback[];
24797 extern const char xen_failsafe_callback[];
24798
24799 -extern void *xen_initial_gdt;
24800 -
24801 struct trap_info;
24802 void xen_copy_trap_info(struct trap_info *traps);
24803
24804 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24805 index 58916af..9cb880b 100644
24806 --- a/block/blk-iopoll.c
24807 +++ b/block/blk-iopoll.c
24808 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24809 }
24810 EXPORT_SYMBOL(blk_iopoll_complete);
24811
24812 -static void blk_iopoll_softirq(struct softirq_action *h)
24813 +static void blk_iopoll_softirq(void)
24814 {
24815 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24816 int rearm = 0, budget = blk_iopoll_budget;
24817 diff --git a/block/blk-map.c b/block/blk-map.c
24818 index 164cd00..6d96fc1 100644
24819 --- a/block/blk-map.c
24820 +++ b/block/blk-map.c
24821 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24822 if (!len || !kbuf)
24823 return -EINVAL;
24824
24825 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24826 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24827 if (do_copy)
24828 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24829 else
24830 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24831 index 1366a89..e17f54b 100644
24832 --- a/block/blk-softirq.c
24833 +++ b/block/blk-softirq.c
24834 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24835 * Softirq action handler - move entries to local list and loop over them
24836 * while passing them to the queue registered handler.
24837 */
24838 -static void blk_done_softirq(struct softirq_action *h)
24839 +static void blk_done_softirq(void)
24840 {
24841 struct list_head *cpu_list, local_list;
24842
24843 diff --git a/block/bsg.c b/block/bsg.c
24844 index 702f131..37808bf 100644
24845 --- a/block/bsg.c
24846 +++ b/block/bsg.c
24847 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24848 struct sg_io_v4 *hdr, struct bsg_device *bd,
24849 fmode_t has_write_perm)
24850 {
24851 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24852 + unsigned char *cmdptr;
24853 +
24854 if (hdr->request_len > BLK_MAX_CDB) {
24855 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24856 if (!rq->cmd)
24857 return -ENOMEM;
24858 - }
24859 + cmdptr = rq->cmd;
24860 + } else
24861 + cmdptr = tmpcmd;
24862
24863 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24864 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24865 hdr->request_len))
24866 return -EFAULT;
24867
24868 + if (cmdptr != rq->cmd)
24869 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24870 +
24871 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24872 if (blk_verify_command(rq->cmd, has_write_perm))
24873 return -EPERM;
24874 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24875 index 7b72502..646105c 100644
24876 --- a/block/compat_ioctl.c
24877 +++ b/block/compat_ioctl.c
24878 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24879 err |= __get_user(f->spec1, &uf->spec1);
24880 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24881 err |= __get_user(name, &uf->name);
24882 - f->name = compat_ptr(name);
24883 + f->name = (void __force_kernel *)compat_ptr(name);
24884 if (err) {
24885 err = -EFAULT;
24886 goto out;
24887 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24888 index 4f4230b..0feae9a 100644
24889 --- a/block/scsi_ioctl.c
24890 +++ b/block/scsi_ioctl.c
24891 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
24892 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24893 struct sg_io_hdr *hdr, fmode_t mode)
24894 {
24895 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24896 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24897 + unsigned char *cmdptr;
24898 +
24899 + if (rq->cmd != rq->__cmd)
24900 + cmdptr = rq->cmd;
24901 + else
24902 + cmdptr = tmpcmd;
24903 +
24904 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24905 return -EFAULT;
24906 +
24907 + if (cmdptr != rq->cmd)
24908 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24909 +
24910 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24911 return -EPERM;
24912
24913 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24914 int err;
24915 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24916 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24917 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24918 + unsigned char *cmdptr;
24919
24920 if (!sic)
24921 return -EINVAL;
24922 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24923 */
24924 err = -EFAULT;
24925 rq->cmd_len = cmdlen;
24926 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
24927 +
24928 + if (rq->cmd != rq->__cmd)
24929 + cmdptr = rq->cmd;
24930 + else
24931 + cmdptr = tmpcmd;
24932 +
24933 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24934 goto error;
24935
24936 + if (rq->cmd != cmdptr)
24937 + memcpy(rq->cmd, cmdptr, cmdlen);
24938 +
24939 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24940 goto error;
24941
24942 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24943 index 671d4d6..5f24030 100644
24944 --- a/crypto/cryptd.c
24945 +++ b/crypto/cryptd.c
24946 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24947
24948 struct cryptd_blkcipher_request_ctx {
24949 crypto_completion_t complete;
24950 -};
24951 +} __no_const;
24952
24953 struct cryptd_hash_ctx {
24954 struct crypto_shash *child;
24955 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24956
24957 struct cryptd_aead_request_ctx {
24958 crypto_completion_t complete;
24959 -};
24960 +} __no_const;
24961
24962 static void cryptd_queue_worker(struct work_struct *work);
24963
24964 diff --git a/crypto/serpent.c b/crypto/serpent.c
24965 index b651a55..a9ddd79b 100644
24966 --- a/crypto/serpent.c
24967 +++ b/crypto/serpent.c
24968 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
24969 u32 r0,r1,r2,r3,r4;
24970 int i;
24971
24972 + pax_track_stack();
24973 +
24974 /* Copy key, add padding */
24975
24976 for (i = 0; i < keylen; ++i)
24977 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
24978 index 5d41894..22021e4 100644
24979 --- a/drivers/acpi/apei/cper.c
24980 +++ b/drivers/acpi/apei/cper.c
24981 @@ -38,12 +38,12 @@
24982 */
24983 u64 cper_next_record_id(void)
24984 {
24985 - static atomic64_t seq;
24986 + static atomic64_unchecked_t seq;
24987
24988 - if (!atomic64_read(&seq))
24989 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
24990 + if (!atomic64_read_unchecked(&seq))
24991 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
24992
24993 - return atomic64_inc_return(&seq);
24994 + return atomic64_inc_return_unchecked(&seq);
24995 }
24996 EXPORT_SYMBOL_GPL(cper_next_record_id);
24997
24998 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
24999 index 22f918b..9fafb84 100644
25000 --- a/drivers/acpi/ec_sys.c
25001 +++ b/drivers/acpi/ec_sys.c
25002 @@ -11,6 +11,7 @@
25003 #include <linux/kernel.h>
25004 #include <linux/acpi.h>
25005 #include <linux/debugfs.h>
25006 +#include <asm/uaccess.h>
25007 #include "internal.h"
25008
25009 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
25010 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25011 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
25012 */
25013 unsigned int size = EC_SPACE_SIZE;
25014 - u8 *data = (u8 *) buf;
25015 + u8 data;
25016 loff_t init_off = *off;
25017 int err = 0;
25018
25019 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
25020 size = count;
25021
25022 while (size) {
25023 - err = ec_read(*off, &data[*off - init_off]);
25024 + err = ec_read(*off, &data);
25025 if (err)
25026 return err;
25027 + if (put_user(data, &buf[*off - init_off]))
25028 + return -EFAULT;
25029 *off += 1;
25030 size--;
25031 }
25032 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25033
25034 unsigned int size = count;
25035 loff_t init_off = *off;
25036 - u8 *data = (u8 *) buf;
25037 int err = 0;
25038
25039 if (*off >= EC_SPACE_SIZE)
25040 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25041 }
25042
25043 while (size) {
25044 - u8 byte_write = data[*off - init_off];
25045 + u8 byte_write;
25046 + if (get_user(byte_write, &buf[*off - init_off]))
25047 + return -EFAULT;
25048 err = ec_write(*off, byte_write);
25049 if (err)
25050 return err;
25051 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25052 index f5f9869..da87aeb 100644
25053 --- a/drivers/acpi/proc.c
25054 +++ b/drivers/acpi/proc.c
25055 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct file *file,
25056 size_t count, loff_t * ppos)
25057 {
25058 struct list_head *node, *next;
25059 - char strbuf[5];
25060 - char str[5] = "";
25061 - unsigned int len = count;
25062 + char strbuf[5] = {0};
25063
25064 - if (len > 4)
25065 - len = 4;
25066 - if (len < 0)
25067 + if (count > 4)
25068 + count = 4;
25069 + if (copy_from_user(strbuf, buffer, count))
25070 return -EFAULT;
25071 -
25072 - if (copy_from_user(strbuf, buffer, len))
25073 - return -EFAULT;
25074 - strbuf[len] = '\0';
25075 - sscanf(strbuf, "%s", str);
25076 + strbuf[count] = '\0';
25077
25078 mutex_lock(&acpi_device_lock);
25079 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25080 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct file *file,
25081 if (!dev->wakeup.flags.valid)
25082 continue;
25083
25084 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25085 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25086 if (device_can_wakeup(&dev->dev)) {
25087 bool enable = !device_may_wakeup(&dev->dev);
25088 device_set_wakeup_enable(&dev->dev, enable);
25089 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25090 index a4e0f1b..9793b28 100644
25091 --- a/drivers/acpi/processor_driver.c
25092 +++ b/drivers/acpi/processor_driver.c
25093 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25094 return 0;
25095 #endif
25096
25097 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25098 + BUG_ON(pr->id >= nr_cpu_ids);
25099
25100 /*
25101 * Buggy BIOS check
25102 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25103 index 4a3a5ae..cbee192 100644
25104 --- a/drivers/ata/libata-core.c
25105 +++ b/drivers/ata/libata-core.c
25106 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25107 struct ata_port *ap;
25108 unsigned int tag;
25109
25110 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25111 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25112 ap = qc->ap;
25113
25114 qc->flags = 0;
25115 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25116 struct ata_port *ap;
25117 struct ata_link *link;
25118
25119 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25120 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25121 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25122 ap = qc->ap;
25123 link = qc->dev->link;
25124 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25125 return;
25126
25127 spin_lock(&lock);
25128 + pax_open_kernel();
25129
25130 for (cur = ops->inherits; cur; cur = cur->inherits) {
25131 void **inherit = (void **)cur;
25132 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25133 if (IS_ERR(*pp))
25134 *pp = NULL;
25135
25136 - ops->inherits = NULL;
25137 + *(struct ata_port_operations **)&ops->inherits = NULL;
25138
25139 + pax_close_kernel();
25140 spin_unlock(&lock);
25141 }
25142
25143 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
25144 index ed16fbe..fc92cb8 100644
25145 --- a/drivers/ata/libata-eh.c
25146 +++ b/drivers/ata/libata-eh.c
25147 @@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
25148 {
25149 struct ata_link *link;
25150
25151 + pax_track_stack();
25152 +
25153 ata_for_each_link(link, ap, HOST_FIRST)
25154 ata_eh_link_report(link);
25155 }
25156 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25157 index 719bb73..79ce858 100644
25158 --- a/drivers/ata/pata_arasan_cf.c
25159 +++ b/drivers/ata/pata_arasan_cf.c
25160 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25161 /* Handle platform specific quirks */
25162 if (pdata->quirk) {
25163 if (pdata->quirk & CF_BROKEN_PIO) {
25164 - ap->ops->set_piomode = NULL;
25165 + pax_open_kernel();
25166 + *(void **)&ap->ops->set_piomode = NULL;
25167 + pax_close_kernel();
25168 ap->pio_mask = 0;
25169 }
25170 if (pdata->quirk & CF_BROKEN_MWDMA)
25171 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25172 index f9b983a..887b9d8 100644
25173 --- a/drivers/atm/adummy.c
25174 +++ b/drivers/atm/adummy.c
25175 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25176 vcc->pop(vcc, skb);
25177 else
25178 dev_kfree_skb_any(skb);
25179 - atomic_inc(&vcc->stats->tx);
25180 + atomic_inc_unchecked(&vcc->stats->tx);
25181
25182 return 0;
25183 }
25184 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25185 index f8f41e0..1f987dd 100644
25186 --- a/drivers/atm/ambassador.c
25187 +++ b/drivers/atm/ambassador.c
25188 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25189 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25190
25191 // VC layer stats
25192 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25193 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25194
25195 // free the descriptor
25196 kfree (tx_descr);
25197 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25198 dump_skb ("<<<", vc, skb);
25199
25200 // VC layer stats
25201 - atomic_inc(&atm_vcc->stats->rx);
25202 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25203 __net_timestamp(skb);
25204 // end of our responsibility
25205 atm_vcc->push (atm_vcc, skb);
25206 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25207 } else {
25208 PRINTK (KERN_INFO, "dropped over-size frame");
25209 // should we count this?
25210 - atomic_inc(&atm_vcc->stats->rx_drop);
25211 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25212 }
25213
25214 } else {
25215 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25216 }
25217
25218 if (check_area (skb->data, skb->len)) {
25219 - atomic_inc(&atm_vcc->stats->tx_err);
25220 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25221 return -ENOMEM; // ?
25222 }
25223
25224 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25225 index b22d71c..d6e1049 100644
25226 --- a/drivers/atm/atmtcp.c
25227 +++ b/drivers/atm/atmtcp.c
25228 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25229 if (vcc->pop) vcc->pop(vcc,skb);
25230 else dev_kfree_skb(skb);
25231 if (dev_data) return 0;
25232 - atomic_inc(&vcc->stats->tx_err);
25233 + atomic_inc_unchecked(&vcc->stats->tx_err);
25234 return -ENOLINK;
25235 }
25236 size = skb->len+sizeof(struct atmtcp_hdr);
25237 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25238 if (!new_skb) {
25239 if (vcc->pop) vcc->pop(vcc,skb);
25240 else dev_kfree_skb(skb);
25241 - atomic_inc(&vcc->stats->tx_err);
25242 + atomic_inc_unchecked(&vcc->stats->tx_err);
25243 return -ENOBUFS;
25244 }
25245 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25246 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25247 if (vcc->pop) vcc->pop(vcc,skb);
25248 else dev_kfree_skb(skb);
25249 out_vcc->push(out_vcc,new_skb);
25250 - atomic_inc(&vcc->stats->tx);
25251 - atomic_inc(&out_vcc->stats->rx);
25252 + atomic_inc_unchecked(&vcc->stats->tx);
25253 + atomic_inc_unchecked(&out_vcc->stats->rx);
25254 return 0;
25255 }
25256
25257 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25258 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25259 read_unlock(&vcc_sklist_lock);
25260 if (!out_vcc) {
25261 - atomic_inc(&vcc->stats->tx_err);
25262 + atomic_inc_unchecked(&vcc->stats->tx_err);
25263 goto done;
25264 }
25265 skb_pull(skb,sizeof(struct atmtcp_hdr));
25266 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25267 __net_timestamp(new_skb);
25268 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25269 out_vcc->push(out_vcc,new_skb);
25270 - atomic_inc(&vcc->stats->tx);
25271 - atomic_inc(&out_vcc->stats->rx);
25272 + atomic_inc_unchecked(&vcc->stats->tx);
25273 + atomic_inc_unchecked(&out_vcc->stats->rx);
25274 done:
25275 if (vcc->pop) vcc->pop(vcc,skb);
25276 else dev_kfree_skb(skb);
25277 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25278 index 9307141..d8521bf 100644
25279 --- a/drivers/atm/eni.c
25280 +++ b/drivers/atm/eni.c
25281 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25282 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25283 vcc->dev->number);
25284 length = 0;
25285 - atomic_inc(&vcc->stats->rx_err);
25286 + atomic_inc_unchecked(&vcc->stats->rx_err);
25287 }
25288 else {
25289 length = ATM_CELL_SIZE-1; /* no HEC */
25290 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25291 size);
25292 }
25293 eff = length = 0;
25294 - atomic_inc(&vcc->stats->rx_err);
25295 + atomic_inc_unchecked(&vcc->stats->rx_err);
25296 }
25297 else {
25298 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25299 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25300 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25301 vcc->dev->number,vcc->vci,length,size << 2,descr);
25302 length = eff = 0;
25303 - atomic_inc(&vcc->stats->rx_err);
25304 + atomic_inc_unchecked(&vcc->stats->rx_err);
25305 }
25306 }
25307 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25308 @@ -771,7 +771,7 @@ rx_dequeued++;
25309 vcc->push(vcc,skb);
25310 pushed++;
25311 }
25312 - atomic_inc(&vcc->stats->rx);
25313 + atomic_inc_unchecked(&vcc->stats->rx);
25314 }
25315 wake_up(&eni_dev->rx_wait);
25316 }
25317 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *dev)
25318 PCI_DMA_TODEVICE);
25319 if (vcc->pop) vcc->pop(vcc,skb);
25320 else dev_kfree_skb_irq(skb);
25321 - atomic_inc(&vcc->stats->tx);
25322 + atomic_inc_unchecked(&vcc->stats->tx);
25323 wake_up(&eni_dev->tx_wait);
25324 dma_complete++;
25325 }
25326 @@ -1568,7 +1568,7 @@ tx_complete++;
25327 /*--------------------------------- entries ---------------------------------*/
25328
25329
25330 -static const char *media_name[] __devinitdata = {
25331 +static const char *media_name[] __devinitconst = {
25332 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25333 "UTP", "05?", "06?", "07?", /* 4- 7 */
25334 "TAXI","09?", "10?", "11?", /* 8-11 */
25335 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25336 index 5072f8a..fa52520 100644
25337 --- a/drivers/atm/firestream.c
25338 +++ b/drivers/atm/firestream.c
25339 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25340 }
25341 }
25342
25343 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25344 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25345
25346 fs_dprintk (FS_DEBUG_TXMEM, "i");
25347 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25348 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25349 #endif
25350 skb_put (skb, qe->p1 & 0xffff);
25351 ATM_SKB(skb)->vcc = atm_vcc;
25352 - atomic_inc(&atm_vcc->stats->rx);
25353 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25354 __net_timestamp(skb);
25355 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25356 atm_vcc->push (atm_vcc, skb);
25357 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25358 kfree (pe);
25359 }
25360 if (atm_vcc)
25361 - atomic_inc(&atm_vcc->stats->rx_drop);
25362 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25363 break;
25364 case 0x1f: /* Reassembly abort: no buffers. */
25365 /* Silently increment error counter. */
25366 if (atm_vcc)
25367 - atomic_inc(&atm_vcc->stats->rx_drop);
25368 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25369 break;
25370 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25371 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25372 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25373 index 361f5ae..7fc552d 100644
25374 --- a/drivers/atm/fore200e.c
25375 +++ b/drivers/atm/fore200e.c
25376 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25377 #endif
25378 /* check error condition */
25379 if (*entry->status & STATUS_ERROR)
25380 - atomic_inc(&vcc->stats->tx_err);
25381 + atomic_inc_unchecked(&vcc->stats->tx_err);
25382 else
25383 - atomic_inc(&vcc->stats->tx);
25384 + atomic_inc_unchecked(&vcc->stats->tx);
25385 }
25386 }
25387
25388 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25389 if (skb == NULL) {
25390 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25391
25392 - atomic_inc(&vcc->stats->rx_drop);
25393 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25394 return -ENOMEM;
25395 }
25396
25397 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25398
25399 dev_kfree_skb_any(skb);
25400
25401 - atomic_inc(&vcc->stats->rx_drop);
25402 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25403 return -ENOMEM;
25404 }
25405
25406 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25407
25408 vcc->push(vcc, skb);
25409 - atomic_inc(&vcc->stats->rx);
25410 + atomic_inc_unchecked(&vcc->stats->rx);
25411
25412 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25413
25414 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25415 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25416 fore200e->atm_dev->number,
25417 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25418 - atomic_inc(&vcc->stats->rx_err);
25419 + atomic_inc_unchecked(&vcc->stats->rx_err);
25420 }
25421 }
25422
25423 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25424 goto retry_here;
25425 }
25426
25427 - atomic_inc(&vcc->stats->tx_err);
25428 + atomic_inc_unchecked(&vcc->stats->tx_err);
25429
25430 fore200e->tx_sat++;
25431 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25432 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25433 index 9a51df4..f3bb5f8 100644
25434 --- a/drivers/atm/he.c
25435 +++ b/drivers/atm/he.c
25436 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25437
25438 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25439 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25440 - atomic_inc(&vcc->stats->rx_drop);
25441 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25442 goto return_host_buffers;
25443 }
25444
25445 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25446 RBRQ_LEN_ERR(he_dev->rbrq_head)
25447 ? "LEN_ERR" : "",
25448 vcc->vpi, vcc->vci);
25449 - atomic_inc(&vcc->stats->rx_err);
25450 + atomic_inc_unchecked(&vcc->stats->rx_err);
25451 goto return_host_buffers;
25452 }
25453
25454 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25455 vcc->push(vcc, skb);
25456 spin_lock(&he_dev->global_lock);
25457
25458 - atomic_inc(&vcc->stats->rx);
25459 + atomic_inc_unchecked(&vcc->stats->rx);
25460
25461 return_host_buffers:
25462 ++pdus_assembled;
25463 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25464 tpd->vcc->pop(tpd->vcc, tpd->skb);
25465 else
25466 dev_kfree_skb_any(tpd->skb);
25467 - atomic_inc(&tpd->vcc->stats->tx_err);
25468 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25469 }
25470 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25471 return;
25472 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25473 vcc->pop(vcc, skb);
25474 else
25475 dev_kfree_skb_any(skb);
25476 - atomic_inc(&vcc->stats->tx_err);
25477 + atomic_inc_unchecked(&vcc->stats->tx_err);
25478 return -EINVAL;
25479 }
25480
25481 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25482 vcc->pop(vcc, skb);
25483 else
25484 dev_kfree_skb_any(skb);
25485 - atomic_inc(&vcc->stats->tx_err);
25486 + atomic_inc_unchecked(&vcc->stats->tx_err);
25487 return -EINVAL;
25488 }
25489 #endif
25490 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25491 vcc->pop(vcc, skb);
25492 else
25493 dev_kfree_skb_any(skb);
25494 - atomic_inc(&vcc->stats->tx_err);
25495 + atomic_inc_unchecked(&vcc->stats->tx_err);
25496 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25497 return -ENOMEM;
25498 }
25499 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25500 vcc->pop(vcc, skb);
25501 else
25502 dev_kfree_skb_any(skb);
25503 - atomic_inc(&vcc->stats->tx_err);
25504 + atomic_inc_unchecked(&vcc->stats->tx_err);
25505 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25506 return -ENOMEM;
25507 }
25508 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25509 __enqueue_tpd(he_dev, tpd, cid);
25510 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25511
25512 - atomic_inc(&vcc->stats->tx);
25513 + atomic_inc_unchecked(&vcc->stats->tx);
25514
25515 return 0;
25516 }
25517 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25518 index b812103..e391a49 100644
25519 --- a/drivers/atm/horizon.c
25520 +++ b/drivers/atm/horizon.c
25521 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25522 {
25523 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25524 // VC layer stats
25525 - atomic_inc(&vcc->stats->rx);
25526 + atomic_inc_unchecked(&vcc->stats->rx);
25527 __net_timestamp(skb);
25528 // end of our responsibility
25529 vcc->push (vcc, skb);
25530 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25531 dev->tx_iovec = NULL;
25532
25533 // VC layer stats
25534 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25535 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25536
25537 // free the skb
25538 hrz_kfree_skb (skb);
25539 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25540 index db06f34..dcebb61 100644
25541 --- a/drivers/atm/idt77252.c
25542 +++ b/drivers/atm/idt77252.c
25543 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25544 else
25545 dev_kfree_skb(skb);
25546
25547 - atomic_inc(&vcc->stats->tx);
25548 + atomic_inc_unchecked(&vcc->stats->tx);
25549 }
25550
25551 atomic_dec(&scq->used);
25552 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25553 if ((sb = dev_alloc_skb(64)) == NULL) {
25554 printk("%s: Can't allocate buffers for aal0.\n",
25555 card->name);
25556 - atomic_add(i, &vcc->stats->rx_drop);
25557 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25558 break;
25559 }
25560 if (!atm_charge(vcc, sb->truesize)) {
25561 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25562 card->name);
25563 - atomic_add(i - 1, &vcc->stats->rx_drop);
25564 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25565 dev_kfree_skb(sb);
25566 break;
25567 }
25568 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25569 ATM_SKB(sb)->vcc = vcc;
25570 __net_timestamp(sb);
25571 vcc->push(vcc, sb);
25572 - atomic_inc(&vcc->stats->rx);
25573 + atomic_inc_unchecked(&vcc->stats->rx);
25574
25575 cell += ATM_CELL_PAYLOAD;
25576 }
25577 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25578 "(CDC: %08x)\n",
25579 card->name, len, rpp->len, readl(SAR_REG_CDC));
25580 recycle_rx_pool_skb(card, rpp);
25581 - atomic_inc(&vcc->stats->rx_err);
25582 + atomic_inc_unchecked(&vcc->stats->rx_err);
25583 return;
25584 }
25585 if (stat & SAR_RSQE_CRC) {
25586 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25587 recycle_rx_pool_skb(card, rpp);
25588 - atomic_inc(&vcc->stats->rx_err);
25589 + atomic_inc_unchecked(&vcc->stats->rx_err);
25590 return;
25591 }
25592 if (skb_queue_len(&rpp->queue) > 1) {
25593 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25594 RXPRINTK("%s: Can't alloc RX skb.\n",
25595 card->name);
25596 recycle_rx_pool_skb(card, rpp);
25597 - atomic_inc(&vcc->stats->rx_err);
25598 + atomic_inc_unchecked(&vcc->stats->rx_err);
25599 return;
25600 }
25601 if (!atm_charge(vcc, skb->truesize)) {
25602 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25603 __net_timestamp(skb);
25604
25605 vcc->push(vcc, skb);
25606 - atomic_inc(&vcc->stats->rx);
25607 + atomic_inc_unchecked(&vcc->stats->rx);
25608
25609 return;
25610 }
25611 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25612 __net_timestamp(skb);
25613
25614 vcc->push(vcc, skb);
25615 - atomic_inc(&vcc->stats->rx);
25616 + atomic_inc_unchecked(&vcc->stats->rx);
25617
25618 if (skb->truesize > SAR_FB_SIZE_3)
25619 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25620 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25621 if (vcc->qos.aal != ATM_AAL0) {
25622 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25623 card->name, vpi, vci);
25624 - atomic_inc(&vcc->stats->rx_drop);
25625 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25626 goto drop;
25627 }
25628
25629 if ((sb = dev_alloc_skb(64)) == NULL) {
25630 printk("%s: Can't allocate buffers for AAL0.\n",
25631 card->name);
25632 - atomic_inc(&vcc->stats->rx_err);
25633 + atomic_inc_unchecked(&vcc->stats->rx_err);
25634 goto drop;
25635 }
25636
25637 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25638 ATM_SKB(sb)->vcc = vcc;
25639 __net_timestamp(sb);
25640 vcc->push(vcc, sb);
25641 - atomic_inc(&vcc->stats->rx);
25642 + atomic_inc_unchecked(&vcc->stats->rx);
25643
25644 drop:
25645 skb_pull(queue, 64);
25646 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25647
25648 if (vc == NULL) {
25649 printk("%s: NULL connection in send().\n", card->name);
25650 - atomic_inc(&vcc->stats->tx_err);
25651 + atomic_inc_unchecked(&vcc->stats->tx_err);
25652 dev_kfree_skb(skb);
25653 return -EINVAL;
25654 }
25655 if (!test_bit(VCF_TX, &vc->flags)) {
25656 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25657 - atomic_inc(&vcc->stats->tx_err);
25658 + atomic_inc_unchecked(&vcc->stats->tx_err);
25659 dev_kfree_skb(skb);
25660 return -EINVAL;
25661 }
25662 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25663 break;
25664 default:
25665 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25666 - atomic_inc(&vcc->stats->tx_err);
25667 + atomic_inc_unchecked(&vcc->stats->tx_err);
25668 dev_kfree_skb(skb);
25669 return -EINVAL;
25670 }
25671
25672 if (skb_shinfo(skb)->nr_frags != 0) {
25673 printk("%s: No scatter-gather yet.\n", card->name);
25674 - atomic_inc(&vcc->stats->tx_err);
25675 + atomic_inc_unchecked(&vcc->stats->tx_err);
25676 dev_kfree_skb(skb);
25677 return -EINVAL;
25678 }
25679 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25680
25681 err = queue_skb(card, vc, skb, oam);
25682 if (err) {
25683 - atomic_inc(&vcc->stats->tx_err);
25684 + atomic_inc_unchecked(&vcc->stats->tx_err);
25685 dev_kfree_skb(skb);
25686 return err;
25687 }
25688 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25689 skb = dev_alloc_skb(64);
25690 if (!skb) {
25691 printk("%s: Out of memory in send_oam().\n", card->name);
25692 - atomic_inc(&vcc->stats->tx_err);
25693 + atomic_inc_unchecked(&vcc->stats->tx_err);
25694 return -ENOMEM;
25695 }
25696 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25697 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25698 index cb90f7a..bd33566 100644
25699 --- a/drivers/atm/iphase.c
25700 +++ b/drivers/atm/iphase.c
25701 @@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
25702 status = (u_short) (buf_desc_ptr->desc_mode);
25703 if (status & (RX_CER | RX_PTE | RX_OFL))
25704 {
25705 - atomic_inc(&vcc->stats->rx_err);
25706 + atomic_inc_unchecked(&vcc->stats->rx_err);
25707 IF_ERR(printk("IA: bad packet, dropping it");)
25708 if (status & RX_CER) {
25709 IF_ERR(printk(" cause: packet CRC error\n");)
25710 @@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
25711 len = dma_addr - buf_addr;
25712 if (len > iadev->rx_buf_sz) {
25713 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25714 - atomic_inc(&vcc->stats->rx_err);
25715 + atomic_inc_unchecked(&vcc->stats->rx_err);
25716 goto out_free_desc;
25717 }
25718
25719 @@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25720 ia_vcc = INPH_IA_VCC(vcc);
25721 if (ia_vcc == NULL)
25722 {
25723 - atomic_inc(&vcc->stats->rx_err);
25724 + atomic_inc_unchecked(&vcc->stats->rx_err);
25725 dev_kfree_skb_any(skb);
25726 atm_return(vcc, atm_guess_pdu2truesize(len));
25727 goto INCR_DLE;
25728 @@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25729 if ((length > iadev->rx_buf_sz) || (length >
25730 (skb->len - sizeof(struct cpcs_trailer))))
25731 {
25732 - atomic_inc(&vcc->stats->rx_err);
25733 + atomic_inc_unchecked(&vcc->stats->rx_err);
25734 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25735 length, skb->len);)
25736 dev_kfree_skb_any(skb);
25737 @@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25738
25739 IF_RX(printk("rx_dle_intr: skb push");)
25740 vcc->push(vcc,skb);
25741 - atomic_inc(&vcc->stats->rx);
25742 + atomic_inc_unchecked(&vcc->stats->rx);
25743 iadev->rx_pkt_cnt++;
25744 }
25745 INCR_DLE:
25746 @@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25747 {
25748 struct k_sonet_stats *stats;
25749 stats = &PRIV(_ia_dev[board])->sonet_stats;
25750 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25751 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25752 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25753 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25754 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25755 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25756 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25757 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25758 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25759 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25760 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25761 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25762 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25763 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25764 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25765 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25766 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25767 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25768 }
25769 ia_cmds.status = 0;
25770 break;
25771 @@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25772 if ((desc == 0) || (desc > iadev->num_tx_desc))
25773 {
25774 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25775 - atomic_inc(&vcc->stats->tx);
25776 + atomic_inc_unchecked(&vcc->stats->tx);
25777 if (vcc->pop)
25778 vcc->pop(vcc, skb);
25779 else
25780 @@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25781 ATM_DESC(skb) = vcc->vci;
25782 skb_queue_tail(&iadev->tx_dma_q, skb);
25783
25784 - atomic_inc(&vcc->stats->tx);
25785 + atomic_inc_unchecked(&vcc->stats->tx);
25786 iadev->tx_pkt_cnt++;
25787 /* Increment transaction counter */
25788 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25789
25790 #if 0
25791 /* add flow control logic */
25792 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25793 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25794 if (iavcc->vc_desc_cnt > 10) {
25795 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25796 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25797 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25798 index e828c54..ae83976 100644
25799 --- a/drivers/atm/lanai.c
25800 +++ b/drivers/atm/lanai.c
25801 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25802 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25803 lanai_endtx(lanai, lvcc);
25804 lanai_free_skb(lvcc->tx.atmvcc, skb);
25805 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25806 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25807 }
25808
25809 /* Try to fill the buffer - don't call unless there is backlog */
25810 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25811 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25812 __net_timestamp(skb);
25813 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25814 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25815 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25816 out:
25817 lvcc->rx.buf.ptr = end;
25818 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25819 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25820 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25821 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25822 lanai->stats.service_rxnotaal5++;
25823 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25824 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25825 return 0;
25826 }
25827 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25828 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25829 int bytes;
25830 read_unlock(&vcc_sklist_lock);
25831 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25832 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25833 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25834 lvcc->stats.x.aal5.service_trash++;
25835 bytes = (SERVICE_GET_END(s) * 16) -
25836 (((unsigned long) lvcc->rx.buf.ptr) -
25837 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25838 }
25839 if (s & SERVICE_STREAM) {
25840 read_unlock(&vcc_sklist_lock);
25841 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25842 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25843 lvcc->stats.x.aal5.service_stream++;
25844 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25845 "PDU on VCI %d!\n", lanai->number, vci);
25846 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25847 return 0;
25848 }
25849 DPRINTK("got rx crc error on vci %d\n", vci);
25850 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25851 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25852 lvcc->stats.x.aal5.service_rxcrc++;
25853 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25854 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25855 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25856 index 1c70c45..300718d 100644
25857 --- a/drivers/atm/nicstar.c
25858 +++ b/drivers/atm/nicstar.c
25859 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25860 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25861 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25862 card->index);
25863 - atomic_inc(&vcc->stats->tx_err);
25864 + atomic_inc_unchecked(&vcc->stats->tx_err);
25865 dev_kfree_skb_any(skb);
25866 return -EINVAL;
25867 }
25868 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25869 if (!vc->tx) {
25870 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25871 card->index);
25872 - atomic_inc(&vcc->stats->tx_err);
25873 + atomic_inc_unchecked(&vcc->stats->tx_err);
25874 dev_kfree_skb_any(skb);
25875 return -EINVAL;
25876 }
25877 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25878 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25879 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25880 card->index);
25881 - atomic_inc(&vcc->stats->tx_err);
25882 + atomic_inc_unchecked(&vcc->stats->tx_err);
25883 dev_kfree_skb_any(skb);
25884 return -EINVAL;
25885 }
25886
25887 if (skb_shinfo(skb)->nr_frags != 0) {
25888 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25889 - atomic_inc(&vcc->stats->tx_err);
25890 + atomic_inc_unchecked(&vcc->stats->tx_err);
25891 dev_kfree_skb_any(skb);
25892 return -EINVAL;
25893 }
25894 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25895 }
25896
25897 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25898 - atomic_inc(&vcc->stats->tx_err);
25899 + atomic_inc_unchecked(&vcc->stats->tx_err);
25900 dev_kfree_skb_any(skb);
25901 return -EIO;
25902 }
25903 - atomic_inc(&vcc->stats->tx);
25904 + atomic_inc_unchecked(&vcc->stats->tx);
25905
25906 return 0;
25907 }
25908 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25909 printk
25910 ("nicstar%d: Can't allocate buffers for aal0.\n",
25911 card->index);
25912 - atomic_add(i, &vcc->stats->rx_drop);
25913 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25914 break;
25915 }
25916 if (!atm_charge(vcc, sb->truesize)) {
25917 RXPRINTK
25918 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25919 card->index);
25920 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25921 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25922 dev_kfree_skb_any(sb);
25923 break;
25924 }
25925 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25926 ATM_SKB(sb)->vcc = vcc;
25927 __net_timestamp(sb);
25928 vcc->push(vcc, sb);
25929 - atomic_inc(&vcc->stats->rx);
25930 + atomic_inc_unchecked(&vcc->stats->rx);
25931 cell += ATM_CELL_PAYLOAD;
25932 }
25933
25934 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25935 if (iovb == NULL) {
25936 printk("nicstar%d: Out of iovec buffers.\n",
25937 card->index);
25938 - atomic_inc(&vcc->stats->rx_drop);
25939 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25940 recycle_rx_buf(card, skb);
25941 return;
25942 }
25943 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25944 small or large buffer itself. */
25945 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25946 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25947 - atomic_inc(&vcc->stats->rx_err);
25948 + atomic_inc_unchecked(&vcc->stats->rx_err);
25949 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25950 NS_MAX_IOVECS);
25951 NS_PRV_IOVCNT(iovb) = 0;
25952 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25953 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25954 card->index);
25955 which_list(card, skb);
25956 - atomic_inc(&vcc->stats->rx_err);
25957 + atomic_inc_unchecked(&vcc->stats->rx_err);
25958 recycle_rx_buf(card, skb);
25959 vc->rx_iov = NULL;
25960 recycle_iov_buf(card, iovb);
25961 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25962 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25963 card->index);
25964 which_list(card, skb);
25965 - atomic_inc(&vcc->stats->rx_err);
25966 + atomic_inc_unchecked(&vcc->stats->rx_err);
25967 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25968 NS_PRV_IOVCNT(iovb));
25969 vc->rx_iov = NULL;
25970 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25971 printk(" - PDU size mismatch.\n");
25972 else
25973 printk(".\n");
25974 - atomic_inc(&vcc->stats->rx_err);
25975 + atomic_inc_unchecked(&vcc->stats->rx_err);
25976 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25977 NS_PRV_IOVCNT(iovb));
25978 vc->rx_iov = NULL;
25979 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25980 /* skb points to a small buffer */
25981 if (!atm_charge(vcc, skb->truesize)) {
25982 push_rxbufs(card, skb);
25983 - atomic_inc(&vcc->stats->rx_drop);
25984 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25985 } else {
25986 skb_put(skb, len);
25987 dequeue_sm_buf(card, skb);
25988 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25989 ATM_SKB(skb)->vcc = vcc;
25990 __net_timestamp(skb);
25991 vcc->push(vcc, skb);
25992 - atomic_inc(&vcc->stats->rx);
25993 + atomic_inc_unchecked(&vcc->stats->rx);
25994 }
25995 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
25996 struct sk_buff *sb;
25997 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25998 if (len <= NS_SMBUFSIZE) {
25999 if (!atm_charge(vcc, sb->truesize)) {
26000 push_rxbufs(card, sb);
26001 - atomic_inc(&vcc->stats->rx_drop);
26002 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26003 } else {
26004 skb_put(sb, len);
26005 dequeue_sm_buf(card, sb);
26006 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26007 ATM_SKB(sb)->vcc = vcc;
26008 __net_timestamp(sb);
26009 vcc->push(vcc, sb);
26010 - atomic_inc(&vcc->stats->rx);
26011 + atomic_inc_unchecked(&vcc->stats->rx);
26012 }
26013
26014 push_rxbufs(card, skb);
26015 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26016
26017 if (!atm_charge(vcc, skb->truesize)) {
26018 push_rxbufs(card, skb);
26019 - atomic_inc(&vcc->stats->rx_drop);
26020 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26021 } else {
26022 dequeue_lg_buf(card, skb);
26023 #ifdef NS_USE_DESTRUCTORS
26024 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26025 ATM_SKB(skb)->vcc = vcc;
26026 __net_timestamp(skb);
26027 vcc->push(vcc, skb);
26028 - atomic_inc(&vcc->stats->rx);
26029 + atomic_inc_unchecked(&vcc->stats->rx);
26030 }
26031
26032 push_rxbufs(card, sb);
26033 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26034 printk
26035 ("nicstar%d: Out of huge buffers.\n",
26036 card->index);
26037 - atomic_inc(&vcc->stats->rx_drop);
26038 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26039 recycle_iovec_rx_bufs(card,
26040 (struct iovec *)
26041 iovb->data,
26042 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26043 card->hbpool.count++;
26044 } else
26045 dev_kfree_skb_any(hb);
26046 - atomic_inc(&vcc->stats->rx_drop);
26047 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26048 } else {
26049 /* Copy the small buffer to the huge buffer */
26050 sb = (struct sk_buff *)iov->iov_base;
26051 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26052 #endif /* NS_USE_DESTRUCTORS */
26053 __net_timestamp(hb);
26054 vcc->push(vcc, hb);
26055 - atomic_inc(&vcc->stats->rx);
26056 + atomic_inc_unchecked(&vcc->stats->rx);
26057 }
26058 }
26059
26060 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26061 index 5d1d076..4f31f42 100644
26062 --- a/drivers/atm/solos-pci.c
26063 +++ b/drivers/atm/solos-pci.c
26064 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26065 }
26066 atm_charge(vcc, skb->truesize);
26067 vcc->push(vcc, skb);
26068 - atomic_inc(&vcc->stats->rx);
26069 + atomic_inc_unchecked(&vcc->stats->rx);
26070 break;
26071
26072 case PKT_STATUS:
26073 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *buf)
26074 char msg[500];
26075 char item[10];
26076
26077 + pax_track_stack();
26078 +
26079 len = buf->len;
26080 for (i = 0; i < len; i++){
26081 if(i % 8 == 0)
26082 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26083 vcc = SKB_CB(oldskb)->vcc;
26084
26085 if (vcc) {
26086 - atomic_inc(&vcc->stats->tx);
26087 + atomic_inc_unchecked(&vcc->stats->tx);
26088 solos_pop(vcc, oldskb);
26089 } else
26090 dev_kfree_skb_irq(oldskb);
26091 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26092 index 90f1ccc..04c4a1e 100644
26093 --- a/drivers/atm/suni.c
26094 +++ b/drivers/atm/suni.c
26095 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26096
26097
26098 #define ADD_LIMITED(s,v) \
26099 - atomic_add((v),&stats->s); \
26100 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26101 + atomic_add_unchecked((v),&stats->s); \
26102 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26103
26104
26105 static void suni_hz(unsigned long from_timer)
26106 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26107 index 5120a96..e2572bd 100644
26108 --- a/drivers/atm/uPD98402.c
26109 +++ b/drivers/atm/uPD98402.c
26110 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26111 struct sonet_stats tmp;
26112 int error = 0;
26113
26114 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26115 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26116 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26117 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26118 if (zero && !error) {
26119 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26120
26121
26122 #define ADD_LIMITED(s,v) \
26123 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26124 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26125 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26126 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26127 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26128 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26129
26130
26131 static void stat_event(struct atm_dev *dev)
26132 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26133 if (reason & uPD98402_INT_PFM) stat_event(dev);
26134 if (reason & uPD98402_INT_PCO) {
26135 (void) GET(PCOCR); /* clear interrupt cause */
26136 - atomic_add(GET(HECCT),
26137 + atomic_add_unchecked(GET(HECCT),
26138 &PRIV(dev)->sonet_stats.uncorr_hcs);
26139 }
26140 if ((reason & uPD98402_INT_RFO) &&
26141 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26142 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26143 uPD98402_INT_LOS),PIMR); /* enable them */
26144 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26145 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26146 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26147 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26148 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26149 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26150 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26151 return 0;
26152 }
26153
26154 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26155 index d889f56..17eb71e 100644
26156 --- a/drivers/atm/zatm.c
26157 +++ b/drivers/atm/zatm.c
26158 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26159 }
26160 if (!size) {
26161 dev_kfree_skb_irq(skb);
26162 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26163 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26164 continue;
26165 }
26166 if (!atm_charge(vcc,skb->truesize)) {
26167 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26168 skb->len = size;
26169 ATM_SKB(skb)->vcc = vcc;
26170 vcc->push(vcc,skb);
26171 - atomic_inc(&vcc->stats->rx);
26172 + atomic_inc_unchecked(&vcc->stats->rx);
26173 }
26174 zout(pos & 0xffff,MTA(mbx));
26175 #if 0 /* probably a stupid idea */
26176 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26177 skb_queue_head(&zatm_vcc->backlog,skb);
26178 break;
26179 }
26180 - atomic_inc(&vcc->stats->tx);
26181 + atomic_inc_unchecked(&vcc->stats->tx);
26182 wake_up(&zatm_vcc->tx_wait);
26183 }
26184
26185 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26186 index a4760e0..51283cf 100644
26187 --- a/drivers/base/devtmpfs.c
26188 +++ b/drivers/base/devtmpfs.c
26189 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26190 if (!thread)
26191 return 0;
26192
26193 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26194 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26195 if (err)
26196 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26197 else
26198 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26199 index 84f7c7d..37cfd87 100644
26200 --- a/drivers/base/power/wakeup.c
26201 +++ b/drivers/base/power/wakeup.c
26202 @@ -29,14 +29,14 @@ bool events_check_enabled;
26203 * They need to be modified together atomically, so it's better to use one
26204 * atomic variable to hold them both.
26205 */
26206 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26207 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26208
26209 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26210 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26211
26212 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26213 {
26214 - unsigned int comb = atomic_read(&combined_event_count);
26215 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26216
26217 *cnt = (comb >> IN_PROGRESS_BITS);
26218 *inpr = comb & MAX_IN_PROGRESS;
26219 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26220 ws->last_time = ktime_get();
26221
26222 /* Increment the counter of events in progress. */
26223 - atomic_inc(&combined_event_count);
26224 + atomic_inc_unchecked(&combined_event_count);
26225 }
26226
26227 /**
26228 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26229 * Increment the counter of registered wakeup events and decrement the
26230 * couter of wakeup events in progress simultaneously.
26231 */
26232 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26233 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26234 }
26235
26236 /**
26237 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
26238 index e086fbb..398e1fe 100644
26239 --- a/drivers/block/DAC960.c
26240 +++ b/drivers/block/DAC960.c
26241 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
26242 unsigned long flags;
26243 int Channel, TargetID;
26244
26245 + pax_track_stack();
26246 +
26247 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26248 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26249 sizeof(DAC960_SCSI_Inquiry_T) +
26250 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26251 index c2f9b3e..5911988 100644
26252 --- a/drivers/block/cciss.c
26253 +++ b/drivers/block/cciss.c
26254 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26255 int err;
26256 u32 cp;
26257
26258 + memset(&arg64, 0, sizeof(arg64));
26259 +
26260 err = 0;
26261 err |=
26262 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26263 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
26264 while (!list_empty(&h->reqQ)) {
26265 c = list_entry(h->reqQ.next, CommandList_struct, list);
26266 /* can't do anything if fifo is full */
26267 - if ((h->access.fifo_full(h))) {
26268 + if ((h->access->fifo_full(h))) {
26269 dev_warn(&h->pdev->dev, "fifo full\n");
26270 break;
26271 }
26272 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
26273 h->Qdepth--;
26274
26275 /* Tell the controller execute command */
26276 - h->access.submit_command(h, c);
26277 + h->access->submit_command(h, c);
26278
26279 /* Put job onto the completed Q */
26280 addQ(&h->cmpQ, c);
26281 @@ -3422,17 +3424,17 @@ startio:
26282
26283 static inline unsigned long get_next_completion(ctlr_info_t *h)
26284 {
26285 - return h->access.command_completed(h);
26286 + return h->access->command_completed(h);
26287 }
26288
26289 static inline int interrupt_pending(ctlr_info_t *h)
26290 {
26291 - return h->access.intr_pending(h);
26292 + return h->access->intr_pending(h);
26293 }
26294
26295 static inline long interrupt_not_for_us(ctlr_info_t *h)
26296 {
26297 - return ((h->access.intr_pending(h) == 0) ||
26298 + return ((h->access->intr_pending(h) == 0) ||
26299 (h->interrupts_enabled == 0));
26300 }
26301
26302 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info_t *h)
26303 u32 a;
26304
26305 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26306 - return h->access.command_completed(h);
26307 + return h->access->command_completed(h);
26308
26309 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26310 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26311 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26312 trans_support & CFGTBL_Trans_use_short_tags);
26313
26314 /* Change the access methods to the performant access methods */
26315 - h->access = SA5_performant_access;
26316 + h->access = &SA5_performant_access;
26317 h->transMethod = CFGTBL_Trans_Performant;
26318
26319 return;
26320 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26321 if (prod_index < 0)
26322 return -ENODEV;
26323 h->product_name = products[prod_index].product_name;
26324 - h->access = *(products[prod_index].access);
26325 + h->access = products[prod_index].access;
26326
26327 if (cciss_board_disabled(h)) {
26328 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26329 @@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
26330 }
26331
26332 /* make sure the board interrupts are off */
26333 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26334 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26335 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26336 if (rc)
26337 goto clean2;
26338 @@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
26339 * fake ones to scoop up any residual completions.
26340 */
26341 spin_lock_irqsave(&h->lock, flags);
26342 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26343 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26344 spin_unlock_irqrestore(&h->lock, flags);
26345 free_irq(h->intr[PERF_MODE_INT], h);
26346 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26347 @@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
26348 dev_info(&h->pdev->dev, "Board READY.\n");
26349 dev_info(&h->pdev->dev,
26350 "Waiting for stale completions to drain.\n");
26351 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26352 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26353 msleep(10000);
26354 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26355 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26356
26357 rc = controller_reset_failed(h->cfgtable);
26358 if (rc)
26359 @@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
26360 cciss_scsi_setup(h);
26361
26362 /* Turn the interrupts on so we can service requests */
26363 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26364 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26365
26366 /* Get the firmware version */
26367 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26368 @@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26369 kfree(flush_buf);
26370 if (return_code != IO_OK)
26371 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26372 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26373 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26374 free_irq(h->intr[PERF_MODE_INT], h);
26375 }
26376
26377 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26378 index c049548..a09cb6e 100644
26379 --- a/drivers/block/cciss.h
26380 +++ b/drivers/block/cciss.h
26381 @@ -100,7 +100,7 @@ struct ctlr_info
26382 /* information about each logical volume */
26383 drive_info_struct *drv[CISS_MAX_LUN];
26384
26385 - struct access_method access;
26386 + struct access_method *access;
26387
26388 /* queue and queue Info */
26389 struct list_head reqQ;
26390 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26391 index b2fceb5..87fec83 100644
26392 --- a/drivers/block/cpqarray.c
26393 +++ b/drivers/block/cpqarray.c
26394 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26395 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26396 goto Enomem4;
26397 }
26398 - hba[i]->access.set_intr_mask(hba[i], 0);
26399 + hba[i]->access->set_intr_mask(hba[i], 0);
26400 if (request_irq(hba[i]->intr, do_ida_intr,
26401 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26402 {
26403 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26404 add_timer(&hba[i]->timer);
26405
26406 /* Enable IRQ now that spinlock and rate limit timer are set up */
26407 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26408 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26409
26410 for(j=0; j<NWD; j++) {
26411 struct gendisk *disk = ida_gendisk[i][j];
26412 @@ -694,7 +694,7 @@ DBGINFO(
26413 for(i=0; i<NR_PRODUCTS; i++) {
26414 if (board_id == products[i].board_id) {
26415 c->product_name = products[i].product_name;
26416 - c->access = *(products[i].access);
26417 + c->access = products[i].access;
26418 break;
26419 }
26420 }
26421 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26422 hba[ctlr]->intr = intr;
26423 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26424 hba[ctlr]->product_name = products[j].product_name;
26425 - hba[ctlr]->access = *(products[j].access);
26426 + hba[ctlr]->access = products[j].access;
26427 hba[ctlr]->ctlr = ctlr;
26428 hba[ctlr]->board_id = board_id;
26429 hba[ctlr]->pci_dev = NULL; /* not PCI */
26430 @@ -911,6 +911,8 @@ static void do_ida_request(struct request_queue *q)
26431 struct scatterlist tmp_sg[SG_MAX];
26432 int i, dir, seg;
26433
26434 + pax_track_stack();
26435 +
26436 queue_next:
26437 creq = blk_peek_request(q);
26438 if (!creq)
26439 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
26440
26441 while((c = h->reqQ) != NULL) {
26442 /* Can't do anything if we're busy */
26443 - if (h->access.fifo_full(h) == 0)
26444 + if (h->access->fifo_full(h) == 0)
26445 return;
26446
26447 /* Get the first entry from the request Q */
26448 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
26449 h->Qdepth--;
26450
26451 /* Tell the controller to do our bidding */
26452 - h->access.submit_command(h, c);
26453 + h->access->submit_command(h, c);
26454
26455 /* Get onto the completion Q */
26456 addQ(&h->cmpQ, c);
26457 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26458 unsigned long flags;
26459 __u32 a,a1;
26460
26461 - istat = h->access.intr_pending(h);
26462 + istat = h->access->intr_pending(h);
26463 /* Is this interrupt for us? */
26464 if (istat == 0)
26465 return IRQ_NONE;
26466 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26467 */
26468 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26469 if (istat & FIFO_NOT_EMPTY) {
26470 - while((a = h->access.command_completed(h))) {
26471 + while((a = h->access->command_completed(h))) {
26472 a1 = a; a &= ~3;
26473 if ((c = h->cmpQ) == NULL)
26474 {
26475 @@ -1449,11 +1451,11 @@ static int sendcmd(
26476 /*
26477 * Disable interrupt
26478 */
26479 - info_p->access.set_intr_mask(info_p, 0);
26480 + info_p->access->set_intr_mask(info_p, 0);
26481 /* Make sure there is room in the command FIFO */
26482 /* Actually it should be completely empty at this time. */
26483 for (i = 200000; i > 0; i--) {
26484 - temp = info_p->access.fifo_full(info_p);
26485 + temp = info_p->access->fifo_full(info_p);
26486 if (temp != 0) {
26487 break;
26488 }
26489 @@ -1466,7 +1468,7 @@ DBG(
26490 /*
26491 * Send the cmd
26492 */
26493 - info_p->access.submit_command(info_p, c);
26494 + info_p->access->submit_command(info_p, c);
26495 complete = pollcomplete(ctlr);
26496
26497 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26498 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26499 * we check the new geometry. Then turn interrupts back on when
26500 * we're done.
26501 */
26502 - host->access.set_intr_mask(host, 0);
26503 + host->access->set_intr_mask(host, 0);
26504 getgeometry(ctlr);
26505 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26506 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26507
26508 for(i=0; i<NWD; i++) {
26509 struct gendisk *disk = ida_gendisk[ctlr][i];
26510 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
26511 /* Wait (up to 2 seconds) for a command to complete */
26512
26513 for (i = 200000; i > 0; i--) {
26514 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26515 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26516 if (done == 0) {
26517 udelay(10); /* a short fixed delay */
26518 } else
26519 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26520 index be73e9d..7fbf140 100644
26521 --- a/drivers/block/cpqarray.h
26522 +++ b/drivers/block/cpqarray.h
26523 @@ -99,7 +99,7 @@ struct ctlr_info {
26524 drv_info_t drv[NWD];
26525 struct proc_dir_entry *proc;
26526
26527 - struct access_method access;
26528 + struct access_method *access;
26529
26530 cmdlist_t *reqQ;
26531 cmdlist_t *cmpQ;
26532 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26533 index ef2ceed..c9cb18e 100644
26534 --- a/drivers/block/drbd/drbd_int.h
26535 +++ b/drivers/block/drbd/drbd_int.h
26536 @@ -737,7 +737,7 @@ struct drbd_request;
26537 struct drbd_epoch {
26538 struct list_head list;
26539 unsigned int barrier_nr;
26540 - atomic_t epoch_size; /* increased on every request added. */
26541 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26542 atomic_t active; /* increased on every req. added, and dec on every finished. */
26543 unsigned long flags;
26544 };
26545 @@ -1109,7 +1109,7 @@ struct drbd_conf {
26546 void *int_dig_in;
26547 void *int_dig_vv;
26548 wait_queue_head_t seq_wait;
26549 - atomic_t packet_seq;
26550 + atomic_unchecked_t packet_seq;
26551 unsigned int peer_seq;
26552 spinlock_t peer_seq_lock;
26553 unsigned int minor;
26554 @@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26555
26556 static inline void drbd_tcp_cork(struct socket *sock)
26557 {
26558 - int __user val = 1;
26559 + int val = 1;
26560 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26561 - (char __user *)&val, sizeof(val));
26562 + (char __force_user *)&val, sizeof(val));
26563 }
26564
26565 static inline void drbd_tcp_uncork(struct socket *sock)
26566 {
26567 - int __user val = 0;
26568 + int val = 0;
26569 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26570 - (char __user *)&val, sizeof(val));
26571 + (char __force_user *)&val, sizeof(val));
26572 }
26573
26574 static inline void drbd_tcp_nodelay(struct socket *sock)
26575 {
26576 - int __user val = 1;
26577 + int val = 1;
26578 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26579 - (char __user *)&val, sizeof(val));
26580 + (char __force_user *)&val, sizeof(val));
26581 }
26582
26583 static inline void drbd_tcp_quickack(struct socket *sock)
26584 {
26585 - int __user val = 2;
26586 + int val = 2;
26587 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26588 - (char __user *)&val, sizeof(val));
26589 + (char __force_user *)&val, sizeof(val));
26590 }
26591
26592 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26593 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26594 index 0358e55..bc33689 100644
26595 --- a/drivers/block/drbd/drbd_main.c
26596 +++ b/drivers/block/drbd/drbd_main.c
26597 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26598 p.sector = sector;
26599 p.block_id = block_id;
26600 p.blksize = blksize;
26601 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26602 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26603
26604 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26605 return false;
26606 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26607 p.sector = cpu_to_be64(req->sector);
26608 p.block_id = (unsigned long)req;
26609 p.seq_num = cpu_to_be32(req->seq_num =
26610 - atomic_add_return(1, &mdev->packet_seq));
26611 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26612
26613 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26614
26615 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26616 atomic_set(&mdev->unacked_cnt, 0);
26617 atomic_set(&mdev->local_cnt, 0);
26618 atomic_set(&mdev->net_cnt, 0);
26619 - atomic_set(&mdev->packet_seq, 0);
26620 + atomic_set_unchecked(&mdev->packet_seq, 0);
26621 atomic_set(&mdev->pp_in_use, 0);
26622 atomic_set(&mdev->pp_in_use_by_net, 0);
26623 atomic_set(&mdev->rs_sect_in, 0);
26624 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26625 mdev->receiver.t_state);
26626
26627 /* no need to lock it, I'm the only thread alive */
26628 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26629 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26630 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26631 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26632 mdev->al_writ_cnt =
26633 mdev->bm_writ_cnt =
26634 mdev->read_cnt =
26635 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26636 index 0feab26..5d9b3dd 100644
26637 --- a/drivers/block/drbd/drbd_nl.c
26638 +++ b/drivers/block/drbd/drbd_nl.c
26639 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26640 module_put(THIS_MODULE);
26641 }
26642
26643 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26644 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26645
26646 static unsigned short *
26647 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26648 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26649 cn_reply->id.idx = CN_IDX_DRBD;
26650 cn_reply->id.val = CN_VAL_DRBD;
26651
26652 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26653 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26654 cn_reply->ack = 0; /* not used here. */
26655 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26656 (int)((char *)tl - (char *)reply->tag_list);
26657 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26658 cn_reply->id.idx = CN_IDX_DRBD;
26659 cn_reply->id.val = CN_VAL_DRBD;
26660
26661 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26662 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26663 cn_reply->ack = 0; /* not used here. */
26664 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26665 (int)((char *)tl - (char *)reply->tag_list);
26666 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26667 cn_reply->id.idx = CN_IDX_DRBD;
26668 cn_reply->id.val = CN_VAL_DRBD;
26669
26670 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26671 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26672 cn_reply->ack = 0; // not used here.
26673 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26674 (int)((char*)tl - (char*)reply->tag_list);
26675 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26676 cn_reply->id.idx = CN_IDX_DRBD;
26677 cn_reply->id.val = CN_VAL_DRBD;
26678
26679 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26680 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26681 cn_reply->ack = 0; /* not used here. */
26682 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26683 (int)((char *)tl - (char *)reply->tag_list);
26684 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26685 index 43beaca..4a5b1dd 100644
26686 --- a/drivers/block/drbd/drbd_receiver.c
26687 +++ b/drivers/block/drbd/drbd_receiver.c
26688 @@ -894,7 +894,7 @@ retry:
26689 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26690 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26691
26692 - atomic_set(&mdev->packet_seq, 0);
26693 + atomic_set_unchecked(&mdev->packet_seq, 0);
26694 mdev->peer_seq = 0;
26695
26696 drbd_thread_start(&mdev->asender);
26697 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26698 do {
26699 next_epoch = NULL;
26700
26701 - epoch_size = atomic_read(&epoch->epoch_size);
26702 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26703
26704 switch (ev & ~EV_CLEANUP) {
26705 case EV_PUT:
26706 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26707 rv = FE_DESTROYED;
26708 } else {
26709 epoch->flags = 0;
26710 - atomic_set(&epoch->epoch_size, 0);
26711 + atomic_set_unchecked(&epoch->epoch_size, 0);
26712 /* atomic_set(&epoch->active, 0); is already zero */
26713 if (rv == FE_STILL_LIVE)
26714 rv = FE_RECYCLED;
26715 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26716 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26717 drbd_flush(mdev);
26718
26719 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26720 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26721 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26722 if (epoch)
26723 break;
26724 }
26725
26726 epoch = mdev->current_epoch;
26727 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26728 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26729
26730 D_ASSERT(atomic_read(&epoch->active) == 0);
26731 D_ASSERT(epoch->flags == 0);
26732 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26733 }
26734
26735 epoch->flags = 0;
26736 - atomic_set(&epoch->epoch_size, 0);
26737 + atomic_set_unchecked(&epoch->epoch_size, 0);
26738 atomic_set(&epoch->active, 0);
26739
26740 spin_lock(&mdev->epoch_lock);
26741 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26742 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26743 list_add(&epoch->list, &mdev->current_epoch->list);
26744 mdev->current_epoch = epoch;
26745 mdev->epochs++;
26746 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26747 spin_unlock(&mdev->peer_seq_lock);
26748
26749 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26750 - atomic_inc(&mdev->current_epoch->epoch_size);
26751 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26752 return drbd_drain_block(mdev, data_size);
26753 }
26754
26755 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26756
26757 spin_lock(&mdev->epoch_lock);
26758 e->epoch = mdev->current_epoch;
26759 - atomic_inc(&e->epoch->epoch_size);
26760 + atomic_inc_unchecked(&e->epoch->epoch_size);
26761 atomic_inc(&e->epoch->active);
26762 spin_unlock(&mdev->epoch_lock);
26763
26764 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26765 D_ASSERT(list_empty(&mdev->done_ee));
26766
26767 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26768 - atomic_set(&mdev->current_epoch->epoch_size, 0);
26769 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26770 D_ASSERT(list_empty(&mdev->current_epoch->list));
26771 }
26772
26773 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26774 index 4720c7a..2c49af1 100644
26775 --- a/drivers/block/loop.c
26776 +++ b/drivers/block/loop.c
26777 @@ -283,7 +283,7 @@ static int __do_lo_send_write(struct file *file,
26778 mm_segment_t old_fs = get_fs();
26779
26780 set_fs(get_ds());
26781 - bw = file->f_op->write(file, buf, len, &pos);
26782 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26783 set_fs(old_fs);
26784 if (likely(bw == len))
26785 return 0;
26786 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
26787 index f533f33..6177bcb 100644
26788 --- a/drivers/block/nbd.c
26789 +++ b/drivers/block/nbd.c
26790 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
26791 struct kvec iov;
26792 sigset_t blocked, oldset;
26793
26794 + pax_track_stack();
26795 +
26796 if (unlikely(!sock)) {
26797 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26798 lo->disk->disk_name, (send ? "send" : "recv"));
26799 @@ -572,6 +574,8 @@ static void do_nbd_request(struct request_queue *q)
26800 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26801 unsigned int cmd, unsigned long arg)
26802 {
26803 + pax_track_stack();
26804 +
26805 switch (cmd) {
26806 case NBD_DISCONNECT: {
26807 struct request sreq;
26808 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26809 index 423fd56..06d3be0 100644
26810 --- a/drivers/char/Kconfig
26811 +++ b/drivers/char/Kconfig
26812 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26813
26814 config DEVKMEM
26815 bool "/dev/kmem virtual device support"
26816 - default y
26817 + default n
26818 + depends on !GRKERNSEC_KMEM
26819 help
26820 Say Y here if you want to support the /dev/kmem device. The
26821 /dev/kmem device is rarely used, but can be used for certain
26822 @@ -596,6 +597,7 @@ config DEVPORT
26823 bool
26824 depends on !M68K
26825 depends on ISA || PCI
26826 + depends on !GRKERNSEC_KMEM
26827 default y
26828
26829 source "drivers/s390/char/Kconfig"
26830 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26831 index 2e04433..22afc64 100644
26832 --- a/drivers/char/agp/frontend.c
26833 +++ b/drivers/char/agp/frontend.c
26834 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26835 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26836 return -EFAULT;
26837
26838 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26839 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26840 return -EFAULT;
26841
26842 client = agp_find_client_by_pid(reserve.pid);
26843 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26844 index 095ab90..afad0a4 100644
26845 --- a/drivers/char/briq_panel.c
26846 +++ b/drivers/char/briq_panel.c
26847 @@ -9,6 +9,7 @@
26848 #include <linux/types.h>
26849 #include <linux/errno.h>
26850 #include <linux/tty.h>
26851 +#include <linux/mutex.h>
26852 #include <linux/timer.h>
26853 #include <linux/kernel.h>
26854 #include <linux/wait.h>
26855 @@ -34,6 +35,7 @@ static int vfd_is_open;
26856 static unsigned char vfd[40];
26857 static int vfd_cursor;
26858 static unsigned char ledpb, led;
26859 +static DEFINE_MUTEX(vfd_mutex);
26860
26861 static void update_vfd(void)
26862 {
26863 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26864 if (!vfd_is_open)
26865 return -EBUSY;
26866
26867 + mutex_lock(&vfd_mutex);
26868 for (;;) {
26869 char c;
26870 if (!indx)
26871 break;
26872 - if (get_user(c, buf))
26873 + if (get_user(c, buf)) {
26874 + mutex_unlock(&vfd_mutex);
26875 return -EFAULT;
26876 + }
26877 if (esc) {
26878 set_led(c);
26879 esc = 0;
26880 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26881 buf++;
26882 }
26883 update_vfd();
26884 + mutex_unlock(&vfd_mutex);
26885
26886 return len;
26887 }
26888 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26889 index f773a9d..65cd683 100644
26890 --- a/drivers/char/genrtc.c
26891 +++ b/drivers/char/genrtc.c
26892 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26893 switch (cmd) {
26894
26895 case RTC_PLL_GET:
26896 + memset(&pll, 0, sizeof(pll));
26897 if (get_rtc_pll(&pll))
26898 return -EINVAL;
26899 else
26900 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26901 index 0833896..cccce52 100644
26902 --- a/drivers/char/hpet.c
26903 +++ b/drivers/char/hpet.c
26904 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26905 }
26906
26907 static int
26908 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26909 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26910 struct hpet_info *info)
26911 {
26912 struct hpet_timer __iomem *timer;
26913 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26914 index 58c0e63..25aed94 100644
26915 --- a/drivers/char/ipmi/ipmi_msghandler.c
26916 +++ b/drivers/char/ipmi/ipmi_msghandler.c
26917 @@ -415,7 +415,7 @@ struct ipmi_smi {
26918 struct proc_dir_entry *proc_dir;
26919 char proc_dir_name[10];
26920
26921 - atomic_t stats[IPMI_NUM_STATS];
26922 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26923
26924 /*
26925 * run_to_completion duplicate of smb_info, smi_info
26926 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26927
26928
26929 #define ipmi_inc_stat(intf, stat) \
26930 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26931 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26932 #define ipmi_get_stat(intf, stat) \
26933 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26934 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26935
26936 static int is_lan_addr(struct ipmi_addr *addr)
26937 {
26938 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26939 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26940 init_waitqueue_head(&intf->waitq);
26941 for (i = 0; i < IPMI_NUM_STATS; i++)
26942 - atomic_set(&intf->stats[i], 0);
26943 + atomic_set_unchecked(&intf->stats[i], 0);
26944
26945 intf->proc_dir = NULL;
26946
26947 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
26948 struct ipmi_smi_msg smi_msg;
26949 struct ipmi_recv_msg recv_msg;
26950
26951 + pax_track_stack();
26952 +
26953 si = (struct ipmi_system_interface_addr *) &addr;
26954 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26955 si->channel = IPMI_BMC_CHANNEL;
26956 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26957 index 9397ab4..d01bee1 100644
26958 --- a/drivers/char/ipmi/ipmi_si_intf.c
26959 +++ b/drivers/char/ipmi/ipmi_si_intf.c
26960 @@ -277,7 +277,7 @@ struct smi_info {
26961 unsigned char slave_addr;
26962
26963 /* Counters and things for the proc filesystem. */
26964 - atomic_t stats[SI_NUM_STATS];
26965 + atomic_unchecked_t stats[SI_NUM_STATS];
26966
26967 struct task_struct *thread;
26968
26969 @@ -286,9 +286,9 @@ struct smi_info {
26970 };
26971
26972 #define smi_inc_stat(smi, stat) \
26973 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26974 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26975 #define smi_get_stat(smi, stat) \
26976 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26977 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26978
26979 #define SI_MAX_PARMS 4
26980
26981 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26982 atomic_set(&new_smi->req_events, 0);
26983 new_smi->run_to_completion = 0;
26984 for (i = 0; i < SI_NUM_STATS; i++)
26985 - atomic_set(&new_smi->stats[i], 0);
26986 + atomic_set_unchecked(&new_smi->stats[i], 0);
26987
26988 new_smi->interrupt_disabled = 1;
26989 atomic_set(&new_smi->stop_operation, 0);
26990 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26991 index 1aeaaba..e018570 100644
26992 --- a/drivers/char/mbcs.c
26993 +++ b/drivers/char/mbcs.c
26994 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26995 return 0;
26996 }
26997
26998 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26999 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
27000 {
27001 .part_num = MBCS_PART_NUM,
27002 .mfg_num = MBCS_MFG_NUM,
27003 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
27004 index 8fc04b4..cebdeec 100644
27005 --- a/drivers/char/mem.c
27006 +++ b/drivers/char/mem.c
27007 @@ -18,6 +18,7 @@
27008 #include <linux/raw.h>
27009 #include <linux/tty.h>
27010 #include <linux/capability.h>
27011 +#include <linux/security.h>
27012 #include <linux/ptrace.h>
27013 #include <linux/device.h>
27014 #include <linux/highmem.h>
27015 @@ -34,6 +35,10 @@
27016 # include <linux/efi.h>
27017 #endif
27018
27019 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27020 +extern const struct file_operations grsec_fops;
27021 +#endif
27022 +
27023 static inline unsigned long size_inside_page(unsigned long start,
27024 unsigned long size)
27025 {
27026 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27027
27028 while (cursor < to) {
27029 if (!devmem_is_allowed(pfn)) {
27030 +#ifdef CONFIG_GRKERNSEC_KMEM
27031 + gr_handle_mem_readwrite(from, to);
27032 +#else
27033 printk(KERN_INFO
27034 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27035 current->comm, from, to);
27036 +#endif
27037 return 0;
27038 }
27039 cursor += PAGE_SIZE;
27040 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27041 }
27042 return 1;
27043 }
27044 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27045 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27046 +{
27047 + return 0;
27048 +}
27049 #else
27050 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27051 {
27052 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27053
27054 while (count > 0) {
27055 unsigned long remaining;
27056 + char *temp;
27057
27058 sz = size_inside_page(p, count);
27059
27060 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27061 if (!ptr)
27062 return -EFAULT;
27063
27064 - remaining = copy_to_user(buf, ptr, sz);
27065 +#ifdef CONFIG_PAX_USERCOPY
27066 + temp = kmalloc(sz, GFP_KERNEL);
27067 + if (!temp) {
27068 + unxlate_dev_mem_ptr(p, ptr);
27069 + return -ENOMEM;
27070 + }
27071 + memcpy(temp, ptr, sz);
27072 +#else
27073 + temp = ptr;
27074 +#endif
27075 +
27076 + remaining = copy_to_user(buf, temp, sz);
27077 +
27078 +#ifdef CONFIG_PAX_USERCOPY
27079 + kfree(temp);
27080 +#endif
27081 +
27082 unxlate_dev_mem_ptr(p, ptr);
27083 if (remaining)
27084 return -EFAULT;
27085 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27086 size_t count, loff_t *ppos)
27087 {
27088 unsigned long p = *ppos;
27089 - ssize_t low_count, read, sz;
27090 + ssize_t low_count, read, sz, err = 0;
27091 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27092 - int err = 0;
27093
27094 read = 0;
27095 if (p < (unsigned long) high_memory) {
27096 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27097 }
27098 #endif
27099 while (low_count > 0) {
27100 + char *temp;
27101 +
27102 sz = size_inside_page(p, low_count);
27103
27104 /*
27105 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27106 */
27107 kbuf = xlate_dev_kmem_ptr((char *)p);
27108
27109 - if (copy_to_user(buf, kbuf, sz))
27110 +#ifdef CONFIG_PAX_USERCOPY
27111 + temp = kmalloc(sz, GFP_KERNEL);
27112 + if (!temp)
27113 + return -ENOMEM;
27114 + memcpy(temp, kbuf, sz);
27115 +#else
27116 + temp = kbuf;
27117 +#endif
27118 +
27119 + err = copy_to_user(buf, temp, sz);
27120 +
27121 +#ifdef CONFIG_PAX_USERCOPY
27122 + kfree(temp);
27123 +#endif
27124 +
27125 + if (err)
27126 return -EFAULT;
27127 buf += sz;
27128 p += sz;
27129 @@ -866,6 +913,9 @@ static const struct memdev {
27130 #ifdef CONFIG_CRASH_DUMP
27131 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27132 #endif
27133 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27134 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27135 +#endif
27136 };
27137
27138 static int memory_open(struct inode *inode, struct file *filp)
27139 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27140 index da3cfee..a5a6606 100644
27141 --- a/drivers/char/nvram.c
27142 +++ b/drivers/char/nvram.c
27143 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27144
27145 spin_unlock_irq(&rtc_lock);
27146
27147 - if (copy_to_user(buf, contents, tmp - contents))
27148 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27149 return -EFAULT;
27150
27151 *ppos = i;
27152 diff --git a/drivers/char/random.c b/drivers/char/random.c
27153 index c35a785..6d82202 100644
27154 --- a/drivers/char/random.c
27155 +++ b/drivers/char/random.c
27156 @@ -261,8 +261,13 @@
27157 /*
27158 * Configuration information
27159 */
27160 +#ifdef CONFIG_GRKERNSEC_RANDNET
27161 +#define INPUT_POOL_WORDS 512
27162 +#define OUTPUT_POOL_WORDS 128
27163 +#else
27164 #define INPUT_POOL_WORDS 128
27165 #define OUTPUT_POOL_WORDS 32
27166 +#endif
27167 #define SEC_XFER_SIZE 512
27168 #define EXTRACT_SIZE 10
27169
27170 @@ -300,10 +305,17 @@ static struct poolinfo {
27171 int poolwords;
27172 int tap1, tap2, tap3, tap4, tap5;
27173 } poolinfo_table[] = {
27174 +#ifdef CONFIG_GRKERNSEC_RANDNET
27175 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27176 + { 512, 411, 308, 208, 104, 1 },
27177 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27178 + { 128, 103, 76, 51, 25, 1 },
27179 +#else
27180 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27181 { 128, 103, 76, 51, 25, 1 },
27182 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27183 { 32, 26, 20, 14, 7, 1 },
27184 +#endif
27185 #if 0
27186 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27187 { 2048, 1638, 1231, 819, 411, 1 },
27188 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27189
27190 extract_buf(r, tmp);
27191 i = min_t(int, nbytes, EXTRACT_SIZE);
27192 - if (copy_to_user(buf, tmp, i)) {
27193 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27194 ret = -EFAULT;
27195 break;
27196 }
27197 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27198 #include <linux/sysctl.h>
27199
27200 static int min_read_thresh = 8, min_write_thresh;
27201 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27202 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27203 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27204 static char sysctl_bootid[16];
27205
27206 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27207 index 1ee8ce7..b778bef 100644
27208 --- a/drivers/char/sonypi.c
27209 +++ b/drivers/char/sonypi.c
27210 @@ -55,6 +55,7 @@
27211 #include <asm/uaccess.h>
27212 #include <asm/io.h>
27213 #include <asm/system.h>
27214 +#include <asm/local.h>
27215
27216 #include <linux/sonypi.h>
27217
27218 @@ -491,7 +492,7 @@ static struct sonypi_device {
27219 spinlock_t fifo_lock;
27220 wait_queue_head_t fifo_proc_list;
27221 struct fasync_struct *fifo_async;
27222 - int open_count;
27223 + local_t open_count;
27224 int model;
27225 struct input_dev *input_jog_dev;
27226 struct input_dev *input_key_dev;
27227 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27228 static int sonypi_misc_release(struct inode *inode, struct file *file)
27229 {
27230 mutex_lock(&sonypi_device.lock);
27231 - sonypi_device.open_count--;
27232 + local_dec(&sonypi_device.open_count);
27233 mutex_unlock(&sonypi_device.lock);
27234 return 0;
27235 }
27236 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27237 {
27238 mutex_lock(&sonypi_device.lock);
27239 /* Flush input queue on first open */
27240 - if (!sonypi_device.open_count)
27241 + if (!local_read(&sonypi_device.open_count))
27242 kfifo_reset(&sonypi_device.fifo);
27243 - sonypi_device.open_count++;
27244 + local_inc(&sonypi_device.open_count);
27245 mutex_unlock(&sonypi_device.lock);
27246
27247 return 0;
27248 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27249 index 9ca5c02..7ce352c 100644
27250 --- a/drivers/char/tpm/tpm.c
27251 +++ b/drivers/char/tpm/tpm.c
27252 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27253 chip->vendor.req_complete_val)
27254 goto out_recv;
27255
27256 - if ((status == chip->vendor.req_canceled)) {
27257 + if (status == chip->vendor.req_canceled) {
27258 dev_err(chip->dev, "Operation Canceled\n");
27259 rc = -ECANCELED;
27260 goto out;
27261 @@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
27262
27263 struct tpm_chip *chip = dev_get_drvdata(dev);
27264
27265 + pax_track_stack();
27266 +
27267 tpm_cmd.header.in = tpm_readpubek_header;
27268 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27269 "attempting to read the PUBEK");
27270 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27271 index 0636520..169c1d0 100644
27272 --- a/drivers/char/tpm/tpm_bios.c
27273 +++ b/drivers/char/tpm/tpm_bios.c
27274 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27275 event = addr;
27276
27277 if ((event->event_type == 0 && event->event_size == 0) ||
27278 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27279 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27280 return NULL;
27281
27282 return addr;
27283 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27284 return NULL;
27285
27286 if ((event->event_type == 0 && event->event_size == 0) ||
27287 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27288 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27289 return NULL;
27290
27291 (*pos)++;
27292 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27293 int i;
27294
27295 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27296 - seq_putc(m, data[i]);
27297 + if (!seq_putc(m, data[i]))
27298 + return -EFAULT;
27299
27300 return 0;
27301 }
27302 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27303 log->bios_event_log_end = log->bios_event_log + len;
27304
27305 virt = acpi_os_map_memory(start, len);
27306 + if (!virt) {
27307 + kfree(log->bios_event_log);
27308 + log->bios_event_log = NULL;
27309 + return -EFAULT;
27310 + }
27311
27312 - memcpy(log->bios_event_log, virt, len);
27313 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27314
27315 acpi_os_unmap_memory(virt, len);
27316 return 0;
27317 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27318 index fb68b12..0f6c6ca 100644
27319 --- a/drivers/char/virtio_console.c
27320 +++ b/drivers/char/virtio_console.c
27321 @@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27322 if (to_user) {
27323 ssize_t ret;
27324
27325 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27326 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27327 if (ret)
27328 return -EFAULT;
27329 } else {
27330 @@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27331 if (!port_has_data(port) && !port->host_connected)
27332 return 0;
27333
27334 - return fill_readbuf(port, ubuf, count, true);
27335 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27336 }
27337
27338 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27339 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
27340 index a84250a..68c725e 100644
27341 --- a/drivers/crypto/hifn_795x.c
27342 +++ b/drivers/crypto/hifn_795x.c
27343 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
27344 0xCA, 0x34, 0x2B, 0x2E};
27345 struct scatterlist sg;
27346
27347 + pax_track_stack();
27348 +
27349 memset(src, 0, sizeof(src));
27350 memset(ctx.key, 0, sizeof(ctx.key));
27351
27352 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
27353 index db33d30..7823369 100644
27354 --- a/drivers/crypto/padlock-aes.c
27355 +++ b/drivers/crypto/padlock-aes.c
27356 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
27357 struct crypto_aes_ctx gen_aes;
27358 int cpu;
27359
27360 + pax_track_stack();
27361 +
27362 if (key_len % 8) {
27363 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27364 return -EINVAL;
27365 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27366 index 9a8bebc..b1e4989 100644
27367 --- a/drivers/edac/amd64_edac.c
27368 +++ b/drivers/edac/amd64_edac.c
27369 @@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27370 * PCI core identifies what devices are on a system during boot, and then
27371 * inquiry this table to see if this driver is for a given device found.
27372 */
27373 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27374 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27375 {
27376 .vendor = PCI_VENDOR_ID_AMD,
27377 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27378 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27379 index e47e73b..348e0bd 100644
27380 --- a/drivers/edac/amd76x_edac.c
27381 +++ b/drivers/edac/amd76x_edac.c
27382 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27383 edac_mc_free(mci);
27384 }
27385
27386 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27387 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27388 {
27389 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27390 AMD762},
27391 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27392 index 1af531a..3a8ff27 100644
27393 --- a/drivers/edac/e752x_edac.c
27394 +++ b/drivers/edac/e752x_edac.c
27395 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27396 edac_mc_free(mci);
27397 }
27398
27399 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27400 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27401 {
27402 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27403 E7520},
27404 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27405 index 6ffb6d2..383d8d7 100644
27406 --- a/drivers/edac/e7xxx_edac.c
27407 +++ b/drivers/edac/e7xxx_edac.c
27408 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27409 edac_mc_free(mci);
27410 }
27411
27412 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27413 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27414 {
27415 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27416 E7205},
27417 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27418 index 495198a..ac08c85 100644
27419 --- a/drivers/edac/edac_pci_sysfs.c
27420 +++ b/drivers/edac/edac_pci_sysfs.c
27421 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27422 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27423 static int edac_pci_poll_msec = 1000; /* one second workq period */
27424
27425 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27426 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27427 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27428 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27429
27430 static struct kobject *edac_pci_top_main_kobj;
27431 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27432 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27433 edac_printk(KERN_CRIT, EDAC_PCI,
27434 "Signaled System Error on %s\n",
27435 pci_name(dev));
27436 - atomic_inc(&pci_nonparity_count);
27437 + atomic_inc_unchecked(&pci_nonparity_count);
27438 }
27439
27440 if (status & (PCI_STATUS_PARITY)) {
27441 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27442 "Master Data Parity Error on %s\n",
27443 pci_name(dev));
27444
27445 - atomic_inc(&pci_parity_count);
27446 + atomic_inc_unchecked(&pci_parity_count);
27447 }
27448
27449 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27450 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27451 "Detected Parity Error on %s\n",
27452 pci_name(dev));
27453
27454 - atomic_inc(&pci_parity_count);
27455 + atomic_inc_unchecked(&pci_parity_count);
27456 }
27457 }
27458
27459 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27460 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27461 "Signaled System Error on %s\n",
27462 pci_name(dev));
27463 - atomic_inc(&pci_nonparity_count);
27464 + atomic_inc_unchecked(&pci_nonparity_count);
27465 }
27466
27467 if (status & (PCI_STATUS_PARITY)) {
27468 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27469 "Master Data Parity Error on "
27470 "%s\n", pci_name(dev));
27471
27472 - atomic_inc(&pci_parity_count);
27473 + atomic_inc_unchecked(&pci_parity_count);
27474 }
27475
27476 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27477 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27478 "Detected Parity Error on %s\n",
27479 pci_name(dev));
27480
27481 - atomic_inc(&pci_parity_count);
27482 + atomic_inc_unchecked(&pci_parity_count);
27483 }
27484 }
27485 }
27486 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27487 if (!check_pci_errors)
27488 return;
27489
27490 - before_count = atomic_read(&pci_parity_count);
27491 + before_count = atomic_read_unchecked(&pci_parity_count);
27492
27493 /* scan all PCI devices looking for a Parity Error on devices and
27494 * bridges.
27495 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27496 /* Only if operator has selected panic on PCI Error */
27497 if (edac_pci_get_panic_on_pe()) {
27498 /* If the count is different 'after' from 'before' */
27499 - if (before_count != atomic_read(&pci_parity_count))
27500 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27501 panic("EDAC: PCI Parity Error");
27502 }
27503 }
27504 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27505 index c0510b3..6e2a954 100644
27506 --- a/drivers/edac/i3000_edac.c
27507 +++ b/drivers/edac/i3000_edac.c
27508 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27509 edac_mc_free(mci);
27510 }
27511
27512 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27513 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27514 {
27515 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27516 I3000},
27517 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27518 index aa08497..7e6822a 100644
27519 --- a/drivers/edac/i3200_edac.c
27520 +++ b/drivers/edac/i3200_edac.c
27521 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27522 edac_mc_free(mci);
27523 }
27524
27525 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27526 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27527 {
27528 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27529 I3200},
27530 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27531 index 4dc3ac2..67d05a6 100644
27532 --- a/drivers/edac/i5000_edac.c
27533 +++ b/drivers/edac/i5000_edac.c
27534 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27535 *
27536 * The "E500P" device is the first device supported.
27537 */
27538 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27539 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27540 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27541 .driver_data = I5000P},
27542
27543 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27544 index bcbdeec..9886d16 100644
27545 --- a/drivers/edac/i5100_edac.c
27546 +++ b/drivers/edac/i5100_edac.c
27547 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27548 edac_mc_free(mci);
27549 }
27550
27551 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27552 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27553 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27554 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27555 { 0, }
27556 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27557 index 74d6ec34..baff517 100644
27558 --- a/drivers/edac/i5400_edac.c
27559 +++ b/drivers/edac/i5400_edac.c
27560 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27561 *
27562 * The "E500P" device is the first device supported.
27563 */
27564 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27565 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27566 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27567 {0,} /* 0 terminated list. */
27568 };
27569 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27570 index a76fe83..15479e6 100644
27571 --- a/drivers/edac/i7300_edac.c
27572 +++ b/drivers/edac/i7300_edac.c
27573 @@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27574 *
27575 * Has only 8086:360c PCI ID
27576 */
27577 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27578 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27579 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27580 {0,} /* 0 terminated list. */
27581 };
27582 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27583 index f6cf448..3f612e9 100644
27584 --- a/drivers/edac/i7core_edac.c
27585 +++ b/drivers/edac/i7core_edac.c
27586 @@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev_table[] = {
27587 /*
27588 * pci_device_id table for which devices we are looking for
27589 */
27590 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27591 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27592 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27593 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27594 {0,} /* 0 terminated list. */
27595 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27596 index 4329d39..f3022ef 100644
27597 --- a/drivers/edac/i82443bxgx_edac.c
27598 +++ b/drivers/edac/i82443bxgx_edac.c
27599 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27600
27601 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27602
27603 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27604 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27605 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27606 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27607 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27608 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27609 index 931a057..fd28340 100644
27610 --- a/drivers/edac/i82860_edac.c
27611 +++ b/drivers/edac/i82860_edac.c
27612 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27613 edac_mc_free(mci);
27614 }
27615
27616 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27617 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27618 {
27619 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27620 I82860},
27621 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27622 index 33864c6..01edc61 100644
27623 --- a/drivers/edac/i82875p_edac.c
27624 +++ b/drivers/edac/i82875p_edac.c
27625 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27626 edac_mc_free(mci);
27627 }
27628
27629 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27630 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27631 {
27632 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27633 I82875P},
27634 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27635 index a5da732..983363b 100644
27636 --- a/drivers/edac/i82975x_edac.c
27637 +++ b/drivers/edac/i82975x_edac.c
27638 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27639 edac_mc_free(mci);
27640 }
27641
27642 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27643 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27644 {
27645 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27646 I82975X
27647 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27648 index 795a320..3bbc3d3 100644
27649 --- a/drivers/edac/mce_amd.h
27650 +++ b/drivers/edac/mce_amd.h
27651 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27652 bool (*dc_mce)(u16, u8);
27653 bool (*ic_mce)(u16, u8);
27654 bool (*nb_mce)(u16, u8);
27655 -};
27656 +} __no_const;
27657
27658 void amd_report_gart_errors(bool);
27659 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
27660 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27661 index b153674..ad2ba9b 100644
27662 --- a/drivers/edac/r82600_edac.c
27663 +++ b/drivers/edac/r82600_edac.c
27664 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27665 edac_mc_free(mci);
27666 }
27667
27668 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27669 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27670 {
27671 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27672 },
27673 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27674 index b6f47de..c5acf3a 100644
27675 --- a/drivers/edac/x38_edac.c
27676 +++ b/drivers/edac/x38_edac.c
27677 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27678 edac_mc_free(mci);
27679 }
27680
27681 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27682 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27683 {
27684 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27685 X38},
27686 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27687 index 85661b0..c784559a 100644
27688 --- a/drivers/firewire/core-card.c
27689 +++ b/drivers/firewire/core-card.c
27690 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27691
27692 void fw_core_remove_card(struct fw_card *card)
27693 {
27694 - struct fw_card_driver dummy_driver = dummy_driver_template;
27695 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27696
27697 card->driver->update_phy_reg(card, 4,
27698 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27699 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27700 index 4799393..37bd3ab 100644
27701 --- a/drivers/firewire/core-cdev.c
27702 +++ b/drivers/firewire/core-cdev.c
27703 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27704 int ret;
27705
27706 if ((request->channels == 0 && request->bandwidth == 0) ||
27707 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27708 - request->bandwidth < 0)
27709 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27710 return -EINVAL;
27711
27712 r = kmalloc(sizeof(*r), GFP_KERNEL);
27713 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27714 index 334b82a..ea5261d 100644
27715 --- a/drivers/firewire/core-transaction.c
27716 +++ b/drivers/firewire/core-transaction.c
27717 @@ -37,6 +37,7 @@
27718 #include <linux/timer.h>
27719 #include <linux/types.h>
27720 #include <linux/workqueue.h>
27721 +#include <linux/sched.h>
27722
27723 #include <asm/byteorder.h>
27724
27725 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
27726 struct transaction_callback_data d;
27727 struct fw_transaction t;
27728
27729 + pax_track_stack();
27730 +
27731 init_timer_on_stack(&t.split_timeout_timer);
27732 init_completion(&d.done);
27733 d.payload = payload;
27734 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27735 index b45be57..5fad18b 100644
27736 --- a/drivers/firewire/core.h
27737 +++ b/drivers/firewire/core.h
27738 @@ -101,6 +101,7 @@ struct fw_card_driver {
27739
27740 int (*stop_iso)(struct fw_iso_context *ctx);
27741 };
27742 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27743
27744 void fw_card_initialize(struct fw_card *card,
27745 const struct fw_card_driver *driver, struct device *device);
27746 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27747 index bcb1126..2cc2121 100644
27748 --- a/drivers/firmware/dmi_scan.c
27749 +++ b/drivers/firmware/dmi_scan.c
27750 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27751 }
27752 }
27753 else {
27754 - /*
27755 - * no iounmap() for that ioremap(); it would be a no-op, but
27756 - * it's so early in setup that sucker gets confused into doing
27757 - * what it shouldn't if we actually call it.
27758 - */
27759 p = dmi_ioremap(0xF0000, 0x10000);
27760 if (p == NULL)
27761 goto error;
27762 @@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27763 if (buf == NULL)
27764 return -1;
27765
27766 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27767 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27768
27769 iounmap(buf);
27770 return 0;
27771 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27772 index 98723cb..10ca85b 100644
27773 --- a/drivers/gpio/gpio-vr41xx.c
27774 +++ b/drivers/gpio/gpio-vr41xx.c
27775 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27776 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27777 maskl, pendl, maskh, pendh);
27778
27779 - atomic_inc(&irq_err_count);
27780 + atomic_inc_unchecked(&irq_err_count);
27781
27782 return -EINVAL;
27783 }
27784 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27785 index 2410c40..2d03563 100644
27786 --- a/drivers/gpu/drm/drm_crtc.c
27787 +++ b/drivers/gpu/drm/drm_crtc.c
27788 @@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27789 */
27790 if ((out_resp->count_modes >= mode_count) && mode_count) {
27791 copied = 0;
27792 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27793 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27794 list_for_each_entry(mode, &connector->modes, head) {
27795 drm_crtc_convert_to_umode(&u_mode, mode);
27796 if (copy_to_user(mode_ptr + copied,
27797 @@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27798
27799 if ((out_resp->count_props >= props_count) && props_count) {
27800 copied = 0;
27801 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27802 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27803 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27804 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27805 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27806 if (connector->property_ids[i] != 0) {
27807 if (put_user(connector->property_ids[i],
27808 @@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27809
27810 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27811 copied = 0;
27812 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27813 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27814 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27815 if (connector->encoder_ids[i] != 0) {
27816 if (put_user(connector->encoder_ids[i],
27817 @@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27818 }
27819
27820 for (i = 0; i < crtc_req->count_connectors; i++) {
27821 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27822 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27823 if (get_user(out_id, &set_connectors_ptr[i])) {
27824 ret = -EFAULT;
27825 goto out;
27826 @@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27827 fb = obj_to_fb(obj);
27828
27829 num_clips = r->num_clips;
27830 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27831 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27832
27833 if (!num_clips != !clips_ptr) {
27834 ret = -EINVAL;
27835 @@ -2276,7 +2276,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27836 out_resp->flags = property->flags;
27837
27838 if ((out_resp->count_values >= value_count) && value_count) {
27839 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27840 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27841 for (i = 0; i < value_count; i++) {
27842 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27843 ret = -EFAULT;
27844 @@ -2289,7 +2289,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27845 if (property->flags & DRM_MODE_PROP_ENUM) {
27846 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27847 copied = 0;
27848 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27849 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27850 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27851
27852 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27853 @@ -2312,7 +2312,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27854 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27855 copied = 0;
27856 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27857 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27858 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27859
27860 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27861 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27862 @@ -2373,7 +2373,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27863 struct drm_mode_get_blob *out_resp = data;
27864 struct drm_property_blob *blob;
27865 int ret = 0;
27866 - void *blob_ptr;
27867 + void __user *blob_ptr;
27868
27869 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27870 return -EINVAL;
27871 @@ -2387,7 +2387,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27872 blob = obj_to_blob(obj);
27873
27874 if (out_resp->length == blob->length) {
27875 - blob_ptr = (void *)(unsigned long)out_resp->data;
27876 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
27877 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27878 ret = -EFAULT;
27879 goto done;
27880 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27881 index f88a9b2..8f4078f 100644
27882 --- a/drivers/gpu/drm/drm_crtc_helper.c
27883 +++ b/drivers/gpu/drm/drm_crtc_helper.c
27884 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27885 struct drm_crtc *tmp;
27886 int crtc_mask = 1;
27887
27888 - WARN(!crtc, "checking null crtc?\n");
27889 + BUG_ON(!crtc);
27890
27891 dev = crtc->dev;
27892
27893 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
27894 struct drm_encoder *encoder;
27895 bool ret = true;
27896
27897 + pax_track_stack();
27898 +
27899 crtc->enabled = drm_helper_crtc_in_use(crtc);
27900 if (!crtc->enabled)
27901 return true;
27902 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27903 index 93a112d..c8b065d 100644
27904 --- a/drivers/gpu/drm/drm_drv.c
27905 +++ b/drivers/gpu/drm/drm_drv.c
27906 @@ -307,7 +307,7 @@ module_exit(drm_core_exit);
27907 /**
27908 * Copy and IOCTL return string to user space
27909 */
27910 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27911 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27912 {
27913 int len;
27914
27915 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
27916
27917 dev = file_priv->minor->dev;
27918 atomic_inc(&dev->ioctl_count);
27919 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27920 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27921 ++file_priv->ioctl_count;
27922
27923 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27924 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27925 index 2ec7d48..be14bb1 100644
27926 --- a/drivers/gpu/drm/drm_fops.c
27927 +++ b/drivers/gpu/drm/drm_fops.c
27928 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device * dev)
27929 }
27930
27931 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27932 - atomic_set(&dev->counts[i], 0);
27933 + atomic_set_unchecked(&dev->counts[i], 0);
27934
27935 dev->sigdata.lock = NULL;
27936
27937 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct file *filp)
27938
27939 retcode = drm_open_helper(inode, filp, dev);
27940 if (!retcode) {
27941 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27942 - if (!dev->open_count++)
27943 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27944 + if (local_inc_return(&dev->open_count) == 1)
27945 retcode = drm_setup(dev);
27946 }
27947 if (!retcode) {
27948 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, struct file *filp)
27949
27950 mutex_lock(&drm_global_mutex);
27951
27952 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27953 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27954
27955 if (dev->driver->preclose)
27956 dev->driver->preclose(dev, file_priv);
27957 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, struct file *filp)
27958 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27959 task_pid_nr(current),
27960 (long)old_encode_dev(file_priv->minor->device),
27961 - dev->open_count);
27962 + local_read(&dev->open_count));
27963
27964 /* if the master has gone away we can't do anything with the lock */
27965 if (file_priv->minor->master)
27966 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, struct file *filp)
27967 * End inline drm_release
27968 */
27969
27970 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27971 - if (!--dev->open_count) {
27972 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27973 + if (local_dec_and_test(&dev->open_count)) {
27974 if (atomic_read(&dev->ioctl_count)) {
27975 DRM_ERROR("Device busy: %d\n",
27976 atomic_read(&dev->ioctl_count));
27977 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27978 index c87dc96..326055d 100644
27979 --- a/drivers/gpu/drm/drm_global.c
27980 +++ b/drivers/gpu/drm/drm_global.c
27981 @@ -36,7 +36,7 @@
27982 struct drm_global_item {
27983 struct mutex mutex;
27984 void *object;
27985 - int refcount;
27986 + atomic_t refcount;
27987 };
27988
27989 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27990 @@ -49,7 +49,7 @@ void drm_global_init(void)
27991 struct drm_global_item *item = &glob[i];
27992 mutex_init(&item->mutex);
27993 item->object = NULL;
27994 - item->refcount = 0;
27995 + atomic_set(&item->refcount, 0);
27996 }
27997 }
27998
27999 @@ -59,7 +59,7 @@ void drm_global_release(void)
28000 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
28001 struct drm_global_item *item = &glob[i];
28002 BUG_ON(item->object != NULL);
28003 - BUG_ON(item->refcount != 0);
28004 + BUG_ON(atomic_read(&item->refcount) != 0);
28005 }
28006 }
28007
28008 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28009 void *object;
28010
28011 mutex_lock(&item->mutex);
28012 - if (item->refcount == 0) {
28013 + if (atomic_read(&item->refcount) == 0) {
28014 item->object = kzalloc(ref->size, GFP_KERNEL);
28015 if (unlikely(item->object == NULL)) {
28016 ret = -ENOMEM;
28017 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28018 goto out_err;
28019
28020 }
28021 - ++item->refcount;
28022 + atomic_inc(&item->refcount);
28023 ref->object = item->object;
28024 object = item->object;
28025 mutex_unlock(&item->mutex);
28026 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28027 struct drm_global_item *item = &glob[ref->global_type];
28028
28029 mutex_lock(&item->mutex);
28030 - BUG_ON(item->refcount == 0);
28031 + BUG_ON(atomic_read(&item->refcount) == 0);
28032 BUG_ON(ref->object != item->object);
28033 - if (--item->refcount == 0) {
28034 + if (atomic_dec_and_test(&item->refcount)) {
28035 ref->release(ref);
28036 item->object = NULL;
28037 }
28038 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28039 index ab1162d..42587b2 100644
28040 --- a/drivers/gpu/drm/drm_info.c
28041 +++ b/drivers/gpu/drm/drm_info.c
28042 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28043 struct drm_local_map *map;
28044 struct drm_map_list *r_list;
28045
28046 - /* Hardcoded from _DRM_FRAME_BUFFER,
28047 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28048 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28049 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28050 + static const char * const types[] = {
28051 + [_DRM_FRAME_BUFFER] = "FB",
28052 + [_DRM_REGISTERS] = "REG",
28053 + [_DRM_SHM] = "SHM",
28054 + [_DRM_AGP] = "AGP",
28055 + [_DRM_SCATTER_GATHER] = "SG",
28056 + [_DRM_CONSISTENT] = "PCI",
28057 + [_DRM_GEM] = "GEM" };
28058 const char *type;
28059 int i;
28060
28061 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28062 map = r_list->map;
28063 if (!map)
28064 continue;
28065 - if (map->type < 0 || map->type > 5)
28066 + if (map->type >= ARRAY_SIZE(types))
28067 type = "??";
28068 else
28069 type = types[map->type];
28070 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28071 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28072 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28073 vma->vm_flags & VM_IO ? 'i' : '-',
28074 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28075 + 0);
28076 +#else
28077 vma->vm_pgoff);
28078 +#endif
28079
28080 #if defined(__i386__)
28081 pgprot = pgprot_val(vma->vm_page_prot);
28082 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28083 index 4a058c7..b42cd92 100644
28084 --- a/drivers/gpu/drm/drm_ioc32.c
28085 +++ b/drivers/gpu/drm/drm_ioc32.c
28086 @@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28087 request = compat_alloc_user_space(nbytes);
28088 if (!access_ok(VERIFY_WRITE, request, nbytes))
28089 return -EFAULT;
28090 - list = (struct drm_buf_desc *) (request + 1);
28091 + list = (struct drm_buf_desc __user *) (request + 1);
28092
28093 if (__put_user(count, &request->count)
28094 || __put_user(list, &request->list))
28095 @@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28096 request = compat_alloc_user_space(nbytes);
28097 if (!access_ok(VERIFY_WRITE, request, nbytes))
28098 return -EFAULT;
28099 - list = (struct drm_buf_pub *) (request + 1);
28100 + list = (struct drm_buf_pub __user *) (request + 1);
28101
28102 if (__put_user(count, &request->count)
28103 || __put_user(list, &request->list))
28104 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28105 index 904d7e9..ab88581 100644
28106 --- a/drivers/gpu/drm/drm_ioctl.c
28107 +++ b/drivers/gpu/drm/drm_ioctl.c
28108 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28109 stats->data[i].value =
28110 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28111 else
28112 - stats->data[i].value = atomic_read(&dev->counts[i]);
28113 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28114 stats->data[i].type = dev->types[i];
28115 }
28116
28117 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28118 index 632ae24..244cf4a 100644
28119 --- a/drivers/gpu/drm/drm_lock.c
28120 +++ b/drivers/gpu/drm/drm_lock.c
28121 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28122 if (drm_lock_take(&master->lock, lock->context)) {
28123 master->lock.file_priv = file_priv;
28124 master->lock.lock_time = jiffies;
28125 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28126 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28127 break; /* Got lock */
28128 }
28129
28130 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28131 return -EINVAL;
28132 }
28133
28134 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28135 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28136
28137 if (drm_lock_free(&master->lock, lock->context)) {
28138 /* FIXME: Should really bail out here. */
28139 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28140 index 8f371e8..9f85d52 100644
28141 --- a/drivers/gpu/drm/i810/i810_dma.c
28142 +++ b/drivers/gpu/drm/i810/i810_dma.c
28143 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28144 dma->buflist[vertex->idx],
28145 vertex->discard, vertex->used);
28146
28147 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28148 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28149 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28150 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28151 sarea_priv->last_enqueue = dev_priv->counter - 1;
28152 sarea_priv->last_dispatch = (int)hw_status[5];
28153
28154 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28155 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28156 mc->last_render);
28157
28158 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28159 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28160 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28161 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28162 sarea_priv->last_enqueue = dev_priv->counter - 1;
28163 sarea_priv->last_dispatch = (int)hw_status[5];
28164
28165 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28166 index c9339f4..f5e1b9d 100644
28167 --- a/drivers/gpu/drm/i810/i810_drv.h
28168 +++ b/drivers/gpu/drm/i810/i810_drv.h
28169 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28170 int page_flipping;
28171
28172 wait_queue_head_t irq_queue;
28173 - atomic_t irq_received;
28174 - atomic_t irq_emitted;
28175 + atomic_unchecked_t irq_received;
28176 + atomic_unchecked_t irq_emitted;
28177
28178 int front_offset;
28179 } drm_i810_private_t;
28180 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28181 index 3c395a5..02889c2 100644
28182 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28183 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28184 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28185 I915_READ(GTIMR));
28186 }
28187 seq_printf(m, "Interrupts received: %d\n",
28188 - atomic_read(&dev_priv->irq_received));
28189 + atomic_read_unchecked(&dev_priv->irq_received));
28190 for (i = 0; i < I915_NUM_RINGS; i++) {
28191 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28192 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28193 @@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28194 return ret;
28195
28196 if (opregion->header)
28197 - seq_write(m, opregion->header, OPREGION_SIZE);
28198 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28199
28200 mutex_unlock(&dev->struct_mutex);
28201
28202 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28203 index c72b590..aa86f0a 100644
28204 --- a/drivers/gpu/drm/i915/i915_dma.c
28205 +++ b/drivers/gpu/drm/i915/i915_dma.c
28206 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28207 bool can_switch;
28208
28209 spin_lock(&dev->count_lock);
28210 - can_switch = (dev->open_count == 0);
28211 + can_switch = (local_read(&dev->open_count) == 0);
28212 spin_unlock(&dev->count_lock);
28213 return can_switch;
28214 }
28215 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28216 index 1a2a2d1..f280182 100644
28217 --- a/drivers/gpu/drm/i915/i915_drv.h
28218 +++ b/drivers/gpu/drm/i915/i915_drv.h
28219 @@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
28220 /* render clock increase/decrease */
28221 /* display clock increase/decrease */
28222 /* pll clock increase/decrease */
28223 -};
28224 +} __no_const;
28225
28226 struct intel_device_info {
28227 u8 gen;
28228 @@ -305,7 +305,7 @@ typedef struct drm_i915_private {
28229 int current_page;
28230 int page_flipping;
28231
28232 - atomic_t irq_received;
28233 + atomic_unchecked_t irq_received;
28234
28235 /* protects the irq masks */
28236 spinlock_t irq_lock;
28237 @@ -883,7 +883,7 @@ struct drm_i915_gem_object {
28238 * will be page flipped away on the next vblank. When it
28239 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28240 */
28241 - atomic_t pending_flip;
28242 + atomic_unchecked_t pending_flip;
28243 };
28244
28245 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28246 @@ -1263,7 +1263,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28247 extern void intel_teardown_gmbus(struct drm_device *dev);
28248 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28249 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28250 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28251 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28252 {
28253 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28254 }
28255 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28256 index 4934cf8..1da9c84 100644
28257 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28258 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28259 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28260 i915_gem_clflush_object(obj);
28261
28262 if (obj->base.pending_write_domain)
28263 - cd->flips |= atomic_read(&obj->pending_flip);
28264 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28265
28266 /* The actual obj->write_domain will be updated with
28267 * pending_write_domain after we emit the accumulated flush for all
28268 @@ -864,9 +864,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28269
28270 static int
28271 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28272 - int count)
28273 + unsigned int count)
28274 {
28275 - int i;
28276 + unsigned int i;
28277
28278 for (i = 0; i < count; i++) {
28279 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28280 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28281 index 73248d0..f7bac29 100644
28282 --- a/drivers/gpu/drm/i915/i915_irq.c
28283 +++ b/drivers/gpu/drm/i915/i915_irq.c
28284 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28285 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28286 struct drm_i915_master_private *master_priv;
28287
28288 - atomic_inc(&dev_priv->irq_received);
28289 + atomic_inc_unchecked(&dev_priv->irq_received);
28290
28291 /* disable master interrupt before clearing iir */
28292 de_ier = I915_READ(DEIER);
28293 @@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28294 struct drm_i915_master_private *master_priv;
28295 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28296
28297 - atomic_inc(&dev_priv->irq_received);
28298 + atomic_inc_unchecked(&dev_priv->irq_received);
28299
28300 if (IS_GEN6(dev))
28301 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28302 @@ -1229,7 +1229,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28303 int ret = IRQ_NONE, pipe;
28304 bool blc_event = false;
28305
28306 - atomic_inc(&dev_priv->irq_received);
28307 + atomic_inc_unchecked(&dev_priv->irq_received);
28308
28309 iir = I915_READ(IIR);
28310
28311 @@ -1741,7 +1741,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28312 {
28313 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28314
28315 - atomic_set(&dev_priv->irq_received, 0);
28316 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28317
28318 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28319 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28320 @@ -1905,7 +1905,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28321 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28322 int pipe;
28323
28324 - atomic_set(&dev_priv->irq_received, 0);
28325 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28326
28327 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28328 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28329 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28330 index 07e7cf3..c75f312 100644
28331 --- a/drivers/gpu/drm/i915/intel_display.c
28332 +++ b/drivers/gpu/drm/i915/intel_display.c
28333 @@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28334
28335 wait_event(dev_priv->pending_flip_queue,
28336 atomic_read(&dev_priv->mm.wedged) ||
28337 - atomic_read(&obj->pending_flip) == 0);
28338 + atomic_read_unchecked(&obj->pending_flip) == 0);
28339
28340 /* Big Hammer, we also need to ensure that any pending
28341 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28342 @@ -2826,7 +2826,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28343 obj = to_intel_framebuffer(crtc->fb)->obj;
28344 dev_priv = crtc->dev->dev_private;
28345 wait_event(dev_priv->pending_flip_queue,
28346 - atomic_read(&obj->pending_flip) == 0);
28347 + atomic_read_unchecked(&obj->pending_flip) == 0);
28348 }
28349
28350 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28351 @@ -6676,7 +6676,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28352
28353 atomic_clear_mask(1 << intel_crtc->plane,
28354 &obj->pending_flip.counter);
28355 - if (atomic_read(&obj->pending_flip) == 0)
28356 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28357 wake_up(&dev_priv->pending_flip_queue);
28358
28359 schedule_work(&work->work);
28360 @@ -6965,7 +6965,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28361 /* Block clients from rendering to the new back buffer until
28362 * the flip occurs and the object is no longer visible.
28363 */
28364 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28365 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28366
28367 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28368 if (ret)
28369 @@ -6979,7 +6979,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28370 return 0;
28371
28372 cleanup_pending:
28373 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28374 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28375 cleanup_objs:
28376 drm_gem_object_unreference(&work->old_fb_obj->base);
28377 drm_gem_object_unreference(&obj->base);
28378 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28379 index 54558a0..2d97005 100644
28380 --- a/drivers/gpu/drm/mga/mga_drv.h
28381 +++ b/drivers/gpu/drm/mga/mga_drv.h
28382 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28383 u32 clear_cmd;
28384 u32 maccess;
28385
28386 - atomic_t vbl_received; /**< Number of vblanks received. */
28387 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28388 wait_queue_head_t fence_queue;
28389 - atomic_t last_fence_retired;
28390 + atomic_unchecked_t last_fence_retired;
28391 u32 next_fence_to_post;
28392
28393 unsigned int fb_cpp;
28394 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28395 index 2581202..f230a8d9 100644
28396 --- a/drivers/gpu/drm/mga/mga_irq.c
28397 +++ b/drivers/gpu/drm/mga/mga_irq.c
28398 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28399 if (crtc != 0)
28400 return 0;
28401
28402 - return atomic_read(&dev_priv->vbl_received);
28403 + return atomic_read_unchecked(&dev_priv->vbl_received);
28404 }
28405
28406
28407 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28408 /* VBLANK interrupt */
28409 if (status & MGA_VLINEPEN) {
28410 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28411 - atomic_inc(&dev_priv->vbl_received);
28412 + atomic_inc_unchecked(&dev_priv->vbl_received);
28413 drm_handle_vblank(dev, 0);
28414 handled = 1;
28415 }
28416 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28417 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28418 MGA_WRITE(MGA_PRIMEND, prim_end);
28419
28420 - atomic_inc(&dev_priv->last_fence_retired);
28421 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28422 DRM_WAKEUP(&dev_priv->fence_queue);
28423 handled = 1;
28424 }
28425 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28426 * using fences.
28427 */
28428 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28429 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28430 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28431 - *sequence) <= (1 << 23)));
28432
28433 *sequence = cur_fence;
28434 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28435 index b311fab..dc11d6a 100644
28436 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28437 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28438 @@ -201,7 +201,7 @@ struct methods {
28439 const char desc[8];
28440 void (*loadbios)(struct drm_device *, uint8_t *);
28441 const bool rw;
28442 -};
28443 +} __do_const;
28444
28445 static struct methods shadow_methods[] = {
28446 { "PRAMIN", load_vbios_pramin, true },
28447 @@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28448 struct bit_table {
28449 const char id;
28450 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28451 -};
28452 +} __no_const;
28453
28454 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28455
28456 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28457 index d7d51de..7c6a7f1 100644
28458 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28459 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28460 @@ -238,7 +238,7 @@ struct nouveau_channel {
28461 struct list_head pending;
28462 uint32_t sequence;
28463 uint32_t sequence_ack;
28464 - atomic_t last_sequence_irq;
28465 + atomic_unchecked_t last_sequence_irq;
28466 struct nouveau_vma vma;
28467 } fence;
28468
28469 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28470 u32 handle, u16 class);
28471 void (*set_tile_region)(struct drm_device *dev, int i);
28472 void (*tlb_flush)(struct drm_device *, int engine);
28473 -};
28474 +} __no_const;
28475
28476 struct nouveau_instmem_engine {
28477 void *priv;
28478 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28479 struct nouveau_mc_engine {
28480 int (*init)(struct drm_device *dev);
28481 void (*takedown)(struct drm_device *dev);
28482 -};
28483 +} __no_const;
28484
28485 struct nouveau_timer_engine {
28486 int (*init)(struct drm_device *dev);
28487 void (*takedown)(struct drm_device *dev);
28488 uint64_t (*read)(struct drm_device *dev);
28489 -};
28490 +} __no_const;
28491
28492 struct nouveau_fb_engine {
28493 int num_tiles;
28494 @@ -513,7 +513,7 @@ struct nouveau_vram_engine {
28495 void (*put)(struct drm_device *, struct nouveau_mem **);
28496
28497 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28498 -};
28499 +} __no_const;
28500
28501 struct nouveau_engine {
28502 struct nouveau_instmem_engine instmem;
28503 @@ -660,7 +660,7 @@ struct drm_nouveau_private {
28504 struct drm_global_reference mem_global_ref;
28505 struct ttm_bo_global_ref bo_global_ref;
28506 struct ttm_bo_device bdev;
28507 - atomic_t validate_sequence;
28508 + atomic_unchecked_t validate_sequence;
28509 } ttm;
28510
28511 struct {
28512 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28513 index ae22dfa..4f09960 100644
28514 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28515 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28516 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28517 if (USE_REFCNT(dev))
28518 sequence = nvchan_rd32(chan, 0x48);
28519 else
28520 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28521 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28522
28523 if (chan->fence.sequence_ack == sequence)
28524 goto out;
28525 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28526 return ret;
28527 }
28528
28529 - atomic_set(&chan->fence.last_sequence_irq, 0);
28530 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28531 return 0;
28532 }
28533
28534 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28535 index 5f0bc57..eb9fac8 100644
28536 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28537 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28538 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28539 int trycnt = 0;
28540 int ret, i;
28541
28542 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28543 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28544 retry:
28545 if (++trycnt > 100000) {
28546 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28547 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28548 index 10656e4..59bf2a4 100644
28549 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28550 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28551 @@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28552 bool can_switch;
28553
28554 spin_lock(&dev->count_lock);
28555 - can_switch = (dev->open_count == 0);
28556 + can_switch = (local_read(&dev->open_count) == 0);
28557 spin_unlock(&dev->count_lock);
28558 return can_switch;
28559 }
28560 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28561 index dbdea8e..cd6eeeb 100644
28562 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28563 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28564 @@ -554,7 +554,7 @@ static int
28565 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28566 u32 class, u32 mthd, u32 data)
28567 {
28568 - atomic_set(&chan->fence.last_sequence_irq, data);
28569 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28570 return 0;
28571 }
28572
28573 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28574 index 570e190..084a31a 100644
28575 --- a/drivers/gpu/drm/r128/r128_cce.c
28576 +++ b/drivers/gpu/drm/r128/r128_cce.c
28577 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28578
28579 /* GH: Simple idle check.
28580 */
28581 - atomic_set(&dev_priv->idle_count, 0);
28582 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28583
28584 /* We don't support anything other than bus-mastering ring mode,
28585 * but the ring can be in either AGP or PCI space for the ring
28586 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28587 index 930c71b..499aded 100644
28588 --- a/drivers/gpu/drm/r128/r128_drv.h
28589 +++ b/drivers/gpu/drm/r128/r128_drv.h
28590 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28591 int is_pci;
28592 unsigned long cce_buffers_offset;
28593
28594 - atomic_t idle_count;
28595 + atomic_unchecked_t idle_count;
28596
28597 int page_flipping;
28598 int current_page;
28599 u32 crtc_offset;
28600 u32 crtc_offset_cntl;
28601
28602 - atomic_t vbl_received;
28603 + atomic_unchecked_t vbl_received;
28604
28605 u32 color_fmt;
28606 unsigned int front_offset;
28607 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28608 index 429d5a0..7e899ed 100644
28609 --- a/drivers/gpu/drm/r128/r128_irq.c
28610 +++ b/drivers/gpu/drm/r128/r128_irq.c
28611 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28612 if (crtc != 0)
28613 return 0;
28614
28615 - return atomic_read(&dev_priv->vbl_received);
28616 + return atomic_read_unchecked(&dev_priv->vbl_received);
28617 }
28618
28619 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28620 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28621 /* VBLANK interrupt */
28622 if (status & R128_CRTC_VBLANK_INT) {
28623 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28624 - atomic_inc(&dev_priv->vbl_received);
28625 + atomic_inc_unchecked(&dev_priv->vbl_received);
28626 drm_handle_vblank(dev, 0);
28627 return IRQ_HANDLED;
28628 }
28629 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28630 index a9e33ce..09edd4b 100644
28631 --- a/drivers/gpu/drm/r128/r128_state.c
28632 +++ b/drivers/gpu/drm/r128/r128_state.c
28633 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28634
28635 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28636 {
28637 - if (atomic_read(&dev_priv->idle_count) == 0)
28638 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28639 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28640 else
28641 - atomic_set(&dev_priv->idle_count, 0);
28642 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28643 }
28644
28645 #endif
28646 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
28647 index 14cc88a..cc7b3a5 100644
28648 --- a/drivers/gpu/drm/radeon/atom.c
28649 +++ b/drivers/gpu/drm/radeon/atom.c
28650 @@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
28651 char name[512];
28652 int i;
28653
28654 + pax_track_stack();
28655 +
28656 if (!ctx)
28657 return NULL;
28658
28659 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28660 index 5a82b6b..9e69c73 100644
28661 --- a/drivers/gpu/drm/radeon/mkregtable.c
28662 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28663 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28664 regex_t mask_rex;
28665 regmatch_t match[4];
28666 char buf[1024];
28667 - size_t end;
28668 + long end;
28669 int len;
28670 int done = 0;
28671 int r;
28672 unsigned o;
28673 struct offset *offset;
28674 char last_reg_s[10];
28675 - int last_reg;
28676 + unsigned long last_reg;
28677
28678 if (regcomp
28679 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28680 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28681 index 184628c..30e1725 100644
28682 --- a/drivers/gpu/drm/radeon/radeon.h
28683 +++ b/drivers/gpu/drm/radeon/radeon.h
28684 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28685 */
28686 struct radeon_fence_driver {
28687 uint32_t scratch_reg;
28688 - atomic_t seq;
28689 + atomic_unchecked_t seq;
28690 uint32_t last_seq;
28691 unsigned long last_jiffies;
28692 unsigned long last_timeout;
28693 @@ -962,7 +962,7 @@ struct radeon_asic {
28694 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28695 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28696 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28697 -};
28698 +} __no_const;
28699
28700 /*
28701 * Asic structures
28702 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
28703 index a098edc..d001c09 100644
28704 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
28705 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
28706 @@ -569,6 +569,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
28707 struct radeon_gpio_rec gpio;
28708 struct radeon_hpd hpd;
28709
28710 + pax_track_stack();
28711 +
28712 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
28713 return false;
28714
28715 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28716 index b51e157..8f14fb9 100644
28717 --- a/drivers/gpu/drm/radeon/radeon_device.c
28718 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28719 @@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28720 bool can_switch;
28721
28722 spin_lock(&dev->count_lock);
28723 - can_switch = (dev->open_count == 0);
28724 + can_switch = (local_read(&dev->open_count) == 0);
28725 spin_unlock(&dev->count_lock);
28726 return can_switch;
28727 }
28728 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
28729 index 07ac481..41cb437 100644
28730 --- a/drivers/gpu/drm/radeon/radeon_display.c
28731 +++ b/drivers/gpu/drm/radeon/radeon_display.c
28732 @@ -926,6 +926,8 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
28733 uint32_t post_div;
28734 u32 pll_out_min, pll_out_max;
28735
28736 + pax_track_stack();
28737 +
28738 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
28739 freq = freq * 1000;
28740
28741 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28742 index a1b59ca..86f2d44 100644
28743 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28744 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28745 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28746
28747 /* SW interrupt */
28748 wait_queue_head_t swi_queue;
28749 - atomic_t swi_emitted;
28750 + atomic_unchecked_t swi_emitted;
28751 int vblank_crtc;
28752 uint32_t irq_enable_reg;
28753 uint32_t r500_disp_irq_reg;
28754 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28755 index 7fd4e3e..9748ab5 100644
28756 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28757 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28758 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28759 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28760 return 0;
28761 }
28762 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28763 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28764 if (!rdev->cp.ready)
28765 /* FIXME: cp is not running assume everythings is done right
28766 * away
28767 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28768 return r;
28769 }
28770 radeon_fence_write(rdev, 0);
28771 - atomic_set(&rdev->fence_drv.seq, 0);
28772 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28773 INIT_LIST_HEAD(&rdev->fence_drv.created);
28774 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28775 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28776 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28777 index 48b7cea..342236f 100644
28778 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28779 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28780 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28781 request = compat_alloc_user_space(sizeof(*request));
28782 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28783 || __put_user(req32.param, &request->param)
28784 - || __put_user((void __user *)(unsigned long)req32.value,
28785 + || __put_user((unsigned long)req32.value,
28786 &request->value))
28787 return -EFAULT;
28788
28789 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28790 index 465746b..cb2b055 100644
28791 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28792 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28793 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28794 unsigned int ret;
28795 RING_LOCALS;
28796
28797 - atomic_inc(&dev_priv->swi_emitted);
28798 - ret = atomic_read(&dev_priv->swi_emitted);
28799 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28800 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28801
28802 BEGIN_RING(4);
28803 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28804 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28805 drm_radeon_private_t *dev_priv =
28806 (drm_radeon_private_t *) dev->dev_private;
28807
28808 - atomic_set(&dev_priv->swi_emitted, 0);
28809 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28810 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28811
28812 dev->max_vblank_count = 0x001fffff;
28813 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28814 index 92e7ea7..147ffad 100644
28815 --- a/drivers/gpu/drm/radeon/radeon_state.c
28816 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28817 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28818 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28819 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28820
28821 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28822 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28823 sarea_priv->nbox * sizeof(depth_boxes[0])))
28824 return -EFAULT;
28825
28826 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28827 {
28828 drm_radeon_private_t *dev_priv = dev->dev_private;
28829 drm_radeon_getparam_t *param = data;
28830 - int value;
28831 + int value = 0;
28832
28833 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28834
28835 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28836 index 0b5468b..9c4b308 100644
28837 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
28838 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28839 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28840 }
28841 if (unlikely(ttm_vm_ops == NULL)) {
28842 ttm_vm_ops = vma->vm_ops;
28843 - radeon_ttm_vm_ops = *ttm_vm_ops;
28844 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28845 + pax_open_kernel();
28846 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28847 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28848 + pax_close_kernel();
28849 }
28850 vma->vm_ops = &radeon_ttm_vm_ops;
28851 return 0;
28852 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28853 index a9049ed..501f284 100644
28854 --- a/drivers/gpu/drm/radeon/rs690.c
28855 +++ b/drivers/gpu/drm/radeon/rs690.c
28856 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28857 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28858 rdev->pm.sideport_bandwidth.full)
28859 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28860 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28861 + read_delay_latency.full = dfixed_const(800 * 1000);
28862 read_delay_latency.full = dfixed_div(read_delay_latency,
28863 rdev->pm.igp_sideport_mclk);
28864 + a.full = dfixed_const(370);
28865 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28866 } else {
28867 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28868 rdev->pm.k8_bandwidth.full)
28869 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28870 index 727e93d..1565650 100644
28871 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28872 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28873 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28874 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28875 struct shrink_control *sc)
28876 {
28877 - static atomic_t start_pool = ATOMIC_INIT(0);
28878 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28879 unsigned i;
28880 - unsigned pool_offset = atomic_add_return(1, &start_pool);
28881 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28882 struct ttm_page_pool *pool;
28883 int shrink_pages = sc->nr_to_scan;
28884
28885 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28886 index 9cf87d9..2000b7d 100644
28887 --- a/drivers/gpu/drm/via/via_drv.h
28888 +++ b/drivers/gpu/drm/via/via_drv.h
28889 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28890 typedef uint32_t maskarray_t[5];
28891
28892 typedef struct drm_via_irq {
28893 - atomic_t irq_received;
28894 + atomic_unchecked_t irq_received;
28895 uint32_t pending_mask;
28896 uint32_t enable_mask;
28897 wait_queue_head_t irq_queue;
28898 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28899 struct timeval last_vblank;
28900 int last_vblank_valid;
28901 unsigned usec_per_vblank;
28902 - atomic_t vbl_received;
28903 + atomic_unchecked_t vbl_received;
28904 drm_via_state_t hc_state;
28905 char pci_buf[VIA_PCI_BUF_SIZE];
28906 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28907 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28908 index d391f48..10c8ca3 100644
28909 --- a/drivers/gpu/drm/via/via_irq.c
28910 +++ b/drivers/gpu/drm/via/via_irq.c
28911 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28912 if (crtc != 0)
28913 return 0;
28914
28915 - return atomic_read(&dev_priv->vbl_received);
28916 + return atomic_read_unchecked(&dev_priv->vbl_received);
28917 }
28918
28919 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28920 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28921
28922 status = VIA_READ(VIA_REG_INTERRUPT);
28923 if (status & VIA_IRQ_VBLANK_PENDING) {
28924 - atomic_inc(&dev_priv->vbl_received);
28925 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28926 + atomic_inc_unchecked(&dev_priv->vbl_received);
28927 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28928 do_gettimeofday(&cur_vblank);
28929 if (dev_priv->last_vblank_valid) {
28930 dev_priv->usec_per_vblank =
28931 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28932 dev_priv->last_vblank = cur_vblank;
28933 dev_priv->last_vblank_valid = 1;
28934 }
28935 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28936 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28937 DRM_DEBUG("US per vblank is: %u\n",
28938 dev_priv->usec_per_vblank);
28939 }
28940 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28941
28942 for (i = 0; i < dev_priv->num_irqs; ++i) {
28943 if (status & cur_irq->pending_mask) {
28944 - atomic_inc(&cur_irq->irq_received);
28945 + atomic_inc_unchecked(&cur_irq->irq_received);
28946 DRM_WAKEUP(&cur_irq->irq_queue);
28947 handled = 1;
28948 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28949 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28950 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28951 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28952 masks[irq][4]));
28953 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28954 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28955 } else {
28956 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28957 (((cur_irq_sequence =
28958 - atomic_read(&cur_irq->irq_received)) -
28959 + atomic_read_unchecked(&cur_irq->irq_received)) -
28960 *sequence) <= (1 << 23)));
28961 }
28962 *sequence = cur_irq_sequence;
28963 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28964 }
28965
28966 for (i = 0; i < dev_priv->num_irqs; ++i) {
28967 - atomic_set(&cur_irq->irq_received, 0);
28968 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28969 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28970 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28971 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28972 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28973 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28974 case VIA_IRQ_RELATIVE:
28975 irqwait->request.sequence +=
28976 - atomic_read(&cur_irq->irq_received);
28977 + atomic_read_unchecked(&cur_irq->irq_received);
28978 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28979 case VIA_IRQ_ABSOLUTE:
28980 break;
28981 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28982 index 10fc01f..b4e9822 100644
28983 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28984 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28985 @@ -240,7 +240,7 @@ struct vmw_private {
28986 * Fencing and IRQs.
28987 */
28988
28989 - atomic_t fence_seq;
28990 + atomic_unchecked_t fence_seq;
28991 wait_queue_head_t fence_queue;
28992 wait_queue_head_t fifo_queue;
28993 atomic_t fence_queue_waiters;
28994 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28995 index 41b95ed..69ea504 100644
28996 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28997 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28998 @@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
28999 struct drm_vmw_fence_rep fence_rep;
29000 struct drm_vmw_fence_rep __user *user_fence_rep;
29001 int ret;
29002 - void *user_cmd;
29003 + void __user *user_cmd;
29004 void *cmd;
29005 uint32_t sequence;
29006 struct vmw_sw_context *sw_context = &dev_priv->ctx;
29007 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29008 index 61eacc1..ee38ce8 100644
29009 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29010 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
29011 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
29012 while (!vmw_lag_lt(queue, us)) {
29013 spin_lock(&queue->lock);
29014 if (list_empty(&queue->head))
29015 - sequence = atomic_read(&dev_priv->fence_seq);
29016 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29017 else {
29018 fence = list_first_entry(&queue->head,
29019 struct vmw_fence, head);
29020 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29021 index 635c0ff..2641bbb 100644
29022 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29023 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29024 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29025 (unsigned int) min,
29026 (unsigned int) fifo->capabilities);
29027
29028 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29029 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29030 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
29031 vmw_fence_queue_init(&fifo->fence_queue);
29032 return vmw_fifo_send_fence(dev_priv, &dummy);
29033 @@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29034 if (reserveable)
29035 iowrite32(bytes, fifo_mem +
29036 SVGA_FIFO_RESERVED);
29037 - return fifo_mem + (next_cmd >> 2);
29038 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29039 } else {
29040 need_bounce = true;
29041 }
29042 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
29043
29044 fm = vmw_fifo_reserve(dev_priv, bytes);
29045 if (unlikely(fm == NULL)) {
29046 - *sequence = atomic_read(&dev_priv->fence_seq);
29047 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29048 ret = -ENOMEM;
29049 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
29050 false, 3*HZ);
29051 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
29052 }
29053
29054 do {
29055 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
29056 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
29057 } while (*sequence == 0);
29058
29059 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29060 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29061 index e92298a..f68f2d6 100644
29062 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29063 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29064 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
29065 * emitted. Then the fence is stale and signaled.
29066 */
29067
29068 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
29069 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
29070 > VMW_FENCE_WRAP);
29071
29072 return ret;
29073 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29074
29075 if (fifo_idle)
29076 down_read(&fifo_state->rwsem);
29077 - signal_seq = atomic_read(&dev_priv->fence_seq);
29078 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
29079 ret = 0;
29080
29081 for (;;) {
29082 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
29083 index dfe32e6..dd18a00 100644
29084 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
29085 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
29086 @@ -843,7 +843,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
29087 struct vmw_framebuffer *vfb = NULL;
29088 struct vmw_surface *surface = NULL;
29089 struct vmw_dma_buffer *bo = NULL;
29090 - u64 required_size;
29091 int ret;
29092
29093 /**
29094 @@ -852,8 +851,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
29095 * requested framebuffer.
29096 */
29097
29098 - required_size = mode_cmd->pitch * mode_cmd->height;
29099 - if (unlikely(required_size > (u64) dev_priv->vram_size)) {
29100 + if (!vmw_kms_validate_mode_vram(dev_priv,
29101 + mode_cmd->pitch,
29102 + mode_cmd->height)) {
29103 DRM_ERROR("VRAM size is too small for requested mode.\n");
29104 return NULL;
29105 }
29106 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
29107 index c72f1c0..18376f1 100644
29108 --- a/drivers/gpu/vga/vgaarb.c
29109 +++ b/drivers/gpu/vga/vgaarb.c
29110 @@ -993,14 +993,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
29111 uc = &priv->cards[i];
29112 }
29113
29114 - if (!uc)
29115 - return -EINVAL;
29116 + if (!uc) {
29117 + ret_val = -EINVAL;
29118 + goto done;
29119 + }
29120
29121 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
29122 - return -EINVAL;
29123 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
29124 + ret_val = -EINVAL;
29125 + goto done;
29126 + }
29127
29128 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
29129 - return -EINVAL;
29130 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
29131 + ret_val = -EINVAL;
29132 + goto done;
29133 + }
29134
29135 vga_put(pdev, io_state);
29136
29137 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29138 index f26ae31..721fe1b 100644
29139 --- a/drivers/hid/hid-core.c
29140 +++ b/drivers/hid/hid-core.c
29141 @@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device *hdev)
29142
29143 int hid_add_device(struct hid_device *hdev)
29144 {
29145 - static atomic_t id = ATOMIC_INIT(0);
29146 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29147 int ret;
29148
29149 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29150 @@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hdev)
29151 /* XXX hack, any other cleaner solution after the driver core
29152 * is converted to allow more than 20 bytes as the device name? */
29153 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29154 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29155 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29156
29157 hid_debug_register(hdev, dev_name(&hdev->dev));
29158 ret = device_add(&hdev->dev);
29159 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29160 index 7c1188b..5a64357 100644
29161 --- a/drivers/hid/usbhid/hiddev.c
29162 +++ b/drivers/hid/usbhid/hiddev.c
29163 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29164 break;
29165
29166 case HIDIOCAPPLICATION:
29167 - if (arg < 0 || arg >= hid->maxapplication)
29168 + if (arg >= hid->maxapplication)
29169 break;
29170
29171 for (i = 0; i < hid->maxcollection; i++)
29172 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29173 index 66f6729..2d6de0a 100644
29174 --- a/drivers/hwmon/acpi_power_meter.c
29175 +++ b/drivers/hwmon/acpi_power_meter.c
29176 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29177 return res;
29178
29179 temp /= 1000;
29180 - if (temp < 0)
29181 - return -EINVAL;
29182
29183 mutex_lock(&resource->lock);
29184 resource->trip[attr->index - 7] = temp;
29185 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29186 index fe4104c..346febb 100644
29187 --- a/drivers/hwmon/sht15.c
29188 +++ b/drivers/hwmon/sht15.c
29189 @@ -166,7 +166,7 @@ struct sht15_data {
29190 int supply_uV;
29191 bool supply_uV_valid;
29192 struct work_struct update_supply_work;
29193 - atomic_t interrupt_handled;
29194 + atomic_unchecked_t interrupt_handled;
29195 };
29196
29197 /**
29198 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29199 return ret;
29200
29201 gpio_direction_input(data->pdata->gpio_data);
29202 - atomic_set(&data->interrupt_handled, 0);
29203 + atomic_set_unchecked(&data->interrupt_handled, 0);
29204
29205 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29206 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29207 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29208 /* Only relevant if the interrupt hasn't occurred. */
29209 - if (!atomic_read(&data->interrupt_handled))
29210 + if (!atomic_read_unchecked(&data->interrupt_handled))
29211 schedule_work(&data->read_work);
29212 }
29213 ret = wait_event_timeout(data->wait_queue,
29214 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29215
29216 /* First disable the interrupt */
29217 disable_irq_nosync(irq);
29218 - atomic_inc(&data->interrupt_handled);
29219 + atomic_inc_unchecked(&data->interrupt_handled);
29220 /* Then schedule a reading work struct */
29221 if (data->state != SHT15_READING_NOTHING)
29222 schedule_work(&data->read_work);
29223 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29224 * If not, then start the interrupt again - care here as could
29225 * have gone low in meantime so verify it hasn't!
29226 */
29227 - atomic_set(&data->interrupt_handled, 0);
29228 + atomic_set_unchecked(&data->interrupt_handled, 0);
29229 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29230 /* If still not occurred or another handler has been scheduled */
29231 if (gpio_get_value(data->pdata->gpio_data)
29232 - || atomic_read(&data->interrupt_handled))
29233 + || atomic_read_unchecked(&data->interrupt_handled))
29234 return;
29235 }
29236
29237 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29238 index 378fcb5..5e91fa8 100644
29239 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29240 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29241 @@ -43,7 +43,7 @@
29242 extern struct i2c_adapter amd756_smbus;
29243
29244 static struct i2c_adapter *s4882_adapter;
29245 -static struct i2c_algorithm *s4882_algo;
29246 +static i2c_algorithm_no_const *s4882_algo;
29247
29248 /* Wrapper access functions for multiplexed SMBus */
29249 static DEFINE_MUTEX(amd756_lock);
29250 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29251 index 29015eb..af2d8e9 100644
29252 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29253 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29254 @@ -41,7 +41,7 @@
29255 extern struct i2c_adapter *nforce2_smbus;
29256
29257 static struct i2c_adapter *s4985_adapter;
29258 -static struct i2c_algorithm *s4985_algo;
29259 +static i2c_algorithm_no_const *s4985_algo;
29260
29261 /* Wrapper access functions for multiplexed SMBus */
29262 static DEFINE_MUTEX(nforce2_lock);
29263 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29264 index d7a4833..7fae376 100644
29265 --- a/drivers/i2c/i2c-mux.c
29266 +++ b/drivers/i2c/i2c-mux.c
29267 @@ -28,7 +28,7 @@
29268 /* multiplexer per channel data */
29269 struct i2c_mux_priv {
29270 struct i2c_adapter adap;
29271 - struct i2c_algorithm algo;
29272 + i2c_algorithm_no_const algo;
29273
29274 struct i2c_adapter *parent;
29275 void *mux_dev; /* the mux chip/device */
29276 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29277 index 57d00ca..0145194 100644
29278 --- a/drivers/ide/aec62xx.c
29279 +++ b/drivers/ide/aec62xx.c
29280 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29281 .cable_detect = atp86x_cable_detect,
29282 };
29283
29284 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29285 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29286 { /* 0: AEC6210 */
29287 .name = DRV_NAME,
29288 .init_chipset = init_chipset_aec62xx,
29289 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29290 index 2c8016a..911a27c 100644
29291 --- a/drivers/ide/alim15x3.c
29292 +++ b/drivers/ide/alim15x3.c
29293 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29294 .dma_sff_read_status = ide_dma_sff_read_status,
29295 };
29296
29297 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29298 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29299 .name = DRV_NAME,
29300 .init_chipset = init_chipset_ali15x3,
29301 .init_hwif = init_hwif_ali15x3,
29302 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29303 index 3747b25..56fc995 100644
29304 --- a/drivers/ide/amd74xx.c
29305 +++ b/drivers/ide/amd74xx.c
29306 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29307 .udma_mask = udma, \
29308 }
29309
29310 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29311 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29312 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29313 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29314 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29315 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29316 index 15f0ead..cb43480 100644
29317 --- a/drivers/ide/atiixp.c
29318 +++ b/drivers/ide/atiixp.c
29319 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29320 .cable_detect = atiixp_cable_detect,
29321 };
29322
29323 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29324 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29325 { /* 0: IXP200/300/400/700 */
29326 .name = DRV_NAME,
29327 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29328 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29329 index 5f80312..d1fc438 100644
29330 --- a/drivers/ide/cmd64x.c
29331 +++ b/drivers/ide/cmd64x.c
29332 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29333 .dma_sff_read_status = ide_dma_sff_read_status,
29334 };
29335
29336 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29337 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29338 { /* 0: CMD643 */
29339 .name = DRV_NAME,
29340 .init_chipset = init_chipset_cmd64x,
29341 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29342 index 2c1e5f7..1444762 100644
29343 --- a/drivers/ide/cs5520.c
29344 +++ b/drivers/ide/cs5520.c
29345 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29346 .set_dma_mode = cs5520_set_dma_mode,
29347 };
29348
29349 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29350 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29351 .name = DRV_NAME,
29352 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29353 .port_ops = &cs5520_port_ops,
29354 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29355 index 4dc4eb9..49b40ad 100644
29356 --- a/drivers/ide/cs5530.c
29357 +++ b/drivers/ide/cs5530.c
29358 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29359 .udma_filter = cs5530_udma_filter,
29360 };
29361
29362 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29363 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29364 .name = DRV_NAME,
29365 .init_chipset = init_chipset_cs5530,
29366 .init_hwif = init_hwif_cs5530,
29367 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29368 index 5059faf..18d4c85 100644
29369 --- a/drivers/ide/cs5535.c
29370 +++ b/drivers/ide/cs5535.c
29371 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29372 .cable_detect = cs5535_cable_detect,
29373 };
29374
29375 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29376 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29377 .name = DRV_NAME,
29378 .port_ops = &cs5535_port_ops,
29379 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29380 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29381 index 67cbcfa..37ea151 100644
29382 --- a/drivers/ide/cy82c693.c
29383 +++ b/drivers/ide/cy82c693.c
29384 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29385 .set_dma_mode = cy82c693_set_dma_mode,
29386 };
29387
29388 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29389 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29390 .name = DRV_NAME,
29391 .init_iops = init_iops_cy82c693,
29392 .port_ops = &cy82c693_port_ops,
29393 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29394 index 58c51cd..4aec3b8 100644
29395 --- a/drivers/ide/hpt366.c
29396 +++ b/drivers/ide/hpt366.c
29397 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29398 }
29399 };
29400
29401 -static const struct hpt_info hpt36x __devinitdata = {
29402 +static const struct hpt_info hpt36x __devinitconst = {
29403 .chip_name = "HPT36x",
29404 .chip_type = HPT36x,
29405 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29406 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29407 .timings = &hpt36x_timings
29408 };
29409
29410 -static const struct hpt_info hpt370 __devinitdata = {
29411 +static const struct hpt_info hpt370 __devinitconst = {
29412 .chip_name = "HPT370",
29413 .chip_type = HPT370,
29414 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29415 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29416 .timings = &hpt37x_timings
29417 };
29418
29419 -static const struct hpt_info hpt370a __devinitdata = {
29420 +static const struct hpt_info hpt370a __devinitconst = {
29421 .chip_name = "HPT370A",
29422 .chip_type = HPT370A,
29423 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29424 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29425 .timings = &hpt37x_timings
29426 };
29427
29428 -static const struct hpt_info hpt374 __devinitdata = {
29429 +static const struct hpt_info hpt374 __devinitconst = {
29430 .chip_name = "HPT374",
29431 .chip_type = HPT374,
29432 .udma_mask = ATA_UDMA5,
29433 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29434 .timings = &hpt37x_timings
29435 };
29436
29437 -static const struct hpt_info hpt372 __devinitdata = {
29438 +static const struct hpt_info hpt372 __devinitconst = {
29439 .chip_name = "HPT372",
29440 .chip_type = HPT372,
29441 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29442 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29443 .timings = &hpt37x_timings
29444 };
29445
29446 -static const struct hpt_info hpt372a __devinitdata = {
29447 +static const struct hpt_info hpt372a __devinitconst = {
29448 .chip_name = "HPT372A",
29449 .chip_type = HPT372A,
29450 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29451 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29452 .timings = &hpt37x_timings
29453 };
29454
29455 -static const struct hpt_info hpt302 __devinitdata = {
29456 +static const struct hpt_info hpt302 __devinitconst = {
29457 .chip_name = "HPT302",
29458 .chip_type = HPT302,
29459 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29460 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29461 .timings = &hpt37x_timings
29462 };
29463
29464 -static const struct hpt_info hpt371 __devinitdata = {
29465 +static const struct hpt_info hpt371 __devinitconst = {
29466 .chip_name = "HPT371",
29467 .chip_type = HPT371,
29468 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29469 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29470 .timings = &hpt37x_timings
29471 };
29472
29473 -static const struct hpt_info hpt372n __devinitdata = {
29474 +static const struct hpt_info hpt372n __devinitconst = {
29475 .chip_name = "HPT372N",
29476 .chip_type = HPT372N,
29477 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29478 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29479 .timings = &hpt37x_timings
29480 };
29481
29482 -static const struct hpt_info hpt302n __devinitdata = {
29483 +static const struct hpt_info hpt302n __devinitconst = {
29484 .chip_name = "HPT302N",
29485 .chip_type = HPT302N,
29486 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29487 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29488 .timings = &hpt37x_timings
29489 };
29490
29491 -static const struct hpt_info hpt371n __devinitdata = {
29492 +static const struct hpt_info hpt371n __devinitconst = {
29493 .chip_name = "HPT371N",
29494 .chip_type = HPT371N,
29495 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29496 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29497 .dma_sff_read_status = ide_dma_sff_read_status,
29498 };
29499
29500 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29501 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29502 { /* 0: HPT36x */
29503 .name = DRV_NAME,
29504 .init_chipset = init_chipset_hpt366,
29505 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29506 index 04b0956..f5b47dc 100644
29507 --- a/drivers/ide/ide-cd.c
29508 +++ b/drivers/ide/ide-cd.c
29509 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29510 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29511 if ((unsigned long)buf & alignment
29512 || blk_rq_bytes(rq) & q->dma_pad_mask
29513 - || object_is_on_stack(buf))
29514 + || object_starts_on_stack(buf))
29515 drive->dma = 0;
29516 }
29517 }
29518 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
29519 index 61fdf54..2834ea6 100644
29520 --- a/drivers/ide/ide-floppy.c
29521 +++ b/drivers/ide/ide-floppy.c
29522 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
29523 u8 pc_buf[256], header_len, desc_cnt;
29524 int i, rc = 1, blocks, length;
29525
29526 + pax_track_stack();
29527 +
29528 ide_debug_log(IDE_DBG_FUNC, "enter");
29529
29530 drive->bios_cyl = 0;
29531 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29532 index a743e68..1cfd674 100644
29533 --- a/drivers/ide/ide-pci-generic.c
29534 +++ b/drivers/ide/ide-pci-generic.c
29535 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29536 .udma_mask = ATA_UDMA6, \
29537 }
29538
29539 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29540 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29541 /* 0: Unknown */
29542 DECLARE_GENERIC_PCI_DEV(0),
29543
29544 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29545 index 560e66d..d5dd180 100644
29546 --- a/drivers/ide/it8172.c
29547 +++ b/drivers/ide/it8172.c
29548 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29549 .set_dma_mode = it8172_set_dma_mode,
29550 };
29551
29552 -static const struct ide_port_info it8172_port_info __devinitdata = {
29553 +static const struct ide_port_info it8172_port_info __devinitconst = {
29554 .name = DRV_NAME,
29555 .port_ops = &it8172_port_ops,
29556 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29557 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29558 index 46816ba..1847aeb 100644
29559 --- a/drivers/ide/it8213.c
29560 +++ b/drivers/ide/it8213.c
29561 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29562 .cable_detect = it8213_cable_detect,
29563 };
29564
29565 -static const struct ide_port_info it8213_chipset __devinitdata = {
29566 +static const struct ide_port_info it8213_chipset __devinitconst = {
29567 .name = DRV_NAME,
29568 .enablebits = { {0x41, 0x80, 0x80} },
29569 .port_ops = &it8213_port_ops,
29570 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29571 index 2e3169f..c5611db 100644
29572 --- a/drivers/ide/it821x.c
29573 +++ b/drivers/ide/it821x.c
29574 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29575 .cable_detect = it821x_cable_detect,
29576 };
29577
29578 -static const struct ide_port_info it821x_chipset __devinitdata = {
29579 +static const struct ide_port_info it821x_chipset __devinitconst = {
29580 .name = DRV_NAME,
29581 .init_chipset = init_chipset_it821x,
29582 .init_hwif = init_hwif_it821x,
29583 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29584 index 74c2c4a..efddd7d 100644
29585 --- a/drivers/ide/jmicron.c
29586 +++ b/drivers/ide/jmicron.c
29587 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29588 .cable_detect = jmicron_cable_detect,
29589 };
29590
29591 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29592 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29593 .name = DRV_NAME,
29594 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29595 .port_ops = &jmicron_port_ops,
29596 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29597 index 95327a2..73f78d8 100644
29598 --- a/drivers/ide/ns87415.c
29599 +++ b/drivers/ide/ns87415.c
29600 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29601 .dma_sff_read_status = superio_dma_sff_read_status,
29602 };
29603
29604 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29605 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29606 .name = DRV_NAME,
29607 .init_hwif = init_hwif_ns87415,
29608 .tp_ops = &ns87415_tp_ops,
29609 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29610 index 1a53a4c..39edc66 100644
29611 --- a/drivers/ide/opti621.c
29612 +++ b/drivers/ide/opti621.c
29613 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29614 .set_pio_mode = opti621_set_pio_mode,
29615 };
29616
29617 -static const struct ide_port_info opti621_chipset __devinitdata = {
29618 +static const struct ide_port_info opti621_chipset __devinitconst = {
29619 .name = DRV_NAME,
29620 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29621 .port_ops = &opti621_port_ops,
29622 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29623 index 9546fe2..2e5ceb6 100644
29624 --- a/drivers/ide/pdc202xx_new.c
29625 +++ b/drivers/ide/pdc202xx_new.c
29626 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29627 .udma_mask = udma, \
29628 }
29629
29630 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29631 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29632 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29633 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29634 };
29635 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29636 index 3a35ec6..5634510 100644
29637 --- a/drivers/ide/pdc202xx_old.c
29638 +++ b/drivers/ide/pdc202xx_old.c
29639 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29640 .max_sectors = sectors, \
29641 }
29642
29643 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29644 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29645 { /* 0: PDC20246 */
29646 .name = DRV_NAME,
29647 .init_chipset = init_chipset_pdc202xx,
29648 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29649 index b59d04c..368c2a7 100644
29650 --- a/drivers/ide/piix.c
29651 +++ b/drivers/ide/piix.c
29652 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29653 .udma_mask = udma, \
29654 }
29655
29656 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29657 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29658 /* 0: MPIIX */
29659 { /*
29660 * MPIIX actually has only a single IDE channel mapped to
29661 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29662 index a6414a8..c04173e 100644
29663 --- a/drivers/ide/rz1000.c
29664 +++ b/drivers/ide/rz1000.c
29665 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29666 }
29667 }
29668
29669 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29670 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29671 .name = DRV_NAME,
29672 .host_flags = IDE_HFLAG_NO_DMA,
29673 };
29674 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29675 index 356b9b5..d4758eb 100644
29676 --- a/drivers/ide/sc1200.c
29677 +++ b/drivers/ide/sc1200.c
29678 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29679 .dma_sff_read_status = ide_dma_sff_read_status,
29680 };
29681
29682 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29683 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29684 .name = DRV_NAME,
29685 .port_ops = &sc1200_port_ops,
29686 .dma_ops = &sc1200_dma_ops,
29687 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29688 index b7f5b0c..9701038 100644
29689 --- a/drivers/ide/scc_pata.c
29690 +++ b/drivers/ide/scc_pata.c
29691 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29692 .dma_sff_read_status = scc_dma_sff_read_status,
29693 };
29694
29695 -static const struct ide_port_info scc_chipset __devinitdata = {
29696 +static const struct ide_port_info scc_chipset __devinitconst = {
29697 .name = "sccIDE",
29698 .init_iops = init_iops_scc,
29699 .init_dma = scc_init_dma,
29700 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29701 index 35fb8da..24d72ef 100644
29702 --- a/drivers/ide/serverworks.c
29703 +++ b/drivers/ide/serverworks.c
29704 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29705 .cable_detect = svwks_cable_detect,
29706 };
29707
29708 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29709 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29710 { /* 0: OSB4 */
29711 .name = DRV_NAME,
29712 .init_chipset = init_chipset_svwks,
29713 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
29714 index ab3db61..afed580 100644
29715 --- a/drivers/ide/setup-pci.c
29716 +++ b/drivers/ide/setup-pci.c
29717 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
29718 int ret, i, n_ports = dev2 ? 4 : 2;
29719 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29720
29721 + pax_track_stack();
29722 +
29723 for (i = 0; i < n_ports / 2; i++) {
29724 ret = ide_setup_pci_controller(pdev[i], d, !i);
29725 if (ret < 0)
29726 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29727 index ddeda44..46f7e30 100644
29728 --- a/drivers/ide/siimage.c
29729 +++ b/drivers/ide/siimage.c
29730 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29731 .udma_mask = ATA_UDMA6, \
29732 }
29733
29734 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29735 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29736 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29737 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29738 };
29739 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29740 index 4a00225..09e61b4 100644
29741 --- a/drivers/ide/sis5513.c
29742 +++ b/drivers/ide/sis5513.c
29743 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29744 .cable_detect = sis_cable_detect,
29745 };
29746
29747 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29748 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29749 .name = DRV_NAME,
29750 .init_chipset = init_chipset_sis5513,
29751 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29752 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29753 index f21dc2a..d051cd2 100644
29754 --- a/drivers/ide/sl82c105.c
29755 +++ b/drivers/ide/sl82c105.c
29756 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29757 .dma_sff_read_status = ide_dma_sff_read_status,
29758 };
29759
29760 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29761 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29762 .name = DRV_NAME,
29763 .init_chipset = init_chipset_sl82c105,
29764 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29765 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29766 index 864ffe0..863a5e9 100644
29767 --- a/drivers/ide/slc90e66.c
29768 +++ b/drivers/ide/slc90e66.c
29769 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29770 .cable_detect = slc90e66_cable_detect,
29771 };
29772
29773 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29774 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29775 .name = DRV_NAME,
29776 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29777 .port_ops = &slc90e66_port_ops,
29778 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29779 index e444d24..ba577de 100644
29780 --- a/drivers/ide/tc86c001.c
29781 +++ b/drivers/ide/tc86c001.c
29782 @@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29783 .dma_sff_read_status = ide_dma_sff_read_status,
29784 };
29785
29786 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29787 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29788 .name = DRV_NAME,
29789 .init_hwif = init_hwif_tc86c001,
29790 .port_ops = &tc86c001_port_ops,
29791 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29792 index e53a1b7..d11aff7 100644
29793 --- a/drivers/ide/triflex.c
29794 +++ b/drivers/ide/triflex.c
29795 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29796 .set_dma_mode = triflex_set_mode,
29797 };
29798
29799 -static const struct ide_port_info triflex_device __devinitdata = {
29800 +static const struct ide_port_info triflex_device __devinitconst = {
29801 .name = DRV_NAME,
29802 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29803 .port_ops = &triflex_port_ops,
29804 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29805 index 4b42ca0..e494a98 100644
29806 --- a/drivers/ide/trm290.c
29807 +++ b/drivers/ide/trm290.c
29808 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29809 .dma_check = trm290_dma_check,
29810 };
29811
29812 -static const struct ide_port_info trm290_chipset __devinitdata = {
29813 +static const struct ide_port_info trm290_chipset __devinitconst = {
29814 .name = DRV_NAME,
29815 .init_hwif = init_hwif_trm290,
29816 .tp_ops = &trm290_tp_ops,
29817 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29818 index f46f49c..eb77678 100644
29819 --- a/drivers/ide/via82cxxx.c
29820 +++ b/drivers/ide/via82cxxx.c
29821 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29822 .cable_detect = via82cxxx_cable_detect,
29823 };
29824
29825 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29826 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29827 .name = DRV_NAME,
29828 .init_chipset = init_chipset_via82cxxx,
29829 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29830 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29831 index fc0f2bd..ac2f8a5 100644
29832 --- a/drivers/infiniband/core/cm.c
29833 +++ b/drivers/infiniband/core/cm.c
29834 @@ -113,7 +113,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29835
29836 struct cm_counter_group {
29837 struct kobject obj;
29838 - atomic_long_t counter[CM_ATTR_COUNT];
29839 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29840 };
29841
29842 struct cm_counter_attribute {
29843 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29844 struct ib_mad_send_buf *msg = NULL;
29845 int ret;
29846
29847 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29848 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29849 counter[CM_REQ_COUNTER]);
29850
29851 /* Quick state check to discard duplicate REQs. */
29852 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29853 if (!cm_id_priv)
29854 return;
29855
29856 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29857 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29858 counter[CM_REP_COUNTER]);
29859 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29860 if (ret)
29861 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work *work)
29862 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29863 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29864 spin_unlock_irq(&cm_id_priv->lock);
29865 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29866 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29867 counter[CM_RTU_COUNTER]);
29868 goto out;
29869 }
29870 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_work *work)
29871 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29872 dreq_msg->local_comm_id);
29873 if (!cm_id_priv) {
29874 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29875 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29876 counter[CM_DREQ_COUNTER]);
29877 cm_issue_drep(work->port, work->mad_recv_wc);
29878 return -EINVAL;
29879 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_work *work)
29880 case IB_CM_MRA_REP_RCVD:
29881 break;
29882 case IB_CM_TIMEWAIT:
29883 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29884 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29885 counter[CM_DREQ_COUNTER]);
29886 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29887 goto unlock;
29888 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
29889 cm_free_msg(msg);
29890 goto deref;
29891 case IB_CM_DREQ_RCVD:
29892 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29893 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29894 counter[CM_DREQ_COUNTER]);
29895 goto unlock;
29896 default:
29897 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work *work)
29898 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29899 cm_id_priv->msg, timeout)) {
29900 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29901 - atomic_long_inc(&work->port->
29902 + atomic_long_inc_unchecked(&work->port->
29903 counter_group[CM_RECV_DUPLICATES].
29904 counter[CM_MRA_COUNTER]);
29905 goto out;
29906 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work *work)
29907 break;
29908 case IB_CM_MRA_REQ_RCVD:
29909 case IB_CM_MRA_REP_RCVD:
29910 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29911 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29912 counter[CM_MRA_COUNTER]);
29913 /* fall through */
29914 default:
29915 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work *work)
29916 case IB_CM_LAP_IDLE:
29917 break;
29918 case IB_CM_MRA_LAP_SENT:
29919 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29920 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29921 counter[CM_LAP_COUNTER]);
29922 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29923 goto unlock;
29924 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work *work)
29925 cm_free_msg(msg);
29926 goto deref;
29927 case IB_CM_LAP_RCVD:
29928 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29929 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29930 counter[CM_LAP_COUNTER]);
29931 goto unlock;
29932 default:
29933 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29934 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29935 if (cur_cm_id_priv) {
29936 spin_unlock_irq(&cm.lock);
29937 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29938 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29939 counter[CM_SIDR_REQ_COUNTER]);
29940 goto out; /* Duplicate message. */
29941 }
29942 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29943 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29944 msg->retries = 1;
29945
29946 - atomic_long_add(1 + msg->retries,
29947 + atomic_long_add_unchecked(1 + msg->retries,
29948 &port->counter_group[CM_XMIT].counter[attr_index]);
29949 if (msg->retries)
29950 - atomic_long_add(msg->retries,
29951 + atomic_long_add_unchecked(msg->retries,
29952 &port->counter_group[CM_XMIT_RETRIES].
29953 counter[attr_index]);
29954
29955 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29956 }
29957
29958 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29959 - atomic_long_inc(&port->counter_group[CM_RECV].
29960 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29961 counter[attr_id - CM_ATTR_ID_OFFSET]);
29962
29963 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29964 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29965 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29966
29967 return sprintf(buf, "%ld\n",
29968 - atomic_long_read(&group->counter[cm_attr->index]));
29969 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29970 }
29971
29972 static const struct sysfs_ops cm_counter_ops = {
29973 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
29974 index ca4c5dc..572d1ae 100644
29975 --- a/drivers/infiniband/core/cma.c
29976 +++ b/drivers/infiniband/core/cma.c
29977 @@ -2492,6 +2492,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
29978
29979 req.private_data_len = sizeof(struct cma_hdr) +
29980 conn_param->private_data_len;
29981 + if (req.private_data_len < conn_param->private_data_len)
29982 + return -EINVAL;
29983 +
29984 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
29985 if (!req.private_data)
29986 return -ENOMEM;
29987 @@ -2541,6 +2544,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
29988 memset(&req, 0, sizeof req);
29989 offset = cma_user_data_offset(id_priv->id.ps);
29990 req.private_data_len = offset + conn_param->private_data_len;
29991 + if (req.private_data_len < conn_param->private_data_len)
29992 + return -EINVAL;
29993 +
29994 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
29995 if (!private_data)
29996 return -ENOMEM;
29997 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29998 index 4507043..14ad522 100644
29999 --- a/drivers/infiniband/core/fmr_pool.c
30000 +++ b/drivers/infiniband/core/fmr_pool.c
30001 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
30002
30003 struct task_struct *thread;
30004
30005 - atomic_t req_ser;
30006 - atomic_t flush_ser;
30007 + atomic_unchecked_t req_ser;
30008 + atomic_unchecked_t flush_ser;
30009
30010 wait_queue_head_t force_wait;
30011 };
30012 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30013 struct ib_fmr_pool *pool = pool_ptr;
30014
30015 do {
30016 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30017 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30018 ib_fmr_batch_release(pool);
30019
30020 - atomic_inc(&pool->flush_ser);
30021 + atomic_inc_unchecked(&pool->flush_ser);
30022 wake_up_interruptible(&pool->force_wait);
30023
30024 if (pool->flush_function)
30025 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
30026 }
30027
30028 set_current_state(TASK_INTERRUPTIBLE);
30029 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30030 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30031 !kthread_should_stop())
30032 schedule();
30033 __set_current_state(TASK_RUNNING);
30034 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
30035 pool->dirty_watermark = params->dirty_watermark;
30036 pool->dirty_len = 0;
30037 spin_lock_init(&pool->pool_lock);
30038 - atomic_set(&pool->req_ser, 0);
30039 - atomic_set(&pool->flush_ser, 0);
30040 + atomic_set_unchecked(&pool->req_ser, 0);
30041 + atomic_set_unchecked(&pool->flush_ser, 0);
30042 init_waitqueue_head(&pool->force_wait);
30043
30044 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30045 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
30046 }
30047 spin_unlock_irq(&pool->pool_lock);
30048
30049 - serial = atomic_inc_return(&pool->req_ser);
30050 + serial = atomic_inc_return_unchecked(&pool->req_ser);
30051 wake_up_process(pool->thread);
30052
30053 if (wait_event_interruptible(pool->force_wait,
30054 - atomic_read(&pool->flush_ser) - serial >= 0))
30055 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30056 return -EINTR;
30057
30058 return 0;
30059 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
30060 } else {
30061 list_add_tail(&fmr->list, &pool->dirty_list);
30062 if (++pool->dirty_len >= pool->dirty_watermark) {
30063 - atomic_inc(&pool->req_ser);
30064 + atomic_inc_unchecked(&pool->req_ser);
30065 wake_up_process(pool->thread);
30066 }
30067 }
30068 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30069 index 40c8353..946b0e4 100644
30070 --- a/drivers/infiniband/hw/cxgb4/mem.c
30071 +++ b/drivers/infiniband/hw/cxgb4/mem.c
30072 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30073 int err;
30074 struct fw_ri_tpte tpt;
30075 u32 stag_idx;
30076 - static atomic_t key;
30077 + static atomic_unchecked_t key;
30078
30079 if (c4iw_fatal_error(rdev))
30080 return -EIO;
30081 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30082 &rdev->resource.tpt_fifo_lock);
30083 if (!stag_idx)
30084 return -ENOMEM;
30085 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30086 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30087 }
30088 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30089 __func__, stag_state, type, pdid, stag_idx);
30090 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
30091 index 31ae1b1..2f5b038 100644
30092 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
30093 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
30094 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
30095 struct infinipath_counters counters;
30096 struct ipath_devdata *dd;
30097
30098 + pax_track_stack();
30099 +
30100 dd = file->f_path.dentry->d_inode->i_private;
30101 dd->ipath_f_read_counters(dd, &counters);
30102
30103 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30104 index 79b3dbc..96e5fcc 100644
30105 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
30106 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30107 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30108 struct ib_atomic_eth *ateth;
30109 struct ipath_ack_entry *e;
30110 u64 vaddr;
30111 - atomic64_t *maddr;
30112 + atomic64_unchecked_t *maddr;
30113 u64 sdata;
30114 u32 rkey;
30115 u8 next;
30116 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30117 IB_ACCESS_REMOTE_ATOMIC)))
30118 goto nack_acc_unlck;
30119 /* Perform atomic OP and save result. */
30120 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30121 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30122 sdata = be64_to_cpu(ateth->swap_data);
30123 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30124 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30125 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30126 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30127 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30128 be64_to_cpu(ateth->compare_data),
30129 sdata);
30130 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30131 index 1f95bba..9530f87 100644
30132 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30133 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30134 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30135 unsigned long flags;
30136 struct ib_wc wc;
30137 u64 sdata;
30138 - atomic64_t *maddr;
30139 + atomic64_unchecked_t *maddr;
30140 enum ib_wc_status send_status;
30141
30142 /*
30143 @@ -382,11 +382,11 @@ again:
30144 IB_ACCESS_REMOTE_ATOMIC)))
30145 goto acc_err;
30146 /* Perform atomic OP and save result. */
30147 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30148 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30149 sdata = wqe->wr.wr.atomic.compare_add;
30150 *(u64 *) sqp->s_sge.sge.vaddr =
30151 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30152 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30153 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30154 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30155 sdata, wqe->wr.wr.atomic.swap);
30156 goto send_comp;
30157 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30158 index 2d668c6..3312bb7 100644
30159 --- a/drivers/infiniband/hw/nes/nes.c
30160 +++ b/drivers/infiniband/hw/nes/nes.c
30161 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30162 LIST_HEAD(nes_adapter_list);
30163 static LIST_HEAD(nes_dev_list);
30164
30165 -atomic_t qps_destroyed;
30166 +atomic_unchecked_t qps_destroyed;
30167
30168 static unsigned int ee_flsh_adapter;
30169 static unsigned int sysfs_nonidx_addr;
30170 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30171 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30172 struct nes_adapter *nesadapter = nesdev->nesadapter;
30173
30174 - atomic_inc(&qps_destroyed);
30175 + atomic_inc_unchecked(&qps_destroyed);
30176
30177 /* Free the control structures */
30178
30179 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30180 index 6fe7987..68637b5 100644
30181 --- a/drivers/infiniband/hw/nes/nes.h
30182 +++ b/drivers/infiniband/hw/nes/nes.h
30183 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
30184 extern unsigned int wqm_quanta;
30185 extern struct list_head nes_adapter_list;
30186
30187 -extern atomic_t cm_connects;
30188 -extern atomic_t cm_accepts;
30189 -extern atomic_t cm_disconnects;
30190 -extern atomic_t cm_closes;
30191 -extern atomic_t cm_connecteds;
30192 -extern atomic_t cm_connect_reqs;
30193 -extern atomic_t cm_rejects;
30194 -extern atomic_t mod_qp_timouts;
30195 -extern atomic_t qps_created;
30196 -extern atomic_t qps_destroyed;
30197 -extern atomic_t sw_qps_destroyed;
30198 +extern atomic_unchecked_t cm_connects;
30199 +extern atomic_unchecked_t cm_accepts;
30200 +extern atomic_unchecked_t cm_disconnects;
30201 +extern atomic_unchecked_t cm_closes;
30202 +extern atomic_unchecked_t cm_connecteds;
30203 +extern atomic_unchecked_t cm_connect_reqs;
30204 +extern atomic_unchecked_t cm_rejects;
30205 +extern atomic_unchecked_t mod_qp_timouts;
30206 +extern atomic_unchecked_t qps_created;
30207 +extern atomic_unchecked_t qps_destroyed;
30208 +extern atomic_unchecked_t sw_qps_destroyed;
30209 extern u32 mh_detected;
30210 extern u32 mh_pauses_sent;
30211 extern u32 cm_packets_sent;
30212 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
30213 extern u32 cm_packets_received;
30214 extern u32 cm_packets_dropped;
30215 extern u32 cm_packets_retrans;
30216 -extern atomic_t cm_listens_created;
30217 -extern atomic_t cm_listens_destroyed;
30218 +extern atomic_unchecked_t cm_listens_created;
30219 +extern atomic_unchecked_t cm_listens_destroyed;
30220 extern u32 cm_backlog_drops;
30221 -extern atomic_t cm_loopbacks;
30222 -extern atomic_t cm_nodes_created;
30223 -extern atomic_t cm_nodes_destroyed;
30224 -extern atomic_t cm_accel_dropped_pkts;
30225 -extern atomic_t cm_resets_recvd;
30226 +extern atomic_unchecked_t cm_loopbacks;
30227 +extern atomic_unchecked_t cm_nodes_created;
30228 +extern atomic_unchecked_t cm_nodes_destroyed;
30229 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30230 +extern atomic_unchecked_t cm_resets_recvd;
30231
30232 extern u32 int_mod_timer_init;
30233 extern u32 int_mod_cq_depth_256;
30234 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30235 index a237547..28a9819 100644
30236 --- a/drivers/infiniband/hw/nes/nes_cm.c
30237 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30238 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30239 u32 cm_packets_retrans;
30240 u32 cm_packets_created;
30241 u32 cm_packets_received;
30242 -atomic_t cm_listens_created;
30243 -atomic_t cm_listens_destroyed;
30244 +atomic_unchecked_t cm_listens_created;
30245 +atomic_unchecked_t cm_listens_destroyed;
30246 u32 cm_backlog_drops;
30247 -atomic_t cm_loopbacks;
30248 -atomic_t cm_nodes_created;
30249 -atomic_t cm_nodes_destroyed;
30250 -atomic_t cm_accel_dropped_pkts;
30251 -atomic_t cm_resets_recvd;
30252 +atomic_unchecked_t cm_loopbacks;
30253 +atomic_unchecked_t cm_nodes_created;
30254 +atomic_unchecked_t cm_nodes_destroyed;
30255 +atomic_unchecked_t cm_accel_dropped_pkts;
30256 +atomic_unchecked_t cm_resets_recvd;
30257
30258 static inline int mini_cm_accelerated(struct nes_cm_core *,
30259 struct nes_cm_node *);
30260 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
30261
30262 static struct nes_cm_core *g_cm_core;
30263
30264 -atomic_t cm_connects;
30265 -atomic_t cm_accepts;
30266 -atomic_t cm_disconnects;
30267 -atomic_t cm_closes;
30268 -atomic_t cm_connecteds;
30269 -atomic_t cm_connect_reqs;
30270 -atomic_t cm_rejects;
30271 +atomic_unchecked_t cm_connects;
30272 +atomic_unchecked_t cm_accepts;
30273 +atomic_unchecked_t cm_disconnects;
30274 +atomic_unchecked_t cm_closes;
30275 +atomic_unchecked_t cm_connecteds;
30276 +atomic_unchecked_t cm_connect_reqs;
30277 +atomic_unchecked_t cm_rejects;
30278
30279
30280 /**
30281 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30282 kfree(listener);
30283 listener = NULL;
30284 ret = 0;
30285 - atomic_inc(&cm_listens_destroyed);
30286 + atomic_inc_unchecked(&cm_listens_destroyed);
30287 } else {
30288 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30289 }
30290 @@ -1242,7 +1242,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30291 cm_node->rem_mac);
30292
30293 add_hte_node(cm_core, cm_node);
30294 - atomic_inc(&cm_nodes_created);
30295 + atomic_inc_unchecked(&cm_nodes_created);
30296
30297 return cm_node;
30298 }
30299 @@ -1300,7 +1300,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30300 }
30301
30302 atomic_dec(&cm_core->node_cnt);
30303 - atomic_inc(&cm_nodes_destroyed);
30304 + atomic_inc_unchecked(&cm_nodes_destroyed);
30305 nesqp = cm_node->nesqp;
30306 if (nesqp) {
30307 nesqp->cm_node = NULL;
30308 @@ -1367,7 +1367,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30309
30310 static void drop_packet(struct sk_buff *skb)
30311 {
30312 - atomic_inc(&cm_accel_dropped_pkts);
30313 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30314 dev_kfree_skb_any(skb);
30315 }
30316
30317 @@ -1430,7 +1430,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30318 {
30319
30320 int reset = 0; /* whether to send reset in case of err.. */
30321 - atomic_inc(&cm_resets_recvd);
30322 + atomic_inc_unchecked(&cm_resets_recvd);
30323 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30324 " refcnt=%d\n", cm_node, cm_node->state,
30325 atomic_read(&cm_node->ref_count));
30326 @@ -2059,7 +2059,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30327 rem_ref_cm_node(cm_node->cm_core, cm_node);
30328 return NULL;
30329 }
30330 - atomic_inc(&cm_loopbacks);
30331 + atomic_inc_unchecked(&cm_loopbacks);
30332 loopbackremotenode->loopbackpartner = cm_node;
30333 loopbackremotenode->tcp_cntxt.rcv_wscale =
30334 NES_CM_DEFAULT_RCV_WND_SCALE;
30335 @@ -2334,7 +2334,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30336 add_ref_cm_node(cm_node);
30337 } else if (cm_node->state == NES_CM_STATE_TSA) {
30338 rem_ref_cm_node(cm_core, cm_node);
30339 - atomic_inc(&cm_accel_dropped_pkts);
30340 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30341 dev_kfree_skb_any(skb);
30342 break;
30343 }
30344 @@ -2640,7 +2640,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30345
30346 if ((cm_id) && (cm_id->event_handler)) {
30347 if (issue_disconn) {
30348 - atomic_inc(&cm_disconnects);
30349 + atomic_inc_unchecked(&cm_disconnects);
30350 cm_event.event = IW_CM_EVENT_DISCONNECT;
30351 cm_event.status = disconn_status;
30352 cm_event.local_addr = cm_id->local_addr;
30353 @@ -2662,7 +2662,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30354 }
30355
30356 if (issue_close) {
30357 - atomic_inc(&cm_closes);
30358 + atomic_inc_unchecked(&cm_closes);
30359 nes_disconnect(nesqp, 1);
30360
30361 cm_id->provider_data = nesqp;
30362 @@ -2793,7 +2793,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30363
30364 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30365 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30366 - atomic_inc(&cm_accepts);
30367 + atomic_inc_unchecked(&cm_accepts);
30368
30369 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30370 netdev_refcnt_read(nesvnic->netdev));
30371 @@ -3003,7 +3003,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30372
30373 struct nes_cm_core *cm_core;
30374
30375 - atomic_inc(&cm_rejects);
30376 + atomic_inc_unchecked(&cm_rejects);
30377 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30378 loopback = cm_node->loopbackpartner;
30379 cm_core = cm_node->cm_core;
30380 @@ -3069,7 +3069,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30381 ntohl(cm_id->local_addr.sin_addr.s_addr),
30382 ntohs(cm_id->local_addr.sin_port));
30383
30384 - atomic_inc(&cm_connects);
30385 + atomic_inc_unchecked(&cm_connects);
30386 nesqp->active_conn = 1;
30387
30388 /* cache the cm_id in the qp */
30389 @@ -3175,7 +3175,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30390 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30391 return err;
30392 }
30393 - atomic_inc(&cm_listens_created);
30394 + atomic_inc_unchecked(&cm_listens_created);
30395 }
30396
30397 cm_id->add_ref(cm_id);
30398 @@ -3280,7 +3280,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30399 if (nesqp->destroyed) {
30400 return;
30401 }
30402 - atomic_inc(&cm_connecteds);
30403 + atomic_inc_unchecked(&cm_connecteds);
30404 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30405 " local port 0x%04X. jiffies = %lu.\n",
30406 nesqp->hwqp.qp_id,
30407 @@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30408
30409 cm_id->add_ref(cm_id);
30410 ret = cm_id->event_handler(cm_id, &cm_event);
30411 - atomic_inc(&cm_closes);
30412 + atomic_inc_unchecked(&cm_closes);
30413 cm_event.event = IW_CM_EVENT_CLOSE;
30414 cm_event.status = 0;
30415 cm_event.provider_data = cm_id->provider_data;
30416 @@ -3531,7 +3531,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30417 return;
30418 cm_id = cm_node->cm_id;
30419
30420 - atomic_inc(&cm_connect_reqs);
30421 + atomic_inc_unchecked(&cm_connect_reqs);
30422 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30423 cm_node, cm_id, jiffies);
30424
30425 @@ -3569,7 +3569,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30426 return;
30427 cm_id = cm_node->cm_id;
30428
30429 - atomic_inc(&cm_connect_reqs);
30430 + atomic_inc_unchecked(&cm_connect_reqs);
30431 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30432 cm_node, cm_id, jiffies);
30433
30434 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30435 index 9d7ffeb..a95dd7d 100644
30436 --- a/drivers/infiniband/hw/nes/nes_nic.c
30437 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30438 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30439 target_stat_values[++index] = mh_detected;
30440 target_stat_values[++index] = mh_pauses_sent;
30441 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30442 - target_stat_values[++index] = atomic_read(&cm_connects);
30443 - target_stat_values[++index] = atomic_read(&cm_accepts);
30444 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30445 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30446 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30447 - target_stat_values[++index] = atomic_read(&cm_rejects);
30448 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30449 - target_stat_values[++index] = atomic_read(&qps_created);
30450 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30451 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30452 - target_stat_values[++index] = atomic_read(&cm_closes);
30453 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30454 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30455 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30456 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30457 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30458 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30459 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30460 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30461 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30462 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30463 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30464 target_stat_values[++index] = cm_packets_sent;
30465 target_stat_values[++index] = cm_packets_bounced;
30466 target_stat_values[++index] = cm_packets_created;
30467 target_stat_values[++index] = cm_packets_received;
30468 target_stat_values[++index] = cm_packets_dropped;
30469 target_stat_values[++index] = cm_packets_retrans;
30470 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30471 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30472 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30473 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30474 target_stat_values[++index] = cm_backlog_drops;
30475 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30476 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30477 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30478 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30479 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30480 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30481 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30482 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30483 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30484 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30485 target_stat_values[++index] = nesadapter->free_4kpbl;
30486 target_stat_values[++index] = nesadapter->free_256pbl;
30487 target_stat_values[++index] = int_mod_timer_init;
30488 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30489 index 9f2f7d4..6d2fee2 100644
30490 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30491 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30492 @@ -46,9 +46,9 @@
30493
30494 #include <rdma/ib_umem.h>
30495
30496 -atomic_t mod_qp_timouts;
30497 -atomic_t qps_created;
30498 -atomic_t sw_qps_destroyed;
30499 +atomic_unchecked_t mod_qp_timouts;
30500 +atomic_unchecked_t qps_created;
30501 +atomic_unchecked_t sw_qps_destroyed;
30502
30503 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30504
30505 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30506 if (init_attr->create_flags)
30507 return ERR_PTR(-EINVAL);
30508
30509 - atomic_inc(&qps_created);
30510 + atomic_inc_unchecked(&qps_created);
30511 switch (init_attr->qp_type) {
30512 case IB_QPT_RC:
30513 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30514 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30515 struct iw_cm_event cm_event;
30516 int ret;
30517
30518 - atomic_inc(&sw_qps_destroyed);
30519 + atomic_inc_unchecked(&sw_qps_destroyed);
30520 nesqp->destroyed = 1;
30521
30522 /* Blow away the connection if it exists. */
30523 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30524 index c9624ea..e025b66 100644
30525 --- a/drivers/infiniband/hw/qib/qib.h
30526 +++ b/drivers/infiniband/hw/qib/qib.h
30527 @@ -51,6 +51,7 @@
30528 #include <linux/completion.h>
30529 #include <linux/kref.h>
30530 #include <linux/sched.h>
30531 +#include <linux/slab.h>
30532
30533 #include "qib_common.h"
30534 #include "qib_verbs.h"
30535 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30536 index c351aa4..e6967c2 100644
30537 --- a/drivers/input/gameport/gameport.c
30538 +++ b/drivers/input/gameport/gameport.c
30539 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30540 */
30541 static void gameport_init_port(struct gameport *gameport)
30542 {
30543 - static atomic_t gameport_no = ATOMIC_INIT(0);
30544 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30545
30546 __module_get(THIS_MODULE);
30547
30548 mutex_init(&gameport->drv_mutex);
30549 device_initialize(&gameport->dev);
30550 dev_set_name(&gameport->dev, "gameport%lu",
30551 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30552 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30553 gameport->dev.bus = &gameport_bus;
30554 gameport->dev.release = gameport_release_port;
30555 if (gameport->parent)
30556 diff --git a/drivers/input/input.c b/drivers/input/input.c
30557 index da38d97..2aa0b79 100644
30558 --- a/drivers/input/input.c
30559 +++ b/drivers/input/input.c
30560 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30561 */
30562 int input_register_device(struct input_dev *dev)
30563 {
30564 - static atomic_t input_no = ATOMIC_INIT(0);
30565 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30566 struct input_handler *handler;
30567 const char *path;
30568 int error;
30569 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30570 dev->setkeycode = input_default_setkeycode;
30571
30572 dev_set_name(&dev->dev, "input%ld",
30573 - (unsigned long) atomic_inc_return(&input_no) - 1);
30574 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30575
30576 error = device_add(&dev->dev);
30577 if (error)
30578 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30579 index b8d8611..15f8d2c 100644
30580 --- a/drivers/input/joystick/sidewinder.c
30581 +++ b/drivers/input/joystick/sidewinder.c
30582 @@ -30,6 +30,7 @@
30583 #include <linux/kernel.h>
30584 #include <linux/module.h>
30585 #include <linux/slab.h>
30586 +#include <linux/sched.h>
30587 #include <linux/init.h>
30588 #include <linux/input.h>
30589 #include <linux/gameport.h>
30590 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30591 unsigned char buf[SW_LENGTH];
30592 int i;
30593
30594 + pax_track_stack();
30595 +
30596 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30597
30598 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30599 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30600 index d728875..844c89b 100644
30601 --- a/drivers/input/joystick/xpad.c
30602 +++ b/drivers/input/joystick/xpad.c
30603 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30604
30605 static int xpad_led_probe(struct usb_xpad *xpad)
30606 {
30607 - static atomic_t led_seq = ATOMIC_INIT(0);
30608 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30609 long led_no;
30610 struct xpad_led *led;
30611 struct led_classdev *led_cdev;
30612 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30613 if (!led)
30614 return -ENOMEM;
30615
30616 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30617 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30618
30619 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30620 led->xpad = xpad;
30621 diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
30622 index 1633b63..09f8f20 100644
30623 --- a/drivers/input/misc/cma3000_d0x.c
30624 +++ b/drivers/input/misc/cma3000_d0x.c
30625 @@ -114,8 +114,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
30626 static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
30627 {
30628 struct cma3000_accl_data *data = dev_id;
30629 - int datax, datay, dataz;
30630 - u8 ctrl, mode, range, intr_status;
30631 + int datax, datay, dataz, intr_status;
30632 + u8 ctrl, mode, range;
30633
30634 intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
30635 if (intr_status < 0)
30636 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30637 index 0110b5a..d3ad144 100644
30638 --- a/drivers/input/mousedev.c
30639 +++ b/drivers/input/mousedev.c
30640 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30641
30642 spin_unlock_irq(&client->packet_lock);
30643
30644 - if (copy_to_user(buffer, data, count))
30645 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30646 return -EFAULT;
30647
30648 return count;
30649 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30650 index ba70058..571d25d 100644
30651 --- a/drivers/input/serio/serio.c
30652 +++ b/drivers/input/serio/serio.c
30653 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30654 */
30655 static void serio_init_port(struct serio *serio)
30656 {
30657 - static atomic_t serio_no = ATOMIC_INIT(0);
30658 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30659
30660 __module_get(THIS_MODULE);
30661
30662 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30663 mutex_init(&serio->drv_mutex);
30664 device_initialize(&serio->dev);
30665 dev_set_name(&serio->dev, "serio%ld",
30666 - (long)atomic_inc_return(&serio_no) - 1);
30667 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30668 serio->dev.bus = &serio_bus;
30669 serio->dev.release = serio_release_port;
30670 serio->dev.groups = serio_device_attr_groups;
30671 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30672 index e44933d..9ba484a 100644
30673 --- a/drivers/isdn/capi/capi.c
30674 +++ b/drivers/isdn/capi/capi.c
30675 @@ -83,8 +83,8 @@ struct capiminor {
30676
30677 struct capi20_appl *ap;
30678 u32 ncci;
30679 - atomic_t datahandle;
30680 - atomic_t msgid;
30681 + atomic_unchecked_t datahandle;
30682 + atomic_unchecked_t msgid;
30683
30684 struct tty_port port;
30685 int ttyinstop;
30686 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30687 capimsg_setu16(s, 2, mp->ap->applid);
30688 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30689 capimsg_setu8 (s, 5, CAPI_RESP);
30690 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30691 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30692 capimsg_setu32(s, 8, mp->ncci);
30693 capimsg_setu16(s, 12, datahandle);
30694 }
30695 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30696 mp->outbytes -= len;
30697 spin_unlock_bh(&mp->outlock);
30698
30699 - datahandle = atomic_inc_return(&mp->datahandle);
30700 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30701 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30702 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30703 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30704 capimsg_setu16(skb->data, 2, mp->ap->applid);
30705 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30706 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30707 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30708 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30709 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30710 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30711 capimsg_setu16(skb->data, 16, len); /* Data length */
30712 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30713 index db621db..825ea1a 100644
30714 --- a/drivers/isdn/gigaset/common.c
30715 +++ b/drivers/isdn/gigaset/common.c
30716 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30717 cs->commands_pending = 0;
30718 cs->cur_at_seq = 0;
30719 cs->gotfwver = -1;
30720 - cs->open_count = 0;
30721 + local_set(&cs->open_count, 0);
30722 cs->dev = NULL;
30723 cs->tty = NULL;
30724 cs->tty_dev = NULL;
30725 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30726 index 212efaf..f187c6b 100644
30727 --- a/drivers/isdn/gigaset/gigaset.h
30728 +++ b/drivers/isdn/gigaset/gigaset.h
30729 @@ -35,6 +35,7 @@
30730 #include <linux/tty_driver.h>
30731 #include <linux/list.h>
30732 #include <linux/atomic.h>
30733 +#include <asm/local.h>
30734
30735 #define GIG_VERSION {0, 5, 0, 0}
30736 #define GIG_COMPAT {0, 4, 0, 0}
30737 @@ -433,7 +434,7 @@ struct cardstate {
30738 spinlock_t cmdlock;
30739 unsigned curlen, cmdbytes;
30740
30741 - unsigned open_count;
30742 + local_t open_count;
30743 struct tty_struct *tty;
30744 struct tasklet_struct if_wake_tasklet;
30745 unsigned control_state;
30746 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30747 index e35058b..5898a8b 100644
30748 --- a/drivers/isdn/gigaset/interface.c
30749 +++ b/drivers/isdn/gigaset/interface.c
30750 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30751 }
30752 tty->driver_data = cs;
30753
30754 - ++cs->open_count;
30755 -
30756 - if (cs->open_count == 1) {
30757 + if (local_inc_return(&cs->open_count) == 1) {
30758 spin_lock_irqsave(&cs->lock, flags);
30759 cs->tty = tty;
30760 spin_unlock_irqrestore(&cs->lock, flags);
30761 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30762
30763 if (!cs->connected)
30764 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30765 - else if (!cs->open_count)
30766 + else if (!local_read(&cs->open_count))
30767 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30768 else {
30769 - if (!--cs->open_count) {
30770 + if (!local_dec_return(&cs->open_count)) {
30771 spin_lock_irqsave(&cs->lock, flags);
30772 cs->tty = NULL;
30773 spin_unlock_irqrestore(&cs->lock, flags);
30774 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty,
30775 if (!cs->connected) {
30776 gig_dbg(DEBUG_IF, "not connected");
30777 retval = -ENODEV;
30778 - } else if (!cs->open_count)
30779 + } else if (!local_read(&cs->open_count))
30780 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30781 else {
30782 retval = 0;
30783 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30784 retval = -ENODEV;
30785 goto done;
30786 }
30787 - if (!cs->open_count) {
30788 + if (!local_read(&cs->open_count)) {
30789 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30790 retval = -ENODEV;
30791 goto done;
30792 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_struct *tty)
30793 if (!cs->connected) {
30794 gig_dbg(DEBUG_IF, "not connected");
30795 retval = -ENODEV;
30796 - } else if (!cs->open_count)
30797 + } else if (!local_read(&cs->open_count))
30798 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30799 else if (cs->mstate != MS_LOCKED) {
30800 dev_warn(cs->dev, "can't write to unlocked device\n");
30801 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30802
30803 if (!cs->connected)
30804 gig_dbg(DEBUG_IF, "not connected");
30805 - else if (!cs->open_count)
30806 + else if (!local_read(&cs->open_count))
30807 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30808 else if (cs->mstate != MS_LOCKED)
30809 dev_warn(cs->dev, "can't write to unlocked device\n");
30810 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struct *tty)
30811
30812 if (!cs->connected)
30813 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30814 - else if (!cs->open_count)
30815 + else if (!local_read(&cs->open_count))
30816 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30817 else
30818 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30819 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_struct *tty)
30820
30821 if (!cs->connected)
30822 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30823 - else if (!cs->open_count)
30824 + else if (!local_read(&cs->open_count))
30825 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30826 else
30827 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30828 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30829 goto out;
30830 }
30831
30832 - if (!cs->open_count) {
30833 + if (!local_read(&cs->open_count)) {
30834 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30835 goto out;
30836 }
30837 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30838 index 2a57da59..e7a12ed 100644
30839 --- a/drivers/isdn/hardware/avm/b1.c
30840 +++ b/drivers/isdn/hardware/avm/b1.c
30841 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30842 }
30843 if (left) {
30844 if (t4file->user) {
30845 - if (copy_from_user(buf, dp, left))
30846 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30847 return -EFAULT;
30848 } else {
30849 memcpy(buf, dp, left);
30850 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30851 }
30852 if (left) {
30853 if (config->user) {
30854 - if (copy_from_user(buf, dp, left))
30855 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30856 return -EFAULT;
30857 } else {
30858 memcpy(buf, dp, left);
30859 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
30860 index f130724..c373c68 100644
30861 --- a/drivers/isdn/hardware/eicon/capidtmf.c
30862 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
30863 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
30864 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30865 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30866
30867 + pax_track_stack();
30868
30869 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30870 {
30871 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
30872 index 4d425c6..a9be6c4 100644
30873 --- a/drivers/isdn/hardware/eicon/capifunc.c
30874 +++ b/drivers/isdn/hardware/eicon/capifunc.c
30875 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30876 IDI_SYNC_REQ req;
30877 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30878
30879 + pax_track_stack();
30880 +
30881 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30882
30883 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30884 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
30885 index 3029234..ef0d9e2 100644
30886 --- a/drivers/isdn/hardware/eicon/diddfunc.c
30887 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
30888 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30889 IDI_SYNC_REQ req;
30890 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30891
30892 + pax_track_stack();
30893 +
30894 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30895
30896 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30897 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
30898 index 0bbee78..a0d0a01 100644
30899 --- a/drivers/isdn/hardware/eicon/divasfunc.c
30900 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
30901 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30902 IDI_SYNC_REQ req;
30903 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30904
30905 + pax_track_stack();
30906 +
30907 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30908
30909 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30910 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30911 index 85784a7..a19ca98 100644
30912 --- a/drivers/isdn/hardware/eicon/divasync.h
30913 +++ b/drivers/isdn/hardware/eicon/divasync.h
30914 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30915 } diva_didd_add_adapter_t;
30916 typedef struct _diva_didd_remove_adapter {
30917 IDI_CALL p_request;
30918 -} diva_didd_remove_adapter_t;
30919 +} __no_const diva_didd_remove_adapter_t;
30920 typedef struct _diva_didd_read_adapter_array {
30921 void * buffer;
30922 dword length;
30923 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
30924 index db87d51..7d09acf 100644
30925 --- a/drivers/isdn/hardware/eicon/idifunc.c
30926 +++ b/drivers/isdn/hardware/eicon/idifunc.c
30927 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30928 IDI_SYNC_REQ req;
30929 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30930
30931 + pax_track_stack();
30932 +
30933 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30934
30935 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30936 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
30937 index a339598..b6a8bfc 100644
30938 --- a/drivers/isdn/hardware/eicon/message.c
30939 +++ b/drivers/isdn/hardware/eicon/message.c
30940 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
30941 dword d;
30942 word w;
30943
30944 + pax_track_stack();
30945 +
30946 a = plci->adapter;
30947 Id = ((word)plci->Id<<8)|a->Id;
30948 PUT_WORD(&SS_Ind[4],0x0000);
30949 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
30950 word j, n, w;
30951 dword d;
30952
30953 + pax_track_stack();
30954 +
30955
30956 for(i=0;i<8;i++) bp_parms[i].length = 0;
30957 for(i=0;i<2;i++) global_config[i].length = 0;
30958 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
30959 const byte llc3[] = {4,3,2,2,6,6,0};
30960 const byte header[] = {0,2,3,3,0,0,0};
30961
30962 + pax_track_stack();
30963 +
30964 for(i=0;i<8;i++) bp_parms[i].length = 0;
30965 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30966 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30967 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
30968 word appl_number_group_type[MAX_APPL];
30969 PLCI *auxplci;
30970
30971 + pax_track_stack();
30972 +
30973 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30974
30975 if(!a->group_optimization_enabled)
30976 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
30977 index a564b75..f3cf8b5 100644
30978 --- a/drivers/isdn/hardware/eicon/mntfunc.c
30979 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
30980 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30981 IDI_SYNC_REQ req;
30982 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30983
30984 + pax_track_stack();
30985 +
30986 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30987
30988 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30989 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30990 index a3bd163..8956575 100644
30991 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30992 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30993 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30994 typedef struct _diva_os_idi_adapter_interface {
30995 diva_init_card_proc_t cleanup_adapter_proc;
30996 diva_cmd_card_proc_t cmd_proc;
30997 -} diva_os_idi_adapter_interface_t;
30998 +} __no_const diva_os_idi_adapter_interface_t;
30999
31000 typedef struct _diva_os_xdi_adapter {
31001 struct list_head link;
31002 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
31003 index 6ed82ad..b05ac05 100644
31004 --- a/drivers/isdn/i4l/isdn_common.c
31005 +++ b/drivers/isdn/i4l/isdn_common.c
31006 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
31007 } iocpar;
31008 void __user *argp = (void __user *)arg;
31009
31010 + pax_track_stack();
31011 +
31012 #define name iocpar.name
31013 #define bname iocpar.bname
31014 #define iocts iocpar.iocts
31015 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
31016 index 1f355bb..43f1fea 100644
31017 --- a/drivers/isdn/icn/icn.c
31018 +++ b/drivers/isdn/icn/icn.c
31019 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
31020 if (count > len)
31021 count = len;
31022 if (user) {
31023 - if (copy_from_user(msg, buf, count))
31024 + if (count > sizeof msg || copy_from_user(msg, buf, count))
31025 return -EFAULT;
31026 } else
31027 memcpy(msg, buf, count);
31028 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
31029 index 2535933..09a8e86 100644
31030 --- a/drivers/lguest/core.c
31031 +++ b/drivers/lguest/core.c
31032 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
31033 * it's worked so far. The end address needs +1 because __get_vm_area
31034 * allocates an extra guard page, so we need space for that.
31035 */
31036 +
31037 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31038 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31039 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31040 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31041 +#else
31042 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31043 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31044 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31045 +#endif
31046 +
31047 if (!switcher_vma) {
31048 err = -ENOMEM;
31049 printk("lguest: could not map switcher pages high\n");
31050 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
31051 * Now the Switcher is mapped at the right address, we can't fail!
31052 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
31053 */
31054 - memcpy(switcher_vma->addr, start_switcher_text,
31055 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31056 end_switcher_text - start_switcher_text);
31057
31058 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31059 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
31060 index 65af42f..530c87a 100644
31061 --- a/drivers/lguest/x86/core.c
31062 +++ b/drivers/lguest/x86/core.c
31063 @@ -59,7 +59,7 @@ static struct {
31064 /* Offset from where switcher.S was compiled to where we've copied it */
31065 static unsigned long switcher_offset(void)
31066 {
31067 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31068 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31069 }
31070
31071 /* This cpu's struct lguest_pages. */
31072 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
31073 * These copies are pretty cheap, so we do them unconditionally: */
31074 /* Save the current Host top-level page directory.
31075 */
31076 +
31077 +#ifdef CONFIG_PAX_PER_CPU_PGD
31078 + pages->state.host_cr3 = read_cr3();
31079 +#else
31080 pages->state.host_cr3 = __pa(current->mm->pgd);
31081 +#endif
31082 +
31083 /*
31084 * Set up the Guest's page tables to see this CPU's pages (and no
31085 * other CPU's pages).
31086 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31087 * compiled-in switcher code and the high-mapped copy we just made.
31088 */
31089 for (i = 0; i < IDT_ENTRIES; i++)
31090 - default_idt_entries[i] += switcher_offset();
31091 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31092
31093 /*
31094 * Set up the Switcher's per-cpu areas.
31095 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31096 * it will be undisturbed when we switch. To change %cs and jump we
31097 * need this structure to feed to Intel's "lcall" instruction.
31098 */
31099 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31100 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31101 lguest_entry.segment = LGUEST_CS;
31102
31103 /*
31104 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31105 index 40634b0..4f5855e 100644
31106 --- a/drivers/lguest/x86/switcher_32.S
31107 +++ b/drivers/lguest/x86/switcher_32.S
31108 @@ -87,6 +87,7 @@
31109 #include <asm/page.h>
31110 #include <asm/segment.h>
31111 #include <asm/lguest.h>
31112 +#include <asm/processor-flags.h>
31113
31114 // We mark the start of the code to copy
31115 // It's placed in .text tho it's never run here
31116 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31117 // Changes type when we load it: damn Intel!
31118 // For after we switch over our page tables
31119 // That entry will be read-only: we'd crash.
31120 +
31121 +#ifdef CONFIG_PAX_KERNEXEC
31122 + mov %cr0, %edx
31123 + xor $X86_CR0_WP, %edx
31124 + mov %edx, %cr0
31125 +#endif
31126 +
31127 movl $(GDT_ENTRY_TSS*8), %edx
31128 ltr %dx
31129
31130 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31131 // Let's clear it again for our return.
31132 // The GDT descriptor of the Host
31133 // Points to the table after two "size" bytes
31134 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31135 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31136 // Clear "used" from type field (byte 5, bit 2)
31137 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31138 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31139 +
31140 +#ifdef CONFIG_PAX_KERNEXEC
31141 + mov %cr0, %eax
31142 + xor $X86_CR0_WP, %eax
31143 + mov %eax, %cr0
31144 +#endif
31145
31146 // Once our page table's switched, the Guest is live!
31147 // The Host fades as we run this final step.
31148 @@ -295,13 +309,12 @@ deliver_to_host:
31149 // I consulted gcc, and it gave
31150 // These instructions, which I gladly credit:
31151 leal (%edx,%ebx,8), %eax
31152 - movzwl (%eax),%edx
31153 - movl 4(%eax), %eax
31154 - xorw %ax, %ax
31155 - orl %eax, %edx
31156 + movl 4(%eax), %edx
31157 + movw (%eax), %dx
31158 // Now the address of the handler's in %edx
31159 // We call it now: its "iret" drops us home.
31160 - jmp *%edx
31161 + ljmp $__KERNEL_CS, $1f
31162 +1: jmp *%edx
31163
31164 // Every interrupt can come to us here
31165 // But we must truly tell each apart.
31166 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31167 index 4daf9e5..b8d1d0f 100644
31168 --- a/drivers/macintosh/macio_asic.c
31169 +++ b/drivers/macintosh/macio_asic.c
31170 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31171 * MacIO is matched against any Apple ID, it's probe() function
31172 * will then decide wether it applies or not
31173 */
31174 -static const struct pci_device_id __devinitdata pci_ids [] = { {
31175 +static const struct pci_device_id __devinitconst pci_ids [] = { {
31176 .vendor = PCI_VENDOR_ID_APPLE,
31177 .device = PCI_ANY_ID,
31178 .subvendor = PCI_ANY_ID,
31179 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31180 index 2e9a3ca..c2fb229 100644
31181 --- a/drivers/md/dm-ioctl.c
31182 +++ b/drivers/md/dm-ioctl.c
31183 @@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31184 cmd == DM_LIST_VERSIONS_CMD)
31185 return 0;
31186
31187 - if ((cmd == DM_DEV_CREATE_CMD)) {
31188 + if (cmd == DM_DEV_CREATE_CMD) {
31189 if (!*param->name) {
31190 DMWARN("name not supplied when creating device");
31191 return -EINVAL;
31192 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31193 index 9bfd057..01180bc 100644
31194 --- a/drivers/md/dm-raid1.c
31195 +++ b/drivers/md/dm-raid1.c
31196 @@ -40,7 +40,7 @@ enum dm_raid1_error {
31197
31198 struct mirror {
31199 struct mirror_set *ms;
31200 - atomic_t error_count;
31201 + atomic_unchecked_t error_count;
31202 unsigned long error_type;
31203 struct dm_dev *dev;
31204 sector_t offset;
31205 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31206 struct mirror *m;
31207
31208 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31209 - if (!atomic_read(&m->error_count))
31210 + if (!atomic_read_unchecked(&m->error_count))
31211 return m;
31212
31213 return NULL;
31214 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31215 * simple way to tell if a device has encountered
31216 * errors.
31217 */
31218 - atomic_inc(&m->error_count);
31219 + atomic_inc_unchecked(&m->error_count);
31220
31221 if (test_and_set_bit(error_type, &m->error_type))
31222 return;
31223 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31224 struct mirror *m = get_default_mirror(ms);
31225
31226 do {
31227 - if (likely(!atomic_read(&m->error_count)))
31228 + if (likely(!atomic_read_unchecked(&m->error_count)))
31229 return m;
31230
31231 if (m-- == ms->mirror)
31232 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31233 {
31234 struct mirror *default_mirror = get_default_mirror(m->ms);
31235
31236 - return !atomic_read(&default_mirror->error_count);
31237 + return !atomic_read_unchecked(&default_mirror->error_count);
31238 }
31239
31240 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31241 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31242 */
31243 if (likely(region_in_sync(ms, region, 1)))
31244 m = choose_mirror(ms, bio->bi_sector);
31245 - else if (m && atomic_read(&m->error_count))
31246 + else if (m && atomic_read_unchecked(&m->error_count))
31247 m = NULL;
31248
31249 if (likely(m))
31250 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31251 }
31252
31253 ms->mirror[mirror].ms = ms;
31254 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31255 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31256 ms->mirror[mirror].error_type = 0;
31257 ms->mirror[mirror].offset = offset;
31258
31259 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31260 */
31261 static char device_status_char(struct mirror *m)
31262 {
31263 - if (!atomic_read(&(m->error_count)))
31264 + if (!atomic_read_unchecked(&(m->error_count)))
31265 return 'A';
31266
31267 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31268 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31269 index 3d80cf0..b77cc47 100644
31270 --- a/drivers/md/dm-stripe.c
31271 +++ b/drivers/md/dm-stripe.c
31272 @@ -20,7 +20,7 @@ struct stripe {
31273 struct dm_dev *dev;
31274 sector_t physical_start;
31275
31276 - atomic_t error_count;
31277 + atomic_unchecked_t error_count;
31278 };
31279
31280 struct stripe_c {
31281 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31282 kfree(sc);
31283 return r;
31284 }
31285 - atomic_set(&(sc->stripe[i].error_count), 0);
31286 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31287 }
31288
31289 ti->private = sc;
31290 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31291 DMEMIT("%d ", sc->stripes);
31292 for (i = 0; i < sc->stripes; i++) {
31293 DMEMIT("%s ", sc->stripe[i].dev->name);
31294 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31295 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31296 'D' : 'A';
31297 }
31298 buffer[i] = '\0';
31299 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31300 */
31301 for (i = 0; i < sc->stripes; i++)
31302 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31303 - atomic_inc(&(sc->stripe[i].error_count));
31304 - if (atomic_read(&(sc->stripe[i].error_count)) <
31305 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31306 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31307 DM_IO_ERROR_THRESHOLD)
31308 schedule_work(&sc->trigger_event);
31309 }
31310 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31311 index bc04518..7a83b81 100644
31312 --- a/drivers/md/dm-table.c
31313 +++ b/drivers/md/dm-table.c
31314 @@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31315 if (!dev_size)
31316 return 0;
31317
31318 - if ((start >= dev_size) || (start + len > dev_size)) {
31319 + if ((start >= dev_size) || (len > dev_size - start)) {
31320 DMWARN("%s: %s too small for target: "
31321 "start=%llu, len=%llu, dev_size=%llu",
31322 dm_device_name(ti->table->md), bdevname(bdev, b),
31323 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31324 index 52b39f3..83a8b6b 100644
31325 --- a/drivers/md/dm.c
31326 +++ b/drivers/md/dm.c
31327 @@ -165,9 +165,9 @@ struct mapped_device {
31328 /*
31329 * Event handling.
31330 */
31331 - atomic_t event_nr;
31332 + atomic_unchecked_t event_nr;
31333 wait_queue_head_t eventq;
31334 - atomic_t uevent_seq;
31335 + atomic_unchecked_t uevent_seq;
31336 struct list_head uevent_list;
31337 spinlock_t uevent_lock; /* Protect access to uevent_list */
31338
31339 @@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(int minor)
31340 rwlock_init(&md->map_lock);
31341 atomic_set(&md->holders, 1);
31342 atomic_set(&md->open_count, 0);
31343 - atomic_set(&md->event_nr, 0);
31344 - atomic_set(&md->uevent_seq, 0);
31345 + atomic_set_unchecked(&md->event_nr, 0);
31346 + atomic_set_unchecked(&md->uevent_seq, 0);
31347 INIT_LIST_HEAD(&md->uevent_list);
31348 spin_lock_init(&md->uevent_lock);
31349
31350 @@ -1978,7 +1978,7 @@ static void event_callback(void *context)
31351
31352 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31353
31354 - atomic_inc(&md->event_nr);
31355 + atomic_inc_unchecked(&md->event_nr);
31356 wake_up(&md->eventq);
31357 }
31358
31359 @@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31360
31361 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31362 {
31363 - return atomic_add_return(1, &md->uevent_seq);
31364 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31365 }
31366
31367 uint32_t dm_get_event_nr(struct mapped_device *md)
31368 {
31369 - return atomic_read(&md->event_nr);
31370 + return atomic_read_unchecked(&md->event_nr);
31371 }
31372
31373 int dm_wait_event(struct mapped_device *md, int event_nr)
31374 {
31375 return wait_event_interruptible(md->eventq,
31376 - (event_nr != atomic_read(&md->event_nr)));
31377 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31378 }
31379
31380 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31381 diff --git a/drivers/md/md.c b/drivers/md/md.c
31382 index 5c95ccb..217fa57 100644
31383 --- a/drivers/md/md.c
31384 +++ b/drivers/md/md.c
31385 @@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31386 * start build, activate spare
31387 */
31388 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31389 -static atomic_t md_event_count;
31390 +static atomic_unchecked_t md_event_count;
31391 void md_new_event(mddev_t *mddev)
31392 {
31393 - atomic_inc(&md_event_count);
31394 + atomic_inc_unchecked(&md_event_count);
31395 wake_up(&md_event_waiters);
31396 }
31397 EXPORT_SYMBOL_GPL(md_new_event);
31398 @@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31399 */
31400 static void md_new_event_inintr(mddev_t *mddev)
31401 {
31402 - atomic_inc(&md_event_count);
31403 + atomic_inc_unchecked(&md_event_count);
31404 wake_up(&md_event_waiters);
31405 }
31406
31407 @@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
31408
31409 rdev->preferred_minor = 0xffff;
31410 rdev->data_offset = le64_to_cpu(sb->data_offset);
31411 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31412 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31413
31414 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31415 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31416 @@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
31417 else
31418 sb->resync_offset = cpu_to_le64(0);
31419
31420 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31421 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31422
31423 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31424 sb->size = cpu_to_le64(mddev->dev_sectors);
31425 @@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31426 static ssize_t
31427 errors_show(mdk_rdev_t *rdev, char *page)
31428 {
31429 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31430 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31431 }
31432
31433 static ssize_t
31434 @@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
31435 char *e;
31436 unsigned long n = simple_strtoul(buf, &e, 10);
31437 if (*buf && (*e == 0 || *e == '\n')) {
31438 - atomic_set(&rdev->corrected_errors, n);
31439 + atomic_set_unchecked(&rdev->corrected_errors, n);
31440 return len;
31441 }
31442 return -EINVAL;
31443 @@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
31444 rdev->sb_loaded = 0;
31445 rdev->bb_page = NULL;
31446 atomic_set(&rdev->nr_pending, 0);
31447 - atomic_set(&rdev->read_errors, 0);
31448 - atomic_set(&rdev->corrected_errors, 0);
31449 + atomic_set_unchecked(&rdev->read_errors, 0);
31450 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31451
31452 INIT_LIST_HEAD(&rdev->same_set);
31453 init_waitqueue_head(&rdev->blocked_wait);
31454 @@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31455
31456 spin_unlock(&pers_lock);
31457 seq_printf(seq, "\n");
31458 - seq->poll_event = atomic_read(&md_event_count);
31459 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31460 return 0;
31461 }
31462 if (v == (void*)2) {
31463 @@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31464 chunk_kb ? "KB" : "B");
31465 if (bitmap->file) {
31466 seq_printf(seq, ", file: ");
31467 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31468 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31469 }
31470
31471 seq_printf(seq, "\n");
31472 @@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31473 return error;
31474
31475 seq = file->private_data;
31476 - seq->poll_event = atomic_read(&md_event_count);
31477 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31478 return error;
31479 }
31480
31481 @@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31482 /* always allow read */
31483 mask = POLLIN | POLLRDNORM;
31484
31485 - if (seq->poll_event != atomic_read(&md_event_count))
31486 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31487 mask |= POLLERR | POLLPRI;
31488 return mask;
31489 }
31490 @@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
31491 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31492 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31493 (int)part_stat_read(&disk->part0, sectors[1]) -
31494 - atomic_read(&disk->sync_io);
31495 + atomic_read_unchecked(&disk->sync_io);
31496 /* sync IO will cause sync_io to increase before the disk_stats
31497 * as sync_io is counted when a request starts, and
31498 * disk_stats is counted when it completes.
31499 diff --git a/drivers/md/md.h b/drivers/md/md.h
31500 index 0a309dc..7e01d7f 100644
31501 --- a/drivers/md/md.h
31502 +++ b/drivers/md/md.h
31503 @@ -124,13 +124,13 @@ struct mdk_rdev_s
31504 * only maintained for arrays that
31505 * support hot removal
31506 */
31507 - atomic_t read_errors; /* number of consecutive read errors that
31508 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31509 * we have tried to ignore.
31510 */
31511 struct timespec last_read_error; /* monotonic time since our
31512 * last read error
31513 */
31514 - atomic_t corrected_errors; /* number of corrected read errors,
31515 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31516 * for reporting to userspace and storing
31517 * in superblock.
31518 */
31519 @@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
31520
31521 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31522 {
31523 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31524 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31525 }
31526
31527 struct mdk_personality
31528 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31529 index d9587df..83a0dc3 100644
31530 --- a/drivers/md/raid1.c
31531 +++ b/drivers/md/raid1.c
31532 @@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
31533 if (r1_sync_page_io(rdev, sect, s,
31534 bio->bi_io_vec[idx].bv_page,
31535 READ) != 0)
31536 - atomic_add(s, &rdev->corrected_errors);
31537 + atomic_add_unchecked(s, &rdev->corrected_errors);
31538 }
31539 sectors -= s;
31540 sect += s;
31541 @@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
31542 test_bit(In_sync, &rdev->flags)) {
31543 if (r1_sync_page_io(rdev, sect, s,
31544 conf->tmppage, READ)) {
31545 - atomic_add(s, &rdev->corrected_errors);
31546 + atomic_add_unchecked(s, &rdev->corrected_errors);
31547 printk(KERN_INFO
31548 "md/raid1:%s: read error corrected "
31549 "(%d sectors at %llu on %s)\n",
31550 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31551 index 1d44228..98db57d 100644
31552 --- a/drivers/md/raid10.c
31553 +++ b/drivers/md/raid10.c
31554 @@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bio, int error)
31555 /* The write handler will notice the lack of
31556 * R10BIO_Uptodate and record any errors etc
31557 */
31558 - atomic_add(r10_bio->sectors,
31559 + atomic_add_unchecked(r10_bio->sectors,
31560 &conf->mirrors[d].rdev->corrected_errors);
31561
31562 /* for reconstruct, we always reschedule after a read.
31563 @@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31564 {
31565 struct timespec cur_time_mon;
31566 unsigned long hours_since_last;
31567 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31568 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31569
31570 ktime_get_ts(&cur_time_mon);
31571
31572 @@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31573 * overflowing the shift of read_errors by hours_since_last.
31574 */
31575 if (hours_since_last >= 8 * sizeof(read_errors))
31576 - atomic_set(&rdev->read_errors, 0);
31577 + atomic_set_unchecked(&rdev->read_errors, 0);
31578 else
31579 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31580 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31581 }
31582
31583 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
31584 @@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31585 return;
31586
31587 check_decay_read_errors(mddev, rdev);
31588 - atomic_inc(&rdev->read_errors);
31589 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31590 + atomic_inc_unchecked(&rdev->read_errors);
31591 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31592 char b[BDEVNAME_SIZE];
31593 bdevname(rdev->bdev, b);
31594
31595 @@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31596 "md/raid10:%s: %s: Raid device exceeded "
31597 "read_error threshold [cur %d:max %d]\n",
31598 mdname(mddev), b,
31599 - atomic_read(&rdev->read_errors), max_read_errors);
31600 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31601 printk(KERN_NOTICE
31602 "md/raid10:%s: %s: Failing raid device\n",
31603 mdname(mddev), b);
31604 @@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31605 (unsigned long long)(
31606 sect + rdev->data_offset),
31607 bdevname(rdev->bdev, b));
31608 - atomic_add(s, &rdev->corrected_errors);
31609 + atomic_add_unchecked(s, &rdev->corrected_errors);
31610 }
31611
31612 rdev_dec_pending(rdev, mddev);
31613 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31614 index b6200c3..02e8702 100644
31615 --- a/drivers/md/raid5.c
31616 +++ b/drivers/md/raid5.c
31617 @@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31618 (unsigned long long)(sh->sector
31619 + rdev->data_offset),
31620 bdevname(rdev->bdev, b));
31621 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31622 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31623 clear_bit(R5_ReadError, &sh->dev[i].flags);
31624 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31625 }
31626 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31627 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31628 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31629 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31630 } else {
31631 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31632 int retry = 0;
31633 rdev = conf->disks[i].rdev;
31634
31635 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31636 - atomic_inc(&rdev->read_errors);
31637 + atomic_inc_unchecked(&rdev->read_errors);
31638 if (conf->mddev->degraded >= conf->max_degraded)
31639 printk_ratelimited(
31640 KERN_WARNING
31641 @@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31642 (unsigned long long)(sh->sector
31643 + rdev->data_offset),
31644 bdn);
31645 - else if (atomic_read(&rdev->read_errors)
31646 + else if (atomic_read_unchecked(&rdev->read_errors)
31647 > conf->max_nr_stripes)
31648 printk(KERN_WARNING
31649 "md/raid:%s: Too many read errors, failing device %s.\n",
31650 @@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
31651 sector_t r_sector;
31652 struct stripe_head sh2;
31653
31654 + pax_track_stack();
31655
31656 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31657 stripe = new_sector;
31658 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
31659 index 1d1d8d2..6c6837a 100644
31660 --- a/drivers/media/common/saa7146_hlp.c
31661 +++ b/drivers/media/common/saa7146_hlp.c
31662 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
31663
31664 int x[32], y[32], w[32], h[32];
31665
31666 + pax_track_stack();
31667 +
31668 /* clear out memory */
31669 memset(&line_list[0], 0x00, sizeof(u32)*32);
31670 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31671 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31672 index 573d540..16f78f3 100644
31673 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31674 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31675 @@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
31676 .subvendor = _subvend, .subdevice = _subdev, \
31677 .driver_data = (unsigned long)&_driverdata }
31678
31679 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31680 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31681 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31682 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31683 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31684 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31685 index 7ea517b..252fe54 100644
31686 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31687 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31688 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
31689 u8 buf[HOST_LINK_BUF_SIZE];
31690 int i;
31691
31692 + pax_track_stack();
31693 +
31694 dprintk("%s\n", __func__);
31695
31696 /* check if we have space for a link buf in the rx_buffer */
31697 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
31698 unsigned long timeout;
31699 int written;
31700
31701 + pax_track_stack();
31702 +
31703 dprintk("%s\n", __func__);
31704
31705 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31706 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31707 index a7d876f..8c21b61 100644
31708 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31709 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31710 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31711 union {
31712 dmx_ts_cb ts;
31713 dmx_section_cb sec;
31714 - } cb;
31715 + } __no_const cb;
31716
31717 struct dvb_demux *demux;
31718 void *priv;
31719 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31720 index f732877..d38c35a 100644
31721 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31722 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31723 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31724 const struct dvb_device *template, void *priv, int type)
31725 {
31726 struct dvb_device *dvbdev;
31727 - struct file_operations *dvbdevfops;
31728 + file_operations_no_const *dvbdevfops;
31729 struct device *clsdev;
31730 int minor;
31731 int id;
31732 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31733 index acb5fb2..2413f1d 100644
31734 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31735 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31736 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31737 struct dib0700_adapter_state {
31738 int (*set_param_save) (struct dvb_frontend *,
31739 struct dvb_frontend_parameters *);
31740 -};
31741 +} __no_const;
31742
31743 static int dib7070_set_param_override(struct dvb_frontend *fe,
31744 struct dvb_frontend_parameters *fep)
31745 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
31746 index a224e94..503b76a 100644
31747 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
31748 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
31749 @@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
31750 if (!buf)
31751 return -ENOMEM;
31752
31753 + pax_track_stack();
31754 +
31755 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31756 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
31757 hx.addr, hx.len, hx.chk);
31758 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31759 index 058b231..183d2b3 100644
31760 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31761 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31762 @@ -95,7 +95,7 @@ struct su3000_state {
31763
31764 struct s6x0_state {
31765 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31766 -};
31767 +} __no_const;
31768
31769 /* debug */
31770 static int dvb_usb_dw2102_debug;
31771 diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
31772 index 37b1469..28a6f6f 100644
31773 --- a/drivers/media/dvb/dvb-usb/lmedm04.c
31774 +++ b/drivers/media/dvb/dvb-usb/lmedm04.c
31775 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
31776 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
31777 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
31778
31779 + pax_track_stack();
31780
31781 data[0] = 0x8a;
31782 len_in = 1;
31783 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_device *dev)
31784 int ret = 0, len_in;
31785 u8 data[512] = {0};
31786
31787 + pax_track_stack();
31788 +
31789 data[0] = 0x0a;
31790 len_in = 1;
31791 info("FRM Firmware Cold Reset");
31792 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31793 index ba91735..4261d84 100644
31794 --- a/drivers/media/dvb/frontends/dib3000.h
31795 +++ b/drivers/media/dvb/frontends/dib3000.h
31796 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31797 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31798 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31799 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31800 -};
31801 +} __no_const;
31802
31803 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31804 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31805 diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
31806 index c283112..7f367a7 100644
31807 --- a/drivers/media/dvb/frontends/mb86a16.c
31808 +++ b/drivers/media/dvb/frontends/mb86a16.c
31809 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16_state *state)
31810 int ret = -1;
31811 int sync;
31812
31813 + pax_track_stack();
31814 +
31815 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
31816
31817 fcp = 3000;
31818 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
31819 index c709ce6..b3fe620 100644
31820 --- a/drivers/media/dvb/frontends/or51211.c
31821 +++ b/drivers/media/dvb/frontends/or51211.c
31822 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
31823 u8 tudata[585];
31824 int i;
31825
31826 + pax_track_stack();
31827 +
31828 dprintk("Firmware is %zd bytes\n",fw->size);
31829
31830 /* Get eprom data */
31831 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31832 index 0564192..75b16f5 100644
31833 --- a/drivers/media/dvb/ngene/ngene-cards.c
31834 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31835 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31836
31837 /****************************************************************************/
31838
31839 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31840 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31841 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31842 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31843 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31844 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31845 index 16a089f..ab1667d 100644
31846 --- a/drivers/media/radio/radio-cadet.c
31847 +++ b/drivers/media/radio/radio-cadet.c
31848 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31849 unsigned char readbuf[RDS_BUFFER];
31850 int i = 0;
31851
31852 + if (count > RDS_BUFFER)
31853 + return -EFAULT;
31854 mutex_lock(&dev->lock);
31855 if (dev->rdsstat == 0) {
31856 dev->rdsstat = 1;
31857 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31858 index 9cde353..8c6a1c3 100644
31859 --- a/drivers/media/video/au0828/au0828.h
31860 +++ b/drivers/media/video/au0828/au0828.h
31861 @@ -191,7 +191,7 @@ struct au0828_dev {
31862
31863 /* I2C */
31864 struct i2c_adapter i2c_adap;
31865 - struct i2c_algorithm i2c_algo;
31866 + i2c_algorithm_no_const i2c_algo;
31867 struct i2c_client i2c_client;
31868 u32 i2c_rc;
31869
31870 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
31871 index 9e2f870..22e3a08 100644
31872 --- a/drivers/media/video/cx18/cx18-driver.c
31873 +++ b/drivers/media/video/cx18/cx18-driver.c
31874 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
31875 struct i2c_client c;
31876 u8 eedata[256];
31877
31878 + pax_track_stack();
31879 +
31880 memset(&c, 0, sizeof(c));
31881 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31882 c.adapter = &cx->i2c_adap[0];
31883 diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
31884 index ce765e3..f9e1b04 100644
31885 --- a/drivers/media/video/cx23885/cx23885-input.c
31886 +++ b/drivers/media/video/cx23885/cx23885-input.c
31887 @@ -53,6 +53,8 @@ static void cx23885_input_process_measurements(struct cx23885_dev *dev,
31888 bool handle = false;
31889 struct ir_raw_event ir_core_event[64];
31890
31891 + pax_track_stack();
31892 +
31893 do {
31894 num = 0;
31895 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
31896 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31897 index 68d1240..46b32eb 100644
31898 --- a/drivers/media/video/cx88/cx88-alsa.c
31899 +++ b/drivers/media/video/cx88/cx88-alsa.c
31900 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31901 * Only boards with eeprom and byte 1 at eeprom=1 have it
31902 */
31903
31904 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31905 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31906 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31907 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31908 {0, }
31909 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31910 index 9515f3a..c9ecb85 100644
31911 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31912 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31913 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
31914 u8 *eeprom;
31915 struct tveeprom tvdata;
31916
31917 + pax_track_stack();
31918 +
31919 memset(&tvdata,0,sizeof(tvdata));
31920
31921 eeprom = pvr2_eeprom_fetch(hdw);
31922 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31923 index 305e6aa..0143317 100644
31924 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31925 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31926 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31927
31928 /* I2C stuff */
31929 struct i2c_adapter i2c_adap;
31930 - struct i2c_algorithm i2c_algo;
31931 + i2c_algorithm_no_const i2c_algo;
31932 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31933 int i2c_cx25840_hack_state;
31934 int i2c_linked;
31935 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
31936 index f9f29cc..5a2e330 100644
31937 --- a/drivers/media/video/saa7134/saa6752hs.c
31938 +++ b/drivers/media/video/saa7134/saa6752hs.c
31939 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
31940 unsigned char localPAT[256];
31941 unsigned char localPMT[256];
31942
31943 + pax_track_stack();
31944 +
31945 /* Set video format - must be done first as it resets other settings */
31946 set_reg8(client, 0x41, h->video_format);
31947
31948 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
31949 index 62fac7f..f29e0b9 100644
31950 --- a/drivers/media/video/saa7164/saa7164-cmd.c
31951 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
31952 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
31953 u8 tmp[512];
31954 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31955
31956 + pax_track_stack();
31957 +
31958 /* While any outstand message on the bus exists... */
31959 do {
31960
31961 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
31962 u8 tmp[512];
31963 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31964
31965 + pax_track_stack();
31966 +
31967 while (loop) {
31968
31969 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
31970 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31971 index 84cd1b6..f741e07 100644
31972 --- a/drivers/media/video/timblogiw.c
31973 +++ b/drivers/media/video/timblogiw.c
31974 @@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31975
31976 /* Platform device functions */
31977
31978 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31979 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31980 .vidioc_querycap = timblogiw_querycap,
31981 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31982 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31983 @@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31984 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31985 };
31986
31987 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31988 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31989 .owner = THIS_MODULE,
31990 .open = timblogiw_open,
31991 .release = timblogiw_close,
31992 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
31993 index f344411..6ae9974 100644
31994 --- a/drivers/media/video/usbvision/usbvision-core.c
31995 +++ b/drivers/media/video/usbvision/usbvision-core.c
31996 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
31997 unsigned char rv, gv, bv;
31998 static unsigned char *Y, *U, *V;
31999
32000 + pax_track_stack();
32001 +
32002 frame = usbvision->cur_frame;
32003 image_size = frame->frmwidth * frame->frmheight;
32004 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
32005 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
32006 index f300dea..04834ba 100644
32007 --- a/drivers/media/video/videobuf-dma-sg.c
32008 +++ b/drivers/media/video/videobuf-dma-sg.c
32009 @@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
32010 {
32011 struct videobuf_queue q;
32012
32013 + pax_track_stack();
32014 +
32015 /* Required to make generic handler to call __videobuf_alloc */
32016 q.int_ops = &sg_ops;
32017
32018 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
32019 index 7956a10..f39232f 100644
32020 --- a/drivers/message/fusion/mptbase.c
32021 +++ b/drivers/message/fusion/mptbase.c
32022 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
32023 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
32024 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
32025
32026 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32027 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
32028 +#else
32029 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
32030 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
32031 +#endif
32032 +
32033 /*
32034 * Rounding UP to nearest 4-kB boundary here...
32035 */
32036 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
32037 index 7596aec..f7ae9aa 100644
32038 --- a/drivers/message/fusion/mptsas.c
32039 +++ b/drivers/message/fusion/mptsas.c
32040 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
32041 return 0;
32042 }
32043
32044 +static inline void
32045 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32046 +{
32047 + if (phy_info->port_details) {
32048 + phy_info->port_details->rphy = rphy;
32049 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32050 + ioc->name, rphy));
32051 + }
32052 +
32053 + if (rphy) {
32054 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32055 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32056 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32057 + ioc->name, rphy, rphy->dev.release));
32058 + }
32059 +}
32060 +
32061 /* no mutex */
32062 static void
32063 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
32064 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
32065 return NULL;
32066 }
32067
32068 -static inline void
32069 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
32070 -{
32071 - if (phy_info->port_details) {
32072 - phy_info->port_details->rphy = rphy;
32073 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
32074 - ioc->name, rphy));
32075 - }
32076 -
32077 - if (rphy) {
32078 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32079 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32080 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32081 - ioc->name, rphy, rphy->dev.release));
32082 - }
32083 -}
32084 -
32085 static inline struct sas_port *
32086 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32087 {
32088 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
32089 index ce61a57..3da8862 100644
32090 --- a/drivers/message/fusion/mptscsih.c
32091 +++ b/drivers/message/fusion/mptscsih.c
32092 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32093
32094 h = shost_priv(SChost);
32095
32096 - if (h) {
32097 - if (h->info_kbuf == NULL)
32098 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32099 - return h->info_kbuf;
32100 - h->info_kbuf[0] = '\0';
32101 + if (!h)
32102 + return NULL;
32103
32104 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32105 - h->info_kbuf[size-1] = '\0';
32106 - }
32107 + if (h->info_kbuf == NULL)
32108 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32109 + return h->info_kbuf;
32110 + h->info_kbuf[0] = '\0';
32111 +
32112 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32113 + h->info_kbuf[size-1] = '\0';
32114
32115 return h->info_kbuf;
32116 }
32117 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
32118 index 098de2b..fbb922c 100644
32119 --- a/drivers/message/i2o/i2o_config.c
32120 +++ b/drivers/message/i2o/i2o_config.c
32121 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned long arg)
32122 struct i2o_message *msg;
32123 unsigned int iop;
32124
32125 + pax_track_stack();
32126 +
32127 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32128 return -EFAULT;
32129
32130 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32131 index 07dbeaf..5533142 100644
32132 --- a/drivers/message/i2o/i2o_proc.c
32133 +++ b/drivers/message/i2o/i2o_proc.c
32134 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32135 "Array Controller Device"
32136 };
32137
32138 -static char *chtostr(u8 * chars, int n)
32139 -{
32140 - char tmp[256];
32141 - tmp[0] = 0;
32142 - return strncat(tmp, (char *)chars, n);
32143 -}
32144 -
32145 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32146 char *group)
32147 {
32148 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32149
32150 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32151 seq_printf(seq, "%-#8x", ddm_table.module_id);
32152 - seq_printf(seq, "%-29s",
32153 - chtostr(ddm_table.module_name_version, 28));
32154 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32155 seq_printf(seq, "%9d ", ddm_table.data_size);
32156 seq_printf(seq, "%8d", ddm_table.code_size);
32157
32158 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32159
32160 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32161 seq_printf(seq, "%-#8x", dst->module_id);
32162 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32163 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32164 + seq_printf(seq, "%-.28s", dst->module_name_version);
32165 + seq_printf(seq, "%-.8s", dst->date);
32166 seq_printf(seq, "%8d ", dst->module_size);
32167 seq_printf(seq, "%8d ", dst->mpb_size);
32168 seq_printf(seq, "0x%04x", dst->module_flags);
32169 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32170 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32171 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32172 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32173 - seq_printf(seq, "Vendor info : %s\n",
32174 - chtostr((u8 *) (work32 + 2), 16));
32175 - seq_printf(seq, "Product info : %s\n",
32176 - chtostr((u8 *) (work32 + 6), 16));
32177 - seq_printf(seq, "Description : %s\n",
32178 - chtostr((u8 *) (work32 + 10), 16));
32179 - seq_printf(seq, "Product rev. : %s\n",
32180 - chtostr((u8 *) (work32 + 14), 8));
32181 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32182 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32183 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32184 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32185
32186 seq_printf(seq, "Serial number : ");
32187 print_serial_number(seq, (u8 *) (work32 + 16),
32188 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32189 }
32190
32191 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32192 - seq_printf(seq, "Module name : %s\n",
32193 - chtostr(result.module_name, 24));
32194 - seq_printf(seq, "Module revision : %s\n",
32195 - chtostr(result.module_rev, 8));
32196 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32197 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32198
32199 seq_printf(seq, "Serial number : ");
32200 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32201 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32202 return 0;
32203 }
32204
32205 - seq_printf(seq, "Device name : %s\n",
32206 - chtostr(result.device_name, 64));
32207 - seq_printf(seq, "Service name : %s\n",
32208 - chtostr(result.service_name, 64));
32209 - seq_printf(seq, "Physical name : %s\n",
32210 - chtostr(result.physical_location, 64));
32211 - seq_printf(seq, "Instance number : %s\n",
32212 - chtostr(result.instance_number, 4));
32213 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32214 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32215 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32216 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32217
32218 return 0;
32219 }
32220 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32221 index a8c08f3..155fe3d 100644
32222 --- a/drivers/message/i2o/iop.c
32223 +++ b/drivers/message/i2o/iop.c
32224 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32225
32226 spin_lock_irqsave(&c->context_list_lock, flags);
32227
32228 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32229 - atomic_inc(&c->context_list_counter);
32230 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32231 + atomic_inc_unchecked(&c->context_list_counter);
32232
32233 - entry->context = atomic_read(&c->context_list_counter);
32234 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32235
32236 list_add(&entry->list, &c->context_list);
32237
32238 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32239
32240 #if BITS_PER_LONG == 64
32241 spin_lock_init(&c->context_list_lock);
32242 - atomic_set(&c->context_list_counter, 0);
32243 + atomic_set_unchecked(&c->context_list_counter, 0);
32244 INIT_LIST_HEAD(&c->context_list);
32245 #endif
32246
32247 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
32248 index a20e1c4..4f57255 100644
32249 --- a/drivers/mfd/ab3100-core.c
32250 +++ b/drivers/mfd/ab3100-core.c
32251 @@ -809,7 +809,7 @@ struct ab_family_id {
32252 char *name;
32253 };
32254
32255 -static const struct ab_family_id ids[] __devinitdata = {
32256 +static const struct ab_family_id ids[] __devinitconst = {
32257 /* AB3100 */
32258 {
32259 .id = 0xc0,
32260 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32261 index f12720d..3c251fd 100644
32262 --- a/drivers/mfd/abx500-core.c
32263 +++ b/drivers/mfd/abx500-core.c
32264 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
32265
32266 struct abx500_device_entry {
32267 struct list_head list;
32268 - struct abx500_ops ops;
32269 + abx500_ops_no_const ops;
32270 struct device *dev;
32271 };
32272
32273 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32274 index 5c2a06a..8fa077c 100644
32275 --- a/drivers/mfd/janz-cmodio.c
32276 +++ b/drivers/mfd/janz-cmodio.c
32277 @@ -13,6 +13,7 @@
32278
32279 #include <linux/kernel.h>
32280 #include <linux/module.h>
32281 +#include <linux/slab.h>
32282 #include <linux/init.h>
32283 #include <linux/pci.h>
32284 #include <linux/interrupt.h>
32285 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
32286 index 5fe5de1..af64f53 100644
32287 --- a/drivers/mfd/wm8350-i2c.c
32288 +++ b/drivers/mfd/wm8350-i2c.c
32289 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
32290 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32291 int ret;
32292
32293 + pax_track_stack();
32294 +
32295 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32296 return -EINVAL;
32297
32298 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32299 index 8b51cd6..f628f8d 100644
32300 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
32301 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32302 @@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
32303 * the lid is closed. This leads to interrupts as soon as a little move
32304 * is done.
32305 */
32306 - atomic_inc(&lis3_dev.count);
32307 + atomic_inc_unchecked(&lis3_dev.count);
32308
32309 wake_up_interruptible(&lis3_dev.misc_wait);
32310 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
32311 @@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32312 if (lis3_dev.pm_dev)
32313 pm_runtime_get_sync(lis3_dev.pm_dev);
32314
32315 - atomic_set(&lis3_dev.count, 0);
32316 + atomic_set_unchecked(&lis3_dev.count, 0);
32317 return 0;
32318 }
32319
32320 @@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32321 add_wait_queue(&lis3_dev.misc_wait, &wait);
32322 while (true) {
32323 set_current_state(TASK_INTERRUPTIBLE);
32324 - data = atomic_xchg(&lis3_dev.count, 0);
32325 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
32326 if (data)
32327 break;
32328
32329 @@ -585,7 +585,7 @@ out:
32330 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32331 {
32332 poll_wait(file, &lis3_dev.misc_wait, wait);
32333 - if (atomic_read(&lis3_dev.count))
32334 + if (atomic_read_unchecked(&lis3_dev.count))
32335 return POLLIN | POLLRDNORM;
32336 return 0;
32337 }
32338 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32339 index a193958..4d7ecd2 100644
32340 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32341 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32342 @@ -265,7 +265,7 @@ struct lis3lv02d {
32343 struct input_polled_dev *idev; /* input device */
32344 struct platform_device *pdev; /* platform device */
32345 struct regulator_bulk_data regulators[2];
32346 - atomic_t count; /* interrupt count after last read */
32347 + atomic_unchecked_t count; /* interrupt count after last read */
32348 union axis_conversion ac; /* hw -> logical axis */
32349 int mapped_btns[3];
32350
32351 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32352 index 2f30bad..c4c13d0 100644
32353 --- a/drivers/misc/sgi-gru/gruhandles.c
32354 +++ b/drivers/misc/sgi-gru/gruhandles.c
32355 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32356 unsigned long nsec;
32357
32358 nsec = CLKS2NSEC(clks);
32359 - atomic_long_inc(&mcs_op_statistics[op].count);
32360 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32361 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32362 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32363 if (mcs_op_statistics[op].max < nsec)
32364 mcs_op_statistics[op].max = nsec;
32365 }
32366 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32367 index 7768b87..f8aac38 100644
32368 --- a/drivers/misc/sgi-gru/gruprocfs.c
32369 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32370 @@ -32,9 +32,9 @@
32371
32372 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32373
32374 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32375 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32376 {
32377 - unsigned long val = atomic_long_read(v);
32378 + unsigned long val = atomic_long_read_unchecked(v);
32379
32380 seq_printf(s, "%16lu %s\n", val, id);
32381 }
32382 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32383
32384 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32385 for (op = 0; op < mcsop_last; op++) {
32386 - count = atomic_long_read(&mcs_op_statistics[op].count);
32387 - total = atomic_long_read(&mcs_op_statistics[op].total);
32388 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32389 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32390 max = mcs_op_statistics[op].max;
32391 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32392 count ? total / count : 0, max);
32393 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32394 index 5c3ce24..4915ccb 100644
32395 --- a/drivers/misc/sgi-gru/grutables.h
32396 +++ b/drivers/misc/sgi-gru/grutables.h
32397 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32398 * GRU statistics.
32399 */
32400 struct gru_stats_s {
32401 - atomic_long_t vdata_alloc;
32402 - atomic_long_t vdata_free;
32403 - atomic_long_t gts_alloc;
32404 - atomic_long_t gts_free;
32405 - atomic_long_t gms_alloc;
32406 - atomic_long_t gms_free;
32407 - atomic_long_t gts_double_allocate;
32408 - atomic_long_t assign_context;
32409 - atomic_long_t assign_context_failed;
32410 - atomic_long_t free_context;
32411 - atomic_long_t load_user_context;
32412 - atomic_long_t load_kernel_context;
32413 - atomic_long_t lock_kernel_context;
32414 - atomic_long_t unlock_kernel_context;
32415 - atomic_long_t steal_user_context;
32416 - atomic_long_t steal_kernel_context;
32417 - atomic_long_t steal_context_failed;
32418 - atomic_long_t nopfn;
32419 - atomic_long_t asid_new;
32420 - atomic_long_t asid_next;
32421 - atomic_long_t asid_wrap;
32422 - atomic_long_t asid_reuse;
32423 - atomic_long_t intr;
32424 - atomic_long_t intr_cbr;
32425 - atomic_long_t intr_tfh;
32426 - atomic_long_t intr_spurious;
32427 - atomic_long_t intr_mm_lock_failed;
32428 - atomic_long_t call_os;
32429 - atomic_long_t call_os_wait_queue;
32430 - atomic_long_t user_flush_tlb;
32431 - atomic_long_t user_unload_context;
32432 - atomic_long_t user_exception;
32433 - atomic_long_t set_context_option;
32434 - atomic_long_t check_context_retarget_intr;
32435 - atomic_long_t check_context_unload;
32436 - atomic_long_t tlb_dropin;
32437 - atomic_long_t tlb_preload_page;
32438 - atomic_long_t tlb_dropin_fail_no_asid;
32439 - atomic_long_t tlb_dropin_fail_upm;
32440 - atomic_long_t tlb_dropin_fail_invalid;
32441 - atomic_long_t tlb_dropin_fail_range_active;
32442 - atomic_long_t tlb_dropin_fail_idle;
32443 - atomic_long_t tlb_dropin_fail_fmm;
32444 - atomic_long_t tlb_dropin_fail_no_exception;
32445 - atomic_long_t tfh_stale_on_fault;
32446 - atomic_long_t mmu_invalidate_range;
32447 - atomic_long_t mmu_invalidate_page;
32448 - atomic_long_t flush_tlb;
32449 - atomic_long_t flush_tlb_gru;
32450 - atomic_long_t flush_tlb_gru_tgh;
32451 - atomic_long_t flush_tlb_gru_zero_asid;
32452 + atomic_long_unchecked_t vdata_alloc;
32453 + atomic_long_unchecked_t vdata_free;
32454 + atomic_long_unchecked_t gts_alloc;
32455 + atomic_long_unchecked_t gts_free;
32456 + atomic_long_unchecked_t gms_alloc;
32457 + atomic_long_unchecked_t gms_free;
32458 + atomic_long_unchecked_t gts_double_allocate;
32459 + atomic_long_unchecked_t assign_context;
32460 + atomic_long_unchecked_t assign_context_failed;
32461 + atomic_long_unchecked_t free_context;
32462 + atomic_long_unchecked_t load_user_context;
32463 + atomic_long_unchecked_t load_kernel_context;
32464 + atomic_long_unchecked_t lock_kernel_context;
32465 + atomic_long_unchecked_t unlock_kernel_context;
32466 + atomic_long_unchecked_t steal_user_context;
32467 + atomic_long_unchecked_t steal_kernel_context;
32468 + atomic_long_unchecked_t steal_context_failed;
32469 + atomic_long_unchecked_t nopfn;
32470 + atomic_long_unchecked_t asid_new;
32471 + atomic_long_unchecked_t asid_next;
32472 + atomic_long_unchecked_t asid_wrap;
32473 + atomic_long_unchecked_t asid_reuse;
32474 + atomic_long_unchecked_t intr;
32475 + atomic_long_unchecked_t intr_cbr;
32476 + atomic_long_unchecked_t intr_tfh;
32477 + atomic_long_unchecked_t intr_spurious;
32478 + atomic_long_unchecked_t intr_mm_lock_failed;
32479 + atomic_long_unchecked_t call_os;
32480 + atomic_long_unchecked_t call_os_wait_queue;
32481 + atomic_long_unchecked_t user_flush_tlb;
32482 + atomic_long_unchecked_t user_unload_context;
32483 + atomic_long_unchecked_t user_exception;
32484 + atomic_long_unchecked_t set_context_option;
32485 + atomic_long_unchecked_t check_context_retarget_intr;
32486 + atomic_long_unchecked_t check_context_unload;
32487 + atomic_long_unchecked_t tlb_dropin;
32488 + atomic_long_unchecked_t tlb_preload_page;
32489 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32490 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32491 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32492 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32493 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32494 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32495 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32496 + atomic_long_unchecked_t tfh_stale_on_fault;
32497 + atomic_long_unchecked_t mmu_invalidate_range;
32498 + atomic_long_unchecked_t mmu_invalidate_page;
32499 + atomic_long_unchecked_t flush_tlb;
32500 + atomic_long_unchecked_t flush_tlb_gru;
32501 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32502 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32503
32504 - atomic_long_t copy_gpa;
32505 - atomic_long_t read_gpa;
32506 + atomic_long_unchecked_t copy_gpa;
32507 + atomic_long_unchecked_t read_gpa;
32508
32509 - atomic_long_t mesq_receive;
32510 - atomic_long_t mesq_receive_none;
32511 - atomic_long_t mesq_send;
32512 - atomic_long_t mesq_send_failed;
32513 - atomic_long_t mesq_noop;
32514 - atomic_long_t mesq_send_unexpected_error;
32515 - atomic_long_t mesq_send_lb_overflow;
32516 - atomic_long_t mesq_send_qlimit_reached;
32517 - atomic_long_t mesq_send_amo_nacked;
32518 - atomic_long_t mesq_send_put_nacked;
32519 - atomic_long_t mesq_page_overflow;
32520 - atomic_long_t mesq_qf_locked;
32521 - atomic_long_t mesq_qf_noop_not_full;
32522 - atomic_long_t mesq_qf_switch_head_failed;
32523 - atomic_long_t mesq_qf_unexpected_error;
32524 - atomic_long_t mesq_noop_unexpected_error;
32525 - atomic_long_t mesq_noop_lb_overflow;
32526 - atomic_long_t mesq_noop_qlimit_reached;
32527 - atomic_long_t mesq_noop_amo_nacked;
32528 - atomic_long_t mesq_noop_put_nacked;
32529 - atomic_long_t mesq_noop_page_overflow;
32530 + atomic_long_unchecked_t mesq_receive;
32531 + atomic_long_unchecked_t mesq_receive_none;
32532 + atomic_long_unchecked_t mesq_send;
32533 + atomic_long_unchecked_t mesq_send_failed;
32534 + atomic_long_unchecked_t mesq_noop;
32535 + atomic_long_unchecked_t mesq_send_unexpected_error;
32536 + atomic_long_unchecked_t mesq_send_lb_overflow;
32537 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32538 + atomic_long_unchecked_t mesq_send_amo_nacked;
32539 + atomic_long_unchecked_t mesq_send_put_nacked;
32540 + atomic_long_unchecked_t mesq_page_overflow;
32541 + atomic_long_unchecked_t mesq_qf_locked;
32542 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32543 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32544 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32545 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32546 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32547 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32548 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32549 + atomic_long_unchecked_t mesq_noop_put_nacked;
32550 + atomic_long_unchecked_t mesq_noop_page_overflow;
32551
32552 };
32553
32554 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32555 tghop_invalidate, mcsop_last};
32556
32557 struct mcs_op_statistic {
32558 - atomic_long_t count;
32559 - atomic_long_t total;
32560 + atomic_long_unchecked_t count;
32561 + atomic_long_unchecked_t total;
32562 unsigned long max;
32563 };
32564
32565 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32566
32567 #define STAT(id) do { \
32568 if (gru_options & OPT_STATS) \
32569 - atomic_long_inc(&gru_stats.id); \
32570 + atomic_long_inc_unchecked(&gru_stats.id); \
32571 } while (0)
32572
32573 #ifdef CONFIG_SGI_GRU_DEBUG
32574 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32575 index 851b2f2..a4ec097 100644
32576 --- a/drivers/misc/sgi-xp/xp.h
32577 +++ b/drivers/misc/sgi-xp/xp.h
32578 @@ -289,7 +289,7 @@ struct xpc_interface {
32579 xpc_notify_func, void *);
32580 void (*received) (short, int, void *);
32581 enum xp_retval (*partid_to_nasids) (short, void *);
32582 -};
32583 +} __no_const;
32584
32585 extern struct xpc_interface xpc_interface;
32586
32587 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32588 index b94d5f7..7f494c5 100644
32589 --- a/drivers/misc/sgi-xp/xpc.h
32590 +++ b/drivers/misc/sgi-xp/xpc.h
32591 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32592 void (*received_payload) (struct xpc_channel *, void *);
32593 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32594 };
32595 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32596
32597 /* struct xpc_partition act_state values (for XPC HB) */
32598
32599 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32600 /* found in xpc_main.c */
32601 extern struct device *xpc_part;
32602 extern struct device *xpc_chan;
32603 -extern struct xpc_arch_operations xpc_arch_ops;
32604 +extern xpc_arch_operations_no_const xpc_arch_ops;
32605 extern int xpc_disengage_timelimit;
32606 extern int xpc_disengage_timedout;
32607 extern int xpc_activate_IRQ_rcvd;
32608 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32609 index 8d082b4..aa749ae 100644
32610 --- a/drivers/misc/sgi-xp/xpc_main.c
32611 +++ b/drivers/misc/sgi-xp/xpc_main.c
32612 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32613 .notifier_call = xpc_system_die,
32614 };
32615
32616 -struct xpc_arch_operations xpc_arch_ops;
32617 +xpc_arch_operations_no_const xpc_arch_ops;
32618
32619 /*
32620 * Timer function to enforce the timelimit on the partition disengage.
32621 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32622 index 26c5286..292d261 100644
32623 --- a/drivers/mmc/host/sdhci-pci.c
32624 +++ b/drivers/mmc/host/sdhci-pci.c
32625 @@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32626 .probe = via_probe,
32627 };
32628
32629 -static const struct pci_device_id pci_ids[] __devinitdata = {
32630 +static const struct pci_device_id pci_ids[] __devinitconst = {
32631 {
32632 .vendor = PCI_VENDOR_ID_RICOH,
32633 .device = PCI_DEVICE_ID_RICOH_R5C822,
32634 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
32635 index e1e122f..d99a6ea 100644
32636 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
32637 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
32638 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
32639 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32640 unsigned long timeo = jiffies + HZ;
32641
32642 + pax_track_stack();
32643 +
32644 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32645 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32646 goto sleep;
32647 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
32648 unsigned long initial_adr;
32649 int initial_len = len;
32650
32651 + pax_track_stack();
32652 +
32653 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32654 adr += chip->start;
32655 initial_adr = adr;
32656 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
32657 int retries = 3;
32658 int ret;
32659
32660 + pax_track_stack();
32661 +
32662 adr += chip->start;
32663
32664 retry:
32665 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
32666 index 179814a..abe9d60 100644
32667 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
32668 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
32669 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
32670 unsigned long cmd_addr;
32671 struct cfi_private *cfi = map->fldrv_priv;
32672
32673 + pax_track_stack();
32674 +
32675 adr += chip->start;
32676
32677 /* Ensure cmd read/writes are aligned. */
32678 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
32679 DECLARE_WAITQUEUE(wait, current);
32680 int wbufsize, z;
32681
32682 + pax_track_stack();
32683 +
32684 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32685 if (adr & (map_bankwidth(map)-1))
32686 return -EINVAL;
32687 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
32688 DECLARE_WAITQUEUE(wait, current);
32689 int ret = 0;
32690
32691 + pax_track_stack();
32692 +
32693 adr += chip->start;
32694
32695 /* Let's determine this according to the interleave only once */
32696 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
32697 unsigned long timeo = jiffies + HZ;
32698 DECLARE_WAITQUEUE(wait, current);
32699
32700 + pax_track_stack();
32701 +
32702 adr += chip->start;
32703
32704 /* Let's determine this according to the interleave only once */
32705 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
32706 unsigned long timeo = jiffies + HZ;
32707 DECLARE_WAITQUEUE(wait, current);
32708
32709 + pax_track_stack();
32710 +
32711 adr += chip->start;
32712
32713 /* Let's determine this according to the interleave only once */
32714 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32715 index f7fbf60..9866457 100644
32716 --- a/drivers/mtd/devices/doc2000.c
32717 +++ b/drivers/mtd/devices/doc2000.c
32718 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32719
32720 /* The ECC will not be calculated correctly if less than 512 is written */
32721 /* DBB-
32722 - if (len != 0x200 && eccbuf)
32723 + if (len != 0x200)
32724 printk(KERN_WARNING
32725 "ECC needs a full sector write (adr: %lx size %lx)\n",
32726 (long) to, (long) len);
32727 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32728 index 241192f..d0c35a3 100644
32729 --- a/drivers/mtd/devices/doc2001.c
32730 +++ b/drivers/mtd/devices/doc2001.c
32731 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32732 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32733
32734 /* Don't allow read past end of device */
32735 - if (from >= this->totlen)
32736 + if (from >= this->totlen || !len)
32737 return -EINVAL;
32738
32739 /* Don't allow a single read to cross a 512-byte block boundary */
32740 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
32741 index 037b399..225a71d 100644
32742 --- a/drivers/mtd/ftl.c
32743 +++ b/drivers/mtd/ftl.c
32744 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
32745 loff_t offset;
32746 uint16_t srcunitswap = cpu_to_le16(srcunit);
32747
32748 + pax_track_stack();
32749 +
32750 eun = &part->EUNInfo[srcunit];
32751 xfer = &part->XferInfo[xferunit];
32752 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32753 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
32754 index d7592e6..31c505c 100644
32755 --- a/drivers/mtd/inftlcore.c
32756 +++ b/drivers/mtd/inftlcore.c
32757 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
32758 struct inftl_oob oob;
32759 size_t retlen;
32760
32761 + pax_track_stack();
32762 +
32763 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32764 "pending=%d)\n", inftl, thisVUC, pendingblock);
32765
32766 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
32767 index 104052e..6232be5 100644
32768 --- a/drivers/mtd/inftlmount.c
32769 +++ b/drivers/mtd/inftlmount.c
32770 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
32771 struct INFTLPartition *ip;
32772 size_t retlen;
32773
32774 + pax_track_stack();
32775 +
32776 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32777
32778 /*
32779 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
32780 index dbfe17b..c7b0918 100644
32781 --- a/drivers/mtd/lpddr/qinfo_probe.c
32782 +++ b/drivers/mtd/lpddr/qinfo_probe.c
32783 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
32784 {
32785 map_word pfow_val[4];
32786
32787 + pax_track_stack();
32788 +
32789 /* Check identification string */
32790 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32791 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
32792 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
32793 index 49e20a4..60fbfa5 100644
32794 --- a/drivers/mtd/mtdchar.c
32795 +++ b/drivers/mtd/mtdchar.c
32796 @@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
32797 u_long size;
32798 struct mtd_info_user info;
32799
32800 + pax_track_stack();
32801 +
32802 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32803
32804 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
32805 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32806 index d527621..2491fab 100644
32807 --- a/drivers/mtd/nand/denali.c
32808 +++ b/drivers/mtd/nand/denali.c
32809 @@ -26,6 +26,7 @@
32810 #include <linux/pci.h>
32811 #include <linux/mtd/mtd.h>
32812 #include <linux/module.h>
32813 +#include <linux/slab.h>
32814
32815 #include "denali.h"
32816
32817 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
32818 index b155666..611b801 100644
32819 --- a/drivers/mtd/nftlcore.c
32820 +++ b/drivers/mtd/nftlcore.c
32821 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
32822 int inplace = 1;
32823 size_t retlen;
32824
32825 + pax_track_stack();
32826 +
32827 memset(BlockMap, 0xff, sizeof(BlockMap));
32828 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32829
32830 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32831 index e3cd1ff..0ea79a3 100644
32832 --- a/drivers/mtd/nftlmount.c
32833 +++ b/drivers/mtd/nftlmount.c
32834 @@ -24,6 +24,7 @@
32835 #include <asm/errno.h>
32836 #include <linux/delay.h>
32837 #include <linux/slab.h>
32838 +#include <linux/sched.h>
32839 #include <linux/mtd/mtd.h>
32840 #include <linux/mtd/nand.h>
32841 #include <linux/mtd/nftl.h>
32842 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
32843 struct mtd_info *mtd = nftl->mbd.mtd;
32844 unsigned int i;
32845
32846 + pax_track_stack();
32847 +
32848 /* Assume logical EraseSize == physical erasesize for starting the scan.
32849 We'll sort it out later if we find a MediaHeader which says otherwise */
32850 /* Actually, we won't. The new DiskOnChip driver has already scanned
32851 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32852 index 6c3fb5a..c542a81 100644
32853 --- a/drivers/mtd/ubi/build.c
32854 +++ b/drivers/mtd/ubi/build.c
32855 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32856 static int __init bytes_str_to_int(const char *str)
32857 {
32858 char *endp;
32859 - unsigned long result;
32860 + unsigned long result, scale = 1;
32861
32862 result = simple_strtoul(str, &endp, 0);
32863 if (str == endp || result >= INT_MAX) {
32864 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32865
32866 switch (*endp) {
32867 case 'G':
32868 - result *= 1024;
32869 + scale *= 1024;
32870 case 'M':
32871 - result *= 1024;
32872 + scale *= 1024;
32873 case 'K':
32874 - result *= 1024;
32875 + scale *= 1024;
32876 if (endp[1] == 'i' && endp[2] == 'B')
32877 endp += 2;
32878 case '\0':
32879 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32880 return -EINVAL;
32881 }
32882
32883 - return result;
32884 + if ((intoverflow_t)result*scale >= INT_MAX) {
32885 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32886 + str);
32887 + return -EINVAL;
32888 + }
32889 +
32890 + return result*scale;
32891 }
32892
32893 /**
32894 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
32895 index d4f7dda..d627d46 100644
32896 --- a/drivers/net/atlx/atl2.c
32897 +++ b/drivers/net/atlx/atl2.c
32898 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32899 */
32900
32901 #define ATL2_PARAM(X, desc) \
32902 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32903 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32904 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32905 MODULE_PARM_DESC(X, desc);
32906 #else
32907 diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
32908 index 87aecdf..ec23470 100644
32909 --- a/drivers/net/bna/bfa_ioc_ct.c
32910 +++ b/drivers/net/bna/bfa_ioc_ct.c
32911 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
32912 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
32913 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
32914
32915 -static struct bfa_ioc_hwif nw_hwif_ct;
32916 +static struct bfa_ioc_hwif nw_hwif_ct = {
32917 + .ioc_pll_init = bfa_ioc_ct_pll_init,
32918 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
32919 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
32920 + .ioc_reg_init = bfa_ioc_ct_reg_init,
32921 + .ioc_map_port = bfa_ioc_ct_map_port,
32922 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
32923 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
32924 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
32925 + .ioc_sync_start = bfa_ioc_ct_sync_start,
32926 + .ioc_sync_join = bfa_ioc_ct_sync_join,
32927 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
32928 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
32929 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
32930 +};
32931
32932 /**
32933 * Called from bfa_ioc_attach() to map asic specific calls.
32934 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
32935 void
32936 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
32937 {
32938 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
32939 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
32940 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
32941 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
32942 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
32943 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
32944 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
32945 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
32946 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
32947 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
32948 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
32949 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
32950 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
32951 -
32952 ioc->ioc_hwif = &nw_hwif_ct;
32953 }
32954
32955 diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
32956 index 8e35b25..c39f205 100644
32957 --- a/drivers/net/bna/bnad.c
32958 +++ b/drivers/net/bna/bnad.c
32959 @@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
32960 struct bna_intr_info *intr_info =
32961 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
32962 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
32963 - struct bna_tx_event_cbfn tx_cbfn;
32964 + static struct bna_tx_event_cbfn tx_cbfn = {
32965 + /* Initialize the tx event handlers */
32966 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
32967 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
32968 + .tx_stall_cbfn = bnad_cb_tx_stall,
32969 + .tx_resume_cbfn = bnad_cb_tx_resume,
32970 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
32971 + };
32972 struct bna_tx *tx;
32973 unsigned long flags;
32974
32975 @@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
32976 tx_config->txq_depth = bnad->txq_depth;
32977 tx_config->tx_type = BNA_TX_T_REGULAR;
32978
32979 - /* Initialize the tx event handlers */
32980 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
32981 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
32982 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
32983 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
32984 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
32985 -
32986 /* Get BNA's resource requirement for one tx object */
32987 spin_lock_irqsave(&bnad->bna_lock, flags);
32988 bna_tx_res_req(bnad->num_txq_per_tx,
32989 @@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
32990 struct bna_intr_info *intr_info =
32991 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
32992 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
32993 - struct bna_rx_event_cbfn rx_cbfn;
32994 + static struct bna_rx_event_cbfn rx_cbfn = {
32995 + /* Initialize the Rx event handlers */
32996 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
32997 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
32998 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
32999 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
33000 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
33001 + .rx_post_cbfn = bnad_cb_rx_post
33002 + };
33003 struct bna_rx *rx;
33004 unsigned long flags;
33005
33006 /* Initialize the Rx object configuration */
33007 bnad_init_rx_config(bnad, rx_config);
33008
33009 - /* Initialize the Rx event handlers */
33010 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
33011 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
33012 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
33013 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
33014 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
33015 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
33016 -
33017 /* Get BNA's resource requirement for one Rx object */
33018 spin_lock_irqsave(&bnad->bna_lock, flags);
33019 bna_rx_res_req(rx_config, res_info);
33020 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
33021 index 4b2b570..31033f4 100644
33022 --- a/drivers/net/bnx2.c
33023 +++ b/drivers/net/bnx2.c
33024 @@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33025 int rc = 0;
33026 u32 magic, csum;
33027
33028 + pax_track_stack();
33029 +
33030 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33031 goto test_nvram_done;
33032
33033 diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
33034 index cf3e479..5dc0ecc 100644
33035 --- a/drivers/net/bnx2x/bnx2x_ethtool.c
33036 +++ b/drivers/net/bnx2x/bnx2x_ethtool.c
33037 @@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
33038 int i, rc;
33039 u32 magic, crc;
33040
33041 + pax_track_stack();
33042 +
33043 if (BP_NOMCP(bp))
33044 return 0;
33045
33046 diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
33047 index 9a517c2..a50cfcb 100644
33048 --- a/drivers/net/bnx2x/bnx2x_sp.h
33049 +++ b/drivers/net/bnx2x/bnx2x_sp.h
33050 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
33051
33052 int (*wait_comp)(struct bnx2x *bp,
33053 struct bnx2x_rx_mode_ramrod_params *p);
33054 -};
33055 +} __no_const;
33056
33057 /********************** Set multicast group ***********************************/
33058
33059 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
33060 index c5f5479..2e8c260 100644
33061 --- a/drivers/net/cxgb3/l2t.h
33062 +++ b/drivers/net/cxgb3/l2t.h
33063 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
33064 */
33065 struct l2t_skb_cb {
33066 arp_failure_handler_func arp_failure_handler;
33067 -};
33068 +} __no_const;
33069
33070 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33071
33072 diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
33073 index b4efa29..c5f2703 100644
33074 --- a/drivers/net/cxgb4/cxgb4_main.c
33075 +++ b/drivers/net/cxgb4/cxgb4_main.c
33076 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct adapter *adap)
33077 unsigned int nchan = adap->params.nports;
33078 struct msix_entry entries[MAX_INGQ + 1];
33079
33080 + pax_track_stack();
33081 +
33082 for (i = 0; i < ARRAY_SIZE(entries); ++i)
33083 entries[i].entry = i;
33084
33085 diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
33086 index d1ec111..12735bc 100644
33087 --- a/drivers/net/cxgb4/t4_hw.c
33088 +++ b/drivers/net/cxgb4/t4_hw.c
33089 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
33090 u8 vpd[VPD_LEN], csum;
33091 unsigned int vpdr_len, kw_offset, id_len;
33092
33093 + pax_track_stack();
33094 +
33095 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
33096 if (ret < 0)
33097 return ret;
33098 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
33099 index 536b3a5..e6f8dcc 100644
33100 --- a/drivers/net/e1000e/82571.c
33101 +++ b/drivers/net/e1000e/82571.c
33102 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
33103 {
33104 struct e1000_hw *hw = &adapter->hw;
33105 struct e1000_mac_info *mac = &hw->mac;
33106 - struct e1000_mac_operations *func = &mac->ops;
33107 + e1000_mac_operations_no_const *func = &mac->ops;
33108 u32 swsm = 0;
33109 u32 swsm2 = 0;
33110 bool force_clear_smbi = false;
33111 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
33112 index e4f4225..24da2ea 100644
33113 --- a/drivers/net/e1000e/es2lan.c
33114 +++ b/drivers/net/e1000e/es2lan.c
33115 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
33116 {
33117 struct e1000_hw *hw = &adapter->hw;
33118 struct e1000_mac_info *mac = &hw->mac;
33119 - struct e1000_mac_operations *func = &mac->ops;
33120 + e1000_mac_operations_no_const *func = &mac->ops;
33121
33122 /* Set media type */
33123 switch (adapter->pdev->device) {
33124 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
33125 index 2967039..ca8c40c 100644
33126 --- a/drivers/net/e1000e/hw.h
33127 +++ b/drivers/net/e1000e/hw.h
33128 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
33129 void (*write_vfta)(struct e1000_hw *, u32, u32);
33130 s32 (*read_mac_addr)(struct e1000_hw *);
33131 };
33132 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33133
33134 /*
33135 * When to use various PHY register access functions:
33136 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
33137 void (*power_up)(struct e1000_hw *);
33138 void (*power_down)(struct e1000_hw *);
33139 };
33140 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33141
33142 /* Function pointers for the NVM. */
33143 struct e1000_nvm_operations {
33144 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
33145 s32 (*validate)(struct e1000_hw *);
33146 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33147 };
33148 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33149
33150 struct e1000_mac_info {
33151 - struct e1000_mac_operations ops;
33152 + e1000_mac_operations_no_const ops;
33153 u8 addr[ETH_ALEN];
33154 u8 perm_addr[ETH_ALEN];
33155
33156 @@ -872,7 +875,7 @@ struct e1000_mac_info {
33157 };
33158
33159 struct e1000_phy_info {
33160 - struct e1000_phy_operations ops;
33161 + e1000_phy_operations_no_const ops;
33162
33163 enum e1000_phy_type type;
33164
33165 @@ -906,7 +909,7 @@ struct e1000_phy_info {
33166 };
33167
33168 struct e1000_nvm_info {
33169 - struct e1000_nvm_operations ops;
33170 + e1000_nvm_operations_no_const ops;
33171
33172 enum e1000_nvm_type type;
33173 enum e1000_nvm_override override;
33174 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
33175 index fa8677c..196356f 100644
33176 --- a/drivers/net/fealnx.c
33177 +++ b/drivers/net/fealnx.c
33178 @@ -150,7 +150,7 @@ struct chip_info {
33179 int flags;
33180 };
33181
33182 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
33183 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
33184 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33185 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
33186 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33187 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
33188 index 2a5a34d..be871cc 100644
33189 --- a/drivers/net/hamradio/6pack.c
33190 +++ b/drivers/net/hamradio/6pack.c
33191 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
33192 unsigned char buf[512];
33193 int count1;
33194
33195 + pax_track_stack();
33196 +
33197 if (!count)
33198 return;
33199
33200 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
33201 index 4519a13..f97fcd0 100644
33202 --- a/drivers/net/igb/e1000_hw.h
33203 +++ b/drivers/net/igb/e1000_hw.h
33204 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
33205 s32 (*read_mac_addr)(struct e1000_hw *);
33206 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33207 };
33208 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33209
33210 struct e1000_phy_operations {
33211 s32 (*acquire)(struct e1000_hw *);
33212 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
33213 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33214 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33215 };
33216 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33217
33218 struct e1000_nvm_operations {
33219 s32 (*acquire)(struct e1000_hw *);
33220 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
33221 s32 (*update)(struct e1000_hw *);
33222 s32 (*validate)(struct e1000_hw *);
33223 };
33224 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33225
33226 struct e1000_info {
33227 s32 (*get_invariants)(struct e1000_hw *);
33228 @@ -350,7 +353,7 @@ struct e1000_info {
33229 extern const struct e1000_info e1000_82575_info;
33230
33231 struct e1000_mac_info {
33232 - struct e1000_mac_operations ops;
33233 + e1000_mac_operations_no_const ops;
33234
33235 u8 addr[6];
33236 u8 perm_addr[6];
33237 @@ -388,7 +391,7 @@ struct e1000_mac_info {
33238 };
33239
33240 struct e1000_phy_info {
33241 - struct e1000_phy_operations ops;
33242 + e1000_phy_operations_no_const ops;
33243
33244 enum e1000_phy_type type;
33245
33246 @@ -423,7 +426,7 @@ struct e1000_phy_info {
33247 };
33248
33249 struct e1000_nvm_info {
33250 - struct e1000_nvm_operations ops;
33251 + e1000_nvm_operations_no_const ops;
33252 enum e1000_nvm_type type;
33253 enum e1000_nvm_override override;
33254
33255 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
33256 s32 (*check_for_ack)(struct e1000_hw *, u16);
33257 s32 (*check_for_rst)(struct e1000_hw *, u16);
33258 };
33259 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33260
33261 struct e1000_mbx_stats {
33262 u32 msgs_tx;
33263 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
33264 };
33265
33266 struct e1000_mbx_info {
33267 - struct e1000_mbx_operations ops;
33268 + e1000_mbx_operations_no_const ops;
33269 struct e1000_mbx_stats stats;
33270 u32 timeout;
33271 u32 usec_delay;
33272 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
33273 index d7ed58f..64cde36 100644
33274 --- a/drivers/net/igbvf/vf.h
33275 +++ b/drivers/net/igbvf/vf.h
33276 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
33277 s32 (*read_mac_addr)(struct e1000_hw *);
33278 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33279 };
33280 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33281
33282 struct e1000_mac_info {
33283 - struct e1000_mac_operations ops;
33284 + e1000_mac_operations_no_const ops;
33285 u8 addr[6];
33286 u8 perm_addr[6];
33287
33288 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
33289 s32 (*check_for_ack)(struct e1000_hw *);
33290 s32 (*check_for_rst)(struct e1000_hw *);
33291 };
33292 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33293
33294 struct e1000_mbx_stats {
33295 u32 msgs_tx;
33296 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
33297 };
33298
33299 struct e1000_mbx_info {
33300 - struct e1000_mbx_operations ops;
33301 + e1000_mbx_operations_no_const ops;
33302 struct e1000_mbx_stats stats;
33303 u32 timeout;
33304 u32 usec_delay;
33305 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
33306 index 6a130eb..1aeb9e4 100644
33307 --- a/drivers/net/ixgb/ixgb_main.c
33308 +++ b/drivers/net/ixgb/ixgb_main.c
33309 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev)
33310 u32 rctl;
33311 int i;
33312
33313 + pax_track_stack();
33314 +
33315 /* Check for Promiscuous and All Multicast modes */
33316
33317 rctl = IXGB_READ_REG(hw, RCTL);
33318 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
33319 index dd7fbeb..44b9bbf 100644
33320 --- a/drivers/net/ixgb/ixgb_param.c
33321 +++ b/drivers/net/ixgb/ixgb_param.c
33322 @@ -261,6 +261,9 @@ void __devinit
33323 ixgb_check_options(struct ixgb_adapter *adapter)
33324 {
33325 int bd = adapter->bd_number;
33326 +
33327 + pax_track_stack();
33328 +
33329 if (bd >= IXGB_MAX_NIC) {
33330 pr_notice("Warning: no configuration for board #%i\n", bd);
33331 pr_notice("Using defaults for all values\n");
33332 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
33333 index e0d970e..1cfdea5 100644
33334 --- a/drivers/net/ixgbe/ixgbe_type.h
33335 +++ b/drivers/net/ixgbe/ixgbe_type.h
33336 @@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
33337 s32 (*update_checksum)(struct ixgbe_hw *);
33338 u16 (*calc_checksum)(struct ixgbe_hw *);
33339 };
33340 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33341
33342 struct ixgbe_mac_operations {
33343 s32 (*init_hw)(struct ixgbe_hw *);
33344 @@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
33345 /* Manageability interface */
33346 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
33347 };
33348 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33349
33350 struct ixgbe_phy_operations {
33351 s32 (*identify)(struct ixgbe_hw *);
33352 @@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
33353 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33354 s32 (*check_overtemp)(struct ixgbe_hw *);
33355 };
33356 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33357
33358 struct ixgbe_eeprom_info {
33359 - struct ixgbe_eeprom_operations ops;
33360 + ixgbe_eeprom_operations_no_const ops;
33361 enum ixgbe_eeprom_type type;
33362 u32 semaphore_delay;
33363 u16 word_size;
33364 @@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
33365
33366 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
33367 struct ixgbe_mac_info {
33368 - struct ixgbe_mac_operations ops;
33369 + ixgbe_mac_operations_no_const ops;
33370 enum ixgbe_mac_type type;
33371 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33372 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33373 @@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
33374 };
33375
33376 struct ixgbe_phy_info {
33377 - struct ixgbe_phy_operations ops;
33378 + ixgbe_phy_operations_no_const ops;
33379 struct mdio_if_info mdio;
33380 enum ixgbe_phy_type type;
33381 u32 id;
33382 @@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
33383 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
33384 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
33385 };
33386 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33387
33388 struct ixgbe_mbx_stats {
33389 u32 msgs_tx;
33390 @@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
33391 };
33392
33393 struct ixgbe_mbx_info {
33394 - struct ixgbe_mbx_operations ops;
33395 + ixgbe_mbx_operations_no_const ops;
33396 struct ixgbe_mbx_stats stats;
33397 u32 timeout;
33398 u32 usec_delay;
33399 diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
33400 index 10306b4..28df758 100644
33401 --- a/drivers/net/ixgbevf/vf.h
33402 +++ b/drivers/net/ixgbevf/vf.h
33403 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
33404 s32 (*clear_vfta)(struct ixgbe_hw *);
33405 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
33406 };
33407 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33408
33409 enum ixgbe_mac_type {
33410 ixgbe_mac_unknown = 0,
33411 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
33412 };
33413
33414 struct ixgbe_mac_info {
33415 - struct ixgbe_mac_operations ops;
33416 + ixgbe_mac_operations_no_const ops;
33417 u8 addr[6];
33418 u8 perm_addr[6];
33419
33420 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33421 s32 (*check_for_ack)(struct ixgbe_hw *);
33422 s32 (*check_for_rst)(struct ixgbe_hw *);
33423 };
33424 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33425
33426 struct ixgbe_mbx_stats {
33427 u32 msgs_tx;
33428 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33429 };
33430
33431 struct ixgbe_mbx_info {
33432 - struct ixgbe_mbx_operations ops;
33433 + ixgbe_mbx_operations_no_const ops;
33434 struct ixgbe_mbx_stats stats;
33435 u32 timeout;
33436 u32 udelay;
33437 diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
33438 index 27418d3..adf15bb 100644
33439 --- a/drivers/net/ksz884x.c
33440 +++ b/drivers/net/ksz884x.c
33441 @@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
33442 int rc;
33443 u64 counter[TOTAL_PORT_COUNTER_NUM];
33444
33445 + pax_track_stack();
33446 +
33447 mutex_lock(&hw_priv->lock);
33448 n = SWITCH_PORT_NUM;
33449 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
33450 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
33451 index f0ee35d..3831c8a 100644
33452 --- a/drivers/net/mlx4/main.c
33453 +++ b/drivers/net/mlx4/main.c
33454 @@ -40,6 +40,7 @@
33455 #include <linux/dma-mapping.h>
33456 #include <linux/slab.h>
33457 #include <linux/io-mapping.h>
33458 +#include <linux/sched.h>
33459
33460 #include <linux/mlx4/device.h>
33461 #include <linux/mlx4/doorbell.h>
33462 @@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
33463 u64 icm_size;
33464 int err;
33465
33466 + pax_track_stack();
33467 +
33468 err = mlx4_QUERY_FW(dev);
33469 if (err) {
33470 if (err == -EACCES)
33471 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
33472 index ed47585..5e5be8f 100644
33473 --- a/drivers/net/niu.c
33474 +++ b/drivers/net/niu.c
33475 @@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
33476 int i, num_irqs, err;
33477 u8 first_ldg;
33478
33479 + pax_track_stack();
33480 +
33481 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
33482 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
33483 ldg_num_map[i] = first_ldg + i;
33484 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
33485 index 80b6f36..5cd8938 100644
33486 --- a/drivers/net/pcnet32.c
33487 +++ b/drivers/net/pcnet32.c
33488 @@ -270,7 +270,7 @@ struct pcnet32_private {
33489 struct sk_buff **rx_skbuff;
33490 dma_addr_t *tx_dma_addr;
33491 dma_addr_t *rx_dma_addr;
33492 - struct pcnet32_access a;
33493 + struct pcnet32_access *a;
33494 spinlock_t lock; /* Guard lock */
33495 unsigned int cur_rx, cur_tx; /* The next free ring entry */
33496 unsigned int rx_ring_size; /* current rx ring size */
33497 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct net_device *dev)
33498 u16 val;
33499
33500 netif_wake_queue(dev);
33501 - val = lp->a.read_csr(ioaddr, CSR3);
33502 + val = lp->a->read_csr(ioaddr, CSR3);
33503 val &= 0x00ff;
33504 - lp->a.write_csr(ioaddr, CSR3, val);
33505 + lp->a->write_csr(ioaddr, CSR3, val);
33506 napi_enable(&lp->napi);
33507 }
33508
33509 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
33510 r = mii_link_ok(&lp->mii_if);
33511 } else if (lp->chip_version >= PCNET32_79C970A) {
33512 ulong ioaddr = dev->base_addr; /* card base I/O address */
33513 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
33514 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
33515 } else { /* can not detect link on really old chips */
33516 r = 1;
33517 }
33518 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
33519 pcnet32_netif_stop(dev);
33520
33521 spin_lock_irqsave(&lp->lock, flags);
33522 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33523 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33524
33525 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
33526
33527 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
33528 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33529 {
33530 struct pcnet32_private *lp = netdev_priv(dev);
33531 - struct pcnet32_access *a = &lp->a; /* access to registers */
33532 + struct pcnet32_access *a = lp->a; /* access to registers */
33533 ulong ioaddr = dev->base_addr; /* card base I/O address */
33534 struct sk_buff *skb; /* sk buff */
33535 int x, i; /* counters */
33536 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33537 pcnet32_netif_stop(dev);
33538
33539 spin_lock_irqsave(&lp->lock, flags);
33540 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33541 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33542
33543 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
33544
33545 /* Reset the PCNET32 */
33546 - lp->a.reset(ioaddr);
33547 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33548 + lp->a->reset(ioaddr);
33549 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33550
33551 /* switch pcnet32 to 32bit mode */
33552 - lp->a.write_bcr(ioaddr, 20, 2);
33553 + lp->a->write_bcr(ioaddr, 20, 2);
33554
33555 /* purge & init rings but don't actually restart */
33556 pcnet32_restart(dev, 0x0000);
33557
33558 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33559 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33560
33561 /* Initialize Transmit buffers. */
33562 size = data_len + 15;
33563 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33564
33565 /* set int loopback in CSR15 */
33566 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
33567 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
33568 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
33569
33570 teststatus = cpu_to_le16(0x8000);
33571 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33572 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33573
33574 /* Check status of descriptors */
33575 for (x = 0; x < numbuffs; x++) {
33576 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33577 }
33578 }
33579
33580 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33581 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33582 wmb();
33583 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
33584 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
33585 @@ -1015,7 +1015,7 @@ clean_up:
33586 pcnet32_restart(dev, CSR0_NORMAL);
33587 } else {
33588 pcnet32_purge_rx_ring(dev);
33589 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33590 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33591 }
33592 spin_unlock_irqrestore(&lp->lock, flags);
33593
33594 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct net_device *dev,
33595 enum ethtool_phys_id_state state)
33596 {
33597 struct pcnet32_private *lp = netdev_priv(dev);
33598 - struct pcnet32_access *a = &lp->a;
33599 + struct pcnet32_access *a = lp->a;
33600 ulong ioaddr = dev->base_addr;
33601 unsigned long flags;
33602 int i;
33603 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
33604 {
33605 int csr5;
33606 struct pcnet32_private *lp = netdev_priv(dev);
33607 - struct pcnet32_access *a = &lp->a;
33608 + struct pcnet32_access *a = lp->a;
33609 ulong ioaddr = dev->base_addr;
33610 int ticks;
33611
33612 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33613 spin_lock_irqsave(&lp->lock, flags);
33614 if (pcnet32_tx(dev)) {
33615 /* reset the chip to clear the error condition, then restart */
33616 - lp->a.reset(ioaddr);
33617 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33618 + lp->a->reset(ioaddr);
33619 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33620 pcnet32_restart(dev, CSR0_START);
33621 netif_wake_queue(dev);
33622 }
33623 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33624 __napi_complete(napi);
33625
33626 /* clear interrupt masks */
33627 - val = lp->a.read_csr(ioaddr, CSR3);
33628 + val = lp->a->read_csr(ioaddr, CSR3);
33629 val &= 0x00ff;
33630 - lp->a.write_csr(ioaddr, CSR3, val);
33631 + lp->a->write_csr(ioaddr, CSR3, val);
33632
33633 /* Set interrupt enable. */
33634 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
33635 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
33636
33637 spin_unlock_irqrestore(&lp->lock, flags);
33638 }
33639 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33640 int i, csr0;
33641 u16 *buff = ptr;
33642 struct pcnet32_private *lp = netdev_priv(dev);
33643 - struct pcnet32_access *a = &lp->a;
33644 + struct pcnet32_access *a = lp->a;
33645 ulong ioaddr = dev->base_addr;
33646 unsigned long flags;
33647
33648 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33649 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
33650 if (lp->phymask & (1 << j)) {
33651 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
33652 - lp->a.write_bcr(ioaddr, 33,
33653 + lp->a->write_bcr(ioaddr, 33,
33654 (j << 5) | i);
33655 - *buff++ = lp->a.read_bcr(ioaddr, 34);
33656 + *buff++ = lp->a->read_bcr(ioaddr, 34);
33657 }
33658 }
33659 }
33660 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33661 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
33662 lp->options |= PCNET32_PORT_FD;
33663
33664 - lp->a = *a;
33665 + lp->a = a;
33666
33667 /* prior to register_netdev, dev->name is not yet correct */
33668 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
33669 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33670 if (lp->mii) {
33671 /* lp->phycount and lp->phymask are set to 0 by memset above */
33672
33673 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33674 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33675 /* scan for PHYs */
33676 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33677 unsigned short id1, id2;
33678 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33679 pr_info("Found PHY %04x:%04x at address %d\n",
33680 id1, id2, i);
33681 }
33682 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33683 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33684 if (lp->phycount > 1)
33685 lp->options |= PCNET32_PORT_MII;
33686 }
33687 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_device *dev)
33688 }
33689
33690 /* Reset the PCNET32 */
33691 - lp->a.reset(ioaddr);
33692 + lp->a->reset(ioaddr);
33693
33694 /* switch pcnet32 to 32bit mode */
33695 - lp->a.write_bcr(ioaddr, 20, 2);
33696 + lp->a->write_bcr(ioaddr, 20, 2);
33697
33698 netif_printk(lp, ifup, KERN_DEBUG, dev,
33699 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
33700 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_device *dev)
33701 (u32) (lp->init_dma_addr));
33702
33703 /* set/reset autoselect bit */
33704 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
33705 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
33706 if (lp->options & PCNET32_PORT_ASEL)
33707 val |= 2;
33708 - lp->a.write_bcr(ioaddr, 2, val);
33709 + lp->a->write_bcr(ioaddr, 2, val);
33710
33711 /* handle full duplex setting */
33712 if (lp->mii_if.full_duplex) {
33713 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
33714 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
33715 if (lp->options & PCNET32_PORT_FD) {
33716 val |= 1;
33717 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
33718 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_device *dev)
33719 if (lp->chip_version == 0x2627)
33720 val |= 3;
33721 }
33722 - lp->a.write_bcr(ioaddr, 9, val);
33723 + lp->a->write_bcr(ioaddr, 9, val);
33724 }
33725
33726 /* set/reset GPSI bit in test register */
33727 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
33728 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
33729 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
33730 val |= 0x10;
33731 - lp->a.write_csr(ioaddr, 124, val);
33732 + lp->a->write_csr(ioaddr, 124, val);
33733
33734 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
33735 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
33736 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_device *dev)
33737 * duplex, and/or enable auto negotiation, and clear DANAS
33738 */
33739 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
33740 - lp->a.write_bcr(ioaddr, 32,
33741 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
33742 + lp->a->write_bcr(ioaddr, 32,
33743 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
33744 /* disable Auto Negotiation, set 10Mpbs, HD */
33745 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
33746 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
33747 if (lp->options & PCNET32_PORT_FD)
33748 val |= 0x10;
33749 if (lp->options & PCNET32_PORT_100)
33750 val |= 0x08;
33751 - lp->a.write_bcr(ioaddr, 32, val);
33752 + lp->a->write_bcr(ioaddr, 32, val);
33753 } else {
33754 if (lp->options & PCNET32_PORT_ASEL) {
33755 - lp->a.write_bcr(ioaddr, 32,
33756 - lp->a.read_bcr(ioaddr,
33757 + lp->a->write_bcr(ioaddr, 32,
33758 + lp->a->read_bcr(ioaddr,
33759 32) | 0x0080);
33760 /* enable auto negotiate, setup, disable fd */
33761 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
33762 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
33763 val |= 0x20;
33764 - lp->a.write_bcr(ioaddr, 32, val);
33765 + lp->a->write_bcr(ioaddr, 32, val);
33766 }
33767 }
33768 } else {
33769 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_device *dev)
33770 * There is really no good other way to handle multiple PHYs
33771 * other than turning off all automatics
33772 */
33773 - val = lp->a.read_bcr(ioaddr, 2);
33774 - lp->a.write_bcr(ioaddr, 2, val & ~2);
33775 - val = lp->a.read_bcr(ioaddr, 32);
33776 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33777 + val = lp->a->read_bcr(ioaddr, 2);
33778 + lp->a->write_bcr(ioaddr, 2, val & ~2);
33779 + val = lp->a->read_bcr(ioaddr, 32);
33780 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33781
33782 if (!(lp->options & PCNET32_PORT_ASEL)) {
33783 /* setup ecmd */
33784 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_device *dev)
33785 ethtool_cmd_speed_set(&ecmd,
33786 (lp->options & PCNET32_PORT_100) ?
33787 SPEED_100 : SPEED_10);
33788 - bcr9 = lp->a.read_bcr(ioaddr, 9);
33789 + bcr9 = lp->a->read_bcr(ioaddr, 9);
33790
33791 if (lp->options & PCNET32_PORT_FD) {
33792 ecmd.duplex = DUPLEX_FULL;
33793 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_device *dev)
33794 ecmd.duplex = DUPLEX_HALF;
33795 bcr9 |= ~(1 << 0);
33796 }
33797 - lp->a.write_bcr(ioaddr, 9, bcr9);
33798 + lp->a->write_bcr(ioaddr, 9, bcr9);
33799 }
33800
33801 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33802 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_device *dev)
33803
33804 #ifdef DO_DXSUFLO
33805 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
33806 - val = lp->a.read_csr(ioaddr, CSR3);
33807 + val = lp->a->read_csr(ioaddr, CSR3);
33808 val |= 0x40;
33809 - lp->a.write_csr(ioaddr, CSR3, val);
33810 + lp->a->write_csr(ioaddr, CSR3, val);
33811 }
33812 #endif
33813
33814 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_device *dev)
33815 napi_enable(&lp->napi);
33816
33817 /* Re-initialize the PCNET32, and start it when done. */
33818 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33819 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
33820 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33821 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
33822
33823 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33824 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33825 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33826 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
33827
33828 netif_start_queue(dev);
33829
33830 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_device *dev)
33831
33832 i = 0;
33833 while (i++ < 100)
33834 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33835 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33836 break;
33837 /*
33838 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
33839 * reports that doing so triggers a bug in the '974.
33840 */
33841 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
33842 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
33843
33844 netif_printk(lp, ifup, KERN_DEBUG, dev,
33845 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
33846 i,
33847 (u32) (lp->init_dma_addr),
33848 - lp->a.read_csr(ioaddr, CSR0));
33849 + lp->a->read_csr(ioaddr, CSR0));
33850
33851 spin_unlock_irqrestore(&lp->lock, flags);
33852
33853 @@ -2218,7 +2218,7 @@ err_free_ring:
33854 * Switch back to 16bit mode to avoid problems with dumb
33855 * DOS packet driver after a warm reboot
33856 */
33857 - lp->a.write_bcr(ioaddr, 20, 4);
33858 + lp->a->write_bcr(ioaddr, 20, 4);
33859
33860 err_free_irq:
33861 spin_unlock_irqrestore(&lp->lock, flags);
33862 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
33863
33864 /* wait for stop */
33865 for (i = 0; i < 100; i++)
33866 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
33867 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
33868 break;
33869
33870 if (i >= 100)
33871 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
33872 return;
33873
33874 /* ReInit Ring */
33875 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33876 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
33877 i = 0;
33878 while (i++ < 1000)
33879 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33880 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33881 break;
33882
33883 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
33884 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
33885 }
33886
33887 static void pcnet32_tx_timeout(struct net_device *dev)
33888 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
33889 /* Transmitter timeout, serious problems. */
33890 if (pcnet32_debug & NETIF_MSG_DRV)
33891 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
33892 - dev->name, lp->a.read_csr(ioaddr, CSR0));
33893 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33894 + dev->name, lp->a->read_csr(ioaddr, CSR0));
33895 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33896 dev->stats.tx_errors++;
33897 if (netif_msg_tx_err(lp)) {
33898 int i;
33899 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
33900
33901 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
33902 "%s() called, csr0 %4.4x\n",
33903 - __func__, lp->a.read_csr(ioaddr, CSR0));
33904 + __func__, lp->a->read_csr(ioaddr, CSR0));
33905
33906 /* Default status -- will not enable Successful-TxDone
33907 * interrupt when that option is available to us.
33908 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
33909 dev->stats.tx_bytes += skb->len;
33910
33911 /* Trigger an immediate send poll. */
33912 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33913 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33914
33915 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
33916 lp->tx_full = 1;
33917 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
33918
33919 spin_lock(&lp->lock);
33920
33921 - csr0 = lp->a.read_csr(ioaddr, CSR0);
33922 + csr0 = lp->a->read_csr(ioaddr, CSR0);
33923 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
33924 if (csr0 == 0xffff)
33925 break; /* PCMCIA remove happened */
33926 /* Acknowledge all of the current interrupt sources ASAP. */
33927 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33928 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33929
33930 netif_printk(lp, intr, KERN_DEBUG, dev,
33931 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
33932 - csr0, lp->a.read_csr(ioaddr, CSR0));
33933 + csr0, lp->a->read_csr(ioaddr, CSR0));
33934
33935 /* Log misc errors. */
33936 if (csr0 & 0x4000)
33937 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
33938 if (napi_schedule_prep(&lp->napi)) {
33939 u16 val;
33940 /* set interrupt masks */
33941 - val = lp->a.read_csr(ioaddr, CSR3);
33942 + val = lp->a->read_csr(ioaddr, CSR3);
33943 val |= 0x5f00;
33944 - lp->a.write_csr(ioaddr, CSR3, val);
33945 + lp->a->write_csr(ioaddr, CSR3, val);
33946
33947 __napi_schedule(&lp->napi);
33948 break;
33949 }
33950 - csr0 = lp->a.read_csr(ioaddr, CSR0);
33951 + csr0 = lp->a->read_csr(ioaddr, CSR0);
33952 }
33953
33954 netif_printk(lp, intr, KERN_DEBUG, dev,
33955 "exiting interrupt, csr0=%#4.4x\n",
33956 - lp->a.read_csr(ioaddr, CSR0));
33957 + lp->a->read_csr(ioaddr, CSR0));
33958
33959 spin_unlock(&lp->lock);
33960
33961 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_device *dev)
33962
33963 spin_lock_irqsave(&lp->lock, flags);
33964
33965 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33966 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
33967
33968 netif_printk(lp, ifdown, KERN_DEBUG, dev,
33969 "Shutting down ethercard, status was %2.2x\n",
33970 - lp->a.read_csr(ioaddr, CSR0));
33971 + lp->a->read_csr(ioaddr, CSR0));
33972
33973 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
33974 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33975 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33976
33977 /*
33978 * Switch back to 16bit mode to avoid problems with dumb
33979 * DOS packet driver after a warm reboot
33980 */
33981 - lp->a.write_bcr(ioaddr, 20, 4);
33982 + lp->a->write_bcr(ioaddr, 20, 4);
33983
33984 spin_unlock_irqrestore(&lp->lock, flags);
33985
33986 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
33987 unsigned long flags;
33988
33989 spin_lock_irqsave(&lp->lock, flags);
33990 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33991 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
33992 spin_unlock_irqrestore(&lp->lock, flags);
33993
33994 return &dev->stats;
33995 @@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
33996 if (dev->flags & IFF_ALLMULTI) {
33997 ib->filter[0] = cpu_to_le32(~0U);
33998 ib->filter[1] = cpu_to_le32(~0U);
33999 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34000 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34001 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34002 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34003 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
34004 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
34005 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
34006 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
34007 return;
34008 }
34009 /* clear the multicast filter */
34010 @@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
34011 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
34012 }
34013 for (i = 0; i < 4; i++)
34014 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
34015 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
34016 le16_to_cpu(mcast_table[i]));
34017 }
34018
34019 @@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
34020
34021 spin_lock_irqsave(&lp->lock, flags);
34022 suspended = pcnet32_suspend(dev, &flags, 0);
34023 - csr15 = lp->a.read_csr(ioaddr, CSR15);
34024 + csr15 = lp->a->read_csr(ioaddr, CSR15);
34025 if (dev->flags & IFF_PROMISC) {
34026 /* Log any net taps. */
34027 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
34028 lp->init_block->mode =
34029 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
34030 7);
34031 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
34032 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
34033 } else {
34034 lp->init_block->mode =
34035 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
34036 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34037 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
34038 pcnet32_load_multicast(dev);
34039 }
34040
34041 if (suspended) {
34042 int csr5;
34043 /* clear SUSPEND (SPND) - CSR5 bit 0 */
34044 - csr5 = lp->a.read_csr(ioaddr, CSR5);
34045 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34046 + csr5 = lp->a->read_csr(ioaddr, CSR5);
34047 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
34048 } else {
34049 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
34050 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
34051 pcnet32_restart(dev, CSR0_NORMAL);
34052 netif_wake_queue(dev);
34053 }
34054 @@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
34055 if (!lp->mii)
34056 return 0;
34057
34058 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34059 - val_out = lp->a.read_bcr(ioaddr, 34);
34060 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34061 + val_out = lp->a->read_bcr(ioaddr, 34);
34062
34063 return val_out;
34064 }
34065 @@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
34066 if (!lp->mii)
34067 return;
34068
34069 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34070 - lp->a.write_bcr(ioaddr, 34, val);
34071 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
34072 + lp->a->write_bcr(ioaddr, 34, val);
34073 }
34074
34075 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34076 @@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
34077 curr_link = mii_link_ok(&lp->mii_if);
34078 } else {
34079 ulong ioaddr = dev->base_addr; /* card base I/O address */
34080 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34081 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34082 }
34083 if (!curr_link) {
34084 if (prev_link || verbose) {
34085 @@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
34086 (ecmd.duplex == DUPLEX_FULL)
34087 ? "full" : "half");
34088 }
34089 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34090 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34091 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34092 if (lp->mii_if.full_duplex)
34093 bcr9 |= (1 << 0);
34094 else
34095 bcr9 &= ~(1 << 0);
34096 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
34097 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
34098 }
34099 } else {
34100 netif_info(lp, link, dev, "link up\n");
34101 diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
34102 index edfa15d..002bfa9 100644
34103 --- a/drivers/net/ppp_generic.c
34104 +++ b/drivers/net/ppp_generic.c
34105 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34106 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34107 struct ppp_stats stats;
34108 struct ppp_comp_stats cstats;
34109 - char *vers;
34110
34111 switch (cmd) {
34112 case SIOCGPPPSTATS:
34113 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34114 break;
34115
34116 case SIOCGPPPVER:
34117 - vers = PPP_VERSION;
34118 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34119 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34120 break;
34121 err = 0;
34122 break;
34123 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
34124 index 6d657ca..d1be94b 100644
34125 --- a/drivers/net/r8169.c
34126 +++ b/drivers/net/r8169.c
34127 @@ -663,12 +663,12 @@ struct rtl8169_private {
34128 struct mdio_ops {
34129 void (*write)(void __iomem *, int, int);
34130 int (*read)(void __iomem *, int);
34131 - } mdio_ops;
34132 + } __no_const mdio_ops;
34133
34134 struct pll_power_ops {
34135 void (*down)(struct rtl8169_private *);
34136 void (*up)(struct rtl8169_private *);
34137 - } pll_power_ops;
34138 + } __no_const pll_power_ops;
34139
34140 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34141 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34142 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
34143 index 3c0f131..17f8b02 100644
34144 --- a/drivers/net/sis190.c
34145 +++ b/drivers/net/sis190.c
34146 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34147 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34148 struct net_device *dev)
34149 {
34150 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34151 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34152 struct sis190_private *tp = netdev_priv(dev);
34153 struct pci_dev *isa_bridge;
34154 u8 reg, tmp8;
34155 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
34156 index 4793df8..44c9849 100644
34157 --- a/drivers/net/sundance.c
34158 +++ b/drivers/net/sundance.c
34159 @@ -218,7 +218,7 @@ enum {
34160 struct pci_id_info {
34161 const char *name;
34162 };
34163 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34164 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34165 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34166 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34167 {"D-Link DFE-580TX 4 port Server Adapter"},
34168 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
34169 index 2ea456d..3ad9523 100644
34170 --- a/drivers/net/tg3.h
34171 +++ b/drivers/net/tg3.h
34172 @@ -134,6 +134,7 @@
34173 #define CHIPREV_ID_5750_A0 0x4000
34174 #define CHIPREV_ID_5750_A1 0x4001
34175 #define CHIPREV_ID_5750_A3 0x4003
34176 +#define CHIPREV_ID_5750_C1 0x4201
34177 #define CHIPREV_ID_5750_C2 0x4202
34178 #define CHIPREV_ID_5752_A0_HW 0x5000
34179 #define CHIPREV_ID_5752_A0 0x6000
34180 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34181 index 515f122..41dd273 100644
34182 --- a/drivers/net/tokenring/abyss.c
34183 +++ b/drivers/net/tokenring/abyss.c
34184 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34185
34186 static int __init abyss_init (void)
34187 {
34188 - abyss_netdev_ops = tms380tr_netdev_ops;
34189 + pax_open_kernel();
34190 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34191
34192 - abyss_netdev_ops.ndo_open = abyss_open;
34193 - abyss_netdev_ops.ndo_stop = abyss_close;
34194 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34195 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34196 + pax_close_kernel();
34197
34198 return pci_register_driver(&abyss_driver);
34199 }
34200 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34201 index 6153cfd..cf69c1c 100644
34202 --- a/drivers/net/tokenring/madgemc.c
34203 +++ b/drivers/net/tokenring/madgemc.c
34204 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34205
34206 static int __init madgemc_init (void)
34207 {
34208 - madgemc_netdev_ops = tms380tr_netdev_ops;
34209 - madgemc_netdev_ops.ndo_open = madgemc_open;
34210 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34211 + pax_open_kernel();
34212 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34213 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34214 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34215 + pax_close_kernel();
34216
34217 return mca_register_driver (&madgemc_driver);
34218 }
34219 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34220 index 8d362e6..f91cc52 100644
34221 --- a/drivers/net/tokenring/proteon.c
34222 +++ b/drivers/net/tokenring/proteon.c
34223 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34224 struct platform_device *pdev;
34225 int i, num = 0, err = 0;
34226
34227 - proteon_netdev_ops = tms380tr_netdev_ops;
34228 - proteon_netdev_ops.ndo_open = proteon_open;
34229 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34230 + pax_open_kernel();
34231 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34232 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34233 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34234 + pax_close_kernel();
34235
34236 err = platform_driver_register(&proteon_driver);
34237 if (err)
34238 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34239 index 46db5c5..37c1536 100644
34240 --- a/drivers/net/tokenring/skisa.c
34241 +++ b/drivers/net/tokenring/skisa.c
34242 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34243 struct platform_device *pdev;
34244 int i, num = 0, err = 0;
34245
34246 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34247 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34248 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34249 + pax_open_kernel();
34250 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34251 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34252 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34253 + pax_close_kernel();
34254
34255 err = platform_driver_register(&sk_isa_driver);
34256 if (err)
34257 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
34258 index ce90efc..2676f89 100644
34259 --- a/drivers/net/tulip/de2104x.c
34260 +++ b/drivers/net/tulip/de2104x.c
34261 @@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
34262 struct de_srom_info_leaf *il;
34263 void *bufp;
34264
34265 + pax_track_stack();
34266 +
34267 /* download entire eeprom */
34268 for (i = 0; i < DE_EEPROM_WORDS; i++)
34269 ((__le16 *)ee_data)[i] =
34270 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
34271 index 959b410..c97fac2 100644
34272 --- a/drivers/net/tulip/de4x5.c
34273 +++ b/drivers/net/tulip/de4x5.c
34274 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34275 for (i=0; i<ETH_ALEN; i++) {
34276 tmp.addr[i] = dev->dev_addr[i];
34277 }
34278 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34279 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34280 break;
34281
34282 case DE4X5_SET_HWADDR: /* Set the hardware address */
34283 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34284 spin_lock_irqsave(&lp->lock, flags);
34285 memcpy(&statbuf, &lp->pktStats, ioc->len);
34286 spin_unlock_irqrestore(&lp->lock, flags);
34287 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34288 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34289 return -EFAULT;
34290 break;
34291 }
34292 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
34293 index fa5eee9..e074432 100644
34294 --- a/drivers/net/tulip/eeprom.c
34295 +++ b/drivers/net/tulip/eeprom.c
34296 @@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34297 {NULL}};
34298
34299
34300 -static const char *block_name[] __devinitdata = {
34301 +static const char *block_name[] __devinitconst = {
34302 "21140 non-MII",
34303 "21140 MII PHY",
34304 "21142 Serial PHY",
34305 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
34306 index 862eadf..3eee1e6 100644
34307 --- a/drivers/net/tulip/winbond-840.c
34308 +++ b/drivers/net/tulip/winbond-840.c
34309 @@ -236,7 +236,7 @@ struct pci_id_info {
34310 int drv_flags; /* Driver use, intended as capability flags. */
34311 };
34312
34313 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34314 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34315 { /* Sometime a Level-One switch card. */
34316 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34317 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34318 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34319 index 304fe78..db112fa 100644
34320 --- a/drivers/net/usb/hso.c
34321 +++ b/drivers/net/usb/hso.c
34322 @@ -71,7 +71,7 @@
34323 #include <asm/byteorder.h>
34324 #include <linux/serial_core.h>
34325 #include <linux/serial.h>
34326 -
34327 +#include <asm/local.h>
34328
34329 #define MOD_AUTHOR "Option Wireless"
34330 #define MOD_DESCRIPTION "USB High Speed Option driver"
34331 @@ -257,7 +257,7 @@ struct hso_serial {
34332
34333 /* from usb_serial_port */
34334 struct tty_struct *tty;
34335 - int open_count;
34336 + local_t open_count;
34337 spinlock_t serial_lock;
34338
34339 int (*write_data) (struct hso_serial *serial);
34340 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34341 struct urb *urb;
34342
34343 urb = serial->rx_urb[0];
34344 - if (serial->open_count > 0) {
34345 + if (local_read(&serial->open_count) > 0) {
34346 count = put_rxbuf_data(urb, serial);
34347 if (count == -1)
34348 return;
34349 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34350 DUMP1(urb->transfer_buffer, urb->actual_length);
34351
34352 /* Anyone listening? */
34353 - if (serial->open_count == 0)
34354 + if (local_read(&serial->open_count) == 0)
34355 return;
34356
34357 if (status == 0) {
34358 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34359 spin_unlock_irq(&serial->serial_lock);
34360
34361 /* check for port already opened, if not set the termios */
34362 - serial->open_count++;
34363 - if (serial->open_count == 1) {
34364 + if (local_inc_return(&serial->open_count) == 1) {
34365 serial->rx_state = RX_IDLE;
34366 /* Force default termio settings */
34367 _hso_serial_set_termios(tty, NULL);
34368 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34369 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34370 if (result) {
34371 hso_stop_serial_device(serial->parent);
34372 - serial->open_count--;
34373 + local_dec(&serial->open_count);
34374 kref_put(&serial->parent->ref, hso_serial_ref_free);
34375 }
34376 } else {
34377 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34378
34379 /* reset the rts and dtr */
34380 /* do the actual close */
34381 - serial->open_count--;
34382 + local_dec(&serial->open_count);
34383
34384 - if (serial->open_count <= 0) {
34385 - serial->open_count = 0;
34386 + if (local_read(&serial->open_count) <= 0) {
34387 + local_set(&serial->open_count, 0);
34388 spin_lock_irq(&serial->serial_lock);
34389 if (serial->tty == tty) {
34390 serial->tty->driver_data = NULL;
34391 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34392
34393 /* the actual setup */
34394 spin_lock_irqsave(&serial->serial_lock, flags);
34395 - if (serial->open_count)
34396 + if (local_read(&serial->open_count))
34397 _hso_serial_set_termios(tty, old);
34398 else
34399 tty->termios = old;
34400 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34401 D1("Pending read interrupt on port %d\n", i);
34402 spin_lock(&serial->serial_lock);
34403 if (serial->rx_state == RX_IDLE &&
34404 - serial->open_count > 0) {
34405 + local_read(&serial->open_count) > 0) {
34406 /* Setup and send a ctrl req read on
34407 * port i */
34408 if (!serial->rx_urb_filled[0]) {
34409 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34410 /* Start all serial ports */
34411 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34412 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34413 - if (dev2ser(serial_table[i])->open_count) {
34414 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34415 result =
34416 hso_start_serial_device(serial_table[i], GFP_NOIO);
34417 hso_kick_transmit(dev2ser(serial_table[i]));
34418 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34419 index 27400ed..c796e05 100644
34420 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
34421 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34422 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
34423 * Return with error code if any of the queue indices
34424 * is out of range
34425 */
34426 - if (p->ring_index[i] < 0 ||
34427 - p->ring_index[i] >= adapter->num_rx_queues)
34428 + if (p->ring_index[i] >= adapter->num_rx_queues)
34429 return -EINVAL;
34430 }
34431
34432 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
34433 index dd36258..e47fd31 100644
34434 --- a/drivers/net/vxge/vxge-config.h
34435 +++ b/drivers/net/vxge/vxge-config.h
34436 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34437 void (*link_down)(struct __vxge_hw_device *devh);
34438 void (*crit_err)(struct __vxge_hw_device *devh,
34439 enum vxge_hw_event type, u64 ext_data);
34440 -};
34441 +} __no_const;
34442
34443 /*
34444 * struct __vxge_hw_blockpool_entry - Block private data structure
34445 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
34446 index 178348a2..18bb433 100644
34447 --- a/drivers/net/vxge/vxge-main.c
34448 +++ b/drivers/net/vxge/vxge-main.c
34449 @@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
34450 struct sk_buff *completed[NR_SKB_COMPLETED];
34451 int more;
34452
34453 + pax_track_stack();
34454 +
34455 do {
34456 more = 0;
34457 skb_ptr = completed;
34458 @@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
34459 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34460 int index;
34461
34462 + pax_track_stack();
34463 +
34464 /*
34465 * Filling
34466 * - itable with bucket numbers
34467 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
34468 index 4a518a3..936b334 100644
34469 --- a/drivers/net/vxge/vxge-traffic.h
34470 +++ b/drivers/net/vxge/vxge-traffic.h
34471 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34472 struct vxge_hw_mempool_dma *dma_object,
34473 u32 index,
34474 u32 is_last);
34475 -};
34476 +} __no_const;
34477
34478 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34479 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34480 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
34481 index 56aeb01..547f71f 100644
34482 --- a/drivers/net/wan/hdlc_x25.c
34483 +++ b/drivers/net/wan/hdlc_x25.c
34484 @@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
34485
34486 static int x25_open(struct net_device *dev)
34487 {
34488 - struct lapb_register_struct cb;
34489 + static struct lapb_register_struct cb = {
34490 + .connect_confirmation = x25_connected,
34491 + .connect_indication = x25_connected,
34492 + .disconnect_confirmation = x25_disconnected,
34493 + .disconnect_indication = x25_disconnected,
34494 + .data_indication = x25_data_indication,
34495 + .data_transmit = x25_data_transmit
34496 + };
34497 int result;
34498
34499 - cb.connect_confirmation = x25_connected;
34500 - cb.connect_indication = x25_connected;
34501 - cb.disconnect_confirmation = x25_disconnected;
34502 - cb.disconnect_indication = x25_disconnected;
34503 - cb.data_indication = x25_data_indication;
34504 - cb.data_transmit = x25_data_transmit;
34505 -
34506 result = lapb_register(dev, &cb);
34507 if (result != LAPB_OK)
34508 return result;
34509 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
34510 index 1fda46c..f2858f2 100644
34511 --- a/drivers/net/wimax/i2400m/usb-fw.c
34512 +++ b/drivers/net/wimax/i2400m/usb-fw.c
34513 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
34514 int do_autopm = 1;
34515 DECLARE_COMPLETION_ONSTACK(notif_completion);
34516
34517 + pax_track_stack();
34518 +
34519 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34520 i2400m, ack, ack_size);
34521 BUG_ON(_ack == i2400m->bm_ack_buf);
34522 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
34523 index e1b3e3c..e413f18 100644
34524 --- a/drivers/net/wireless/airo.c
34525 +++ b/drivers/net/wireless/airo.c
34526 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
34527 BSSListElement * loop_net;
34528 BSSListElement * tmp_net;
34529
34530 + pax_track_stack();
34531 +
34532 /* Blow away current list of scan results */
34533 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34534 list_move_tail (&loop_net->list, &ai->network_free_list);
34535 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
34536 WepKeyRid wkr;
34537 int rc;
34538
34539 + pax_track_stack();
34540 +
34541 memset( &mySsid, 0, sizeof( mySsid ) );
34542 kfree (ai->flash);
34543 ai->flash = NULL;
34544 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct inode *inode,
34545 __le32 *vals = stats.vals;
34546 int len;
34547
34548 + pax_track_stack();
34549 +
34550 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34551 return -ENOMEM;
34552 data = file->private_data;
34553 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
34554 /* If doLoseSync is not 1, we won't do a Lose Sync */
34555 int doLoseSync = -1;
34556
34557 + pax_track_stack();
34558 +
34559 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34560 return -ENOMEM;
34561 data = file->private_data;
34562 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_device *dev,
34563 int i;
34564 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34565
34566 + pax_track_stack();
34567 +
34568 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34569 if (!qual)
34570 return -ENOMEM;
34571 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
34572 CapabilityRid cap_rid;
34573 __le32 *vals = stats_rid.vals;
34574
34575 + pax_track_stack();
34576 +
34577 /* Get stats out of the card */
34578 clear_bit(JOB_WSTATS, &local->jobs);
34579 if (local->power.event) {
34580 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34581 index 17c4b56..00d836f 100644
34582 --- a/drivers/net/wireless/ath/ath.h
34583 +++ b/drivers/net/wireless/ath/ath.h
34584 @@ -121,6 +121,7 @@ struct ath_ops {
34585 void (*write_flush) (void *);
34586 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34587 };
34588 +typedef struct ath_ops __no_const ath_ops_no_const;
34589
34590 struct ath_common;
34591 struct ath_bus_ops;
34592 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
34593 index ccca724..7afbadc 100644
34594 --- a/drivers/net/wireless/ath/ath5k/debug.c
34595 +++ b/drivers/net/wireless/ath/ath5k/debug.c
34596 @@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
34597 unsigned int v;
34598 u64 tsf;
34599
34600 + pax_track_stack();
34601 +
34602 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
34603 len += snprintf(buf + len, sizeof(buf) - len,
34604 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
34605 @@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
34606 unsigned int len = 0;
34607 unsigned int i;
34608
34609 + pax_track_stack();
34610 +
34611 len += snprintf(buf + len, sizeof(buf) - len,
34612 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
34613
34614 @@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
34615 unsigned int len = 0;
34616 u32 filt = ath5k_hw_get_rx_filter(ah);
34617
34618 + pax_track_stack();
34619 +
34620 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
34621 ah->bssidmask);
34622 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
34623 @@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
34624 unsigned int len = 0;
34625 int i;
34626
34627 + pax_track_stack();
34628 +
34629 len += snprintf(buf + len, sizeof(buf) - len,
34630 "RX\n---------------------\n");
34631 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
34632 @@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
34633 char buf[700];
34634 unsigned int len = 0;
34635
34636 + pax_track_stack();
34637 +
34638 len += snprintf(buf + len, sizeof(buf) - len,
34639 "HW has PHY error counters:\t%s\n",
34640 ah->ah_capabilities.cap_has_phyerr_counters ?
34641 @@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34642 struct ath5k_buf *bf, *bf0;
34643 int i, n;
34644
34645 + pax_track_stack();
34646 +
34647 len += snprintf(buf + len, sizeof(buf) - len,
34648 "available txbuffers: %d\n", ah->txbuf_len);
34649
34650 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34651 index 7c2aaad..ad14dee 100644
34652 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34653 +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34654 @@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
34655 int i, im, j;
34656 int nmeasurement;
34657
34658 + pax_track_stack();
34659 +
34660 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
34661 if (ah->txchainmask & (1 << i))
34662 num_chains++;
34663 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34664 index f80d1d6..08b773d 100644
34665 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34666 +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34667 @@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
34668 int theta_low_bin = 0;
34669 int i;
34670
34671 + pax_track_stack();
34672 +
34673 /* disregard any bin that contains <= 16 samples */
34674 thresh_accum_cnt = 16;
34675 scale_factor = 5;
34676 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
34677 index d1eb896..8b67cd4 100644
34678 --- a/drivers/net/wireless/ath/ath9k/debug.c
34679 +++ b/drivers/net/wireless/ath/ath9k/debug.c
34680 @@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
34681 char buf[512];
34682 unsigned int len = 0;
34683
34684 + pax_track_stack();
34685 +
34686 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
34687 len += snprintf(buf + len, sizeof(buf) - len,
34688 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
34689 @@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
34690 u8 addr[ETH_ALEN];
34691 u32 tmp;
34692
34693 + pax_track_stack();
34694 +
34695 len += snprintf(buf + len, sizeof(buf) - len,
34696 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
34697 wiphy_name(sc->hw->wiphy),
34698 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34699 index d3ff33c..309398e 100644
34700 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34701 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34702 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
34703 unsigned int len = 0;
34704 int ret = 0;
34705
34706 + pax_track_stack();
34707 +
34708 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34709
34710 ath9k_htc_ps_wakeup(priv);
34711 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
34712 unsigned int len = 0;
34713 int ret = 0;
34714
34715 + pax_track_stack();
34716 +
34717 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34718
34719 ath9k_htc_ps_wakeup(priv);
34720 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
34721 unsigned int len = 0;
34722 int ret = 0;
34723
34724 + pax_track_stack();
34725 +
34726 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34727
34728 ath9k_htc_ps_wakeup(priv);
34729 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
34730 char buf[512];
34731 unsigned int len = 0;
34732
34733 + pax_track_stack();
34734 +
34735 len += snprintf(buf + len, sizeof(buf) - len,
34736 "%20s : %10u\n", "Buffers queued",
34737 priv->debug.tx_stats.buf_queued);
34738 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
34739 char buf[512];
34740 unsigned int len = 0;
34741
34742 + pax_track_stack();
34743 +
34744 spin_lock_bh(&priv->tx.tx_lock);
34745
34746 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
34747 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34748 char buf[512];
34749 unsigned int len = 0;
34750
34751 + pax_track_stack();
34752 +
34753 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
34754 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
34755
34756 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34757 index c798890..c19a8fb 100644
34758 --- a/drivers/net/wireless/ath/ath9k/hw.h
34759 +++ b/drivers/net/wireless/ath/ath9k/hw.h
34760 @@ -588,7 +588,7 @@ struct ath_hw_private_ops {
34761
34762 /* ANI */
34763 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34764 -};
34765 +} __no_const;
34766
34767 /**
34768 * struct ath_hw_ops - callbacks used by hardware code and driver code
34769 @@ -639,7 +639,7 @@ struct ath_hw_ops {
34770 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34771 struct ath_hw_antcomb_conf *antconf);
34772
34773 -};
34774 +} __no_const;
34775
34776 struct ath_nf_limits {
34777 s16 max;
34778 @@ -652,7 +652,7 @@ struct ath_nf_limits {
34779 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
34780
34781 struct ath_hw {
34782 - struct ath_ops reg_ops;
34783 + ath_ops_no_const reg_ops;
34784
34785 struct ieee80211_hw *hw;
34786 struct ath_common common;
34787 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
34788 index ef9ad79..f5f8d80 100644
34789 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
34790 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
34791 @@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
34792 int err;
34793 DECLARE_SSID_BUF(ssid);
34794
34795 + pax_track_stack();
34796 +
34797 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
34798
34799 if (ssid_len)
34800 @@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
34801 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
34802 int err;
34803
34804 + pax_track_stack();
34805 +
34806 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
34807 idx, keylen, len);
34808
34809 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
34810 index 32a9966..de69787 100644
34811 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
34812 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
34813 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_response(struct libipw_device
34814 unsigned long flags;
34815 DECLARE_SSID_BUF(ssid);
34816
34817 + pax_track_stack();
34818 +
34819 LIBIPW_DEBUG_SCAN("'%s' (%pM"
34820 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
34821 print_ssid(ssid, info_element->data, info_element->len),
34822 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34823 index 66ee1562..b90412b 100644
34824 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
34825 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34826 @@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
34827 */
34828 if (iwl3945_mod_params.disable_hw_scan) {
34829 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
34830 - iwl3945_hw_ops.hw_scan = NULL;
34831 + pax_open_kernel();
34832 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
34833 + pax_close_kernel();
34834 }
34835
34836 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
34837 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34838 index 3789ff4..22ab151 100644
34839 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34840 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34841 @@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
34842 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
34843 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
34844
34845 + pax_track_stack();
34846 +
34847 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
34848
34849 /* Treat uninitialized rate scaling data same as non-existing. */
34850 @@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
34851 container_of(lq_sta, struct iwl_station_priv, lq_sta);
34852 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
34853
34854 + pax_track_stack();
34855 +
34856 /* Override starting rate (index 0) if needed for debug purposes */
34857 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
34858
34859 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34860 index f9a407e..a6f2bb7 100644
34861 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34862 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34863 @@ -68,8 +68,8 @@ do { \
34864 } while (0)
34865
34866 #else
34867 -#define IWL_DEBUG(__priv, level, fmt, args...)
34868 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
34869 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
34870 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
34871 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
34872 const void *p, u32 len)
34873 {}
34874 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34875 index ec1485b..900c3bd 100644
34876 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34877 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34878 @@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
34879 int pos = 0;
34880 const size_t bufsz = sizeof(buf);
34881
34882 + pax_track_stack();
34883 +
34884 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
34885 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
34886 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
34887 @@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
34888 char buf[256 * NUM_IWL_RXON_CTX];
34889 const size_t bufsz = sizeof(buf);
34890
34891 + pax_track_stack();
34892 +
34893 for_each_context(priv, ctx) {
34894 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
34895 ctx->ctxid);
34896 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34897 index 0a0cc96..fd49ad8 100644
34898 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
34899 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34900 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
34901 int buf_len = 512;
34902 size_t len = 0;
34903
34904 + pax_track_stack();
34905 +
34906 if (*ppos != 0)
34907 return 0;
34908 if (count < sizeof(buf))
34909 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34910 index 031cd89..bdc8435 100644
34911 --- a/drivers/net/wireless/mac80211_hwsim.c
34912 +++ b/drivers/net/wireless/mac80211_hwsim.c
34913 @@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(void)
34914 return -EINVAL;
34915
34916 if (fake_hw_scan) {
34917 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34918 - mac80211_hwsim_ops.sw_scan_start = NULL;
34919 - mac80211_hwsim_ops.sw_scan_complete = NULL;
34920 + pax_open_kernel();
34921 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34922 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34923 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34924 + pax_close_kernel();
34925 }
34926
34927 spin_lock_init(&hwsim_radio_lock);
34928 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34929 index 2215c3c..64e6a47 100644
34930 --- a/drivers/net/wireless/mwifiex/main.h
34931 +++ b/drivers/net/wireless/mwifiex/main.h
34932 @@ -560,7 +560,7 @@ struct mwifiex_if_ops {
34933
34934 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
34935 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34936 -};
34937 +} __no_const;
34938
34939 struct mwifiex_adapter {
34940 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
34941 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34942 index 29f9389..f6d2ce0 100644
34943 --- a/drivers/net/wireless/rndis_wlan.c
34944 +++ b/drivers/net/wireless/rndis_wlan.c
34945 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
34946
34947 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34948
34949 - if (rts_threshold < 0 || rts_threshold > 2347)
34950 + if (rts_threshold > 2347)
34951 rts_threshold = 2347;
34952
34953 tmp = cpu_to_le32(rts_threshold);
34954 diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34955 index 3b11642..d6bb049 100644
34956 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34957 +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34958 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
34959 u8 rfpath;
34960 u8 num_total_rfpath = rtlphy->num_total_rfpath;
34961
34962 + pax_track_stack();
34963 +
34964 precommoncmdcnt = 0;
34965 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
34966 MAX_PRECMD_CNT,
34967 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34968 index a77f1bb..c608b2b 100644
34969 --- a/drivers/net/wireless/wl1251/wl1251.h
34970 +++ b/drivers/net/wireless/wl1251/wl1251.h
34971 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
34972 void (*reset)(struct wl1251 *wl);
34973 void (*enable_irq)(struct wl1251 *wl);
34974 void (*disable_irq)(struct wl1251 *wl);
34975 -};
34976 +} __no_const;
34977
34978 struct wl1251 {
34979 struct ieee80211_hw *hw;
34980 diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
34981 index e0b3736..4b466e6 100644
34982 --- a/drivers/net/wireless/wl12xx/spi.c
34983 +++ b/drivers/net/wireless/wl12xx/spi.c
34984 @@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
34985 u32 chunk_len;
34986 int i;
34987
34988 + pax_track_stack();
34989 +
34990 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
34991
34992 spi_message_init(&m);
34993 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34994 index f34b5b2..b5abb9f 100644
34995 --- a/drivers/oprofile/buffer_sync.c
34996 +++ b/drivers/oprofile/buffer_sync.c
34997 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
34998 if (cookie == NO_COOKIE)
34999 offset = pc;
35000 if (cookie == INVALID_COOKIE) {
35001 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35002 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35003 offset = pc;
35004 }
35005 if (cookie != last_cookie) {
35006 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35007 /* add userspace sample */
35008
35009 if (!mm) {
35010 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35011 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35012 return 0;
35013 }
35014
35015 cookie = lookup_dcookie(mm, s->eip, &offset);
35016
35017 if (cookie == INVALID_COOKIE) {
35018 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35019 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35020 return 0;
35021 }
35022
35023 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35024 /* ignore backtraces if failed to add a sample */
35025 if (state == sb_bt_start) {
35026 state = sb_bt_ignore;
35027 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35028 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35029 }
35030 }
35031 release_mm(mm);
35032 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35033 index dd87e86..bc0148c 100644
35034 --- a/drivers/oprofile/event_buffer.c
35035 +++ b/drivers/oprofile/event_buffer.c
35036 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35037 }
35038
35039 if (buffer_pos == buffer_size) {
35040 - atomic_inc(&oprofile_stats.event_lost_overflow);
35041 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35042 return;
35043 }
35044
35045 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35046 index f8c752e..28bf4fc 100644
35047 --- a/drivers/oprofile/oprof.c
35048 +++ b/drivers/oprofile/oprof.c
35049 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35050 if (oprofile_ops.switch_events())
35051 return;
35052
35053 - atomic_inc(&oprofile_stats.multiplex_counter);
35054 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35055 start_switch_worker();
35056 }
35057
35058 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35059 index 917d28e..d62d981 100644
35060 --- a/drivers/oprofile/oprofile_stats.c
35061 +++ b/drivers/oprofile/oprofile_stats.c
35062 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35063 cpu_buf->sample_invalid_eip = 0;
35064 }
35065
35066 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35067 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35068 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35069 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35070 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35071 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35072 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35073 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35074 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35075 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35076 }
35077
35078
35079 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35080 index 38b6fc0..b5cbfce 100644
35081 --- a/drivers/oprofile/oprofile_stats.h
35082 +++ b/drivers/oprofile/oprofile_stats.h
35083 @@ -13,11 +13,11 @@
35084 #include <linux/atomic.h>
35085
35086 struct oprofile_stat_struct {
35087 - atomic_t sample_lost_no_mm;
35088 - atomic_t sample_lost_no_mapping;
35089 - atomic_t bt_lost_no_mapping;
35090 - atomic_t event_lost_overflow;
35091 - atomic_t multiplex_counter;
35092 + atomic_unchecked_t sample_lost_no_mm;
35093 + atomic_unchecked_t sample_lost_no_mapping;
35094 + atomic_unchecked_t bt_lost_no_mapping;
35095 + atomic_unchecked_t event_lost_overflow;
35096 + atomic_unchecked_t multiplex_counter;
35097 };
35098
35099 extern struct oprofile_stat_struct oprofile_stats;
35100 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35101 index 1c0b799..c11b2d2 100644
35102 --- a/drivers/oprofile/oprofilefs.c
35103 +++ b/drivers/oprofile/oprofilefs.c
35104 @@ -193,7 +193,7 @@ static const struct file_operations atomic_ro_fops = {
35105
35106
35107 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35108 - char const *name, atomic_t *val)
35109 + char const *name, atomic_unchecked_t *val)
35110 {
35111 return __oprofilefs_create_file(sb, root, name,
35112 &atomic_ro_fops, 0444, val);
35113 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35114 index 3f56bc0..707d642 100644
35115 --- a/drivers/parport/procfs.c
35116 +++ b/drivers/parport/procfs.c
35117 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35118
35119 *ppos += len;
35120
35121 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35122 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35123 }
35124
35125 #ifdef CONFIG_PARPORT_1284
35126 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35127
35128 *ppos += len;
35129
35130 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35131 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35132 }
35133 #endif /* IEEE1284.3 support. */
35134
35135 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35136 index 9fff878..ad0ad53 100644
35137 --- a/drivers/pci/hotplug/cpci_hotplug.h
35138 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35139 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35140 int (*hardware_test) (struct slot* slot, u32 value);
35141 u8 (*get_power) (struct slot* slot);
35142 int (*set_power) (struct slot* slot, int value);
35143 -};
35144 +} __no_const;
35145
35146 struct cpci_hp_controller {
35147 unsigned int irq;
35148 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35149 index 76ba8a1..20ca857 100644
35150 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35151 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35152 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35153
35154 void compaq_nvram_init (void __iomem *rom_start)
35155 {
35156 +
35157 +#ifndef CONFIG_PAX_KERNEXEC
35158 if (rom_start) {
35159 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35160 }
35161 +#endif
35162 +
35163 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35164
35165 /* initialize our int15 lock */
35166 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35167 index cbfbab1..6a9fced 100644
35168 --- a/drivers/pci/pcie/aspm.c
35169 +++ b/drivers/pci/pcie/aspm.c
35170 @@ -27,9 +27,9 @@
35171 #define MODULE_PARAM_PREFIX "pcie_aspm."
35172
35173 /* Note: those are not register definitions */
35174 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35175 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35176 -#define ASPM_STATE_L1 (4) /* L1 state */
35177 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35178 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35179 +#define ASPM_STATE_L1 (4U) /* L1 state */
35180 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35181 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35182
35183 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35184 index 6ab6bd3..72bdc69 100644
35185 --- a/drivers/pci/probe.c
35186 +++ b/drivers/pci/probe.c
35187 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35188 u32 l, sz, mask;
35189 u16 orig_cmd;
35190
35191 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35192 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35193
35194 if (!dev->mmio_always_on) {
35195 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35196 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35197 index 27911b5..5b6db88 100644
35198 --- a/drivers/pci/proc.c
35199 +++ b/drivers/pci/proc.c
35200 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35201 static int __init pci_proc_init(void)
35202 {
35203 struct pci_dev *dev = NULL;
35204 +
35205 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35206 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35207 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35208 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35209 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35210 +#endif
35211 +#else
35212 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35213 +#endif
35214 proc_create("devices", 0, proc_bus_pci_dir,
35215 &proc_bus_pci_dev_operations);
35216 proc_initialized = 1;
35217 diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
35218 index 90832a9..419089a 100644
35219 --- a/drivers/pci/xen-pcifront.c
35220 +++ b/drivers/pci/xen-pcifront.c
35221 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
35222 struct pcifront_sd *sd = bus->sysdata;
35223 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35224
35225 + pax_track_stack();
35226 +
35227 if (verbose_request)
35228 dev_info(&pdev->xdev->dev,
35229 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
35230 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
35231 struct pcifront_sd *sd = bus->sysdata;
35232 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35233
35234 + pax_track_stack();
35235 +
35236 if (verbose_request)
35237 dev_info(&pdev->xdev->dev,
35238 "write dev=%04x:%02x:%02x.%01x - "
35239 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
35240 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35241 struct msi_desc *entry;
35242
35243 + pax_track_stack();
35244 +
35245 if (nvec > SH_INFO_MAX_VEC) {
35246 dev_err(&dev->dev, "too much vector for pci frontend: %x."
35247 " Increase SH_INFO_MAX_VEC.\n", nvec);
35248 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
35249 struct pcifront_sd *sd = dev->bus->sysdata;
35250 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35251
35252 + pax_track_stack();
35253 +
35254 err = do_pci_op(pdev, &op);
35255
35256 /* What should do for error ? */
35257 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
35258 struct pcifront_sd *sd = dev->bus->sysdata;
35259 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35260
35261 + pax_track_stack();
35262 +
35263 err = do_pci_op(pdev, &op);
35264 if (likely(!err)) {
35265 vector[0] = op.value;
35266 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35267 index 7bd829f..a3237ad 100644
35268 --- a/drivers/platform/x86/thinkpad_acpi.c
35269 +++ b/drivers/platform/x86/thinkpad_acpi.c
35270 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35271 return 0;
35272 }
35273
35274 -void static hotkey_mask_warn_incomplete_mask(void)
35275 +static void hotkey_mask_warn_incomplete_mask(void)
35276 {
35277 /* log only what the user can fix... */
35278 const u32 wantedmask = hotkey_driver_mask &
35279 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35280 }
35281 }
35282
35283 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35284 - struct tp_nvram_state *newn,
35285 - const u32 event_mask)
35286 -{
35287 -
35288 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35289 do { \
35290 if ((event_mask & (1 << __scancode)) && \
35291 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35292 tpacpi_hotkey_send_key(__scancode); \
35293 } while (0)
35294
35295 - void issue_volchange(const unsigned int oldvol,
35296 - const unsigned int newvol)
35297 - {
35298 - unsigned int i = oldvol;
35299 +static void issue_volchange(const unsigned int oldvol,
35300 + const unsigned int newvol,
35301 + const u32 event_mask)
35302 +{
35303 + unsigned int i = oldvol;
35304
35305 - while (i > newvol) {
35306 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35307 - i--;
35308 - }
35309 - while (i < newvol) {
35310 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35311 - i++;
35312 - }
35313 + while (i > newvol) {
35314 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35315 + i--;
35316 }
35317 + while (i < newvol) {
35318 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35319 + i++;
35320 + }
35321 +}
35322
35323 - void issue_brightnesschange(const unsigned int oldbrt,
35324 - const unsigned int newbrt)
35325 - {
35326 - unsigned int i = oldbrt;
35327 +static void issue_brightnesschange(const unsigned int oldbrt,
35328 + const unsigned int newbrt,
35329 + const u32 event_mask)
35330 +{
35331 + unsigned int i = oldbrt;
35332
35333 - while (i > newbrt) {
35334 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35335 - i--;
35336 - }
35337 - while (i < newbrt) {
35338 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35339 - i++;
35340 - }
35341 + while (i > newbrt) {
35342 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35343 + i--;
35344 + }
35345 + while (i < newbrt) {
35346 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35347 + i++;
35348 }
35349 +}
35350
35351 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35352 + struct tp_nvram_state *newn,
35353 + const u32 event_mask)
35354 +{
35355 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35356 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35357 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35358 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35359 oldn->volume_level != newn->volume_level) {
35360 /* recently muted, or repeated mute keypress, or
35361 * multiple presses ending in mute */
35362 - issue_volchange(oldn->volume_level, newn->volume_level);
35363 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35364 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35365 }
35366 } else {
35367 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35368 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35369 }
35370 if (oldn->volume_level != newn->volume_level) {
35371 - issue_volchange(oldn->volume_level, newn->volume_level);
35372 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35373 } else if (oldn->volume_toggle != newn->volume_toggle) {
35374 /* repeated vol up/down keypress at end of scale ? */
35375 if (newn->volume_level == 0)
35376 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35377 /* handle brightness */
35378 if (oldn->brightness_level != newn->brightness_level) {
35379 issue_brightnesschange(oldn->brightness_level,
35380 - newn->brightness_level);
35381 + newn->brightness_level,
35382 + event_mask);
35383 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35384 /* repeated key presses that didn't change state */
35385 if (newn->brightness_level == 0)
35386 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35387 && !tp_features.bright_unkfw)
35388 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35389 }
35390 +}
35391
35392 #undef TPACPI_COMPARE_KEY
35393 #undef TPACPI_MAY_SEND_KEY
35394 -}
35395
35396 /*
35397 * Polling driver
35398 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35399 index b859d16..5cc6b1a 100644
35400 --- a/drivers/pnp/pnpbios/bioscalls.c
35401 +++ b/drivers/pnp/pnpbios/bioscalls.c
35402 @@ -59,7 +59,7 @@ do { \
35403 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35404 } while(0)
35405
35406 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35407 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35408 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35409
35410 /*
35411 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35412
35413 cpu = get_cpu();
35414 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35415 +
35416 + pax_open_kernel();
35417 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35418 + pax_close_kernel();
35419
35420 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35421 spin_lock_irqsave(&pnp_bios_lock, flags);
35422 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35423 :"memory");
35424 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35425
35426 + pax_open_kernel();
35427 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35428 + pax_close_kernel();
35429 +
35430 put_cpu();
35431
35432 /* If we get here and this is set then the PnP BIOS faulted on us. */
35433 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35434 return status;
35435 }
35436
35437 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35438 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35439 {
35440 int i;
35441
35442 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35443 pnp_bios_callpoint.offset = header->fields.pm16offset;
35444 pnp_bios_callpoint.segment = PNP_CS16;
35445
35446 + pax_open_kernel();
35447 +
35448 for_each_possible_cpu(i) {
35449 struct desc_struct *gdt = get_cpu_gdt_table(i);
35450 if (!gdt)
35451 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35452 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35453 (unsigned long)__va(header->fields.pm16dseg));
35454 }
35455 +
35456 + pax_close_kernel();
35457 }
35458 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35459 index b0ecacb..7c9da2e 100644
35460 --- a/drivers/pnp/resource.c
35461 +++ b/drivers/pnp/resource.c
35462 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35463 return 1;
35464
35465 /* check if the resource is valid */
35466 - if (*irq < 0 || *irq > 15)
35467 + if (*irq > 15)
35468 return 0;
35469
35470 /* check if the resource is reserved */
35471 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35472 return 1;
35473
35474 /* check if the resource is valid */
35475 - if (*dma < 0 || *dma == 4 || *dma > 7)
35476 + if (*dma == 4 || *dma > 7)
35477 return 0;
35478
35479 /* check if the resource is reserved */
35480 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35481 index bb16f5b..c751eef 100644
35482 --- a/drivers/power/bq27x00_battery.c
35483 +++ b/drivers/power/bq27x00_battery.c
35484 @@ -67,7 +67,7 @@
35485 struct bq27x00_device_info;
35486 struct bq27x00_access_methods {
35487 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35488 -};
35489 +} __no_const;
35490
35491 enum bq27x00_chip { BQ27000, BQ27500 };
35492
35493 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35494 index 33f5d9a..d957d3f 100644
35495 --- a/drivers/regulator/max8660.c
35496 +++ b/drivers/regulator/max8660.c
35497 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35498 max8660->shadow_regs[MAX8660_OVER1] = 5;
35499 } else {
35500 /* Otherwise devices can be toggled via software */
35501 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35502 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35503 + pax_open_kernel();
35504 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35505 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35506 + pax_close_kernel();
35507 }
35508
35509 /*
35510 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35511 index 3285d41..ab7c22a 100644
35512 --- a/drivers/regulator/mc13892-regulator.c
35513 +++ b/drivers/regulator/mc13892-regulator.c
35514 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35515 }
35516 mc13xxx_unlock(mc13892);
35517
35518 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35519 + pax_open_kernel();
35520 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35521 = mc13892_vcam_set_mode;
35522 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35523 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35524 = mc13892_vcam_get_mode;
35525 + pax_close_kernel();
35526 for (i = 0; i < pdata->num_regulators; i++) {
35527 init_data = &pdata->regulators[i];
35528 priv->regulators[i] = regulator_register(
35529 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35530 index cace6d3..f623fda 100644
35531 --- a/drivers/rtc/rtc-dev.c
35532 +++ b/drivers/rtc/rtc-dev.c
35533 @@ -14,6 +14,7 @@
35534 #include <linux/module.h>
35535 #include <linux/rtc.h>
35536 #include <linux/sched.h>
35537 +#include <linux/grsecurity.h>
35538 #include "rtc-core.h"
35539
35540 static dev_t rtc_devt;
35541 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35542 if (copy_from_user(&tm, uarg, sizeof(tm)))
35543 return -EFAULT;
35544
35545 + gr_log_timechange();
35546 +
35547 return rtc_set_time(rtc, &tm);
35548
35549 case RTC_PIE_ON:
35550 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
35551 index f66c33b..7ae5823 100644
35552 --- a/drivers/scsi/BusLogic.c
35553 +++ b/drivers/scsi/BusLogic.c
35554 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
35555 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
35556 *PrototypeHostAdapter)
35557 {
35558 + pax_track_stack();
35559 +
35560 /*
35561 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
35562 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
35563 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35564 index ffb5878..e6d785c 100644
35565 --- a/drivers/scsi/aacraid/aacraid.h
35566 +++ b/drivers/scsi/aacraid/aacraid.h
35567 @@ -492,7 +492,7 @@ struct adapter_ops
35568 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35569 /* Administrative operations */
35570 int (*adapter_comm)(struct aac_dev * dev, int comm);
35571 -};
35572 +} __no_const;
35573
35574 /*
35575 * Define which interrupt handler needs to be installed
35576 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
35577 index 8a0b330..b4286de 100644
35578 --- a/drivers/scsi/aacraid/commctrl.c
35579 +++ b/drivers/scsi/aacraid/commctrl.c
35580 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
35581 u32 actual_fibsize64, actual_fibsize = 0;
35582 int i;
35583
35584 + pax_track_stack();
35585
35586 if (dev->in_reset) {
35587 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
35588 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35589 index c7b6fed..4db0569 100644
35590 --- a/drivers/scsi/aacraid/linit.c
35591 +++ b/drivers/scsi/aacraid/linit.c
35592 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35593 #elif defined(__devinitconst)
35594 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35595 #else
35596 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35597 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35598 #endif
35599 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35600 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35601 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35602 index d5ff142..49c0ebb 100644
35603 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35604 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35605 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35606 .lldd_control_phy = asd_control_phy,
35607 };
35608
35609 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35610 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35611 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35612 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35613 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35614 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35615 index a796de9..1ef20e1 100644
35616 --- a/drivers/scsi/bfa/bfa.h
35617 +++ b/drivers/scsi/bfa/bfa.h
35618 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35619 u32 *end);
35620 int cpe_vec_q0;
35621 int rme_vec_q0;
35622 -};
35623 +} __no_const;
35624 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35625
35626 struct bfa_faa_cbfn_s {
35627 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35628 index e07bd47..dbd260a 100644
35629 --- a/drivers/scsi/bfa/bfa_fcpim.c
35630 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35631 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35632 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35633 {
35634 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35635 - struct bfa_itn_s *itn;
35636 + bfa_itn_s_no_const *itn;
35637
35638 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35639 itn->isr = isr;
35640 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35641 index 1080bcb..a3b39e3 100644
35642 --- a/drivers/scsi/bfa/bfa_fcpim.h
35643 +++ b/drivers/scsi/bfa/bfa_fcpim.h
35644 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
35645 struct bfa_itn_s {
35646 bfa_isr_func_t isr;
35647 };
35648 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35649
35650 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35651 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35652 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
35653 struct list_head iotag_tio_free_q; /* free IO resources */
35654 struct list_head iotag_unused_q; /* unused IO resources*/
35655 struct bfa_iotag_s *iotag_arr;
35656 - struct bfa_itn_s *itn_arr;
35657 + bfa_itn_s_no_const *itn_arr;
35658 int num_ioim_reqs;
35659 int num_fwtio_reqs;
35660 int num_itns;
35661 diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
35662 index d4f951f..197c350 100644
35663 --- a/drivers/scsi/bfa/bfa_fcs_lport.c
35664 +++ b/drivers/scsi/bfa/bfa_fcs_lport.c
35665 @@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
35666 u16 len, count;
35667 u16 templen;
35668
35669 + pax_track_stack();
35670 +
35671 /*
35672 * get hba attributes
35673 */
35674 @@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
35675 u8 count = 0;
35676 u16 templen;
35677
35678 + pax_track_stack();
35679 +
35680 /*
35681 * get port attributes
35682 */
35683 diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
35684 index 52628d5..f89d033 100644
35685 --- a/drivers/scsi/bfa/bfa_fcs_rport.c
35686 +++ b/drivers/scsi/bfa/bfa_fcs_rport.c
35687 @@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
35688 struct fc_rpsc_speed_info_s speeds;
35689 struct bfa_port_attr_s pport_attr;
35690
35691 + pax_track_stack();
35692 +
35693 bfa_trc(port->fcs, rx_fchs->s_id);
35694 bfa_trc(port->fcs, rx_fchs->d_id);
35695
35696 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35697 index 546d46b..642fa5b 100644
35698 --- a/drivers/scsi/bfa/bfa_ioc.h
35699 +++ b/drivers/scsi/bfa/bfa_ioc.h
35700 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35701 bfa_ioc_disable_cbfn_t disable_cbfn;
35702 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35703 bfa_ioc_reset_cbfn_t reset_cbfn;
35704 -};
35705 +} __no_const;
35706
35707 /*
35708 * IOC event notification mechanism.
35709 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35710 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35711 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35712 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35713 -};
35714 +} __no_const;
35715
35716 /*
35717 * Queue element to wait for room in request queue. FIFO order is
35718 diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
35719 index 66fb725..0fe05ab 100644
35720 --- a/drivers/scsi/bfa/bfad.c
35721 +++ b/drivers/scsi/bfa/bfad.c
35722 @@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
35723 struct bfad_vport_s *vport, *vport_new;
35724 struct bfa_fcs_driver_info_s driver_info;
35725
35726 + pax_track_stack();
35727 +
35728 /* Limit min/max. xfer size to [64k-32MB] */
35729 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
35730 max_xfer_size = BFAD_MIN_SECTORS >> 1;
35731 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
35732 index b4f6c9a..0eb1938 100644
35733 --- a/drivers/scsi/dpt_i2o.c
35734 +++ b/drivers/scsi/dpt_i2o.c
35735 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
35736 dma_addr_t addr;
35737 ulong flags = 0;
35738
35739 + pax_track_stack();
35740 +
35741 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
35742 // get user msg size in u32s
35743 if(get_user(size, &user_msg[0])){
35744 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
35745 s32 rcode;
35746 dma_addr_t addr;
35747
35748 + pax_track_stack();
35749 +
35750 memset(msg, 0 , sizeof(msg));
35751 len = scsi_bufflen(cmd);
35752 direction = 0x00000000;
35753 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
35754 index 94de889..ca4f0cf 100644
35755 --- a/drivers/scsi/eata.c
35756 +++ b/drivers/scsi/eata.c
35757 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
35758 struct hostdata *ha;
35759 char name[16];
35760
35761 + pax_track_stack();
35762 +
35763 sprintf(name, "%s%d", driver_name, j);
35764
35765 if (!request_region(port_base, REGION_SIZE, driver_name)) {
35766 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
35767 index c74c4b8..c41ca3f 100644
35768 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
35769 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
35770 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
35771 } buf;
35772 int rc;
35773
35774 + pax_track_stack();
35775 +
35776 fiph = (struct fip_header *)skb->data;
35777 sub = fiph->fip_subcode;
35778
35779 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
35780 index 3242bca..45a83e7 100644
35781 --- a/drivers/scsi/gdth.c
35782 +++ b/drivers/scsi/gdth.c
35783 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
35784 unsigned long flags;
35785 gdth_ha_str *ha;
35786
35787 + pax_track_stack();
35788 +
35789 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
35790 return -EFAULT;
35791 ha = gdth_find_ha(ldrv.ionode);
35792 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
35793 gdth_ha_str *ha;
35794 int rval;
35795
35796 + pax_track_stack();
35797 +
35798 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
35799 res.number >= MAX_HDRIVES)
35800 return -EFAULT;
35801 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg, char *cmnd)
35802 gdth_ha_str *ha;
35803 int rval;
35804
35805 + pax_track_stack();
35806 +
35807 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
35808 return -EFAULT;
35809 ha = gdth_find_ha(gen.ionode);
35810 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
35811 int i;
35812 gdth_cmd_str gdtcmd;
35813 char cmnd[MAX_COMMAND_SIZE];
35814 +
35815 + pax_track_stack();
35816 +
35817 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
35818
35819 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
35820 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
35821 index 6527543..81e4fe2 100644
35822 --- a/drivers/scsi/gdth_proc.c
35823 +++ b/drivers/scsi/gdth_proc.c
35824 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
35825 u64 paddr;
35826
35827 char cmnd[MAX_COMMAND_SIZE];
35828 +
35829 + pax_track_stack();
35830 +
35831 memset(cmnd, 0xff, 12);
35832 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
35833
35834 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
35835 gdth_hget_str *phg;
35836 char cmnd[MAX_COMMAND_SIZE];
35837
35838 + pax_track_stack();
35839 +
35840 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
35841 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
35842 if (!gdtcmd || !estr)
35843 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35844 index 351dc0b..951dc32 100644
35845 --- a/drivers/scsi/hosts.c
35846 +++ b/drivers/scsi/hosts.c
35847 @@ -42,7 +42,7 @@
35848 #include "scsi_logging.h"
35849
35850
35851 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
35852 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35853
35854
35855 static void scsi_host_cls_release(struct device *dev)
35856 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35857 * subtract one because we increment first then return, but we need to
35858 * know what the next host number was before increment
35859 */
35860 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35861 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35862 shost->dma_channel = 0xff;
35863
35864 /* These three are default values which can be overridden */
35865 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35866 index 418ce83..7ee1225 100644
35867 --- a/drivers/scsi/hpsa.c
35868 +++ b/drivers/scsi/hpsa.c
35869 @@ -499,7 +499,7 @@ static inline u32 next_command(struct ctlr_info *h)
35870 u32 a;
35871
35872 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35873 - return h->access.command_completed(h);
35874 + return h->access->command_completed(h);
35875
35876 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35877 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35878 @@ -2956,7 +2956,7 @@ static void start_io(struct ctlr_info *h)
35879 while (!list_empty(&h->reqQ)) {
35880 c = list_entry(h->reqQ.next, struct CommandList, list);
35881 /* can't do anything if fifo is full */
35882 - if ((h->access.fifo_full(h))) {
35883 + if ((h->access->fifo_full(h))) {
35884 dev_warn(&h->pdev->dev, "fifo full\n");
35885 break;
35886 }
35887 @@ -2966,7 +2966,7 @@ static void start_io(struct ctlr_info *h)
35888 h->Qdepth--;
35889
35890 /* Tell the controller execute command */
35891 - h->access.submit_command(h, c);
35892 + h->access->submit_command(h, c);
35893
35894 /* Put job onto the completed Q */
35895 addQ(&h->cmpQ, c);
35896 @@ -2975,17 +2975,17 @@ static void start_io(struct ctlr_info *h)
35897
35898 static inline unsigned long get_next_completion(struct ctlr_info *h)
35899 {
35900 - return h->access.command_completed(h);
35901 + return h->access->command_completed(h);
35902 }
35903
35904 static inline bool interrupt_pending(struct ctlr_info *h)
35905 {
35906 - return h->access.intr_pending(h);
35907 + return h->access->intr_pending(h);
35908 }
35909
35910 static inline long interrupt_not_for_us(struct ctlr_info *h)
35911 {
35912 - return (h->access.intr_pending(h) == 0) ||
35913 + return (h->access->intr_pending(h) == 0) ||
35914 (h->interrupts_enabled == 0);
35915 }
35916
35917 @@ -3882,7 +3882,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35918 if (prod_index < 0)
35919 return -ENODEV;
35920 h->product_name = products[prod_index].product_name;
35921 - h->access = *(products[prod_index].access);
35922 + h->access = products[prod_index].access;
35923
35924 if (hpsa_board_disabled(h->pdev)) {
35925 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35926 @@ -4163,7 +4163,7 @@ reinit_after_soft_reset:
35927 }
35928
35929 /* make sure the board interrupts are off */
35930 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35931 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35932
35933 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35934 goto clean2;
35935 @@ -4197,7 +4197,7 @@ reinit_after_soft_reset:
35936 * fake ones to scoop up any residual completions.
35937 */
35938 spin_lock_irqsave(&h->lock, flags);
35939 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35940 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35941 spin_unlock_irqrestore(&h->lock, flags);
35942 free_irq(h->intr[h->intr_mode], h);
35943 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35944 @@ -4216,9 +4216,9 @@ reinit_after_soft_reset:
35945 dev_info(&h->pdev->dev, "Board READY.\n");
35946 dev_info(&h->pdev->dev,
35947 "Waiting for stale completions to drain.\n");
35948 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35949 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35950 msleep(10000);
35951 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35952 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35953
35954 rc = controller_reset_failed(h->cfgtable);
35955 if (rc)
35956 @@ -4239,7 +4239,7 @@ reinit_after_soft_reset:
35957 }
35958
35959 /* Turn the interrupts on so we can service requests */
35960 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35961 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35962
35963 hpsa_hba_inquiry(h);
35964 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35965 @@ -4292,7 +4292,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35966 * To write all data in the battery backed cache to disks
35967 */
35968 hpsa_flush_cache(h);
35969 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35970 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35971 free_irq(h->intr[h->intr_mode], h);
35972 #ifdef CONFIG_PCI_MSI
35973 if (h->msix_vector)
35974 @@ -4455,7 +4455,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35975 return;
35976 }
35977 /* Change the access methods to the performant access methods */
35978 - h->access = SA5_performant_access;
35979 + h->access = &SA5_performant_access;
35980 h->transMethod = CFGTBL_Trans_Performant;
35981 }
35982
35983 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35984 index 7f53cea..a8c7188 100644
35985 --- a/drivers/scsi/hpsa.h
35986 +++ b/drivers/scsi/hpsa.h
35987 @@ -73,7 +73,7 @@ struct ctlr_info {
35988 unsigned int msix_vector;
35989 unsigned int msi_vector;
35990 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35991 - struct access_method access;
35992 + struct access_method *access;
35993
35994 /* queue and queue Info */
35995 struct list_head reqQ;
35996 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35997 index f2df059..a3a9930 100644
35998 --- a/drivers/scsi/ips.h
35999 +++ b/drivers/scsi/ips.h
36000 @@ -1027,7 +1027,7 @@ typedef struct {
36001 int (*intr)(struct ips_ha *);
36002 void (*enableint)(struct ips_ha *);
36003 uint32_t (*statupd)(struct ips_ha *);
36004 -} ips_hw_func_t;
36005 +} __no_const ips_hw_func_t;
36006
36007 typedef struct ips_ha {
36008 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36009 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36010 index d261e98..1e00f35 100644
36011 --- a/drivers/scsi/libfc/fc_exch.c
36012 +++ b/drivers/scsi/libfc/fc_exch.c
36013 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36014 * all together if not used XXX
36015 */
36016 struct {
36017 - atomic_t no_free_exch;
36018 - atomic_t no_free_exch_xid;
36019 - atomic_t xid_not_found;
36020 - atomic_t xid_busy;
36021 - atomic_t seq_not_found;
36022 - atomic_t non_bls_resp;
36023 + atomic_unchecked_t no_free_exch;
36024 + atomic_unchecked_t no_free_exch_xid;
36025 + atomic_unchecked_t xid_not_found;
36026 + atomic_unchecked_t xid_busy;
36027 + atomic_unchecked_t seq_not_found;
36028 + atomic_unchecked_t non_bls_resp;
36029 } stats;
36030 };
36031
36032 @@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36033 /* allocate memory for exchange */
36034 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36035 if (!ep) {
36036 - atomic_inc(&mp->stats.no_free_exch);
36037 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36038 goto out;
36039 }
36040 memset(ep, 0, sizeof(*ep));
36041 @@ -779,7 +779,7 @@ out:
36042 return ep;
36043 err:
36044 spin_unlock_bh(&pool->lock);
36045 - atomic_inc(&mp->stats.no_free_exch_xid);
36046 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36047 mempool_free(ep, mp->ep_pool);
36048 return NULL;
36049 }
36050 @@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36051 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36052 ep = fc_exch_find(mp, xid);
36053 if (!ep) {
36054 - atomic_inc(&mp->stats.xid_not_found);
36055 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36056 reject = FC_RJT_OX_ID;
36057 goto out;
36058 }
36059 @@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36060 ep = fc_exch_find(mp, xid);
36061 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36062 if (ep) {
36063 - atomic_inc(&mp->stats.xid_busy);
36064 + atomic_inc_unchecked(&mp->stats.xid_busy);
36065 reject = FC_RJT_RX_ID;
36066 goto rel;
36067 }
36068 @@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36069 }
36070 xid = ep->xid; /* get our XID */
36071 } else if (!ep) {
36072 - atomic_inc(&mp->stats.xid_not_found);
36073 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36074 reject = FC_RJT_RX_ID; /* XID not found */
36075 goto out;
36076 }
36077 @@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36078 } else {
36079 sp = &ep->seq;
36080 if (sp->id != fh->fh_seq_id) {
36081 - atomic_inc(&mp->stats.seq_not_found);
36082 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36083 if (f_ctl & FC_FC_END_SEQ) {
36084 /*
36085 * Update sequence_id based on incoming last
36086 @@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36087
36088 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36089 if (!ep) {
36090 - atomic_inc(&mp->stats.xid_not_found);
36091 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36092 goto out;
36093 }
36094 if (ep->esb_stat & ESB_ST_COMPLETE) {
36095 - atomic_inc(&mp->stats.xid_not_found);
36096 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36097 goto rel;
36098 }
36099 if (ep->rxid == FC_XID_UNKNOWN)
36100 ep->rxid = ntohs(fh->fh_rx_id);
36101 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36102 - atomic_inc(&mp->stats.xid_not_found);
36103 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36104 goto rel;
36105 }
36106 if (ep->did != ntoh24(fh->fh_s_id) &&
36107 ep->did != FC_FID_FLOGI) {
36108 - atomic_inc(&mp->stats.xid_not_found);
36109 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36110 goto rel;
36111 }
36112 sof = fr_sof(fp);
36113 @@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36114 sp->ssb_stat |= SSB_ST_RESP;
36115 sp->id = fh->fh_seq_id;
36116 } else if (sp->id != fh->fh_seq_id) {
36117 - atomic_inc(&mp->stats.seq_not_found);
36118 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36119 goto rel;
36120 }
36121
36122 @@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36123 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36124
36125 if (!sp)
36126 - atomic_inc(&mp->stats.xid_not_found);
36127 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36128 else
36129 - atomic_inc(&mp->stats.non_bls_resp);
36130 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36131
36132 fc_frame_free(fp);
36133 }
36134 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36135 index db9238f..4378ed2 100644
36136 --- a/drivers/scsi/libsas/sas_ata.c
36137 +++ b/drivers/scsi/libsas/sas_ata.c
36138 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
36139 .postreset = ata_std_postreset,
36140 .error_handler = ata_std_error_handler,
36141 .post_internal_cmd = sas_ata_post_internal,
36142 - .qc_defer = ata_std_qc_defer,
36143 + .qc_defer = ata_std_qc_defer,
36144 .qc_prep = ata_noop_qc_prep,
36145 .qc_issue = sas_ata_qc_issue,
36146 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36147 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36148 index c088a36..01c73b0 100644
36149 --- a/drivers/scsi/lpfc/lpfc.h
36150 +++ b/drivers/scsi/lpfc/lpfc.h
36151 @@ -425,7 +425,7 @@ struct lpfc_vport {
36152 struct dentry *debug_nodelist;
36153 struct dentry *vport_debugfs_root;
36154 struct lpfc_debugfs_trc *disc_trc;
36155 - atomic_t disc_trc_cnt;
36156 + atomic_unchecked_t disc_trc_cnt;
36157 #endif
36158 uint8_t stat_data_enabled;
36159 uint8_t stat_data_blocked;
36160 @@ -835,8 +835,8 @@ struct lpfc_hba {
36161 struct timer_list fabric_block_timer;
36162 unsigned long bit_flags;
36163 #define FABRIC_COMANDS_BLOCKED 0
36164 - atomic_t num_rsrc_err;
36165 - atomic_t num_cmd_success;
36166 + atomic_unchecked_t num_rsrc_err;
36167 + atomic_unchecked_t num_cmd_success;
36168 unsigned long last_rsrc_error_time;
36169 unsigned long last_ramp_down_time;
36170 unsigned long last_ramp_up_time;
36171 @@ -850,7 +850,7 @@ struct lpfc_hba {
36172 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36173 struct dentry *debug_slow_ring_trc;
36174 struct lpfc_debugfs_trc *slow_ring_trc;
36175 - atomic_t slow_ring_trc_cnt;
36176 + atomic_unchecked_t slow_ring_trc_cnt;
36177 /* iDiag debugfs sub-directory */
36178 struct dentry *idiag_root;
36179 struct dentry *idiag_pci_cfg;
36180 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36181 index a0424dd..2499b6b 100644
36182 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36183 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36184 @@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36185
36186 #include <linux/debugfs.h>
36187
36188 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36189 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36190 static unsigned long lpfc_debugfs_start_time = 0L;
36191
36192 /* iDiag */
36193 @@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36194 lpfc_debugfs_enable = 0;
36195
36196 len = 0;
36197 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36198 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36199 (lpfc_debugfs_max_disc_trc - 1);
36200 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36201 dtp = vport->disc_trc + i;
36202 @@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36203 lpfc_debugfs_enable = 0;
36204
36205 len = 0;
36206 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36207 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36208 (lpfc_debugfs_max_slow_ring_trc - 1);
36209 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36210 dtp = phba->slow_ring_trc + i;
36211 @@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36212 !vport || !vport->disc_trc)
36213 return;
36214
36215 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36216 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36217 (lpfc_debugfs_max_disc_trc - 1);
36218 dtp = vport->disc_trc + index;
36219 dtp->fmt = fmt;
36220 dtp->data1 = data1;
36221 dtp->data2 = data2;
36222 dtp->data3 = data3;
36223 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36224 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36225 dtp->jif = jiffies;
36226 #endif
36227 return;
36228 @@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36229 !phba || !phba->slow_ring_trc)
36230 return;
36231
36232 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36233 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36234 (lpfc_debugfs_max_slow_ring_trc - 1);
36235 dtp = phba->slow_ring_trc + index;
36236 dtp->fmt = fmt;
36237 dtp->data1 = data1;
36238 dtp->data2 = data2;
36239 dtp->data3 = data3;
36240 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36241 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36242 dtp->jif = jiffies;
36243 #endif
36244 return;
36245 @@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36246 "slow_ring buffer\n");
36247 goto debug_failed;
36248 }
36249 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36250 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36251 memset(phba->slow_ring_trc, 0,
36252 (sizeof(struct lpfc_debugfs_trc) *
36253 lpfc_debugfs_max_slow_ring_trc));
36254 @@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36255 "buffer\n");
36256 goto debug_failed;
36257 }
36258 - atomic_set(&vport->disc_trc_cnt, 0);
36259 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36260
36261 snprintf(name, sizeof(name), "discovery_trace");
36262 vport->debug_disc_trc =
36263 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36264 index a3c8200..31e562e 100644
36265 --- a/drivers/scsi/lpfc/lpfc_init.c
36266 +++ b/drivers/scsi/lpfc/lpfc_init.c
36267 @@ -9969,8 +9969,10 @@ lpfc_init(void)
36268 printk(LPFC_COPYRIGHT "\n");
36269
36270 if (lpfc_enable_npiv) {
36271 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36272 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36273 + pax_open_kernel();
36274 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36275 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36276 + pax_close_kernel();
36277 }
36278 lpfc_transport_template =
36279 fc_attach_transport(&lpfc_transport_functions);
36280 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36281 index eadd241..26c8e0f 100644
36282 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36283 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36284 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36285 uint32_t evt_posted;
36286
36287 spin_lock_irqsave(&phba->hbalock, flags);
36288 - atomic_inc(&phba->num_rsrc_err);
36289 + atomic_inc_unchecked(&phba->num_rsrc_err);
36290 phba->last_rsrc_error_time = jiffies;
36291
36292 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36293 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36294 unsigned long flags;
36295 struct lpfc_hba *phba = vport->phba;
36296 uint32_t evt_posted;
36297 - atomic_inc(&phba->num_cmd_success);
36298 + atomic_inc_unchecked(&phba->num_cmd_success);
36299
36300 if (vport->cfg_lun_queue_depth <= queue_depth)
36301 return;
36302 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36303 unsigned long num_rsrc_err, num_cmd_success;
36304 int i;
36305
36306 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36307 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36308 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36309 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36310
36311 vports = lpfc_create_vport_work_array(phba);
36312 if (vports != NULL)
36313 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36314 }
36315 }
36316 lpfc_destroy_vport_work_array(phba, vports);
36317 - atomic_set(&phba->num_rsrc_err, 0);
36318 - atomic_set(&phba->num_cmd_success, 0);
36319 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36320 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36321 }
36322
36323 /**
36324 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36325 }
36326 }
36327 lpfc_destroy_vport_work_array(phba, vports);
36328 - atomic_set(&phba->num_rsrc_err, 0);
36329 - atomic_set(&phba->num_cmd_success, 0);
36330 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36331 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36332 }
36333
36334 /**
36335 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
36336 index 2e6619e..fa64494 100644
36337 --- a/drivers/scsi/megaraid/megaraid_mbox.c
36338 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
36339 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
36340 int rval;
36341 int i;
36342
36343 + pax_track_stack();
36344 +
36345 // Allocate memory for the base list of scb for management module.
36346 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36347
36348 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
36349 index 86afb13f..c912398 100644
36350 --- a/drivers/scsi/osd/osd_initiator.c
36351 +++ b/drivers/scsi/osd/osd_initiator.c
36352 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(struct osd_dev *od,
36353 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36354 int ret;
36355
36356 + pax_track_stack();
36357 +
36358 or = osd_start_request(od, GFP_KERNEL);
36359 if (!or)
36360 return -ENOMEM;
36361 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36362 index d079f9a..d26072c 100644
36363 --- a/drivers/scsi/pmcraid.c
36364 +++ b/drivers/scsi/pmcraid.c
36365 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36366 res->scsi_dev = scsi_dev;
36367 scsi_dev->hostdata = res;
36368 res->change_detected = 0;
36369 - atomic_set(&res->read_failures, 0);
36370 - atomic_set(&res->write_failures, 0);
36371 + atomic_set_unchecked(&res->read_failures, 0);
36372 + atomic_set_unchecked(&res->write_failures, 0);
36373 rc = 0;
36374 }
36375 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36376 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36377
36378 /* If this was a SCSI read/write command keep count of errors */
36379 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36380 - atomic_inc(&res->read_failures);
36381 + atomic_inc_unchecked(&res->read_failures);
36382 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36383 - atomic_inc(&res->write_failures);
36384 + atomic_inc_unchecked(&res->write_failures);
36385
36386 if (!RES_IS_GSCSI(res->cfg_entry) &&
36387 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36388 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
36389 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36390 * hrrq_id assigned here in queuecommand
36391 */
36392 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36393 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36394 pinstance->num_hrrq;
36395 cmd->cmd_done = pmcraid_io_done;
36396
36397 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
36398 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36399 * hrrq_id assigned here in queuecommand
36400 */
36401 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36402 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36403 pinstance->num_hrrq;
36404
36405 if (request_size) {
36406 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36407
36408 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36409 /* add resources only after host is added into system */
36410 - if (!atomic_read(&pinstance->expose_resources))
36411 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36412 return;
36413
36414 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36415 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instance(
36416 init_waitqueue_head(&pinstance->reset_wait_q);
36417
36418 atomic_set(&pinstance->outstanding_cmds, 0);
36419 - atomic_set(&pinstance->last_message_id, 0);
36420 - atomic_set(&pinstance->expose_resources, 0);
36421 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36422 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36423
36424 INIT_LIST_HEAD(&pinstance->free_res_q);
36425 INIT_LIST_HEAD(&pinstance->used_res_q);
36426 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
36427 /* Schedule worker thread to handle CCN and take care of adding and
36428 * removing devices to OS
36429 */
36430 - atomic_set(&pinstance->expose_resources, 1);
36431 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36432 schedule_work(&pinstance->worker_q);
36433 return rc;
36434
36435 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36436 index f920baf..4417389 100644
36437 --- a/drivers/scsi/pmcraid.h
36438 +++ b/drivers/scsi/pmcraid.h
36439 @@ -749,7 +749,7 @@ struct pmcraid_instance {
36440 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36441
36442 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36443 - atomic_t last_message_id;
36444 + atomic_unchecked_t last_message_id;
36445
36446 /* configuration table */
36447 struct pmcraid_config_table *cfg_table;
36448 @@ -778,7 +778,7 @@ struct pmcraid_instance {
36449 atomic_t outstanding_cmds;
36450
36451 /* should add/delete resources to mid-layer now ?*/
36452 - atomic_t expose_resources;
36453 + atomic_unchecked_t expose_resources;
36454
36455
36456
36457 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
36458 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36459 };
36460 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36461 - atomic_t read_failures; /* count of failed READ commands */
36462 - atomic_t write_failures; /* count of failed WRITE commands */
36463 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36464 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36465
36466 /* To indicate add/delete/modify during CCN */
36467 u8 change_detected;
36468 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36469 index a03eaf4..a6b3fd9 100644
36470 --- a/drivers/scsi/qla2xxx/qla_def.h
36471 +++ b/drivers/scsi/qla2xxx/qla_def.h
36472 @@ -2244,7 +2244,7 @@ struct isp_operations {
36473 int (*get_flash_version) (struct scsi_qla_host *, void *);
36474 int (*start_scsi) (srb_t *);
36475 int (*abort_isp) (struct scsi_qla_host *);
36476 -};
36477 +} __no_const;
36478
36479 /* MSI-X Support *************************************************************/
36480
36481 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36482 index 473c5c8..4e2f24a 100644
36483 --- a/drivers/scsi/qla4xxx/ql4_def.h
36484 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36485 @@ -256,7 +256,7 @@ struct ddb_entry {
36486 atomic_t retry_relogin_timer; /* Min Time between relogins
36487 * (4000 only) */
36488 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36489 - atomic_t relogin_retry_count; /* Num of times relogin has been
36490 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36491 * retried */
36492
36493 uint16_t port;
36494 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
36495 index 42ed5db..0262f9e 100644
36496 --- a/drivers/scsi/qla4xxx/ql4_init.c
36497 +++ b/drivers/scsi/qla4xxx/ql4_init.c
36498 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
36499 ddb_entry->fw_ddb_index = fw_ddb_index;
36500 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36501 atomic_set(&ddb_entry->relogin_timer, 0);
36502 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36503 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36504 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36505 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36506 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36507 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
36508 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
36509 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
36510 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36511 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36512 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36513 atomic_set(&ddb_entry->relogin_timer, 0);
36514 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36515 iscsi_unblock_session(ddb_entry->sess);
36516 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36517 index f2364ec..44c42b1 100644
36518 --- a/drivers/scsi/qla4xxx/ql4_os.c
36519 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36520 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
36521 ddb_entry->fw_ddb_device_state ==
36522 DDB_DS_SESSION_FAILED) {
36523 /* Reset retry relogin timer */
36524 - atomic_inc(&ddb_entry->relogin_retry_count);
36525 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36526 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
36527 " timed out-retrying"
36528 " relogin (%d)\n",
36529 ha->host_no,
36530 ddb_entry->fw_ddb_index,
36531 - atomic_read(&ddb_entry->
36532 + atomic_read_unchecked(&ddb_entry->
36533 relogin_retry_count))
36534 );
36535 start_dpc++;
36536 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36537 index 2aeb2e9..46e3925 100644
36538 --- a/drivers/scsi/scsi.c
36539 +++ b/drivers/scsi/scsi.c
36540 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36541 unsigned long timeout;
36542 int rtn = 0;
36543
36544 - atomic_inc(&cmd->device->iorequest_cnt);
36545 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36546
36547 /* check if the device is still usable */
36548 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36549 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
36550 index 6888b2c..45befa1 100644
36551 --- a/drivers/scsi/scsi_debug.c
36552 +++ b/drivers/scsi/scsi_debug.c
36553 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
36554 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36555 unsigned char *cmd = (unsigned char *)scp->cmnd;
36556
36557 + pax_track_stack();
36558 +
36559 if ((errsts = check_readiness(scp, 1, devip)))
36560 return errsts;
36561 memset(arr, 0, sizeof(arr));
36562 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
36563 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36564 unsigned char *cmd = (unsigned char *)scp->cmnd;
36565
36566 + pax_track_stack();
36567 +
36568 if ((errsts = check_readiness(scp, 1, devip)))
36569 return errsts;
36570 memset(arr, 0, sizeof(arr));
36571 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36572 index 6d219e4..eb3ded3 100644
36573 --- a/drivers/scsi/scsi_lib.c
36574 +++ b/drivers/scsi/scsi_lib.c
36575 @@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36576 shost = sdev->host;
36577 scsi_init_cmd_errh(cmd);
36578 cmd->result = DID_NO_CONNECT << 16;
36579 - atomic_inc(&cmd->device->iorequest_cnt);
36580 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36581
36582 /*
36583 * SCSI request completion path will do scsi_device_unbusy(),
36584 @@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
36585
36586 INIT_LIST_HEAD(&cmd->eh_entry);
36587
36588 - atomic_inc(&cmd->device->iodone_cnt);
36589 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36590 if (cmd->result)
36591 - atomic_inc(&cmd->device->ioerr_cnt);
36592 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36593
36594 disposition = scsi_decide_disposition(cmd);
36595 if (disposition != SUCCESS &&
36596 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36597 index e0bd3f7..816b8a6 100644
36598 --- a/drivers/scsi/scsi_sysfs.c
36599 +++ b/drivers/scsi/scsi_sysfs.c
36600 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36601 char *buf) \
36602 { \
36603 struct scsi_device *sdev = to_scsi_device(dev); \
36604 - unsigned long long count = atomic_read(&sdev->field); \
36605 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36606 return snprintf(buf, 20, "0x%llx\n", count); \
36607 } \
36608 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36609 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36610 index 84a1fdf..693b0d6 100644
36611 --- a/drivers/scsi/scsi_tgt_lib.c
36612 +++ b/drivers/scsi/scsi_tgt_lib.c
36613 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36614 int err;
36615
36616 dprintk("%lx %u\n", uaddr, len);
36617 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36618 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36619 if (err) {
36620 /*
36621 * TODO: need to fixup sg_tablesize, max_segment_size,
36622 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36623 index 1b21491..1b7f60e 100644
36624 --- a/drivers/scsi/scsi_transport_fc.c
36625 +++ b/drivers/scsi/scsi_transport_fc.c
36626 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36627 * Netlink Infrastructure
36628 */
36629
36630 -static atomic_t fc_event_seq;
36631 +static atomic_unchecked_t fc_event_seq;
36632
36633 /**
36634 * fc_get_event_number - Obtain the next sequential FC event number
36635 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36636 u32
36637 fc_get_event_number(void)
36638 {
36639 - return atomic_add_return(1, &fc_event_seq);
36640 + return atomic_add_return_unchecked(1, &fc_event_seq);
36641 }
36642 EXPORT_SYMBOL(fc_get_event_number);
36643
36644 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36645 {
36646 int error;
36647
36648 - atomic_set(&fc_event_seq, 0);
36649 + atomic_set_unchecked(&fc_event_seq, 0);
36650
36651 error = transport_class_register(&fc_host_class);
36652 if (error)
36653 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36654 char *cp;
36655
36656 *val = simple_strtoul(buf, &cp, 0);
36657 - if ((*cp && (*cp != '\n')) || (*val < 0))
36658 + if (*cp && (*cp != '\n'))
36659 return -EINVAL;
36660 /*
36661 * Check for overflow; dev_loss_tmo is u32
36662 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36663 index 3fd16d7..ba0871f 100644
36664 --- a/drivers/scsi/scsi_transport_iscsi.c
36665 +++ b/drivers/scsi/scsi_transport_iscsi.c
36666 @@ -83,7 +83,7 @@ struct iscsi_internal {
36667 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36668 };
36669
36670 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36671 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36672 static struct workqueue_struct *iscsi_eh_timer_workq;
36673
36674 /*
36675 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36676 int err;
36677
36678 ihost = shost->shost_data;
36679 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36680 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36681
36682 if (id == ISCSI_MAX_TARGET) {
36683 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36684 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(void)
36685 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36686 ISCSI_TRANSPORT_VERSION);
36687
36688 - atomic_set(&iscsi_session_nr, 0);
36689 + atomic_set_unchecked(&iscsi_session_nr, 0);
36690
36691 err = class_register(&iscsi_transport_class);
36692 if (err)
36693 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36694 index 21a045e..ec89e03 100644
36695 --- a/drivers/scsi/scsi_transport_srp.c
36696 +++ b/drivers/scsi/scsi_transport_srp.c
36697 @@ -33,7 +33,7 @@
36698 #include "scsi_transport_srp_internal.h"
36699
36700 struct srp_host_attrs {
36701 - atomic_t next_port_id;
36702 + atomic_unchecked_t next_port_id;
36703 };
36704 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36705
36706 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36707 struct Scsi_Host *shost = dev_to_shost(dev);
36708 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36709
36710 - atomic_set(&srp_host->next_port_id, 0);
36711 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36712 return 0;
36713 }
36714
36715 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36716 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36717 rport->roles = ids->roles;
36718
36719 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36720 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36721 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36722
36723 transport_setup_device(&rport->dev);
36724 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36725 index 909ed9e..1ae290a 100644
36726 --- a/drivers/scsi/sg.c
36727 +++ b/drivers/scsi/sg.c
36728 @@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36729 sdp->disk->disk_name,
36730 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36731 NULL,
36732 - (char *)arg);
36733 + (char __user *)arg);
36734 case BLKTRACESTART:
36735 return blk_trace_startstop(sdp->device->request_queue, 1);
36736 case BLKTRACESTOP:
36737 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
36738 const struct file_operations * fops;
36739 };
36740
36741 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36742 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36743 {"allow_dio", &adio_fops},
36744 {"debug", &debug_fops},
36745 {"def_reserved_size", &dressz_fops},
36746 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
36747 {
36748 int k, mask;
36749 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36750 - struct sg_proc_leaf * leaf;
36751 + const struct sg_proc_leaf * leaf;
36752
36753 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36754 if (!sg_proc_sgp)
36755 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
36756 index b4543f5..e1b34b8 100644
36757 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
36758 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
36759 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
36760 int do_iounmap = 0;
36761 int do_disable_device = 1;
36762
36763 + pax_track_stack();
36764 +
36765 memset(&sym_dev, 0, sizeof(sym_dev));
36766 memset(&nvram, 0, sizeof(nvram));
36767 sym_dev.pdev = pdev;
36768 diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
36769 index a18996d..fe993cb 100644
36770 --- a/drivers/scsi/vmw_pvscsi.c
36771 +++ b/drivers/scsi/vmw_pvscsi.c
36772 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
36773 dma_addr_t base;
36774 unsigned i;
36775
36776 + pax_track_stack();
36777 +
36778 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
36779 cmd.reqRingNumPages = adapter->req_pages;
36780 cmd.cmpRingNumPages = adapter->cmp_pages;
36781 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36782 index c5f37f0..898d202 100644
36783 --- a/drivers/spi/spi-dw-pci.c
36784 +++ b/drivers/spi/spi-dw-pci.c
36785 @@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pdev)
36786 #define spi_resume NULL
36787 #endif
36788
36789 -static const struct pci_device_id pci_ids[] __devinitdata = {
36790 +static const struct pci_device_id pci_ids[] __devinitconst = {
36791 /* Intel MID platform SPI controller 0 */
36792 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36793 {},
36794 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36795 index 4d1b9f5..8408fe3 100644
36796 --- a/drivers/spi/spi.c
36797 +++ b/drivers/spi/spi.c
36798 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *master)
36799 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36800
36801 /* portable code must never pass more than 32 bytes */
36802 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36803 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36804
36805 static u8 *buf;
36806
36807 diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36808 index 32ee39a..3004c3d 100644
36809 --- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36810 +++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36811 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];
36812 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
36813
36814
36815 -static struct net_device_ops ar6000_netdev_ops = {
36816 +static net_device_ops_no_const ar6000_netdev_ops = {
36817 .ndo_init = NULL,
36818 .ndo_open = ar6000_open,
36819 .ndo_stop = ar6000_close,
36820 diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36821 index 39e0873..0925710 100644
36822 --- a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36823 +++ b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36824 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb);
36825 typedef struct ar6k_pal_config_s
36826 {
36827 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
36828 -}ar6k_pal_config_t;
36829 +} __no_const ar6k_pal_config_t;
36830
36831 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
36832 #endif /* _AR6K_PAL_H_ */
36833 diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36834 index 05dada9..96171c6 100644
36835 --- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36836 +++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36837 @@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if *ifp)
36838 free_netdev(ifp->net);
36839 }
36840 /* Allocate etherdev, including space for private structure */
36841 - ifp->net = alloc_etherdev(sizeof(drvr_priv));
36842 + ifp->net = alloc_etherdev(sizeof(*drvr_priv));
36843 if (!ifp->net) {
36844 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36845 ret = -ENOMEM;
36846 }
36847 if (ret == 0) {
36848 strcpy(ifp->net->name, ifp->name);
36849 - memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
36850 + memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
36851 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
36852 if (err != 0) {
36853 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
36854 @@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36855 BRCMF_TRACE(("%s: Enter\n", __func__));
36856
36857 /* Allocate etherdev, including space for private structure */
36858 - net = alloc_etherdev(sizeof(drvr_priv));
36859 + net = alloc_etherdev(sizeof(*drvr_priv));
36860 if (!net) {
36861 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36862 goto fail;
36863 @@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36864 /*
36865 * Save the brcmf_info into the priv
36866 */
36867 - memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36868 + memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36869
36870 /* Set network interface name if it was provided as module parameter */
36871 if (iface_name[0]) {
36872 @@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36873 /*
36874 * Save the brcmf_info into the priv
36875 */
36876 - memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36877 + memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36878
36879 #if defined(CONFIG_PM_SLEEP)
36880 atomic_set(&brcmf_mmc_suspend, false);
36881 diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36882 index d345472..cedb19e 100644
36883 --- a/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36884 +++ b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36885 @@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
36886 u16 func, uint bustype, u32 regsva, void *param);
36887 /* detach from device */
36888 void (*detach) (void *ch);
36889 -};
36890 +} __no_const;
36891
36892 struct sdioh_info;
36893
36894 diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36895 index a01b01c..b3f721c 100644
36896 --- a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36897 +++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36898 @@ -591,7 +591,7 @@ struct phy_func_ptr {
36899 initfn_t carrsuppr;
36900 rxsigpwrfn_t rxsigpwr;
36901 detachfn_t detach;
36902 -};
36903 +} __no_const;
36904
36905 struct brcms_phy {
36906 struct brcms_phy_pub pubpi_ro;
36907 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
36908 index 8fb3051..a8b6c67 100644
36909 --- a/drivers/staging/et131x/et1310_tx.c
36910 +++ b/drivers/staging/et131x/et1310_tx.c
36911 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
36912 struct net_device_stats *stats = &etdev->net_stats;
36913
36914 if (tcb->flags & fMP_DEST_BROAD)
36915 - atomic_inc(&etdev->stats.brdcstxmt);
36916 + atomic_inc_unchecked(&etdev->stats.brdcstxmt);
36917 else if (tcb->flags & fMP_DEST_MULTI)
36918 - atomic_inc(&etdev->stats.multixmt);
36919 + atomic_inc_unchecked(&etdev->stats.multixmt);
36920 else
36921 - atomic_inc(&etdev->stats.unixmt);
36922 + atomic_inc_unchecked(&etdev->stats.unixmt);
36923
36924 if (tcb->skb) {
36925 stats->tx_bytes += tcb->skb->len;
36926 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
36927 index 408c50b..fd65e9f 100644
36928 --- a/drivers/staging/et131x/et131x_adapter.h
36929 +++ b/drivers/staging/et131x/et131x_adapter.h
36930 @@ -106,11 +106,11 @@ struct ce_stats {
36931 * operations
36932 */
36933 u32 unircv; /* # multicast packets received */
36934 - atomic_t unixmt; /* # multicast packets for Tx */
36935 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
36936 u32 multircv; /* # multicast packets received */
36937 - atomic_t multixmt; /* # multicast packets for Tx */
36938 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
36939 u32 brdcstrcv; /* # broadcast packets received */
36940 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
36941 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
36942 u32 norcvbuf; /* # Rx packets discarded */
36943 u32 noxmtbuf; /* # Tx packets discarded */
36944
36945 diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
36946 index 455f47a..86205ff 100644
36947 --- a/drivers/staging/hv/channel.c
36948 +++ b/drivers/staging/hv/channel.c
36949 @@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36950 int ret = 0;
36951 int t;
36952
36953 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36954 - atomic_inc(&vmbus_connection.next_gpadl_handle);
36955 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36956 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36957
36958 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36959 if (ret)
36960 diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
36961 index 824f816..a800af7 100644
36962 --- a/drivers/staging/hv/hv.c
36963 +++ b/drivers/staging/hv/hv.c
36964 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36965 u64 output_address = (output) ? virt_to_phys(output) : 0;
36966 u32 output_address_hi = output_address >> 32;
36967 u32 output_address_lo = output_address & 0xFFFFFFFF;
36968 - volatile void *hypercall_page = hv_context.hypercall_page;
36969 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36970
36971 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36972 "=a"(hv_status_lo) : "d" (control_hi),
36973 diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
36974 index d957fc2..43cedd9 100644
36975 --- a/drivers/staging/hv/hv_mouse.c
36976 +++ b/drivers/staging/hv/hv_mouse.c
36977 @@ -878,8 +878,10 @@ static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
36978 if (hid_dev) {
36979 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
36980
36981 - hid_dev->ll_driver->open = mousevsc_hid_open;
36982 - hid_dev->ll_driver->close = mousevsc_hid_close;
36983 + pax_open_kernel();
36984 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
36985 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
36986 + pax_close_kernel();
36987
36988 hid_dev->bus = BUS_VIRTUAL;
36989 hid_dev->vendor = input_device_ctx->device_info.vendor;
36990 diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/staging/hv/hyperv_vmbus.h
36991 index 349ad80..3f75719 100644
36992 --- a/drivers/staging/hv/hyperv_vmbus.h
36993 +++ b/drivers/staging/hv/hyperv_vmbus.h
36994 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
36995 struct vmbus_connection {
36996 enum vmbus_connect_state conn_state;
36997
36998 - atomic_t next_gpadl_handle;
36999 + atomic_unchecked_t next_gpadl_handle;
37000
37001 /*
37002 * Represents channel interrupts. Each bit position represents a
37003 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
37004 index dbb5201..d6047c6 100644
37005 --- a/drivers/staging/hv/rndis_filter.c
37006 +++ b/drivers/staging/hv/rndis_filter.c
37007 @@ -43,7 +43,7 @@ struct rndis_device {
37008
37009 enum rndis_device_state state;
37010 u32 link_stat;
37011 - atomic_t new_req_id;
37012 + atomic_unchecked_t new_req_id;
37013
37014 spinlock_t request_lock;
37015 struct list_head req_list;
37016 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
37017 * template
37018 */
37019 set = &rndis_msg->msg.set_req;
37020 - set->req_id = atomic_inc_return(&dev->new_req_id);
37021 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
37022
37023 /* Add to the request list */
37024 spin_lock_irqsave(&dev->request_lock, flags);
37025 @@ -622,7 +622,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
37026
37027 /* Setup the rndis set */
37028 halt = &request->request_msg.msg.halt_req;
37029 - halt->req_id = atomic_inc_return(&dev->new_req_id);
37030 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
37031
37032 /* Ignore return since this msg is optional. */
37033 rndis_filter_send_request(dev, request);
37034 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
37035 index 1c949f5..7a8b104 100644
37036 --- a/drivers/staging/hv/vmbus_drv.c
37037 +++ b/drivers/staging/hv/vmbus_drv.c
37038 @@ -660,11 +660,11 @@ int vmbus_child_device_register(struct hv_device *child_device_obj)
37039 {
37040 int ret = 0;
37041
37042 - static atomic_t device_num = ATOMIC_INIT(0);
37043 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
37044
37045 /* Set the device name. Otherwise, device_register() will fail. */
37046 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
37047 - atomic_inc_return(&device_num));
37048 + atomic_inc_return_unchecked(&device_num));
37049
37050 /* The new device belongs to this bus */
37051 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
37052 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
37053 index 3f26f71..fb5c787 100644
37054 --- a/drivers/staging/iio/ring_generic.h
37055 +++ b/drivers/staging/iio/ring_generic.h
37056 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
37057
37058 int (*is_enabled)(struct iio_ring_buffer *ring);
37059 int (*enable)(struct iio_ring_buffer *ring);
37060 -};
37061 +} __no_const;
37062
37063 struct iio_ring_setup_ops {
37064 int (*preenable)(struct iio_dev *);
37065 diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
37066 index cfec92d..a65dacf 100644
37067 --- a/drivers/staging/mei/interface.c
37068 +++ b/drivers/staging/mei/interface.c
37069 @@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
37070 mei_hdr->reserved = 0;
37071
37072 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
37073 - memset(mei_flow_control, 0, sizeof(mei_flow_control));
37074 + memset(mei_flow_control, 0, sizeof(*mei_flow_control));
37075 mei_flow_control->host_addr = cl->host_client_id;
37076 mei_flow_control->me_addr = cl->me_client_id;
37077 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
37078 @@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
37079
37080 mei_cli_disconnect =
37081 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
37082 - memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
37083 + memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
37084 mei_cli_disconnect->host_addr = cl->host_client_id;
37085 mei_cli_disconnect->me_addr = cl->me_client_id;
37086 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
37087 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37088 index 8b307b4..a97ac91 100644
37089 --- a/drivers/staging/octeon/ethernet-rx.c
37090 +++ b/drivers/staging/octeon/ethernet-rx.c
37091 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37092 /* Increment RX stats for virtual ports */
37093 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37094 #ifdef CONFIG_64BIT
37095 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37096 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37097 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37098 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37099 #else
37100 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37101 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37102 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37103 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37104 #endif
37105 }
37106 netif_receive_skb(skb);
37107 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37108 dev->name);
37109 */
37110 #ifdef CONFIG_64BIT
37111 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37112 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37113 #else
37114 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37115 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37116 #endif
37117 dev_kfree_skb_irq(skb);
37118 }
37119 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37120 index a8f780e..aef1098 100644
37121 --- a/drivers/staging/octeon/ethernet.c
37122 +++ b/drivers/staging/octeon/ethernet.c
37123 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37124 * since the RX tasklet also increments it.
37125 */
37126 #ifdef CONFIG_64BIT
37127 - atomic64_add(rx_status.dropped_packets,
37128 - (atomic64_t *)&priv->stats.rx_dropped);
37129 + atomic64_add_unchecked(rx_status.dropped_packets,
37130 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37131 #else
37132 - atomic_add(rx_status.dropped_packets,
37133 - (atomic_t *)&priv->stats.rx_dropped);
37134 + atomic_add_unchecked(rx_status.dropped_packets,
37135 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37136 #endif
37137 }
37138
37139 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
37140 index f3c6060..56bf826 100644
37141 --- a/drivers/staging/pohmelfs/inode.c
37142 +++ b/drivers/staging/pohmelfs/inode.c
37143 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37144 mutex_init(&psb->mcache_lock);
37145 psb->mcache_root = RB_ROOT;
37146 psb->mcache_timeout = msecs_to_jiffies(5000);
37147 - atomic_long_set(&psb->mcache_gen, 0);
37148 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
37149
37150 psb->trans_max_pages = 100;
37151
37152 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37153 INIT_LIST_HEAD(&psb->crypto_ready_list);
37154 INIT_LIST_HEAD(&psb->crypto_active_list);
37155
37156 - atomic_set(&psb->trans_gen, 1);
37157 + atomic_set_unchecked(&psb->trans_gen, 1);
37158 atomic_long_set(&psb->total_inodes, 0);
37159
37160 mutex_init(&psb->state_lock);
37161 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
37162 index e22665c..a2a9390 100644
37163 --- a/drivers/staging/pohmelfs/mcache.c
37164 +++ b/drivers/staging/pohmelfs/mcache.c
37165 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
37166 m->data = data;
37167 m->start = start;
37168 m->size = size;
37169 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
37170 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37171
37172 mutex_lock(&psb->mcache_lock);
37173 err = pohmelfs_mcache_insert(psb, m);
37174 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
37175 index 985b6b7..7699e05 100644
37176 --- a/drivers/staging/pohmelfs/netfs.h
37177 +++ b/drivers/staging/pohmelfs/netfs.h
37178 @@ -571,14 +571,14 @@ struct pohmelfs_config;
37179 struct pohmelfs_sb {
37180 struct rb_root mcache_root;
37181 struct mutex mcache_lock;
37182 - atomic_long_t mcache_gen;
37183 + atomic_long_unchecked_t mcache_gen;
37184 unsigned long mcache_timeout;
37185
37186 unsigned int idx;
37187
37188 unsigned int trans_retries;
37189
37190 - atomic_t trans_gen;
37191 + atomic_unchecked_t trans_gen;
37192
37193 unsigned int crypto_attached_size;
37194 unsigned int crypto_align_size;
37195 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
37196 index 36a2535..0591bf4 100644
37197 --- a/drivers/staging/pohmelfs/trans.c
37198 +++ b/drivers/staging/pohmelfs/trans.c
37199 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
37200 int err;
37201 struct netfs_cmd *cmd = t->iovec.iov_base;
37202
37203 - t->gen = atomic_inc_return(&psb->trans_gen);
37204 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37205
37206 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37207 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37208 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37209 index b70cb2b..4db41a7 100644
37210 --- a/drivers/staging/rtl8712/rtl871x_io.h
37211 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37212 @@ -83,7 +83,7 @@ struct _io_ops {
37213 u8 *pmem);
37214 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37215 u8 *pmem);
37216 -};
37217 +} __no_const;
37218
37219 struct io_req {
37220 struct list_head list;
37221 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37222 index c7b5e8b..783d6cb 100644
37223 --- a/drivers/staging/sbe-2t3e3/netdev.c
37224 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37225 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37226 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37227
37228 if (rlen)
37229 - if (copy_to_user(data, &resp, rlen))
37230 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37231 return -EFAULT;
37232
37233 return 0;
37234 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37235 index be21617..0954e45 100644
37236 --- a/drivers/staging/usbip/usbip_common.h
37237 +++ b/drivers/staging/usbip/usbip_common.h
37238 @@ -289,7 +289,7 @@ struct usbip_device {
37239 void (*shutdown)(struct usbip_device *);
37240 void (*reset)(struct usbip_device *);
37241 void (*unusable)(struct usbip_device *);
37242 - } eh_ops;
37243 + } __no_const eh_ops;
37244 };
37245
37246 #if 0
37247 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37248 index 71a586e..4d8a91a 100644
37249 --- a/drivers/staging/usbip/vhci.h
37250 +++ b/drivers/staging/usbip/vhci.h
37251 @@ -85,7 +85,7 @@ struct vhci_hcd {
37252 unsigned resuming:1;
37253 unsigned long re_timeout;
37254
37255 - atomic_t seqnum;
37256 + atomic_unchecked_t seqnum;
37257
37258 /*
37259 * NOTE:
37260 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37261 index 2ee97e2..0420b86 100644
37262 --- a/drivers/staging/usbip/vhci_hcd.c
37263 +++ b/drivers/staging/usbip/vhci_hcd.c
37264 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37265 return;
37266 }
37267
37268 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37269 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37270 if (priv->seqnum == 0xffff)
37271 dev_info(&urb->dev->dev, "seqnum max\n");
37272
37273 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37274 return -ENOMEM;
37275 }
37276
37277 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37278 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37279 if (unlink->seqnum == 0xffff)
37280 pr_info("seqnum max\n");
37281
37282 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37283 vdev->rhport = rhport;
37284 }
37285
37286 - atomic_set(&vhci->seqnum, 0);
37287 + atomic_set_unchecked(&vhci->seqnum, 0);
37288 spin_lock_init(&vhci->lock);
37289
37290 hcd->power_budget = 0; /* no limit */
37291 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37292 index 3872b8c..fe6d2f4 100644
37293 --- a/drivers/staging/usbip/vhci_rx.c
37294 +++ b/drivers/staging/usbip/vhci_rx.c
37295 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37296 if (!urb) {
37297 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37298 pr_info("max seqnum %d\n",
37299 - atomic_read(&the_controller->seqnum));
37300 + atomic_read_unchecked(&the_controller->seqnum));
37301 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37302 return;
37303 }
37304 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37305 index 7735027..30eed13 100644
37306 --- a/drivers/staging/vt6655/hostap.c
37307 +++ b/drivers/staging/vt6655/hostap.c
37308 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37309 *
37310 */
37311
37312 +static net_device_ops_no_const apdev_netdev_ops;
37313 +
37314 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37315 {
37316 PSDevice apdev_priv;
37317 struct net_device *dev = pDevice->dev;
37318 int ret;
37319 - const struct net_device_ops apdev_netdev_ops = {
37320 - .ndo_start_xmit = pDevice->tx_80211,
37321 - };
37322
37323 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37324
37325 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37326 *apdev_priv = *pDevice;
37327 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37328
37329 + /* only half broken now */
37330 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37331 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37332
37333 pDevice->apdev->type = ARPHRD_IEEE80211;
37334 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37335 index 51b5adf..098e320 100644
37336 --- a/drivers/staging/vt6656/hostap.c
37337 +++ b/drivers/staging/vt6656/hostap.c
37338 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37339 *
37340 */
37341
37342 +static net_device_ops_no_const apdev_netdev_ops;
37343 +
37344 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37345 {
37346 PSDevice apdev_priv;
37347 struct net_device *dev = pDevice->dev;
37348 int ret;
37349 - const struct net_device_ops apdev_netdev_ops = {
37350 - .ndo_start_xmit = pDevice->tx_80211,
37351 - };
37352
37353 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37354
37355 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37356 *apdev_priv = *pDevice;
37357 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37358
37359 + /* only half broken now */
37360 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37361 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37362
37363 pDevice->apdev->type = ARPHRD_IEEE80211;
37364 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37365 index 7843dfd..3db105f 100644
37366 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37367 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37368 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37369
37370 struct usbctlx_completor {
37371 int (*complete) (struct usbctlx_completor *);
37372 -};
37373 +} __no_const;
37374
37375 static int
37376 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37377 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37378 index 1ca66ea..76f1343 100644
37379 --- a/drivers/staging/zcache/tmem.c
37380 +++ b/drivers/staging/zcache/tmem.c
37381 @@ -39,7 +39,7 @@
37382 * A tmem host implementation must use this function to register callbacks
37383 * for memory allocation.
37384 */
37385 -static struct tmem_hostops tmem_hostops;
37386 +static tmem_hostops_no_const tmem_hostops;
37387
37388 static void tmem_objnode_tree_init(void);
37389
37390 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37391 * A tmem host implementation must use this function to register
37392 * callbacks for a page-accessible memory (PAM) implementation
37393 */
37394 -static struct tmem_pamops tmem_pamops;
37395 +static tmem_pamops_no_const tmem_pamops;
37396
37397 void tmem_register_pamops(struct tmem_pamops *m)
37398 {
37399 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37400 index ed147c4..94fc3c6 100644
37401 --- a/drivers/staging/zcache/tmem.h
37402 +++ b/drivers/staging/zcache/tmem.h
37403 @@ -180,6 +180,7 @@ struct tmem_pamops {
37404 void (*new_obj)(struct tmem_obj *);
37405 int (*replace_in_obj)(void *, struct tmem_obj *);
37406 };
37407 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37408 extern void tmem_register_pamops(struct tmem_pamops *m);
37409
37410 /* memory allocation methods provided by the host implementation */
37411 @@ -189,6 +190,7 @@ struct tmem_hostops {
37412 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37413 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37414 };
37415 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37416 extern void tmem_register_hostops(struct tmem_hostops *m);
37417
37418 /* core tmem accessor functions */
37419 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37420 index c4ac6f6..4f90f53 100644
37421 --- a/drivers/target/iscsi/iscsi_target.c
37422 +++ b/drivers/target/iscsi/iscsi_target.c
37423 @@ -1370,7 +1370,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37424 * outstanding_r2ts reaches zero, go ahead and send the delayed
37425 * TASK_ABORTED status.
37426 */
37427 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37428 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37429 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37430 if (--cmd->outstanding_r2ts < 1) {
37431 iscsit_stop_dataout_timer(cmd);
37432 diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
37433 index 8badcb4..94c9ac6 100644
37434 --- a/drivers/target/target_core_alua.c
37435 +++ b/drivers/target/target_core_alua.c
37436 @@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_metadata(
37437 char path[ALUA_METADATA_PATH_LEN];
37438 int len;
37439
37440 + pax_track_stack();
37441 +
37442 memset(path, 0, ALUA_METADATA_PATH_LEN);
37443
37444 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
37445 @@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondary_metadata(
37446 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
37447 int len;
37448
37449 + pax_track_stack();
37450 +
37451 memset(path, 0, ALUA_METADATA_PATH_LEN);
37452 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
37453
37454 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37455 index 5f91397..dcc2d25 100644
37456 --- a/drivers/target/target_core_cdb.c
37457 +++ b/drivers/target/target_core_cdb.c
37458 @@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
37459 int length = 0;
37460 unsigned char buf[SE_MODE_PAGE_BUF];
37461
37462 + pax_track_stack();
37463 +
37464 memset(buf, 0, SE_MODE_PAGE_BUF);
37465
37466 switch (cdb[2] & 0x3f) {
37467 diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
37468 index b2575d8..b6b28fd 100644
37469 --- a/drivers/target/target_core_configfs.c
37470 +++ b/drivers/target/target_core_configfs.c
37471 @@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
37472 ssize_t len = 0;
37473 int reg_count = 0, prf_isid;
37474
37475 + pax_track_stack();
37476 +
37477 if (!su_dev->se_dev_ptr)
37478 return -ENODEV;
37479
37480 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37481 index 7fd3a16..bc2fb3e 100644
37482 --- a/drivers/target/target_core_pr.c
37483 +++ b/drivers/target/target_core_pr.c
37484 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_registration(
37485 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
37486 u16 tpgt;
37487
37488 + pax_track_stack();
37489 +
37490 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
37491 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
37492 /*
37493 @@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf(
37494 ssize_t len = 0;
37495 int reg_count = 0;
37496
37497 + pax_track_stack();
37498 +
37499 memset(buf, 0, pr_aptpl_buf_len);
37500 /*
37501 * Called to clear metadata once APTPL has been deactivated.
37502 @@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_file(
37503 char path[512];
37504 int ret;
37505
37506 + pax_track_stack();
37507 +
37508 memset(iov, 0, sizeof(struct iovec));
37509 memset(path, 0, 512);
37510
37511 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37512 index 5c1b8c5..0cb7d0e 100644
37513 --- a/drivers/target/target_core_tmr.c
37514 +++ b/drivers/target/target_core_tmr.c
37515 @@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
37516 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37517 cmd->t_task_list_num,
37518 atomic_read(&cmd->t_task_cdbs_left),
37519 - atomic_read(&cmd->t_task_cdbs_sent),
37520 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37521 atomic_read(&cmd->t_transport_active),
37522 atomic_read(&cmd->t_transport_stop),
37523 atomic_read(&cmd->t_transport_sent));
37524 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37525 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37526 " task: %p, t_fe_count: %d dev: %p\n", task,
37527 fe_count, dev);
37528 - atomic_set(&cmd->t_transport_aborted, 1);
37529 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37530 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37531
37532 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37533 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
37534 }
37535 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
37536 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
37537 - atomic_set(&cmd->t_transport_aborted, 1);
37538 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37539 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37540
37541 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37542 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37543 index e2added..ccb5251 100644
37544 --- a/drivers/target/target_core_transport.c
37545 +++ b/drivers/target/target_core_transport.c
37546 @@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_to_core_hba(
37547
37548 dev->queue_depth = dev_limits->queue_depth;
37549 atomic_set(&dev->depth_left, dev->queue_depth);
37550 - atomic_set(&dev->dev_ordered_id, 0);
37551 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37552
37553 se_dev_set_default_attribs(dev, dev_limits);
37554
37555 @@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37556 * Used to determine when ORDERED commands should go from
37557 * Dormant to Active status.
37558 */
37559 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37560 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37561 smp_mb__after_atomic_inc();
37562 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37563 cmd->se_ordered_id, cmd->sam_task_attr,
37564 @@ -1960,7 +1960,7 @@ static void transport_generic_request_failure(
37565 " t_transport_active: %d t_transport_stop: %d"
37566 " t_transport_sent: %d\n", cmd->t_task_list_num,
37567 atomic_read(&cmd->t_task_cdbs_left),
37568 - atomic_read(&cmd->t_task_cdbs_sent),
37569 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37570 atomic_read(&cmd->t_task_cdbs_ex_left),
37571 atomic_read(&cmd->t_transport_active),
37572 atomic_read(&cmd->t_transport_stop),
37573 @@ -2460,9 +2460,9 @@ check_depth:
37574 spin_lock_irqsave(&cmd->t_state_lock, flags);
37575 atomic_set(&task->task_active, 1);
37576 atomic_set(&task->task_sent, 1);
37577 - atomic_inc(&cmd->t_task_cdbs_sent);
37578 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37579
37580 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37581 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37582 cmd->t_task_list_num)
37583 atomic_set(&cmd->transport_sent, 1);
37584
37585 @@ -4682,7 +4682,7 @@ static void transport_generic_wait_for_tasks(
37586 atomic_set(&cmd->transport_lun_stop, 0);
37587 }
37588 if (!atomic_read(&cmd->t_transport_active) ||
37589 - atomic_read(&cmd->t_transport_aborted))
37590 + atomic_read_unchecked(&cmd->t_transport_aborted))
37591 goto remove;
37592
37593 atomic_set(&cmd->t_transport_stop, 1);
37594 @@ -4917,7 +4917,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37595 {
37596 int ret = 0;
37597
37598 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
37599 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37600 if (!send_status ||
37601 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37602 return 1;
37603 @@ -4954,7 +4954,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37604 */
37605 if (cmd->data_direction == DMA_TO_DEVICE) {
37606 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37607 - atomic_inc(&cmd->t_transport_aborted);
37608 + atomic_inc_unchecked(&cmd->t_transport_aborted);
37609 smp_mb__after_atomic_inc();
37610 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
37611 transport_new_cmd_failure(cmd);
37612 @@ -5068,7 +5068,7 @@ static void transport_processing_shutdown(struct se_device *dev)
37613 cmd->se_tfo->get_task_tag(cmd),
37614 cmd->t_task_list_num,
37615 atomic_read(&cmd->t_task_cdbs_left),
37616 - atomic_read(&cmd->t_task_cdbs_sent),
37617 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37618 atomic_read(&cmd->t_transport_active),
37619 atomic_read(&cmd->t_transport_stop),
37620 atomic_read(&cmd->t_transport_sent));
37621 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
37622 index d5f923b..9c78228 100644
37623 --- a/drivers/telephony/ixj.c
37624 +++ b/drivers/telephony/ixj.c
37625 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37626 bool mContinue;
37627 char *pIn, *pOut;
37628
37629 + pax_track_stack();
37630 +
37631 if (!SCI_Prepare(j))
37632 return 0;
37633
37634 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37635 index 4c8b665..1d931eb 100644
37636 --- a/drivers/tty/hvc/hvcs.c
37637 +++ b/drivers/tty/hvc/hvcs.c
37638 @@ -83,6 +83,7 @@
37639 #include <asm/hvcserver.h>
37640 #include <asm/uaccess.h>
37641 #include <asm/vio.h>
37642 +#include <asm/local.h>
37643
37644 /*
37645 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37646 @@ -270,7 +271,7 @@ struct hvcs_struct {
37647 unsigned int index;
37648
37649 struct tty_struct *tty;
37650 - int open_count;
37651 + local_t open_count;
37652
37653 /*
37654 * Used to tell the driver kernel_thread what operations need to take
37655 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37656
37657 spin_lock_irqsave(&hvcsd->lock, flags);
37658
37659 - if (hvcsd->open_count > 0) {
37660 + if (local_read(&hvcsd->open_count) > 0) {
37661 spin_unlock_irqrestore(&hvcsd->lock, flags);
37662 printk(KERN_INFO "HVCS: vterm state unchanged. "
37663 "The hvcs device node is still in use.\n");
37664 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37665 if ((retval = hvcs_partner_connect(hvcsd)))
37666 goto error_release;
37667
37668 - hvcsd->open_count = 1;
37669 + local_set(&hvcsd->open_count, 1);
37670 hvcsd->tty = tty;
37671 tty->driver_data = hvcsd;
37672
37673 @@ -1179,7 +1180,7 @@ fast_open:
37674
37675 spin_lock_irqsave(&hvcsd->lock, flags);
37676 kref_get(&hvcsd->kref);
37677 - hvcsd->open_count++;
37678 + local_inc(&hvcsd->open_count);
37679 hvcsd->todo_mask |= HVCS_SCHED_READ;
37680 spin_unlock_irqrestore(&hvcsd->lock, flags);
37681
37682 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37683 hvcsd = tty->driver_data;
37684
37685 spin_lock_irqsave(&hvcsd->lock, flags);
37686 - if (--hvcsd->open_count == 0) {
37687 + if (local_dec_and_test(&hvcsd->open_count)) {
37688
37689 vio_disable_interrupts(hvcsd->vdev);
37690
37691 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37692 free_irq(irq, hvcsd);
37693 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37694 return;
37695 - } else if (hvcsd->open_count < 0) {
37696 + } else if (local_read(&hvcsd->open_count) < 0) {
37697 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37698 " is missmanaged.\n",
37699 - hvcsd->vdev->unit_address, hvcsd->open_count);
37700 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37701 }
37702
37703 spin_unlock_irqrestore(&hvcsd->lock, flags);
37704 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37705
37706 spin_lock_irqsave(&hvcsd->lock, flags);
37707 /* Preserve this so that we know how many kref refs to put */
37708 - temp_open_count = hvcsd->open_count;
37709 + temp_open_count = local_read(&hvcsd->open_count);
37710
37711 /*
37712 * Don't kref put inside the spinlock because the destruction
37713 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37714 hvcsd->tty->driver_data = NULL;
37715 hvcsd->tty = NULL;
37716
37717 - hvcsd->open_count = 0;
37718 + local_set(&hvcsd->open_count, 0);
37719
37720 /* This will drop any buffered data on the floor which is OK in a hangup
37721 * scenario. */
37722 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37723 * the middle of a write operation? This is a crummy place to do this
37724 * but we want to keep it all in the spinlock.
37725 */
37726 - if (hvcsd->open_count <= 0) {
37727 + if (local_read(&hvcsd->open_count) <= 0) {
37728 spin_unlock_irqrestore(&hvcsd->lock, flags);
37729 return -ENODEV;
37730 }
37731 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37732 {
37733 struct hvcs_struct *hvcsd = tty->driver_data;
37734
37735 - if (!hvcsd || hvcsd->open_count <= 0)
37736 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37737 return 0;
37738
37739 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37740 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37741 index ef92869..f4ebd88 100644
37742 --- a/drivers/tty/ipwireless/tty.c
37743 +++ b/drivers/tty/ipwireless/tty.c
37744 @@ -29,6 +29,7 @@
37745 #include <linux/tty_driver.h>
37746 #include <linux/tty_flip.h>
37747 #include <linux/uaccess.h>
37748 +#include <asm/local.h>
37749
37750 #include "tty.h"
37751 #include "network.h"
37752 @@ -51,7 +52,7 @@ struct ipw_tty {
37753 int tty_type;
37754 struct ipw_network *network;
37755 struct tty_struct *linux_tty;
37756 - int open_count;
37757 + local_t open_count;
37758 unsigned int control_lines;
37759 struct mutex ipw_tty_mutex;
37760 int tx_bytes_queued;
37761 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37762 mutex_unlock(&tty->ipw_tty_mutex);
37763 return -ENODEV;
37764 }
37765 - if (tty->open_count == 0)
37766 + if (local_read(&tty->open_count) == 0)
37767 tty->tx_bytes_queued = 0;
37768
37769 - tty->open_count++;
37770 + local_inc(&tty->open_count);
37771
37772 tty->linux_tty = linux_tty;
37773 linux_tty->driver_data = tty;
37774 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37775
37776 static void do_ipw_close(struct ipw_tty *tty)
37777 {
37778 - tty->open_count--;
37779 -
37780 - if (tty->open_count == 0) {
37781 + if (local_dec_return(&tty->open_count) == 0) {
37782 struct tty_struct *linux_tty = tty->linux_tty;
37783
37784 if (linux_tty != NULL) {
37785 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37786 return;
37787
37788 mutex_lock(&tty->ipw_tty_mutex);
37789 - if (tty->open_count == 0) {
37790 + if (local_read(&tty->open_count) == 0) {
37791 mutex_unlock(&tty->ipw_tty_mutex);
37792 return;
37793 }
37794 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37795 return;
37796 }
37797
37798 - if (!tty->open_count) {
37799 + if (!local_read(&tty->open_count)) {
37800 mutex_unlock(&tty->ipw_tty_mutex);
37801 return;
37802 }
37803 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37804 return -ENODEV;
37805
37806 mutex_lock(&tty->ipw_tty_mutex);
37807 - if (!tty->open_count) {
37808 + if (!local_read(&tty->open_count)) {
37809 mutex_unlock(&tty->ipw_tty_mutex);
37810 return -EINVAL;
37811 }
37812 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37813 if (!tty)
37814 return -ENODEV;
37815
37816 - if (!tty->open_count)
37817 + if (!local_read(&tty->open_count))
37818 return -EINVAL;
37819
37820 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37821 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37822 if (!tty)
37823 return 0;
37824
37825 - if (!tty->open_count)
37826 + if (!local_read(&tty->open_count))
37827 return 0;
37828
37829 return tty->tx_bytes_queued;
37830 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37831 if (!tty)
37832 return -ENODEV;
37833
37834 - if (!tty->open_count)
37835 + if (!local_read(&tty->open_count))
37836 return -EINVAL;
37837
37838 return get_control_lines(tty);
37839 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37840 if (!tty)
37841 return -ENODEV;
37842
37843 - if (!tty->open_count)
37844 + if (!local_read(&tty->open_count))
37845 return -EINVAL;
37846
37847 return set_control_lines(tty, set, clear);
37848 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37849 if (!tty)
37850 return -ENODEV;
37851
37852 - if (!tty->open_count)
37853 + if (!local_read(&tty->open_count))
37854 return -EINVAL;
37855
37856 /* FIXME: Exactly how is the tty object locked here .. */
37857 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37858 against a parallel ioctl etc */
37859 mutex_lock(&ttyj->ipw_tty_mutex);
37860 }
37861 - while (ttyj->open_count)
37862 + while (local_read(&ttyj->open_count))
37863 do_ipw_close(ttyj);
37864 ipwireless_disassociate_network_ttys(network,
37865 ttyj->channel_idx);
37866 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37867 index 8a50e4e..7d9ca3d 100644
37868 --- a/drivers/tty/n_gsm.c
37869 +++ b/drivers/tty/n_gsm.c
37870 @@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37871 kref_init(&dlci->ref);
37872 mutex_init(&dlci->mutex);
37873 dlci->fifo = &dlci->_fifo;
37874 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37875 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37876 kfree(dlci);
37877 return NULL;
37878 }
37879 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37880 index 39d6ab6..eb97f41 100644
37881 --- a/drivers/tty/n_tty.c
37882 +++ b/drivers/tty/n_tty.c
37883 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37884 {
37885 *ops = tty_ldisc_N_TTY;
37886 ops->owner = NULL;
37887 - ops->refcount = ops->flags = 0;
37888 + atomic_set(&ops->refcount, 0);
37889 + ops->flags = 0;
37890 }
37891 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37892 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37893 index e18604b..a7d5a11 100644
37894 --- a/drivers/tty/pty.c
37895 +++ b/drivers/tty/pty.c
37896 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
37897 register_sysctl_table(pty_root_table);
37898
37899 /* Now create the /dev/ptmx special device */
37900 + pax_open_kernel();
37901 tty_default_fops(&ptmx_fops);
37902 - ptmx_fops.open = ptmx_open;
37903 + *(void **)&ptmx_fops.open = ptmx_open;
37904 + pax_close_kernel();
37905
37906 cdev_init(&ptmx_cdev, &ptmx_fops);
37907 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37908 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
37909 index 6a1241c..d04ab0d 100644
37910 --- a/drivers/tty/rocket.c
37911 +++ b/drivers/tty/rocket.c
37912 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
37913 struct rocket_ports tmp;
37914 int board;
37915
37916 + pax_track_stack();
37917 +
37918 if (!retports)
37919 return -EFAULT;
37920 memset(&tmp, 0, sizeof (tmp));
37921 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37922 index 87e7e6c..89744e0 100644
37923 --- a/drivers/tty/serial/kgdboc.c
37924 +++ b/drivers/tty/serial/kgdboc.c
37925 @@ -23,8 +23,9 @@
37926 #define MAX_CONFIG_LEN 40
37927
37928 static struct kgdb_io kgdboc_io_ops;
37929 +static struct kgdb_io kgdboc_io_ops_console;
37930
37931 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37932 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37933 static int configured = -1;
37934
37935 static char config[MAX_CONFIG_LEN];
37936 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
37937 kgdboc_unregister_kbd();
37938 if (configured == 1)
37939 kgdb_unregister_io_module(&kgdboc_io_ops);
37940 + else if (configured == 2)
37941 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37942 }
37943
37944 static int configure_kgdboc(void)
37945 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
37946 int err;
37947 char *cptr = config;
37948 struct console *cons;
37949 + int is_console = 0;
37950
37951 err = kgdboc_option_setup(config);
37952 if (err || !strlen(config) || isspace(config[0]))
37953 goto noconfig;
37954
37955 err = -ENODEV;
37956 - kgdboc_io_ops.is_console = 0;
37957 kgdb_tty_driver = NULL;
37958
37959 kgdboc_use_kms = 0;
37960 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
37961 int idx;
37962 if (cons->device && cons->device(cons, &idx) == p &&
37963 idx == tty_line) {
37964 - kgdboc_io_ops.is_console = 1;
37965 + is_console = 1;
37966 break;
37967 }
37968 cons = cons->next;
37969 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
37970 kgdb_tty_line = tty_line;
37971
37972 do_register:
37973 - err = kgdb_register_io_module(&kgdboc_io_ops);
37974 + if (is_console) {
37975 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37976 + configured = 2;
37977 + } else {
37978 + err = kgdb_register_io_module(&kgdboc_io_ops);
37979 + configured = 1;
37980 + }
37981 if (err)
37982 goto noconfig;
37983
37984 - configured = 1;
37985 -
37986 return 0;
37987
37988 noconfig:
37989 @@ -212,7 +219,7 @@ noconfig:
37990 static int __init init_kgdboc(void)
37991 {
37992 /* Already configured? */
37993 - if (configured == 1)
37994 + if (configured >= 1)
37995 return 0;
37996
37997 return configure_kgdboc();
37998 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37999 if (config[len - 1] == '\n')
38000 config[len - 1] = '\0';
38001
38002 - if (configured == 1)
38003 + if (configured >= 1)
38004 cleanup_kgdboc();
38005
38006 /* Go and configure with the new params. */
38007 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
38008 .post_exception = kgdboc_post_exp_handler,
38009 };
38010
38011 +static struct kgdb_io kgdboc_io_ops_console = {
38012 + .name = "kgdboc",
38013 + .read_char = kgdboc_get_char,
38014 + .write_char = kgdboc_put_char,
38015 + .pre_exception = kgdboc_pre_exp_handler,
38016 + .post_exception = kgdboc_post_exp_handler,
38017 + .is_console = 1
38018 +};
38019 +
38020 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38021 /* This is only available if kgdboc is a built in for early debugging */
38022 static int __init kgdboc_early_init(char *opt)
38023 diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
38024 index cab52f4..29fc6aa 100644
38025 --- a/drivers/tty/serial/mfd.c
38026 +++ b/drivers/tty/serial/mfd.c
38027 @@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci_dev *pdev)
38028 }
38029
38030 /* First 3 are UART ports, and the 4th is the DMA */
38031 -static const struct pci_device_id pci_ids[] __devinitdata = {
38032 +static const struct pci_device_id pci_ids[] __devinitconst = {
38033 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
38034 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
38035 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
38036 diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
38037 index 23bc743..d425c07 100644
38038 --- a/drivers/tty/serial/mrst_max3110.c
38039 +++ b/drivers/tty/serial/mrst_max3110.c
38040 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct uart_max3110 *max)
38041 int loop = 1, num, total = 0;
38042 u8 recv_buf[512], *pbuf;
38043
38044 + pax_track_stack();
38045 +
38046 pbuf = recv_buf;
38047 do {
38048 num = max3110_read_multi(max, pbuf);
38049 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38050 index 1a890e2..1d8139c 100644
38051 --- a/drivers/tty/tty_io.c
38052 +++ b/drivers/tty/tty_io.c
38053 @@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38054
38055 void tty_default_fops(struct file_operations *fops)
38056 {
38057 - *fops = tty_fops;
38058 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38059 }
38060
38061 /*
38062 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38063 index a76c808..ecbc743 100644
38064 --- a/drivers/tty/tty_ldisc.c
38065 +++ b/drivers/tty/tty_ldisc.c
38066 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38067 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38068 struct tty_ldisc_ops *ldo = ld->ops;
38069
38070 - ldo->refcount--;
38071 + atomic_dec(&ldo->refcount);
38072 module_put(ldo->owner);
38073 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38074
38075 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38076 spin_lock_irqsave(&tty_ldisc_lock, flags);
38077 tty_ldiscs[disc] = new_ldisc;
38078 new_ldisc->num = disc;
38079 - new_ldisc->refcount = 0;
38080 + atomic_set(&new_ldisc->refcount, 0);
38081 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38082
38083 return ret;
38084 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
38085 return -EINVAL;
38086
38087 spin_lock_irqsave(&tty_ldisc_lock, flags);
38088 - if (tty_ldiscs[disc]->refcount)
38089 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38090 ret = -EBUSY;
38091 else
38092 tty_ldiscs[disc] = NULL;
38093 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38094 if (ldops) {
38095 ret = ERR_PTR(-EAGAIN);
38096 if (try_module_get(ldops->owner)) {
38097 - ldops->refcount++;
38098 + atomic_inc(&ldops->refcount);
38099 ret = ldops;
38100 }
38101 }
38102 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38103 unsigned long flags;
38104
38105 spin_lock_irqsave(&tty_ldisc_lock, flags);
38106 - ldops->refcount--;
38107 + atomic_dec(&ldops->refcount);
38108 module_put(ldops->owner);
38109 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38110 }
38111 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38112 index 3761ccf..2c613b3 100644
38113 --- a/drivers/tty/vt/keyboard.c
38114 +++ b/drivers/tty/vt/keyboard.c
38115 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38116 kbd->kbdmode == VC_OFF) &&
38117 value != KVAL(K_SAK))
38118 return; /* SAK is allowed even in raw mode */
38119 +
38120 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38121 + {
38122 + void *func = fn_handler[value];
38123 + if (func == fn_show_state || func == fn_show_ptregs ||
38124 + func == fn_show_mem)
38125 + return;
38126 + }
38127 +#endif
38128 +
38129 fn_handler[value](vc);
38130 }
38131
38132 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
38133 index b3915b7..e716839 100644
38134 --- a/drivers/tty/vt/vt.c
38135 +++ b/drivers/tty/vt/vt.c
38136 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
38137
38138 static void notify_write(struct vc_data *vc, unsigned int unicode)
38139 {
38140 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
38141 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
38142 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
38143 }
38144
38145 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38146 index 5e096f4..0da1363 100644
38147 --- a/drivers/tty/vt/vt_ioctl.c
38148 +++ b/drivers/tty/vt/vt_ioctl.c
38149 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38150 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38151 return -EFAULT;
38152
38153 - if (!capable(CAP_SYS_TTY_CONFIG))
38154 - perm = 0;
38155 -
38156 switch (cmd) {
38157 case KDGKBENT:
38158 key_map = key_maps[s];
38159 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38160 val = (i ? K_HOLE : K_NOSUCHMAP);
38161 return put_user(val, &user_kbe->kb_value);
38162 case KDSKBENT:
38163 + if (!capable(CAP_SYS_TTY_CONFIG))
38164 + perm = 0;
38165 +
38166 if (!perm)
38167 return -EPERM;
38168 if (!i && v == K_NOSUCHMAP) {
38169 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38170 int i, j, k;
38171 int ret;
38172
38173 - if (!capable(CAP_SYS_TTY_CONFIG))
38174 - perm = 0;
38175 -
38176 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38177 if (!kbs) {
38178 ret = -ENOMEM;
38179 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38180 kfree(kbs);
38181 return ((p && *p) ? -EOVERFLOW : 0);
38182 case KDSKBSENT:
38183 + if (!capable(CAP_SYS_TTY_CONFIG))
38184 + perm = 0;
38185 +
38186 if (!perm) {
38187 ret = -EPERM;
38188 goto reterr;
38189 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38190 index d2efe82..9440ab6 100644
38191 --- a/drivers/uio/uio.c
38192 +++ b/drivers/uio/uio.c
38193 @@ -25,6 +25,7 @@
38194 #include <linux/kobject.h>
38195 #include <linux/cdev.h>
38196 #include <linux/uio_driver.h>
38197 +#include <asm/local.h>
38198
38199 #define UIO_MAX_DEVICES (1U << MINORBITS)
38200
38201 @@ -32,10 +33,10 @@ struct uio_device {
38202 struct module *owner;
38203 struct device *dev;
38204 int minor;
38205 - atomic_t event;
38206 + atomic_unchecked_t event;
38207 struct fasync_struct *async_queue;
38208 wait_queue_head_t wait;
38209 - int vma_count;
38210 + local_t vma_count;
38211 struct uio_info *info;
38212 struct kobject *map_dir;
38213 struct kobject *portio_dir;
38214 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38215 struct device_attribute *attr, char *buf)
38216 {
38217 struct uio_device *idev = dev_get_drvdata(dev);
38218 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38219 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38220 }
38221
38222 static struct device_attribute uio_class_attributes[] = {
38223 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38224 {
38225 struct uio_device *idev = info->uio_dev;
38226
38227 - atomic_inc(&idev->event);
38228 + atomic_inc_unchecked(&idev->event);
38229 wake_up_interruptible(&idev->wait);
38230 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38231 }
38232 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38233 }
38234
38235 listener->dev = idev;
38236 - listener->event_count = atomic_read(&idev->event);
38237 + listener->event_count = atomic_read_unchecked(&idev->event);
38238 filep->private_data = listener;
38239
38240 if (idev->info->open) {
38241 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38242 return -EIO;
38243
38244 poll_wait(filep, &idev->wait, wait);
38245 - if (listener->event_count != atomic_read(&idev->event))
38246 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38247 return POLLIN | POLLRDNORM;
38248 return 0;
38249 }
38250 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38251 do {
38252 set_current_state(TASK_INTERRUPTIBLE);
38253
38254 - event_count = atomic_read(&idev->event);
38255 + event_count = atomic_read_unchecked(&idev->event);
38256 if (event_count != listener->event_count) {
38257 if (copy_to_user(buf, &event_count, count))
38258 retval = -EFAULT;
38259 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38260 static void uio_vma_open(struct vm_area_struct *vma)
38261 {
38262 struct uio_device *idev = vma->vm_private_data;
38263 - idev->vma_count++;
38264 + local_inc(&idev->vma_count);
38265 }
38266
38267 static void uio_vma_close(struct vm_area_struct *vma)
38268 {
38269 struct uio_device *idev = vma->vm_private_data;
38270 - idev->vma_count--;
38271 + local_dec(&idev->vma_count);
38272 }
38273
38274 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38275 @@ -823,7 +824,7 @@ int __uio_register_device(struct module *owner,
38276 idev->owner = owner;
38277 idev->info = info;
38278 init_waitqueue_head(&idev->wait);
38279 - atomic_set(&idev->event, 0);
38280 + atomic_set_unchecked(&idev->event, 0);
38281
38282 ret = uio_get_minor(idev);
38283 if (ret)
38284 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38285 index a845f8b..4f54072 100644
38286 --- a/drivers/usb/atm/cxacru.c
38287 +++ b/drivers/usb/atm/cxacru.c
38288 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38289 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38290 if (ret < 2)
38291 return -EINVAL;
38292 - if (index < 0 || index > 0x7f)
38293 + if (index > 0x7f)
38294 return -EINVAL;
38295 pos += tmp;
38296
38297 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38298 index d3448ca..d2864ca 100644
38299 --- a/drivers/usb/atm/usbatm.c
38300 +++ b/drivers/usb/atm/usbatm.c
38301 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38302 if (printk_ratelimit())
38303 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38304 __func__, vpi, vci);
38305 - atomic_inc(&vcc->stats->rx_err);
38306 + atomic_inc_unchecked(&vcc->stats->rx_err);
38307 return;
38308 }
38309
38310 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38311 if (length > ATM_MAX_AAL5_PDU) {
38312 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38313 __func__, length, vcc);
38314 - atomic_inc(&vcc->stats->rx_err);
38315 + atomic_inc_unchecked(&vcc->stats->rx_err);
38316 goto out;
38317 }
38318
38319 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38320 if (sarb->len < pdu_length) {
38321 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38322 __func__, pdu_length, sarb->len, vcc);
38323 - atomic_inc(&vcc->stats->rx_err);
38324 + atomic_inc_unchecked(&vcc->stats->rx_err);
38325 goto out;
38326 }
38327
38328 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38329 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38330 __func__, vcc);
38331 - atomic_inc(&vcc->stats->rx_err);
38332 + atomic_inc_unchecked(&vcc->stats->rx_err);
38333 goto out;
38334 }
38335
38336 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38337 if (printk_ratelimit())
38338 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38339 __func__, length);
38340 - atomic_inc(&vcc->stats->rx_drop);
38341 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38342 goto out;
38343 }
38344
38345 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38346
38347 vcc->push(vcc, skb);
38348
38349 - atomic_inc(&vcc->stats->rx);
38350 + atomic_inc_unchecked(&vcc->stats->rx);
38351 out:
38352 skb_trim(sarb, 0);
38353 }
38354 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38355 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38356
38357 usbatm_pop(vcc, skb);
38358 - atomic_inc(&vcc->stats->tx);
38359 + atomic_inc_unchecked(&vcc->stats->tx);
38360
38361 skb = skb_dequeue(&instance->sndqueue);
38362 }
38363 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38364 if (!left--)
38365 return sprintf(page,
38366 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38367 - atomic_read(&atm_dev->stats.aal5.tx),
38368 - atomic_read(&atm_dev->stats.aal5.tx_err),
38369 - atomic_read(&atm_dev->stats.aal5.rx),
38370 - atomic_read(&atm_dev->stats.aal5.rx_err),
38371 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38372 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38373 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38374 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38375 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38376 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38377
38378 if (!left--) {
38379 if (instance->disconnected)
38380 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38381 index 0149c09..f108812 100644
38382 --- a/drivers/usb/core/devices.c
38383 +++ b/drivers/usb/core/devices.c
38384 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38385 * time it gets called.
38386 */
38387 static struct device_connect_event {
38388 - atomic_t count;
38389 + atomic_unchecked_t count;
38390 wait_queue_head_t wait;
38391 } device_event = {
38392 .count = ATOMIC_INIT(1),
38393 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38394
38395 void usbfs_conn_disc_event(void)
38396 {
38397 - atomic_add(2, &device_event.count);
38398 + atomic_add_unchecked(2, &device_event.count);
38399 wake_up(&device_event.wait);
38400 }
38401
38402 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38403
38404 poll_wait(file, &device_event.wait, wait);
38405
38406 - event_count = atomic_read(&device_event.count);
38407 + event_count = atomic_read_unchecked(&device_event.count);
38408 if (file->f_version != event_count) {
38409 file->f_version = event_count;
38410 return POLLIN | POLLRDNORM;
38411 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
38412 index 0b5ec23..0da3d76 100644
38413 --- a/drivers/usb/core/message.c
38414 +++ b/drivers/usb/core/message.c
38415 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
38416 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38417 if (buf) {
38418 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38419 - if (len > 0) {
38420 - smallbuf = kmalloc(++len, GFP_NOIO);
38421 + if (len++ > 0) {
38422 + smallbuf = kmalloc(len, GFP_NOIO);
38423 if (!smallbuf)
38424 return buf;
38425 memcpy(smallbuf, buf, len);
38426 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38427 index 1fc8f12..20647c1 100644
38428 --- a/drivers/usb/early/ehci-dbgp.c
38429 +++ b/drivers/usb/early/ehci-dbgp.c
38430 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38431
38432 #ifdef CONFIG_KGDB
38433 static struct kgdb_io kgdbdbgp_io_ops;
38434 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38435 +static struct kgdb_io kgdbdbgp_io_ops_console;
38436 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38437 #else
38438 #define dbgp_kgdb_mode (0)
38439 #endif
38440 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38441 .write_char = kgdbdbgp_write_char,
38442 };
38443
38444 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38445 + .name = "kgdbdbgp",
38446 + .read_char = kgdbdbgp_read_char,
38447 + .write_char = kgdbdbgp_write_char,
38448 + .is_console = 1
38449 +};
38450 +
38451 static int kgdbdbgp_wait_time;
38452
38453 static int __init kgdbdbgp_parse_config(char *str)
38454 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38455 ptr++;
38456 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38457 }
38458 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38459 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38460 + if (early_dbgp_console.index != -1)
38461 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38462 + else
38463 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38464
38465 return 0;
38466 }
38467 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
38468 index d718033..6075579 100644
38469 --- a/drivers/usb/host/xhci-mem.c
38470 +++ b/drivers/usb/host/xhci-mem.c
38471 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
38472 unsigned int num_tests;
38473 int i, ret;
38474
38475 + pax_track_stack();
38476 +
38477 num_tests = ARRAY_SIZE(simple_test_vector);
38478 for (i = 0; i < num_tests; i++) {
38479 ret = xhci_test_trb_in_td(xhci,
38480 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38481 index d6bea3e..60b250e 100644
38482 --- a/drivers/usb/wusbcore/wa-hc.h
38483 +++ b/drivers/usb/wusbcore/wa-hc.h
38484 @@ -192,7 +192,7 @@ struct wahc {
38485 struct list_head xfer_delayed_list;
38486 spinlock_t xfer_list_lock;
38487 struct work_struct xfer_work;
38488 - atomic_t xfer_id_count;
38489 + atomic_unchecked_t xfer_id_count;
38490 };
38491
38492
38493 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38494 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38495 spin_lock_init(&wa->xfer_list_lock);
38496 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38497 - atomic_set(&wa->xfer_id_count, 1);
38498 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38499 }
38500
38501 /**
38502 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38503 index 4193345..49ae93d 100644
38504 --- a/drivers/usb/wusbcore/wa-xfer.c
38505 +++ b/drivers/usb/wusbcore/wa-xfer.c
38506 @@ -295,7 +295,7 @@ out:
38507 */
38508 static void wa_xfer_id_init(struct wa_xfer *xfer)
38509 {
38510 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38511 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38512 }
38513
38514 /*
38515 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38516 index c14c42b..f955cc2 100644
38517 --- a/drivers/vhost/vhost.c
38518 +++ b/drivers/vhost/vhost.c
38519 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38520 return 0;
38521 }
38522
38523 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38524 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38525 {
38526 struct file *eventfp, *filep = NULL,
38527 *pollstart = NULL, *pollstop = NULL;
38528 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38529 index b0b2ac3..89a4399 100644
38530 --- a/drivers/video/aty/aty128fb.c
38531 +++ b/drivers/video/aty/aty128fb.c
38532 @@ -148,7 +148,7 @@ enum {
38533 };
38534
38535 /* Must match above enum */
38536 -static const char *r128_family[] __devinitdata = {
38537 +static const char *r128_family[] __devinitconst = {
38538 "AGP",
38539 "PCI",
38540 "PRO AGP",
38541 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38542 index 5c3960d..15cf8fc 100644
38543 --- a/drivers/video/fbcmap.c
38544 +++ b/drivers/video/fbcmap.c
38545 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38546 rc = -ENODEV;
38547 goto out;
38548 }
38549 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38550 - !info->fbops->fb_setcmap)) {
38551 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38552 rc = -EINVAL;
38553 goto out1;
38554 }
38555 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38556 index ad93629..ca6a218 100644
38557 --- a/drivers/video/fbmem.c
38558 +++ b/drivers/video/fbmem.c
38559 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38560 image->dx += image->width + 8;
38561 }
38562 } else if (rotate == FB_ROTATE_UD) {
38563 - for (x = 0; x < num && image->dx >= 0; x++) {
38564 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38565 info->fbops->fb_imageblit(info, image);
38566 image->dx -= image->width + 8;
38567 }
38568 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38569 image->dy += image->height + 8;
38570 }
38571 } else if (rotate == FB_ROTATE_CCW) {
38572 - for (x = 0; x < num && image->dy >= 0; x++) {
38573 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38574 info->fbops->fb_imageblit(info, image);
38575 image->dy -= image->height + 8;
38576 }
38577 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
38578 int flags = info->flags;
38579 int ret = 0;
38580
38581 + pax_track_stack();
38582 +
38583 if (var->activate & FB_ACTIVATE_INV_MODE) {
38584 struct fb_videomode mode1, mode2;
38585
38586 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38587 void __user *argp = (void __user *)arg;
38588 long ret = 0;
38589
38590 + pax_track_stack();
38591 +
38592 switch (cmd) {
38593 case FBIOGET_VSCREENINFO:
38594 if (!lock_fb_info(info))
38595 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38596 return -EFAULT;
38597 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38598 return -EINVAL;
38599 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38600 + if (con2fb.framebuffer >= FB_MAX)
38601 return -EINVAL;
38602 if (!registered_fb[con2fb.framebuffer])
38603 request_module("fb%d", con2fb.framebuffer);
38604 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38605 index 5a5d092..265c5ed 100644
38606 --- a/drivers/video/geode/gx1fb_core.c
38607 +++ b/drivers/video/geode/gx1fb_core.c
38608 @@ -29,7 +29,7 @@ static int crt_option = 1;
38609 static char panel_option[32] = "";
38610
38611 /* Modes relevant to the GX1 (taken from modedb.c) */
38612 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38613 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38614 /* 640x480-60 VESA */
38615 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38616 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38617 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38618 index 896e53d..4d87d0b 100644
38619 --- a/drivers/video/gxt4500.c
38620 +++ b/drivers/video/gxt4500.c
38621 @@ -156,7 +156,7 @@ struct gxt4500_par {
38622 static char *mode_option;
38623
38624 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38625 -static const struct fb_videomode defaultmode __devinitdata = {
38626 +static const struct fb_videomode defaultmode __devinitconst = {
38627 .refresh = 60,
38628 .xres = 1280,
38629 .yres = 1024,
38630 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38631 return 0;
38632 }
38633
38634 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38635 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38636 .id = "IBM GXT4500P",
38637 .type = FB_TYPE_PACKED_PIXELS,
38638 .visual = FB_VISUAL_PSEUDOCOLOR,
38639 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38640 index 7672d2e..b56437f 100644
38641 --- a/drivers/video/i810/i810_accel.c
38642 +++ b/drivers/video/i810/i810_accel.c
38643 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38644 }
38645 }
38646 printk("ringbuffer lockup!!!\n");
38647 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38648 i810_report_error(mmio);
38649 par->dev_flags |= LOCKUP;
38650 info->pixmap.scan_align = 1;
38651 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38652 index 318f6fb..9a389c1 100644
38653 --- a/drivers/video/i810/i810_main.c
38654 +++ b/drivers/video/i810/i810_main.c
38655 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38656 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38657
38658 /* PCI */
38659 -static const char *i810_pci_list[] __devinitdata = {
38660 +static const char *i810_pci_list[] __devinitconst = {
38661 "Intel(R) 810 Framebuffer Device" ,
38662 "Intel(R) 810-DC100 Framebuffer Device" ,
38663 "Intel(R) 810E Framebuffer Device" ,
38664 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38665 index de36693..3c63fc2 100644
38666 --- a/drivers/video/jz4740_fb.c
38667 +++ b/drivers/video/jz4740_fb.c
38668 @@ -136,7 +136,7 @@ struct jzfb {
38669 uint32_t pseudo_palette[16];
38670 };
38671
38672 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38673 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38674 .id = "JZ4740 FB",
38675 .type = FB_TYPE_PACKED_PIXELS,
38676 .visual = FB_VISUAL_TRUECOLOR,
38677 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38678 index 3c14e43..eafa544 100644
38679 --- a/drivers/video/logo/logo_linux_clut224.ppm
38680 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38681 @@ -1,1604 +1,1123 @@
38682 P3
38683 -# Standard 224-color Linux logo
38684 80 80
38685 255
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 0 0 0 0 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 0 0 0 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 0 0 0 0 0 0 0 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 6 6 6 6 6 6 10 10 10 10 10 10
38696 - 10 10 10 6 6 6 6 6 6 6 6 6
38697 - 0 0 0 0 0 0 0 0 0 0 0 0
38698 - 0 0 0 0 0 0 0 0 0 0 0 0
38699 - 0 0 0 0 0 0 0 0 0 0 0 0
38700 - 0 0 0 0 0 0 0 0 0 0 0 0
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 0 0 0
38708 - 0 0 0 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 0 0 0 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 0 0 0
38714 - 0 0 0 6 6 6 10 10 10 14 14 14
38715 - 22 22 22 26 26 26 30 30 30 34 34 34
38716 - 30 30 30 30 30 30 26 26 26 18 18 18
38717 - 14 14 14 10 10 10 6 6 6 0 0 0
38718 - 0 0 0 0 0 0 0 0 0 0 0 0
38719 - 0 0 0 0 0 0 0 0 0 0 0 0
38720 - 0 0 0 0 0 0 0 0 0 0 0 0
38721 - 0 0 0 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 0 0 0
38724 - 0 0 0 0 0 0 0 0 0 0 0 0
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 1 0 0 1 0 0 0
38728 - 0 0 0 0 0 0 0 0 0 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 0 0 0 0 0 0 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 0 0 0 0 0 0 0 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 0 0 0
38734 - 6 6 6 14 14 14 26 26 26 42 42 42
38735 - 54 54 54 66 66 66 78 78 78 78 78 78
38736 - 78 78 78 74 74 74 66 66 66 54 54 54
38737 - 42 42 42 26 26 26 18 18 18 10 10 10
38738 - 6 6 6 0 0 0 0 0 0 0 0 0
38739 - 0 0 0 0 0 0 0 0 0 0 0 0
38740 - 0 0 0 0 0 0 0 0 0 0 0 0
38741 - 0 0 0 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 0 0 0 0 0 0 0 0 0
38744 - 0 0 0 0 0 0 0 0 0 0 0 0
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 1 0 0 0 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 0 0 0 0 0 0 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 0 0 0 0 0 0 0 0 0 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 10 10 10
38754 - 22 22 22 42 42 42 66 66 66 86 86 86
38755 - 66 66 66 38 38 38 38 38 38 22 22 22
38756 - 26 26 26 34 34 34 54 54 54 66 66 66
38757 - 86 86 86 70 70 70 46 46 46 26 26 26
38758 - 14 14 14 6 6 6 0 0 0 0 0 0
38759 - 0 0 0 0 0 0 0 0 0 0 0 0
38760 - 0 0 0 0 0 0 0 0 0 0 0 0
38761 - 0 0 0 0 0 0 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 0 0 0 0 0 0 0 0 0 0 0 0
38764 - 0 0 0 0 0 0 0 0 0 0 0 0
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 1 0 0 1 0 0 1 0 0 0
38768 - 0 0 0 0 0 0 0 0 0 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 0 0 0 0 0 0 0 0 0 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 0 0 0 0 0 0 0 0 0 0 0 0
38773 - 0 0 0 0 0 0 10 10 10 26 26 26
38774 - 50 50 50 82 82 82 58 58 58 6 6 6
38775 - 2 2 6 2 2 6 2 2 6 2 2 6
38776 - 2 2 6 2 2 6 2 2 6 2 2 6
38777 - 6 6 6 54 54 54 86 86 86 66 66 66
38778 - 38 38 38 18 18 18 6 6 6 0 0 0
38779 - 0 0 0 0 0 0 0 0 0 0 0 0
38780 - 0 0 0 0 0 0 0 0 0 0 0 0
38781 - 0 0 0 0 0 0 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 0 0 0 0 0 0 0 0 0 0 0 0
38784 - 0 0 0 0 0 0 0 0 0 0 0 0
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 0 0 0
38788 - 0 0 0 0 0 0 0 0 0 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 0 0 0 0 0 0 0 0 0 0 0 0
38791 - 0 0 0 0 0 0 0 0 0 0 0 0
38792 - 0 0 0 0 0 0 0 0 0 0 0 0
38793 - 0 0 0 6 6 6 22 22 22 50 50 50
38794 - 78 78 78 34 34 34 2 2 6 2 2 6
38795 - 2 2 6 2 2 6 2 2 6 2 2 6
38796 - 2 2 6 2 2 6 2 2 6 2 2 6
38797 - 2 2 6 2 2 6 6 6 6 70 70 70
38798 - 78 78 78 46 46 46 22 22 22 6 6 6
38799 - 0 0 0 0 0 0 0 0 0 0 0 0
38800 - 0 0 0 0 0 0 0 0 0 0 0 0
38801 - 0 0 0 0 0 0 0 0 0 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 0 0 0 0 0 0 0 0 0 0 0 0
38804 - 0 0 0 0 0 0 0 0 0 0 0 0
38805 - 0 0 0 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 1 0 0 1 0 0 1 0 0 0
38808 - 0 0 0 0 0 0 0 0 0 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 0 0 0
38810 - 0 0 0 0 0 0 0 0 0 0 0 0
38811 - 0 0 0 0 0 0 0 0 0 0 0 0
38812 - 0 0 0 0 0 0 0 0 0 0 0 0
38813 - 6 6 6 18 18 18 42 42 42 82 82 82
38814 - 26 26 26 2 2 6 2 2 6 2 2 6
38815 - 2 2 6 2 2 6 2 2 6 2 2 6
38816 - 2 2 6 2 2 6 2 2 6 14 14 14
38817 - 46 46 46 34 34 34 6 6 6 2 2 6
38818 - 42 42 42 78 78 78 42 42 42 18 18 18
38819 - 6 6 6 0 0 0 0 0 0 0 0 0
38820 - 0 0 0 0 0 0 0 0 0 0 0 0
38821 - 0 0 0 0 0 0 0 0 0 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 0 0 0
38823 - 0 0 0 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 0 0 0 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 1 0 0 0 0 0 1 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 0 0 0 0 0 0 0 0 0 0
38833 - 10 10 10 30 30 30 66 66 66 58 58 58
38834 - 2 2 6 2 2 6 2 2 6 2 2 6
38835 - 2 2 6 2 2 6 2 2 6 2 2 6
38836 - 2 2 6 2 2 6 2 2 6 26 26 26
38837 - 86 86 86 101 101 101 46 46 46 10 10 10
38838 - 2 2 6 58 58 58 70 70 70 34 34 34
38839 - 10 10 10 0 0 0 0 0 0 0 0 0
38840 - 0 0 0 0 0 0 0 0 0 0 0 0
38841 - 0 0 0 0 0 0 0 0 0 0 0 0
38842 - 0 0 0 0 0 0 0 0 0 0 0 0
38843 - 0 0 0 0 0 0 0 0 0 0 0 0
38844 - 0 0 0 0 0 0 0 0 0 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 1 0 0 1 0 0 1 0 0 0
38848 - 0 0 0 0 0 0 0 0 0 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 0 0 0 0 0 0 0
38853 - 14 14 14 42 42 42 86 86 86 10 10 10
38854 - 2 2 6 2 2 6 2 2 6 2 2 6
38855 - 2 2 6 2 2 6 2 2 6 2 2 6
38856 - 2 2 6 2 2 6 2 2 6 30 30 30
38857 - 94 94 94 94 94 94 58 58 58 26 26 26
38858 - 2 2 6 6 6 6 78 78 78 54 54 54
38859 - 22 22 22 6 6 6 0 0 0 0 0 0
38860 - 0 0 0 0 0 0 0 0 0 0 0 0
38861 - 0 0 0 0 0 0 0 0 0 0 0 0
38862 - 0 0 0 0 0 0 0 0 0 0 0 0
38863 - 0 0 0 0 0 0 0 0 0 0 0 0
38864 - 0 0 0 0 0 0 0 0 0 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 0 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 0 0 0 0 0 0 0 6 6 6
38873 - 22 22 22 62 62 62 62 62 62 2 2 6
38874 - 2 2 6 2 2 6 2 2 6 2 2 6
38875 - 2 2 6 2 2 6 2 2 6 2 2 6
38876 - 2 2 6 2 2 6 2 2 6 26 26 26
38877 - 54 54 54 38 38 38 18 18 18 10 10 10
38878 - 2 2 6 2 2 6 34 34 34 82 82 82
38879 - 38 38 38 14 14 14 0 0 0 0 0 0
38880 - 0 0 0 0 0 0 0 0 0 0 0 0
38881 - 0 0 0 0 0 0 0 0 0 0 0 0
38882 - 0 0 0 0 0 0 0 0 0 0 0 0
38883 - 0 0 0 0 0 0 0 0 0 0 0 0
38884 - 0 0 0 0 0 0 0 0 0 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 1 0 0 1 0 0 0
38888 - 0 0 0 0 0 0 0 0 0 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 0 0 0 0 0 0 0 6 6 6
38893 - 30 30 30 78 78 78 30 30 30 2 2 6
38894 - 2 2 6 2 2 6 2 2 6 2 2 6
38895 - 2 2 6 2 2 6 2 2 6 2 2 6
38896 - 2 2 6 2 2 6 2 2 6 10 10 10
38897 - 10 10 10 2 2 6 2 2 6 2 2 6
38898 - 2 2 6 2 2 6 2 2 6 78 78 78
38899 - 50 50 50 18 18 18 6 6 6 0 0 0
38900 - 0 0 0 0 0 0 0 0 0 0 0 0
38901 - 0 0 0 0 0 0 0 0 0 0 0 0
38902 - 0 0 0 0 0 0 0 0 0 0 0 0
38903 - 0 0 0 0 0 0 0 0 0 0 0 0
38904 - 0 0 0 0 0 0 0 0 0 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 1 0 0 0 0 0 0 0 0 0
38908 - 0 0 0 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 0 0 0 10 10 10
38913 - 38 38 38 86 86 86 14 14 14 2 2 6
38914 - 2 2 6 2 2 6 2 2 6 2 2 6
38915 - 2 2 6 2 2 6 2 2 6 2 2 6
38916 - 2 2 6 2 2 6 2 2 6 2 2 6
38917 - 2 2 6 2 2 6 2 2 6 2 2 6
38918 - 2 2 6 2 2 6 2 2 6 54 54 54
38919 - 66 66 66 26 26 26 6 6 6 0 0 0
38920 - 0 0 0 0 0 0 0 0 0 0 0 0
38921 - 0 0 0 0 0 0 0 0 0 0 0 0
38922 - 0 0 0 0 0 0 0 0 0 0 0 0
38923 - 0 0 0 0 0 0 0 0 0 0 0 0
38924 - 0 0 0 0 0 0 0 0 0 0 0 0
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 1 0 0 1 0 0 0
38928 - 0 0 0 0 0 0 0 0 0 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 0 0 0
38931 - 0 0 0 0 0 0 0 0 0 0 0 0
38932 - 0 0 0 0 0 0 0 0 0 14 14 14
38933 - 42 42 42 82 82 82 2 2 6 2 2 6
38934 - 2 2 6 6 6 6 10 10 10 2 2 6
38935 - 2 2 6 2 2 6 2 2 6 2 2 6
38936 - 2 2 6 2 2 6 2 2 6 6 6 6
38937 - 14 14 14 10 10 10 2 2 6 2 2 6
38938 - 2 2 6 2 2 6 2 2 6 18 18 18
38939 - 82 82 82 34 34 34 10 10 10 0 0 0
38940 - 0 0 0 0 0 0 0 0 0 0 0 0
38941 - 0 0 0 0 0 0 0 0 0 0 0 0
38942 - 0 0 0 0 0 0 0 0 0 0 0 0
38943 - 0 0 0 0 0 0 0 0 0 0 0 0
38944 - 0 0 0 0 0 0 0 0 0 0 0 0
38945 - 0 0 0 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 1 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 0 0 0 0 0 0 0 0 0
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 0 0 0 0 0 0 0 0 0 14 14 14
38953 - 46 46 46 86 86 86 2 2 6 2 2 6
38954 - 6 6 6 6 6 6 22 22 22 34 34 34
38955 - 6 6 6 2 2 6 2 2 6 2 2 6
38956 - 2 2 6 2 2 6 18 18 18 34 34 34
38957 - 10 10 10 50 50 50 22 22 22 2 2 6
38958 - 2 2 6 2 2 6 2 2 6 10 10 10
38959 - 86 86 86 42 42 42 14 14 14 0 0 0
38960 - 0 0 0 0 0 0 0 0 0 0 0 0
38961 - 0 0 0 0 0 0 0 0 0 0 0 0
38962 - 0 0 0 0 0 0 0 0 0 0 0 0
38963 - 0 0 0 0 0 0 0 0 0 0 0 0
38964 - 0 0 0 0 0 0 0 0 0 0 0 0
38965 - 0 0 0 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 1 0 0 1 0 0 1 0 0 0
38968 - 0 0 0 0 0 0 0 0 0 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 0 0 0 0 0 0 0 0 0 0 0 0
38971 - 0 0 0 0 0 0 0 0 0 0 0 0
38972 - 0 0 0 0 0 0 0 0 0 14 14 14
38973 - 46 46 46 86 86 86 2 2 6 2 2 6
38974 - 38 38 38 116 116 116 94 94 94 22 22 22
38975 - 22 22 22 2 2 6 2 2 6 2 2 6
38976 - 14 14 14 86 86 86 138 138 138 162 162 162
38977 -154 154 154 38 38 38 26 26 26 6 6 6
38978 - 2 2 6 2 2 6 2 2 6 2 2 6
38979 - 86 86 86 46 46 46 14 14 14 0 0 0
38980 - 0 0 0 0 0 0 0 0 0 0 0 0
38981 - 0 0 0 0 0 0 0 0 0 0 0 0
38982 - 0 0 0 0 0 0 0 0 0 0 0 0
38983 - 0 0 0 0 0 0 0 0 0 0 0 0
38984 - 0 0 0 0 0 0 0 0 0 0 0 0
38985 - 0 0 0 0 0 0 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 0 0 0 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 0 0 0 0 0 0 0 0 0 0 0 0
38991 - 0 0 0 0 0 0 0 0 0 0 0 0
38992 - 0 0 0 0 0 0 0 0 0 14 14 14
38993 - 46 46 46 86 86 86 2 2 6 14 14 14
38994 -134 134 134 198 198 198 195 195 195 116 116 116
38995 - 10 10 10 2 2 6 2 2 6 6 6 6
38996 -101 98 89 187 187 187 210 210 210 218 218 218
38997 -214 214 214 134 134 134 14 14 14 6 6 6
38998 - 2 2 6 2 2 6 2 2 6 2 2 6
38999 - 86 86 86 50 50 50 18 18 18 6 6 6
39000 - 0 0 0 0 0 0 0 0 0 0 0 0
39001 - 0 0 0 0 0 0 0 0 0 0 0 0
39002 - 0 0 0 0 0 0 0 0 0 0 0 0
39003 - 0 0 0 0 0 0 0 0 0 0 0 0
39004 - 0 0 0 0 0 0 0 0 0 0 0 0
39005 - 0 0 0 0 0 0 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 1 0 0 0
39007 - 0 0 1 0 0 1 0 0 1 0 0 0
39008 - 0 0 0 0 0 0 0 0 0 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 0 0 0
39010 - 0 0 0 0 0 0 0 0 0 0 0 0
39011 - 0 0 0 0 0 0 0 0 0 0 0 0
39012 - 0 0 0 0 0 0 0 0 0 14 14 14
39013 - 46 46 46 86 86 86 2 2 6 54 54 54
39014 -218 218 218 195 195 195 226 226 226 246 246 246
39015 - 58 58 58 2 2 6 2 2 6 30 30 30
39016 -210 210 210 253 253 253 174 174 174 123 123 123
39017 -221 221 221 234 234 234 74 74 74 2 2 6
39018 - 2 2 6 2 2 6 2 2 6 2 2 6
39019 - 70 70 70 58 58 58 22 22 22 6 6 6
39020 - 0 0 0 0 0 0 0 0 0 0 0 0
39021 - 0 0 0 0 0 0 0 0 0 0 0 0
39022 - 0 0 0 0 0 0 0 0 0 0 0 0
39023 - 0 0 0 0 0 0 0 0 0 0 0 0
39024 - 0 0 0 0 0 0 0 0 0 0 0 0
39025 - 0 0 0 0 0 0 0 0 0 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 0 0 0 0
39028 - 0 0 0 0 0 0 0 0 0 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 0 0 0
39030 - 0 0 0 0 0 0 0 0 0 0 0 0
39031 - 0 0 0 0 0 0 0 0 0 0 0 0
39032 - 0 0 0 0 0 0 0 0 0 14 14 14
39033 - 46 46 46 82 82 82 2 2 6 106 106 106
39034 -170 170 170 26 26 26 86 86 86 226 226 226
39035 -123 123 123 10 10 10 14 14 14 46 46 46
39036 -231 231 231 190 190 190 6 6 6 70 70 70
39037 - 90 90 90 238 238 238 158 158 158 2 2 6
39038 - 2 2 6 2 2 6 2 2 6 2 2 6
39039 - 70 70 70 58 58 58 22 22 22 6 6 6
39040 - 0 0 0 0 0 0 0 0 0 0 0 0
39041 - 0 0 0 0 0 0 0 0 0 0 0 0
39042 - 0 0 0 0 0 0 0 0 0 0 0 0
39043 - 0 0 0 0 0 0 0 0 0 0 0 0
39044 - 0 0 0 0 0 0 0 0 0 0 0 0
39045 - 0 0 0 0 0 0 0 0 0 0 0 0
39046 - 0 0 0 0 0 0 0 0 1 0 0 0
39047 - 0 0 1 0 0 1 0 0 1 0 0 0
39048 - 0 0 0 0 0 0 0 0 0 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 0 0 0
39050 - 0 0 0 0 0 0 0 0 0 0 0 0
39051 - 0 0 0 0 0 0 0 0 0 0 0 0
39052 - 0 0 0 0 0 0 0 0 0 14 14 14
39053 - 42 42 42 86 86 86 6 6 6 116 116 116
39054 -106 106 106 6 6 6 70 70 70 149 149 149
39055 -128 128 128 18 18 18 38 38 38 54 54 54
39056 -221 221 221 106 106 106 2 2 6 14 14 14
39057 - 46 46 46 190 190 190 198 198 198 2 2 6
39058 - 2 2 6 2 2 6 2 2 6 2 2 6
39059 - 74 74 74 62 62 62 22 22 22 6 6 6
39060 - 0 0 0 0 0 0 0 0 0 0 0 0
39061 - 0 0 0 0 0 0 0 0 0 0 0 0
39062 - 0 0 0 0 0 0 0 0 0 0 0 0
39063 - 0 0 0 0 0 0 0 0 0 0 0 0
39064 - 0 0 0 0 0 0 0 0 0 0 0 0
39065 - 0 0 0 0 0 0 0 0 0 0 0 0
39066 - 0 0 0 0 0 0 0 0 1 0 0 0
39067 - 0 0 1 0 0 0 0 0 1 0 0 0
39068 - 0 0 0 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 0 0 0
39070 - 0 0 0 0 0 0 0 0 0 0 0 0
39071 - 0 0 0 0 0 0 0 0 0 0 0 0
39072 - 0 0 0 0 0 0 0 0 0 14 14 14
39073 - 42 42 42 94 94 94 14 14 14 101 101 101
39074 -128 128 128 2 2 6 18 18 18 116 116 116
39075 -118 98 46 121 92 8 121 92 8 98 78 10
39076 -162 162 162 106 106 106 2 2 6 2 2 6
39077 - 2 2 6 195 195 195 195 195 195 6 6 6
39078 - 2 2 6 2 2 6 2 2 6 2 2 6
39079 - 74 74 74 62 62 62 22 22 22 6 6 6
39080 - 0 0 0 0 0 0 0 0 0 0 0 0
39081 - 0 0 0 0 0 0 0 0 0 0 0 0
39082 - 0 0 0 0 0 0 0 0 0 0 0 0
39083 - 0 0 0 0 0 0 0 0 0 0 0 0
39084 - 0 0 0 0 0 0 0 0 0 0 0 0
39085 - 0 0 0 0 0 0 0 0 0 0 0 0
39086 - 0 0 0 0 0 0 0 0 1 0 0 1
39087 - 0 0 1 0 0 0 0 0 1 0 0 0
39088 - 0 0 0 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 0 0 0
39090 - 0 0 0 0 0 0 0 0 0 0 0 0
39091 - 0 0 0 0 0 0 0 0 0 0 0 0
39092 - 0 0 0 0 0 0 0 0 0 10 10 10
39093 - 38 38 38 90 90 90 14 14 14 58 58 58
39094 -210 210 210 26 26 26 54 38 6 154 114 10
39095 -226 170 11 236 186 11 225 175 15 184 144 12
39096 -215 174 15 175 146 61 37 26 9 2 2 6
39097 - 70 70 70 246 246 246 138 138 138 2 2 6
39098 - 2 2 6 2 2 6 2 2 6 2 2 6
39099 - 70 70 70 66 66 66 26 26 26 6 6 6
39100 - 0 0 0 0 0 0 0 0 0 0 0 0
39101 - 0 0 0 0 0 0 0 0 0 0 0 0
39102 - 0 0 0 0 0 0 0 0 0 0 0 0
39103 - 0 0 0 0 0 0 0 0 0 0 0 0
39104 - 0 0 0 0 0 0 0 0 0 0 0 0
39105 - 0 0 0 0 0 0 0 0 0 0 0 0
39106 - 0 0 0 0 0 0 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 0 0 0
39108 - 0 0 0 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 0 0 0
39110 - 0 0 0 0 0 0 0 0 0 0 0 0
39111 - 0 0 0 0 0 0 0 0 0 0 0 0
39112 - 0 0 0 0 0 0 0 0 0 10 10 10
39113 - 38 38 38 86 86 86 14 14 14 10 10 10
39114 -195 195 195 188 164 115 192 133 9 225 175 15
39115 -239 182 13 234 190 10 232 195 16 232 200 30
39116 -245 207 45 241 208 19 232 195 16 184 144 12
39117 -218 194 134 211 206 186 42 42 42 2 2 6
39118 - 2 2 6 2 2 6 2 2 6 2 2 6
39119 - 50 50 50 74 74 74 30 30 30 6 6 6
39120 - 0 0 0 0 0 0 0 0 0 0 0 0
39121 - 0 0 0 0 0 0 0 0 0 0 0 0
39122 - 0 0 0 0 0 0 0 0 0 0 0 0
39123 - 0 0 0 0 0 0 0 0 0 0 0 0
39124 - 0 0 0 0 0 0 0 0 0 0 0 0
39125 - 0 0 0 0 0 0 0 0 0 0 0 0
39126 - 0 0 0 0 0 0 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 0 0 0
39128 - 0 0 0 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 0 0 0
39130 - 0 0 0 0 0 0 0 0 0 0 0 0
39131 - 0 0 0 0 0 0 0 0 0 0 0 0
39132 - 0 0 0 0 0 0 0 0 0 10 10 10
39133 - 34 34 34 86 86 86 14 14 14 2 2 6
39134 -121 87 25 192 133 9 219 162 10 239 182 13
39135 -236 186 11 232 195 16 241 208 19 244 214 54
39136 -246 218 60 246 218 38 246 215 20 241 208 19
39137 -241 208 19 226 184 13 121 87 25 2 2 6
39138 - 2 2 6 2 2 6 2 2 6 2 2 6
39139 - 50 50 50 82 82 82 34 34 34 10 10 10
39140 - 0 0 0 0 0 0 0 0 0 0 0 0
39141 - 0 0 0 0 0 0 0 0 0 0 0 0
39142 - 0 0 0 0 0 0 0 0 0 0 0 0
39143 - 0 0 0 0 0 0 0 0 0 0 0 0
39144 - 0 0 0 0 0 0 0 0 0 0 0 0
39145 - 0 0 0 0 0 0 0 0 0 0 0 0
39146 - 0 0 0 0 0 0 0 0 0 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 0 0 0
39148 - 0 0 0 0 0 0 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 0 0 0
39150 - 0 0 0 0 0 0 0 0 0 0 0 0
39151 - 0 0 0 0 0 0 0 0 0 0 0 0
39152 - 0 0 0 0 0 0 0 0 0 10 10 10
39153 - 34 34 34 82 82 82 30 30 30 61 42 6
39154 -180 123 7 206 145 10 230 174 11 239 182 13
39155 -234 190 10 238 202 15 241 208 19 246 218 74
39156 -246 218 38 246 215 20 246 215 20 246 215 20
39157 -226 184 13 215 174 15 184 144 12 6 6 6
39158 - 2 2 6 2 2 6 2 2 6 2 2 6
39159 - 26 26 26 94 94 94 42 42 42 14 14 14
39160 - 0 0 0 0 0 0 0 0 0 0 0 0
39161 - 0 0 0 0 0 0 0 0 0 0 0 0
39162 - 0 0 0 0 0 0 0 0 0 0 0 0
39163 - 0 0 0 0 0 0 0 0 0 0 0 0
39164 - 0 0 0 0 0 0 0 0 0 0 0 0
39165 - 0 0 0 0 0 0 0 0 0 0 0 0
39166 - 0 0 0 0 0 0 0 0 0 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 0 0 0
39168 - 0 0 0 0 0 0 0 0 0 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 0 0 0
39170 - 0 0 0 0 0 0 0 0 0 0 0 0
39171 - 0 0 0 0 0 0 0 0 0 0 0 0
39172 - 0 0 0 0 0 0 0 0 0 10 10 10
39173 - 30 30 30 78 78 78 50 50 50 104 69 6
39174 -192 133 9 216 158 10 236 178 12 236 186 11
39175 -232 195 16 241 208 19 244 214 54 245 215 43
39176 -246 215 20 246 215 20 241 208 19 198 155 10
39177 -200 144 11 216 158 10 156 118 10 2 2 6
39178 - 2 2 6 2 2 6 2 2 6 2 2 6
39179 - 6 6 6 90 90 90 54 54 54 18 18 18
39180 - 6 6 6 0 0 0 0 0 0 0 0 0
39181 - 0 0 0 0 0 0 0 0 0 0 0 0
39182 - 0 0 0 0 0 0 0 0 0 0 0 0
39183 - 0 0 0 0 0 0 0 0 0 0 0 0
39184 - 0 0 0 0 0 0 0 0 0 0 0 0
39185 - 0 0 0 0 0 0 0 0 0 0 0 0
39186 - 0 0 0 0 0 0 0 0 0 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 0 0 0
39188 - 0 0 0 0 0 0 0 0 0 0 0 0
39189 - 0 0 0 0 0 0 0 0 0 0 0 0
39190 - 0 0 0 0 0 0 0 0 0 0 0 0
39191 - 0 0 0 0 0 0 0 0 0 0 0 0
39192 - 0 0 0 0 0 0 0 0 0 10 10 10
39193 - 30 30 30 78 78 78 46 46 46 22 22 22
39194 -137 92 6 210 162 10 239 182 13 238 190 10
39195 -238 202 15 241 208 19 246 215 20 246 215 20
39196 -241 208 19 203 166 17 185 133 11 210 150 10
39197 -216 158 10 210 150 10 102 78 10 2 2 6
39198 - 6 6 6 54 54 54 14 14 14 2 2 6
39199 - 2 2 6 62 62 62 74 74 74 30 30 30
39200 - 10 10 10 0 0 0 0 0 0 0 0 0
39201 - 0 0 0 0 0 0 0 0 0 0 0 0
39202 - 0 0 0 0 0 0 0 0 0 0 0 0
39203 - 0 0 0 0 0 0 0 0 0 0 0 0
39204 - 0 0 0 0 0 0 0 0 0 0 0 0
39205 - 0 0 0 0 0 0 0 0 0 0 0 0
39206 - 0 0 0 0 0 0 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 0 0 0 0 0 0
39208 - 0 0 0 0 0 0 0 0 0 0 0 0
39209 - 0 0 0 0 0 0 0 0 0 0 0 0
39210 - 0 0 0 0 0 0 0 0 0 0 0 0
39211 - 0 0 0 0 0 0 0 0 0 0 0 0
39212 - 0 0 0 0 0 0 0 0 0 10 10 10
39213 - 34 34 34 78 78 78 50 50 50 6 6 6
39214 - 94 70 30 139 102 15 190 146 13 226 184 13
39215 -232 200 30 232 195 16 215 174 15 190 146 13
39216 -168 122 10 192 133 9 210 150 10 213 154 11
39217 -202 150 34 182 157 106 101 98 89 2 2 6
39218 - 2 2 6 78 78 78 116 116 116 58 58 58
39219 - 2 2 6 22 22 22 90 90 90 46 46 46
39220 - 18 18 18 6 6 6 0 0 0 0 0 0
39221 - 0 0 0 0 0 0 0 0 0 0 0 0
39222 - 0 0 0 0 0 0 0 0 0 0 0 0
39223 - 0 0 0 0 0 0 0 0 0 0 0 0
39224 - 0 0 0 0 0 0 0 0 0 0 0 0
39225 - 0 0 0 0 0 0 0 0 0 0 0 0
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 0 0 0
39231 - 0 0 0 0 0 0 0 0 0 0 0 0
39232 - 0 0 0 0 0 0 0 0 0 10 10 10
39233 - 38 38 38 86 86 86 50 50 50 6 6 6
39234 -128 128 128 174 154 114 156 107 11 168 122 10
39235 -198 155 10 184 144 12 197 138 11 200 144 11
39236 -206 145 10 206 145 10 197 138 11 188 164 115
39237 -195 195 195 198 198 198 174 174 174 14 14 14
39238 - 2 2 6 22 22 22 116 116 116 116 116 116
39239 - 22 22 22 2 2 6 74 74 74 70 70 70
39240 - 30 30 30 10 10 10 0 0 0 0 0 0
39241 - 0 0 0 0 0 0 0 0 0 0 0 0
39242 - 0 0 0 0 0 0 0 0 0 0 0 0
39243 - 0 0 0 0 0 0 0 0 0 0 0 0
39244 - 0 0 0 0 0 0 0 0 0 0 0 0
39245 - 0 0 0 0 0 0 0 0 0 0 0 0
39246 - 0 0 0 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 0 0 0 0 0 0
39251 - 0 0 0 0 0 0 0 0 0 0 0 0
39252 - 0 0 0 0 0 0 6 6 6 18 18 18
39253 - 50 50 50 101 101 101 26 26 26 10 10 10
39254 -138 138 138 190 190 190 174 154 114 156 107 11
39255 -197 138 11 200 144 11 197 138 11 192 133 9
39256 -180 123 7 190 142 34 190 178 144 187 187 187
39257 -202 202 202 221 221 221 214 214 214 66 66 66
39258 - 2 2 6 2 2 6 50 50 50 62 62 62
39259 - 6 6 6 2 2 6 10 10 10 90 90 90
39260 - 50 50 50 18 18 18 6 6 6 0 0 0
39261 - 0 0 0 0 0 0 0 0 0 0 0 0
39262 - 0 0 0 0 0 0 0 0 0 0 0 0
39263 - 0 0 0 0 0 0 0 0 0 0 0 0
39264 - 0 0 0 0 0 0 0 0 0 0 0 0
39265 - 0 0 0 0 0 0 0 0 0 0 0 0
39266 - 0 0 0 0 0 0 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 0 0 0
39270 - 0 0 0 0 0 0 0 0 0 0 0 0
39271 - 0 0 0 0 0 0 0 0 0 0 0 0
39272 - 0 0 0 0 0 0 10 10 10 34 34 34
39273 - 74 74 74 74 74 74 2 2 6 6 6 6
39274 -144 144 144 198 198 198 190 190 190 178 166 146
39275 -154 121 60 156 107 11 156 107 11 168 124 44
39276 -174 154 114 187 187 187 190 190 190 210 210 210
39277 -246 246 246 253 253 253 253 253 253 182 182 182
39278 - 6 6 6 2 2 6 2 2 6 2 2 6
39279 - 2 2 6 2 2 6 2 2 6 62 62 62
39280 - 74 74 74 34 34 34 14 14 14 0 0 0
39281 - 0 0 0 0 0 0 0 0 0 0 0 0
39282 - 0 0 0 0 0 0 0 0 0 0 0 0
39283 - 0 0 0 0 0 0 0 0 0 0 0 0
39284 - 0 0 0 0 0 0 0 0 0 0 0 0
39285 - 0 0 0 0 0 0 0 0 0 0 0 0
39286 - 0 0 0 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 0 0 0
39290 - 0 0 0 0 0 0 0 0 0 0 0 0
39291 - 0 0 0 0 0 0 0 0 0 0 0 0
39292 - 0 0 0 10 10 10 22 22 22 54 54 54
39293 - 94 94 94 18 18 18 2 2 6 46 46 46
39294 -234 234 234 221 221 221 190 190 190 190 190 190
39295 -190 190 190 187 187 187 187 187 187 190 190 190
39296 -190 190 190 195 195 195 214 214 214 242 242 242
39297 -253 253 253 253 253 253 253 253 253 253 253 253
39298 - 82 82 82 2 2 6 2 2 6 2 2 6
39299 - 2 2 6 2 2 6 2 2 6 14 14 14
39300 - 86 86 86 54 54 54 22 22 22 6 6 6
39301 - 0 0 0 0 0 0 0 0 0 0 0 0
39302 - 0 0 0 0 0 0 0 0 0 0 0 0
39303 - 0 0 0 0 0 0 0 0 0 0 0 0
39304 - 0 0 0 0 0 0 0 0 0 0 0 0
39305 - 0 0 0 0 0 0 0 0 0 0 0 0
39306 - 0 0 0 0 0 0 0 0 0 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 0 0 0 0 0 0 0 0 0 0 0 0
39311 - 0 0 0 0 0 0 0 0 0 0 0 0
39312 - 6 6 6 18 18 18 46 46 46 90 90 90
39313 - 46 46 46 18 18 18 6 6 6 182 182 182
39314 -253 253 253 246 246 246 206 206 206 190 190 190
39315 -190 190 190 190 190 190 190 190 190 190 190 190
39316 -206 206 206 231 231 231 250 250 250 253 253 253
39317 -253 253 253 253 253 253 253 253 253 253 253 253
39318 -202 202 202 14 14 14 2 2 6 2 2 6
39319 - 2 2 6 2 2 6 2 2 6 2 2 6
39320 - 42 42 42 86 86 86 42 42 42 18 18 18
39321 - 6 6 6 0 0 0 0 0 0 0 0 0
39322 - 0 0 0 0 0 0 0 0 0 0 0 0
39323 - 0 0 0 0 0 0 0 0 0 0 0 0
39324 - 0 0 0 0 0 0 0 0 0 0 0 0
39325 - 0 0 0 0 0 0 0 0 0 0 0 0
39326 - 0 0 0 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 0 0 0 0 0 0
39331 - 0 0 0 0 0 0 0 0 0 6 6 6
39332 - 14 14 14 38 38 38 74 74 74 66 66 66
39333 - 2 2 6 6 6 6 90 90 90 250 250 250
39334 -253 253 253 253 253 253 238 238 238 198 198 198
39335 -190 190 190 190 190 190 195 195 195 221 221 221
39336 -246 246 246 253 253 253 253 253 253 253 253 253
39337 -253 253 253 253 253 253 253 253 253 253 253 253
39338 -253 253 253 82 82 82 2 2 6 2 2 6
39339 - 2 2 6 2 2 6 2 2 6 2 2 6
39340 - 2 2 6 78 78 78 70 70 70 34 34 34
39341 - 14 14 14 6 6 6 0 0 0 0 0 0
39342 - 0 0 0 0 0 0 0 0 0 0 0 0
39343 - 0 0 0 0 0 0 0 0 0 0 0 0
39344 - 0 0 0 0 0 0 0 0 0 0 0 0
39345 - 0 0 0 0 0 0 0 0 0 0 0 0
39346 - 0 0 0 0 0 0 0 0 0 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 0 0 0 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 0 0 0 0 0 0 14 14 14
39352 - 34 34 34 66 66 66 78 78 78 6 6 6
39353 - 2 2 6 18 18 18 218 218 218 253 253 253
39354 -253 253 253 253 253 253 253 253 253 246 246 246
39355 -226 226 226 231 231 231 246 246 246 253 253 253
39356 -253 253 253 253 253 253 253 253 253 253 253 253
39357 -253 253 253 253 253 253 253 253 253 253 253 253
39358 -253 253 253 178 178 178 2 2 6 2 2 6
39359 - 2 2 6 2 2 6 2 2 6 2 2 6
39360 - 2 2 6 18 18 18 90 90 90 62 62 62
39361 - 30 30 30 10 10 10 0 0 0 0 0 0
39362 - 0 0 0 0 0 0 0 0 0 0 0 0
39363 - 0 0 0 0 0 0 0 0 0 0 0 0
39364 - 0 0 0 0 0 0 0 0 0 0 0 0
39365 - 0 0 0 0 0 0 0 0 0 0 0 0
39366 - 0 0 0 0 0 0 0 0 0 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 10 10 10 26 26 26
39372 - 58 58 58 90 90 90 18 18 18 2 2 6
39373 - 2 2 6 110 110 110 253 253 253 253 253 253
39374 -253 253 253 253 253 253 253 253 253 253 253 253
39375 -250 250 250 253 253 253 253 253 253 253 253 253
39376 -253 253 253 253 253 253 253 253 253 253 253 253
39377 -253 253 253 253 253 253 253 253 253 253 253 253
39378 -253 253 253 231 231 231 18 18 18 2 2 6
39379 - 2 2 6 2 2 6 2 2 6 2 2 6
39380 - 2 2 6 2 2 6 18 18 18 94 94 94
39381 - 54 54 54 26 26 26 10 10 10 0 0 0
39382 - 0 0 0 0 0 0 0 0 0 0 0 0
39383 - 0 0 0 0 0 0 0 0 0 0 0 0
39384 - 0 0 0 0 0 0 0 0 0 0 0 0
39385 - 0 0 0 0 0 0 0 0 0 0 0 0
39386 - 0 0 0 0 0 0 0 0 0 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 6 6 6 22 22 22 50 50 50
39392 - 90 90 90 26 26 26 2 2 6 2 2 6
39393 - 14 14 14 195 195 195 250 250 250 253 253 253
39394 -253 253 253 253 253 253 253 253 253 253 253 253
39395 -253 253 253 253 253 253 253 253 253 253 253 253
39396 -253 253 253 253 253 253 253 253 253 253 253 253
39397 -253 253 253 253 253 253 253 253 253 253 253 253
39398 -250 250 250 242 242 242 54 54 54 2 2 6
39399 - 2 2 6 2 2 6 2 2 6 2 2 6
39400 - 2 2 6 2 2 6 2 2 6 38 38 38
39401 - 86 86 86 50 50 50 22 22 22 6 6 6
39402 - 0 0 0 0 0 0 0 0 0 0 0 0
39403 - 0 0 0 0 0 0 0 0 0 0 0 0
39404 - 0 0 0 0 0 0 0 0 0 0 0 0
39405 - 0 0 0 0 0 0 0 0 0 0 0 0
39406 - 0 0 0 0 0 0 0 0 0 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 6 6 6 14 14 14 38 38 38 82 82 82
39412 - 34 34 34 2 2 6 2 2 6 2 2 6
39413 - 42 42 42 195 195 195 246 246 246 253 253 253
39414 -253 253 253 253 253 253 253 253 253 250 250 250
39415 -242 242 242 242 242 242 250 250 250 253 253 253
39416 -253 253 253 253 253 253 253 253 253 253 253 253
39417 -253 253 253 250 250 250 246 246 246 238 238 238
39418 -226 226 226 231 231 231 101 101 101 6 6 6
39419 - 2 2 6 2 2 6 2 2 6 2 2 6
39420 - 2 2 6 2 2 6 2 2 6 2 2 6
39421 - 38 38 38 82 82 82 42 42 42 14 14 14
39422 - 6 6 6 0 0 0 0 0 0 0 0 0
39423 - 0 0 0 0 0 0 0 0 0 0 0 0
39424 - 0 0 0 0 0 0 0 0 0 0 0 0
39425 - 0 0 0 0 0 0 0 0 0 0 0 0
39426 - 0 0 0 0 0 0 0 0 0 0 0 0
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 10 10 10 26 26 26 62 62 62 66 66 66
39432 - 2 2 6 2 2 6 2 2 6 6 6 6
39433 - 70 70 70 170 170 170 206 206 206 234 234 234
39434 -246 246 246 250 250 250 250 250 250 238 238 238
39435 -226 226 226 231 231 231 238 238 238 250 250 250
39436 -250 250 250 250 250 250 246 246 246 231 231 231
39437 -214 214 214 206 206 206 202 202 202 202 202 202
39438 -198 198 198 202 202 202 182 182 182 18 18 18
39439 - 2 2 6 2 2 6 2 2 6 2 2 6
39440 - 2 2 6 2 2 6 2 2 6 2 2 6
39441 - 2 2 6 62 62 62 66 66 66 30 30 30
39442 - 10 10 10 0 0 0 0 0 0 0 0 0
39443 - 0 0 0 0 0 0 0 0 0 0 0 0
39444 - 0 0 0 0 0 0 0 0 0 0 0 0
39445 - 0 0 0 0 0 0 0 0 0 0 0 0
39446 - 0 0 0 0 0 0 0 0 0 0 0 0
39447 - 0 0 0 0 0 0 0 0 0 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 0 0 0
39449 - 0 0 0 0 0 0 0 0 0 0 0 0
39450 - 0 0 0 0 0 0 0 0 0 0 0 0
39451 - 14 14 14 42 42 42 82 82 82 18 18 18
39452 - 2 2 6 2 2 6 2 2 6 10 10 10
39453 - 94 94 94 182 182 182 218 218 218 242 242 242
39454 -250 250 250 253 253 253 253 253 253 250 250 250
39455 -234 234 234 253 253 253 253 253 253 253 253 253
39456 -253 253 253 253 253 253 253 253 253 246 246 246
39457 -238 238 238 226 226 226 210 210 210 202 202 202
39458 -195 195 195 195 195 195 210 210 210 158 158 158
39459 - 6 6 6 14 14 14 50 50 50 14 14 14
39460 - 2 2 6 2 2 6 2 2 6 2 2 6
39461 - 2 2 6 6 6 6 86 86 86 46 46 46
39462 - 18 18 18 6 6 6 0 0 0 0 0 0
39463 - 0 0 0 0 0 0 0 0 0 0 0 0
39464 - 0 0 0 0 0 0 0 0 0 0 0 0
39465 - 0 0 0 0 0 0 0 0 0 0 0 0
39466 - 0 0 0 0 0 0 0 0 0 0 0 0
39467 - 0 0 0 0 0 0 0 0 0 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 0 0 0
39469 - 0 0 0 0 0 0 0 0 0 0 0 0
39470 - 0 0 0 0 0 0 0 0 0 6 6 6
39471 - 22 22 22 54 54 54 70 70 70 2 2 6
39472 - 2 2 6 10 10 10 2 2 6 22 22 22
39473 -166 166 166 231 231 231 250 250 250 253 253 253
39474 -253 253 253 253 253 253 253 253 253 250 250 250
39475 -242 242 242 253 253 253 253 253 253 253 253 253
39476 -253 253 253 253 253 253 253 253 253 253 253 253
39477 -253 253 253 253 253 253 253 253 253 246 246 246
39478 -231 231 231 206 206 206 198 198 198 226 226 226
39479 - 94 94 94 2 2 6 6 6 6 38 38 38
39480 - 30 30 30 2 2 6 2 2 6 2 2 6
39481 - 2 2 6 2 2 6 62 62 62 66 66 66
39482 - 26 26 26 10 10 10 0 0 0 0 0 0
39483 - 0 0 0 0 0 0 0 0 0 0 0 0
39484 - 0 0 0 0 0 0 0 0 0 0 0 0
39485 - 0 0 0 0 0 0 0 0 0 0 0 0
39486 - 0 0 0 0 0 0 0 0 0 0 0 0
39487 - 0 0 0 0 0 0 0 0 0 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 0 0 0
39489 - 0 0 0 0 0 0 0 0 0 0 0 0
39490 - 0 0 0 0 0 0 0 0 0 10 10 10
39491 - 30 30 30 74 74 74 50 50 50 2 2 6
39492 - 26 26 26 26 26 26 2 2 6 106 106 106
39493 -238 238 238 253 253 253 253 253 253 253 253 253
39494 -253 253 253 253 253 253 253 253 253 253 253 253
39495 -253 253 253 253 253 253 253 253 253 253 253 253
39496 -253 253 253 253 253 253 253 253 253 253 253 253
39497 -253 253 253 253 253 253 253 253 253 253 253 253
39498 -253 253 253 246 246 246 218 218 218 202 202 202
39499 -210 210 210 14 14 14 2 2 6 2 2 6
39500 - 30 30 30 22 22 22 2 2 6 2 2 6
39501 - 2 2 6 2 2 6 18 18 18 86 86 86
39502 - 42 42 42 14 14 14 0 0 0 0 0 0
39503 - 0 0 0 0 0 0 0 0 0 0 0 0
39504 - 0 0 0 0 0 0 0 0 0 0 0 0
39505 - 0 0 0 0 0 0 0 0 0 0 0 0
39506 - 0 0 0 0 0 0 0 0 0 0 0 0
39507 - 0 0 0 0 0 0 0 0 0 0 0 0
39508 - 0 0 0 0 0 0 0 0 0 0 0 0
39509 - 0 0 0 0 0 0 0 0 0 0 0 0
39510 - 0 0 0 0 0 0 0 0 0 14 14 14
39511 - 42 42 42 90 90 90 22 22 22 2 2 6
39512 - 42 42 42 2 2 6 18 18 18 218 218 218
39513 -253 253 253 253 253 253 253 253 253 253 253 253
39514 -253 253 253 253 253 253 253 253 253 253 253 253
39515 -253 253 253 253 253 253 253 253 253 253 253 253
39516 -253 253 253 253 253 253 253 253 253 253 253 253
39517 -253 253 253 253 253 253 253 253 253 253 253 253
39518 -253 253 253 253 253 253 250 250 250 221 221 221
39519 -218 218 218 101 101 101 2 2 6 14 14 14
39520 - 18 18 18 38 38 38 10 10 10 2 2 6
39521 - 2 2 6 2 2 6 2 2 6 78 78 78
39522 - 58 58 58 22 22 22 6 6 6 0 0 0
39523 - 0 0 0 0 0 0 0 0 0 0 0 0
39524 - 0 0 0 0 0 0 0 0 0 0 0 0
39525 - 0 0 0 0 0 0 0 0 0 0 0 0
39526 - 0 0 0 0 0 0 0 0 0 0 0 0
39527 - 0 0 0 0 0 0 0 0 0 0 0 0
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 6 6 6 18 18 18
39531 - 54 54 54 82 82 82 2 2 6 26 26 26
39532 - 22 22 22 2 2 6 123 123 123 253 253 253
39533 -253 253 253 253 253 253 253 253 253 253 253 253
39534 -253 253 253 253 253 253 253 253 253 253 253 253
39535 -253 253 253 253 253 253 253 253 253 253 253 253
39536 -253 253 253 253 253 253 253 253 253 253 253 253
39537 -253 253 253 253 253 253 253 253 253 253 253 253
39538 -253 253 253 253 253 253 253 253 253 250 250 250
39539 -238 238 238 198 198 198 6 6 6 38 38 38
39540 - 58 58 58 26 26 26 38 38 38 2 2 6
39541 - 2 2 6 2 2 6 2 2 6 46 46 46
39542 - 78 78 78 30 30 30 10 10 10 0 0 0
39543 - 0 0 0 0 0 0 0 0 0 0 0 0
39544 - 0 0 0 0 0 0 0 0 0 0 0 0
39545 - 0 0 0 0 0 0 0 0 0 0 0 0
39546 - 0 0 0 0 0 0 0 0 0 0 0 0
39547 - 0 0 0 0 0 0 0 0 0 0 0 0
39548 - 0 0 0 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 10 10 10 30 30 30
39551 - 74 74 74 58 58 58 2 2 6 42 42 42
39552 - 2 2 6 22 22 22 231 231 231 253 253 253
39553 -253 253 253 253 253 253 253 253 253 253 253 253
39554 -253 253 253 253 253 253 253 253 253 250 250 250
39555 -253 253 253 253 253 253 253 253 253 253 253 253
39556 -253 253 253 253 253 253 253 253 253 253 253 253
39557 -253 253 253 253 253 253 253 253 253 253 253 253
39558 -253 253 253 253 253 253 253 253 253 253 253 253
39559 -253 253 253 246 246 246 46 46 46 38 38 38
39560 - 42 42 42 14 14 14 38 38 38 14 14 14
39561 - 2 2 6 2 2 6 2 2 6 6 6 6
39562 - 86 86 86 46 46 46 14 14 14 0 0 0
39563 - 0 0 0 0 0 0 0 0 0 0 0 0
39564 - 0 0 0 0 0 0 0 0 0 0 0 0
39565 - 0 0 0 0 0 0 0 0 0 0 0 0
39566 - 0 0 0 0 0 0 0 0 0 0 0 0
39567 - 0 0 0 0 0 0 0 0 0 0 0 0
39568 - 0 0 0 0 0 0 0 0 0 0 0 0
39569 - 0 0 0 0 0 0 0 0 0 0 0 0
39570 - 0 0 0 6 6 6 14 14 14 42 42 42
39571 - 90 90 90 18 18 18 18 18 18 26 26 26
39572 - 2 2 6 116 116 116 253 253 253 253 253 253
39573 -253 253 253 253 253 253 253 253 253 253 253 253
39574 -253 253 253 253 253 253 250 250 250 238 238 238
39575 -253 253 253 253 253 253 253 253 253 253 253 253
39576 -253 253 253 253 253 253 253 253 253 253 253 253
39577 -253 253 253 253 253 253 253 253 253 253 253 253
39578 -253 253 253 253 253 253 253 253 253 253 253 253
39579 -253 253 253 253 253 253 94 94 94 6 6 6
39580 - 2 2 6 2 2 6 10 10 10 34 34 34
39581 - 2 2 6 2 2 6 2 2 6 2 2 6
39582 - 74 74 74 58 58 58 22 22 22 6 6 6
39583 - 0 0 0 0 0 0 0 0 0 0 0 0
39584 - 0 0 0 0 0 0 0 0 0 0 0 0
39585 - 0 0 0 0 0 0 0 0 0 0 0 0
39586 - 0 0 0 0 0 0 0 0 0 0 0 0
39587 - 0 0 0 0 0 0 0 0 0 0 0 0
39588 - 0 0 0 0 0 0 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 0 0 0 10 10 10 26 26 26 66 66 66
39591 - 82 82 82 2 2 6 38 38 38 6 6 6
39592 - 14 14 14 210 210 210 253 253 253 253 253 253
39593 -253 253 253 253 253 253 253 253 253 253 253 253
39594 -253 253 253 253 253 253 246 246 246 242 242 242
39595 -253 253 253 253 253 253 253 253 253 253 253 253
39596 -253 253 253 253 253 253 253 253 253 253 253 253
39597 -253 253 253 253 253 253 253 253 253 253 253 253
39598 -253 253 253 253 253 253 253 253 253 253 253 253
39599 -253 253 253 253 253 253 144 144 144 2 2 6
39600 - 2 2 6 2 2 6 2 2 6 46 46 46
39601 - 2 2 6 2 2 6 2 2 6 2 2 6
39602 - 42 42 42 74 74 74 30 30 30 10 10 10
39603 - 0 0 0 0 0 0 0 0 0 0 0 0
39604 - 0 0 0 0 0 0 0 0 0 0 0 0
39605 - 0 0 0 0 0 0 0 0 0 0 0 0
39606 - 0 0 0 0 0 0 0 0 0 0 0 0
39607 - 0 0 0 0 0 0 0 0 0 0 0 0
39608 - 0 0 0 0 0 0 0 0 0 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 6 6 6 14 14 14 42 42 42 90 90 90
39611 - 26 26 26 6 6 6 42 42 42 2 2 6
39612 - 74 74 74 250 250 250 253 253 253 253 253 253
39613 -253 253 253 253 253 253 253 253 253 253 253 253
39614 -253 253 253 253 253 253 242 242 242 242 242 242
39615 -253 253 253 253 253 253 253 253 253 253 253 253
39616 -253 253 253 253 253 253 253 253 253 253 253 253
39617 -253 253 253 253 253 253 253 253 253 253 253 253
39618 -253 253 253 253 253 253 253 253 253 253 253 253
39619 -253 253 253 253 253 253 182 182 182 2 2 6
39620 - 2 2 6 2 2 6 2 2 6 46 46 46
39621 - 2 2 6 2 2 6 2 2 6 2 2 6
39622 - 10 10 10 86 86 86 38 38 38 10 10 10
39623 - 0 0 0 0 0 0 0 0 0 0 0 0
39624 - 0 0 0 0 0 0 0 0 0 0 0 0
39625 - 0 0 0 0 0 0 0 0 0 0 0 0
39626 - 0 0 0 0 0 0 0 0 0 0 0 0
39627 - 0 0 0 0 0 0 0 0 0 0 0 0
39628 - 0 0 0 0 0 0 0 0 0 0 0 0
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 10 10 10 26 26 26 66 66 66 82 82 82
39631 - 2 2 6 22 22 22 18 18 18 2 2 6
39632 -149 149 149 253 253 253 253 253 253 253 253 253
39633 -253 253 253 253 253 253 253 253 253 253 253 253
39634 -253 253 253 253 253 253 234 234 234 242 242 242
39635 -253 253 253 253 253 253 253 253 253 253 253 253
39636 -253 253 253 253 253 253 253 253 253 253 253 253
39637 -253 253 253 253 253 253 253 253 253 253 253 253
39638 -253 253 253 253 253 253 253 253 253 253 253 253
39639 -253 253 253 253 253 253 206 206 206 2 2 6
39640 - 2 2 6 2 2 6 2 2 6 38 38 38
39641 - 2 2 6 2 2 6 2 2 6 2 2 6
39642 - 6 6 6 86 86 86 46 46 46 14 14 14
39643 - 0 0 0 0 0 0 0 0 0 0 0 0
39644 - 0 0 0 0 0 0 0 0 0 0 0 0
39645 - 0 0 0 0 0 0 0 0 0 0 0 0
39646 - 0 0 0 0 0 0 0 0 0 0 0 0
39647 - 0 0 0 0 0 0 0 0 0 0 0 0
39648 - 0 0 0 0 0 0 0 0 0 0 0 0
39649 - 0 0 0 0 0 0 0 0 0 6 6 6
39650 - 18 18 18 46 46 46 86 86 86 18 18 18
39651 - 2 2 6 34 34 34 10 10 10 6 6 6
39652 -210 210 210 253 253 253 253 253 253 253 253 253
39653 -253 253 253 253 253 253 253 253 253 253 253 253
39654 -253 253 253 253 253 253 234 234 234 242 242 242
39655 -253 253 253 253 253 253 253 253 253 253 253 253
39656 -253 253 253 253 253 253 253 253 253 253 253 253
39657 -253 253 253 253 253 253 253 253 253 253 253 253
39658 -253 253 253 253 253 253 253 253 253 253 253 253
39659 -253 253 253 253 253 253 221 221 221 6 6 6
39660 - 2 2 6 2 2 6 6 6 6 30 30 30
39661 - 2 2 6 2 2 6 2 2 6 2 2 6
39662 - 2 2 6 82 82 82 54 54 54 18 18 18
39663 - 6 6 6 0 0 0 0 0 0 0 0 0
39664 - 0 0 0 0 0 0 0 0 0 0 0 0
39665 - 0 0 0 0 0 0 0 0 0 0 0 0
39666 - 0 0 0 0 0 0 0 0 0 0 0 0
39667 - 0 0 0 0 0 0 0 0 0 0 0 0
39668 - 0 0 0 0 0 0 0 0 0 0 0 0
39669 - 0 0 0 0 0 0 0 0 0 10 10 10
39670 - 26 26 26 66 66 66 62 62 62 2 2 6
39671 - 2 2 6 38 38 38 10 10 10 26 26 26
39672 -238 238 238 253 253 253 253 253 253 253 253 253
39673 -253 253 253 253 253 253 253 253 253 253 253 253
39674 -253 253 253 253 253 253 231 231 231 238 238 238
39675 -253 253 253 253 253 253 253 253 253 253 253 253
39676 -253 253 253 253 253 253 253 253 253 253 253 253
39677 -253 253 253 253 253 253 253 253 253 253 253 253
39678 -253 253 253 253 253 253 253 253 253 253 253 253
39679 -253 253 253 253 253 253 231 231 231 6 6 6
39680 - 2 2 6 2 2 6 10 10 10 30 30 30
39681 - 2 2 6 2 2 6 2 2 6 2 2 6
39682 - 2 2 6 66 66 66 58 58 58 22 22 22
39683 - 6 6 6 0 0 0 0 0 0 0 0 0
39684 - 0 0 0 0 0 0 0 0 0 0 0 0
39685 - 0 0 0 0 0 0 0 0 0 0 0 0
39686 - 0 0 0 0 0 0 0 0 0 0 0 0
39687 - 0 0 0 0 0 0 0 0 0 0 0 0
39688 - 0 0 0 0 0 0 0 0 0 0 0 0
39689 - 0 0 0 0 0 0 0 0 0 10 10 10
39690 - 38 38 38 78 78 78 6 6 6 2 2 6
39691 - 2 2 6 46 46 46 14 14 14 42 42 42
39692 -246 246 246 253 253 253 253 253 253 253 253 253
39693 -253 253 253 253 253 253 253 253 253 253 253 253
39694 -253 253 253 253 253 253 231 231 231 242 242 242
39695 -253 253 253 253 253 253 253 253 253 253 253 253
39696 -253 253 253 253 253 253 253 253 253 253 253 253
39697 -253 253 253 253 253 253 253 253 253 253 253 253
39698 -253 253 253 253 253 253 253 253 253 253 253 253
39699 -253 253 253 253 253 253 234 234 234 10 10 10
39700 - 2 2 6 2 2 6 22 22 22 14 14 14
39701 - 2 2 6 2 2 6 2 2 6 2 2 6
39702 - 2 2 6 66 66 66 62 62 62 22 22 22
39703 - 6 6 6 0 0 0 0 0 0 0 0 0
39704 - 0 0 0 0 0 0 0 0 0 0 0 0
39705 - 0 0 0 0 0 0 0 0 0 0 0 0
39706 - 0 0 0 0 0 0 0 0 0 0 0 0
39707 - 0 0 0 0 0 0 0 0 0 0 0 0
39708 - 0 0 0 0 0 0 0 0 0 0 0 0
39709 - 0 0 0 0 0 0 6 6 6 18 18 18
39710 - 50 50 50 74 74 74 2 2 6 2 2 6
39711 - 14 14 14 70 70 70 34 34 34 62 62 62
39712 -250 250 250 253 253 253 253 253 253 253 253 253
39713 -253 253 253 253 253 253 253 253 253 253 253 253
39714 -253 253 253 253 253 253 231 231 231 246 246 246
39715 -253 253 253 253 253 253 253 253 253 253 253 253
39716 -253 253 253 253 253 253 253 253 253 253 253 253
39717 -253 253 253 253 253 253 253 253 253 253 253 253
39718 -253 253 253 253 253 253 253 253 253 253 253 253
39719 -253 253 253 253 253 253 234 234 234 14 14 14
39720 - 2 2 6 2 2 6 30 30 30 2 2 6
39721 - 2 2 6 2 2 6 2 2 6 2 2 6
39722 - 2 2 6 66 66 66 62 62 62 22 22 22
39723 - 6 6 6 0 0 0 0 0 0 0 0 0
39724 - 0 0 0 0 0 0 0 0 0 0 0 0
39725 - 0 0 0 0 0 0 0 0 0 0 0 0
39726 - 0 0 0 0 0 0 0 0 0 0 0 0
39727 - 0 0 0 0 0 0 0 0 0 0 0 0
39728 - 0 0 0 0 0 0 0 0 0 0 0 0
39729 - 0 0 0 0 0 0 6 6 6 18 18 18
39730 - 54 54 54 62 62 62 2 2 6 2 2 6
39731 - 2 2 6 30 30 30 46 46 46 70 70 70
39732 -250 250 250 253 253 253 253 253 253 253 253 253
39733 -253 253 253 253 253 253 253 253 253 253 253 253
39734 -253 253 253 253 253 253 231 231 231 246 246 246
39735 -253 253 253 253 253 253 253 253 253 253 253 253
39736 -253 253 253 253 253 253 253 253 253 253 253 253
39737 -253 253 253 253 253 253 253 253 253 253 253 253
39738 -253 253 253 253 253 253 253 253 253 253 253 253
39739 -253 253 253 253 253 253 226 226 226 10 10 10
39740 - 2 2 6 6 6 6 30 30 30 2 2 6
39741 - 2 2 6 2 2 6 2 2 6 2 2 6
39742 - 2 2 6 66 66 66 58 58 58 22 22 22
39743 - 6 6 6 0 0 0 0 0 0 0 0 0
39744 - 0 0 0 0 0 0 0 0 0 0 0 0
39745 - 0 0 0 0 0 0 0 0 0 0 0 0
39746 - 0 0 0 0 0 0 0 0 0 0 0 0
39747 - 0 0 0 0 0 0 0 0 0 0 0 0
39748 - 0 0 0 0 0 0 0 0 0 0 0 0
39749 - 0 0 0 0 0 0 6 6 6 22 22 22
39750 - 58 58 58 62 62 62 2 2 6 2 2 6
39751 - 2 2 6 2 2 6 30 30 30 78 78 78
39752 -250 250 250 253 253 253 253 253 253 253 253 253
39753 -253 253 253 253 253 253 253 253 253 253 253 253
39754 -253 253 253 253 253 253 231 231 231 246 246 246
39755 -253 253 253 253 253 253 253 253 253 253 253 253
39756 -253 253 253 253 253 253 253 253 253 253 253 253
39757 -253 253 253 253 253 253 253 253 253 253 253 253
39758 -253 253 253 253 253 253 253 253 253 253 253 253
39759 -253 253 253 253 253 253 206 206 206 2 2 6
39760 - 22 22 22 34 34 34 18 14 6 22 22 22
39761 - 26 26 26 18 18 18 6 6 6 2 2 6
39762 - 2 2 6 82 82 82 54 54 54 18 18 18
39763 - 6 6 6 0 0 0 0 0 0 0 0 0
39764 - 0 0 0 0 0 0 0 0 0 0 0 0
39765 - 0 0 0 0 0 0 0 0 0 0 0 0
39766 - 0 0 0 0 0 0 0 0 0 0 0 0
39767 - 0 0 0 0 0 0 0 0 0 0 0 0
39768 - 0 0 0 0 0 0 0 0 0 0 0 0
39769 - 0 0 0 0 0 0 6 6 6 26 26 26
39770 - 62 62 62 106 106 106 74 54 14 185 133 11
39771 -210 162 10 121 92 8 6 6 6 62 62 62
39772 -238 238 238 253 253 253 253 253 253 253 253 253
39773 -253 253 253 253 253 253 253 253 253 253 253 253
39774 -253 253 253 253 253 253 231 231 231 246 246 246
39775 -253 253 253 253 253 253 253 253 253 253 253 253
39776 -253 253 253 253 253 253 253 253 253 253 253 253
39777 -253 253 253 253 253 253 253 253 253 253 253 253
39778 -253 253 253 253 253 253 253 253 253 253 253 253
39779 -253 253 253 253 253 253 158 158 158 18 18 18
39780 - 14 14 14 2 2 6 2 2 6 2 2 6
39781 - 6 6 6 18 18 18 66 66 66 38 38 38
39782 - 6 6 6 94 94 94 50 50 50 18 18 18
39783 - 6 6 6 0 0 0 0 0 0 0 0 0
39784 - 0 0 0 0 0 0 0 0 0 0 0 0
39785 - 0 0 0 0 0 0 0 0 0 0 0 0
39786 - 0 0 0 0 0 0 0 0 0 0 0 0
39787 - 0 0 0 0 0 0 0 0 0 0 0 0
39788 - 0 0 0 0 0 0 0 0 0 6 6 6
39789 - 10 10 10 10 10 10 18 18 18 38 38 38
39790 - 78 78 78 142 134 106 216 158 10 242 186 14
39791 -246 190 14 246 190 14 156 118 10 10 10 10
39792 - 90 90 90 238 238 238 253 253 253 253 253 253
39793 -253 253 253 253 253 253 253 253 253 253 253 253
39794 -253 253 253 253 253 253 231 231 231 250 250 250
39795 -253 253 253 253 253 253 253 253 253 253 253 253
39796 -253 253 253 253 253 253 253 253 253 253 253 253
39797 -253 253 253 253 253 253 253 253 253 253 253 253
39798 -253 253 253 253 253 253 253 253 253 246 230 190
39799 -238 204 91 238 204 91 181 142 44 37 26 9
39800 - 2 2 6 2 2 6 2 2 6 2 2 6
39801 - 2 2 6 2 2 6 38 38 38 46 46 46
39802 - 26 26 26 106 106 106 54 54 54 18 18 18
39803 - 6 6 6 0 0 0 0 0 0 0 0 0
39804 - 0 0 0 0 0 0 0 0 0 0 0 0
39805 - 0 0 0 0 0 0 0 0 0 0 0 0
39806 - 0 0 0 0 0 0 0 0 0 0 0 0
39807 - 0 0 0 0 0 0 0 0 0 0 0 0
39808 - 0 0 0 6 6 6 14 14 14 22 22 22
39809 - 30 30 30 38 38 38 50 50 50 70 70 70
39810 -106 106 106 190 142 34 226 170 11 242 186 14
39811 -246 190 14 246 190 14 246 190 14 154 114 10
39812 - 6 6 6 74 74 74 226 226 226 253 253 253
39813 -253 253 253 253 253 253 253 253 253 253 253 253
39814 -253 253 253 253 253 253 231 231 231 250 250 250
39815 -253 253 253 253 253 253 253 253 253 253 253 253
39816 -253 253 253 253 253 253 253 253 253 253 253 253
39817 -253 253 253 253 253 253 253 253 253 253 253 253
39818 -253 253 253 253 253 253 253 253 253 228 184 62
39819 -241 196 14 241 208 19 232 195 16 38 30 10
39820 - 2 2 6 2 2 6 2 2 6 2 2 6
39821 - 2 2 6 6 6 6 30 30 30 26 26 26
39822 -203 166 17 154 142 90 66 66 66 26 26 26
39823 - 6 6 6 0 0 0 0 0 0 0 0 0
39824 - 0 0 0 0 0 0 0 0 0 0 0 0
39825 - 0 0 0 0 0 0 0 0 0 0 0 0
39826 - 0 0 0 0 0 0 0 0 0 0 0 0
39827 - 0 0 0 0 0 0 0 0 0 0 0 0
39828 - 6 6 6 18 18 18 38 38 38 58 58 58
39829 - 78 78 78 86 86 86 101 101 101 123 123 123
39830 -175 146 61 210 150 10 234 174 13 246 186 14
39831 -246 190 14 246 190 14 246 190 14 238 190 10
39832 -102 78 10 2 2 6 46 46 46 198 198 198
39833 -253 253 253 253 253 253 253 253 253 253 253 253
39834 -253 253 253 253 253 253 234 234 234 242 242 242
39835 -253 253 253 253 253 253 253 253 253 253 253 253
39836 -253 253 253 253 253 253 253 253 253 253 253 253
39837 -253 253 253 253 253 253 253 253 253 253 253 253
39838 -253 253 253 253 253 253 253 253 253 224 178 62
39839 -242 186 14 241 196 14 210 166 10 22 18 6
39840 - 2 2 6 2 2 6 2 2 6 2 2 6
39841 - 2 2 6 2 2 6 6 6 6 121 92 8
39842 -238 202 15 232 195 16 82 82 82 34 34 34
39843 - 10 10 10 0 0 0 0 0 0 0 0 0
39844 - 0 0 0 0 0 0 0 0 0 0 0 0
39845 - 0 0 0 0 0 0 0 0 0 0 0 0
39846 - 0 0 0 0 0 0 0 0 0 0 0 0
39847 - 0 0 0 0 0 0 0 0 0 0 0 0
39848 - 14 14 14 38 38 38 70 70 70 154 122 46
39849 -190 142 34 200 144 11 197 138 11 197 138 11
39850 -213 154 11 226 170 11 242 186 14 246 190 14
39851 -246 190 14 246 190 14 246 190 14 246 190 14
39852 -225 175 15 46 32 6 2 2 6 22 22 22
39853 -158 158 158 250 250 250 253 253 253 253 253 253
39854 -253 253 253 253 253 253 253 253 253 253 253 253
39855 -253 253 253 253 253 253 253 253 253 253 253 253
39856 -253 253 253 253 253 253 253 253 253 253 253 253
39857 -253 253 253 253 253 253 253 253 253 253 253 253
39858 -253 253 253 250 250 250 242 242 242 224 178 62
39859 -239 182 13 236 186 11 213 154 11 46 32 6
39860 - 2 2 6 2 2 6 2 2 6 2 2 6
39861 - 2 2 6 2 2 6 61 42 6 225 175 15
39862 -238 190 10 236 186 11 112 100 78 42 42 42
39863 - 14 14 14 0 0 0 0 0 0 0 0 0
39864 - 0 0 0 0 0 0 0 0 0 0 0 0
39865 - 0 0 0 0 0 0 0 0 0 0 0 0
39866 - 0 0 0 0 0 0 0 0 0 0 0 0
39867 - 0 0 0 0 0 0 0 0 0 6 6 6
39868 - 22 22 22 54 54 54 154 122 46 213 154 11
39869 -226 170 11 230 174 11 226 170 11 226 170 11
39870 -236 178 12 242 186 14 246 190 14 246 190 14
39871 -246 190 14 246 190 14 246 190 14 246 190 14
39872 -241 196 14 184 144 12 10 10 10 2 2 6
39873 - 6 6 6 116 116 116 242 242 242 253 253 253
39874 -253 253 253 253 253 253 253 253 253 253 253 253
39875 -253 253 253 253 253 253 253 253 253 253 253 253
39876 -253 253 253 253 253 253 253 253 253 253 253 253
39877 -253 253 253 253 253 253 253 253 253 253 253 253
39878 -253 253 253 231 231 231 198 198 198 214 170 54
39879 -236 178 12 236 178 12 210 150 10 137 92 6
39880 - 18 14 6 2 2 6 2 2 6 2 2 6
39881 - 6 6 6 70 47 6 200 144 11 236 178 12
39882 -239 182 13 239 182 13 124 112 88 58 58 58
39883 - 22 22 22 6 6 6 0 0 0 0 0 0
39884 - 0 0 0 0 0 0 0 0 0 0 0 0
39885 - 0 0 0 0 0 0 0 0 0 0 0 0
39886 - 0 0 0 0 0 0 0 0 0 0 0 0
39887 - 0 0 0 0 0 0 0 0 0 10 10 10
39888 - 30 30 30 70 70 70 180 133 36 226 170 11
39889 -239 182 13 242 186 14 242 186 14 246 186 14
39890 -246 190 14 246 190 14 246 190 14 246 190 14
39891 -246 190 14 246 190 14 246 190 14 246 190 14
39892 -246 190 14 232 195 16 98 70 6 2 2 6
39893 - 2 2 6 2 2 6 66 66 66 221 221 221
39894 -253 253 253 253 253 253 253 253 253 253 253 253
39895 -253 253 253 253 253 253 253 253 253 253 253 253
39896 -253 253 253 253 253 253 253 253 253 253 253 253
39897 -253 253 253 253 253 253 253 253 253 253 253 253
39898 -253 253 253 206 206 206 198 198 198 214 166 58
39899 -230 174 11 230 174 11 216 158 10 192 133 9
39900 -163 110 8 116 81 8 102 78 10 116 81 8
39901 -167 114 7 197 138 11 226 170 11 239 182 13
39902 -242 186 14 242 186 14 162 146 94 78 78 78
39903 - 34 34 34 14 14 14 6 6 6 0 0 0
39904 - 0 0 0 0 0 0 0 0 0 0 0 0
39905 - 0 0 0 0 0 0 0 0 0 0 0 0
39906 - 0 0 0 0 0 0 0 0 0 0 0 0
39907 - 0 0 0 0 0 0 0 0 0 6 6 6
39908 - 30 30 30 78 78 78 190 142 34 226 170 11
39909 -239 182 13 246 190 14 246 190 14 246 190 14
39910 -246 190 14 246 190 14 246 190 14 246 190 14
39911 -246 190 14 246 190 14 246 190 14 246 190 14
39912 -246 190 14 241 196 14 203 166 17 22 18 6
39913 - 2 2 6 2 2 6 2 2 6 38 38 38
39914 -218 218 218 253 253 253 253 253 253 253 253 253
39915 -253 253 253 253 253 253 253 253 253 253 253 253
39916 -253 253 253 253 253 253 253 253 253 253 253 253
39917 -253 253 253 253 253 253 253 253 253 253 253 253
39918 -250 250 250 206 206 206 198 198 198 202 162 69
39919 -226 170 11 236 178 12 224 166 10 210 150 10
39920 -200 144 11 197 138 11 192 133 9 197 138 11
39921 -210 150 10 226 170 11 242 186 14 246 190 14
39922 -246 190 14 246 186 14 225 175 15 124 112 88
39923 - 62 62 62 30 30 30 14 14 14 6 6 6
39924 - 0 0 0 0 0 0 0 0 0 0 0 0
39925 - 0 0 0 0 0 0 0 0 0 0 0 0
39926 - 0 0 0 0 0 0 0 0 0 0 0 0
39927 - 0 0 0 0 0 0 0 0 0 10 10 10
39928 - 30 30 30 78 78 78 174 135 50 224 166 10
39929 -239 182 13 246 190 14 246 190 14 246 190 14
39930 -246 190 14 246 190 14 246 190 14 246 190 14
39931 -246 190 14 246 190 14 246 190 14 246 190 14
39932 -246 190 14 246 190 14 241 196 14 139 102 15
39933 - 2 2 6 2 2 6 2 2 6 2 2 6
39934 - 78 78 78 250 250 250 253 253 253 253 253 253
39935 -253 253 253 253 253 253 253 253 253 253 253 253
39936 -253 253 253 253 253 253 253 253 253 253 253 253
39937 -253 253 253 253 253 253 253 253 253 253 253 253
39938 -250 250 250 214 214 214 198 198 198 190 150 46
39939 -219 162 10 236 178 12 234 174 13 224 166 10
39940 -216 158 10 213 154 11 213 154 11 216 158 10
39941 -226 170 11 239 182 13 246 190 14 246 190 14
39942 -246 190 14 246 190 14 242 186 14 206 162 42
39943 -101 101 101 58 58 58 30 30 30 14 14 14
39944 - 6 6 6 0 0 0 0 0 0 0 0 0
39945 - 0 0 0 0 0 0 0 0 0 0 0 0
39946 - 0 0 0 0 0 0 0 0 0 0 0 0
39947 - 0 0 0 0 0 0 0 0 0 10 10 10
39948 - 30 30 30 74 74 74 174 135 50 216 158 10
39949 -236 178 12 246 190 14 246 190 14 246 190 14
39950 -246 190 14 246 190 14 246 190 14 246 190 14
39951 -246 190 14 246 190 14 246 190 14 246 190 14
39952 -246 190 14 246 190 14 241 196 14 226 184 13
39953 - 61 42 6 2 2 6 2 2 6 2 2 6
39954 - 22 22 22 238 238 238 253 253 253 253 253 253
39955 -253 253 253 253 253 253 253 253 253 253 253 253
39956 -253 253 253 253 253 253 253 253 253 253 253 253
39957 -253 253 253 253 253 253 253 253 253 253 253 253
39958 -253 253 253 226 226 226 187 187 187 180 133 36
39959 -216 158 10 236 178 12 239 182 13 236 178 12
39960 -230 174 11 226 170 11 226 170 11 230 174 11
39961 -236 178 12 242 186 14 246 190 14 246 190 14
39962 -246 190 14 246 190 14 246 186 14 239 182 13
39963 -206 162 42 106 106 106 66 66 66 34 34 34
39964 - 14 14 14 6 6 6 0 0 0 0 0 0
39965 - 0 0 0 0 0 0 0 0 0 0 0 0
39966 - 0 0 0 0 0 0 0 0 0 0 0 0
39967 - 0 0 0 0 0 0 0 0 0 6 6 6
39968 - 26 26 26 70 70 70 163 133 67 213 154 11
39969 -236 178 12 246 190 14 246 190 14 246 190 14
39970 -246 190 14 246 190 14 246 190 14 246 190 14
39971 -246 190 14 246 190 14 246 190 14 246 190 14
39972 -246 190 14 246 190 14 246 190 14 241 196 14
39973 -190 146 13 18 14 6 2 2 6 2 2 6
39974 - 46 46 46 246 246 246 253 253 253 253 253 253
39975 -253 253 253 253 253 253 253 253 253 253 253 253
39976 -253 253 253 253 253 253 253 253 253 253 253 253
39977 -253 253 253 253 253 253 253 253 253 253 253 253
39978 -253 253 253 221 221 221 86 86 86 156 107 11
39979 -216 158 10 236 178 12 242 186 14 246 186 14
39980 -242 186 14 239 182 13 239 182 13 242 186 14
39981 -242 186 14 246 186 14 246 190 14 246 190 14
39982 -246 190 14 246 190 14 246 190 14 246 190 14
39983 -242 186 14 225 175 15 142 122 72 66 66 66
39984 - 30 30 30 10 10 10 0 0 0 0 0 0
39985 - 0 0 0 0 0 0 0 0 0 0 0 0
39986 - 0 0 0 0 0 0 0 0 0 0 0 0
39987 - 0 0 0 0 0 0 0 0 0 6 6 6
39988 - 26 26 26 70 70 70 163 133 67 210 150 10
39989 -236 178 12 246 190 14 246 190 14 246 190 14
39990 -246 190 14 246 190 14 246 190 14 246 190 14
39991 -246 190 14 246 190 14 246 190 14 246 190 14
39992 -246 190 14 246 190 14 246 190 14 246 190 14
39993 -232 195 16 121 92 8 34 34 34 106 106 106
39994 -221 221 221 253 253 253 253 253 253 253 253 253
39995 -253 253 253 253 253 253 253 253 253 253 253 253
39996 -253 253 253 253 253 253 253 253 253 253 253 253
39997 -253 253 253 253 253 253 253 253 253 253 253 253
39998 -242 242 242 82 82 82 18 14 6 163 110 8
39999 -216 158 10 236 178 12 242 186 14 246 190 14
40000 -246 190 14 246 190 14 246 190 14 246 190 14
40001 -246 190 14 246 190 14 246 190 14 246 190 14
40002 -246 190 14 246 190 14 246 190 14 246 190 14
40003 -246 190 14 246 190 14 242 186 14 163 133 67
40004 - 46 46 46 18 18 18 6 6 6 0 0 0
40005 - 0 0 0 0 0 0 0 0 0 0 0 0
40006 - 0 0 0 0 0 0 0 0 0 0 0 0
40007 - 0 0 0 0 0 0 0 0 0 10 10 10
40008 - 30 30 30 78 78 78 163 133 67 210 150 10
40009 -236 178 12 246 186 14 246 190 14 246 190 14
40010 -246 190 14 246 190 14 246 190 14 246 190 14
40011 -246 190 14 246 190 14 246 190 14 246 190 14
40012 -246 190 14 246 190 14 246 190 14 246 190 14
40013 -241 196 14 215 174 15 190 178 144 253 253 253
40014 -253 253 253 253 253 253 253 253 253 253 253 253
40015 -253 253 253 253 253 253 253 253 253 253 253 253
40016 -253 253 253 253 253 253 253 253 253 253 253 253
40017 -253 253 253 253 253 253 253 253 253 218 218 218
40018 - 58 58 58 2 2 6 22 18 6 167 114 7
40019 -216 158 10 236 178 12 246 186 14 246 190 14
40020 -246 190 14 246 190 14 246 190 14 246 190 14
40021 -246 190 14 246 190 14 246 190 14 246 190 14
40022 -246 190 14 246 190 14 246 190 14 246 190 14
40023 -246 190 14 246 186 14 242 186 14 190 150 46
40024 - 54 54 54 22 22 22 6 6 6 0 0 0
40025 - 0 0 0 0 0 0 0 0 0 0 0 0
40026 - 0 0 0 0 0 0 0 0 0 0 0 0
40027 - 0 0 0 0 0 0 0 0 0 14 14 14
40028 - 38 38 38 86 86 86 180 133 36 213 154 11
40029 -236 178 12 246 186 14 246 190 14 246 190 14
40030 -246 190 14 246 190 14 246 190 14 246 190 14
40031 -246 190 14 246 190 14 246 190 14 246 190 14
40032 -246 190 14 246 190 14 246 190 14 246 190 14
40033 -246 190 14 232 195 16 190 146 13 214 214 214
40034 -253 253 253 253 253 253 253 253 253 253 253 253
40035 -253 253 253 253 253 253 253 253 253 253 253 253
40036 -253 253 253 253 253 253 253 253 253 253 253 253
40037 -253 253 253 250 250 250 170 170 170 26 26 26
40038 - 2 2 6 2 2 6 37 26 9 163 110 8
40039 -219 162 10 239 182 13 246 186 14 246 190 14
40040 -246 190 14 246 190 14 246 190 14 246 190 14
40041 -246 190 14 246 190 14 246 190 14 246 190 14
40042 -246 190 14 246 190 14 246 190 14 246 190 14
40043 -246 186 14 236 178 12 224 166 10 142 122 72
40044 - 46 46 46 18 18 18 6 6 6 0 0 0
40045 - 0 0 0 0 0 0 0 0 0 0 0 0
40046 - 0 0 0 0 0 0 0 0 0 0 0 0
40047 - 0 0 0 0 0 0 6 6 6 18 18 18
40048 - 50 50 50 109 106 95 192 133 9 224 166 10
40049 -242 186 14 246 190 14 246 190 14 246 190 14
40050 -246 190 14 246 190 14 246 190 14 246 190 14
40051 -246 190 14 246 190 14 246 190 14 246 190 14
40052 -246 190 14 246 190 14 246 190 14 246 190 14
40053 -242 186 14 226 184 13 210 162 10 142 110 46
40054 -226 226 226 253 253 253 253 253 253 253 253 253
40055 -253 253 253 253 253 253 253 253 253 253 253 253
40056 -253 253 253 253 253 253 253 253 253 253 253 253
40057 -198 198 198 66 66 66 2 2 6 2 2 6
40058 - 2 2 6 2 2 6 50 34 6 156 107 11
40059 -219 162 10 239 182 13 246 186 14 246 190 14
40060 -246 190 14 246 190 14 246 190 14 246 190 14
40061 -246 190 14 246 190 14 246 190 14 246 190 14
40062 -246 190 14 246 190 14 246 190 14 242 186 14
40063 -234 174 13 213 154 11 154 122 46 66 66 66
40064 - 30 30 30 10 10 10 0 0 0 0 0 0
40065 - 0 0 0 0 0 0 0 0 0 0 0 0
40066 - 0 0 0 0 0 0 0 0 0 0 0 0
40067 - 0 0 0 0 0 0 6 6 6 22 22 22
40068 - 58 58 58 154 121 60 206 145 10 234 174 13
40069 -242 186 14 246 186 14 246 190 14 246 190 14
40070 -246 190 14 246 190 14 246 190 14 246 190 14
40071 -246 190 14 246 190 14 246 190 14 246 190 14
40072 -246 190 14 246 190 14 246 190 14 246 190 14
40073 -246 186 14 236 178 12 210 162 10 163 110 8
40074 - 61 42 6 138 138 138 218 218 218 250 250 250
40075 -253 253 253 253 253 253 253 253 253 250 250 250
40076 -242 242 242 210 210 210 144 144 144 66 66 66
40077 - 6 6 6 2 2 6 2 2 6 2 2 6
40078 - 2 2 6 2 2 6 61 42 6 163 110 8
40079 -216 158 10 236 178 12 246 190 14 246 190 14
40080 -246 190 14 246 190 14 246 190 14 246 190 14
40081 -246 190 14 246 190 14 246 190 14 246 190 14
40082 -246 190 14 239 182 13 230 174 11 216 158 10
40083 -190 142 34 124 112 88 70 70 70 38 38 38
40084 - 18 18 18 6 6 6 0 0 0 0 0 0
40085 - 0 0 0 0 0 0 0 0 0 0 0 0
40086 - 0 0 0 0 0 0 0 0 0 0 0 0
40087 - 0 0 0 0 0 0 6 6 6 22 22 22
40088 - 62 62 62 168 124 44 206 145 10 224 166 10
40089 -236 178 12 239 182 13 242 186 14 242 186 14
40090 -246 186 14 246 190 14 246 190 14 246 190 14
40091 -246 190 14 246 190 14 246 190 14 246 190 14
40092 -246 190 14 246 190 14 246 190 14 246 190 14
40093 -246 190 14 236 178 12 216 158 10 175 118 6
40094 - 80 54 7 2 2 6 6 6 6 30 30 30
40095 - 54 54 54 62 62 62 50 50 50 38 38 38
40096 - 14 14 14 2 2 6 2 2 6 2 2 6
40097 - 2 2 6 2 2 6 2 2 6 2 2 6
40098 - 2 2 6 6 6 6 80 54 7 167 114 7
40099 -213 154 11 236 178 12 246 190 14 246 190 14
40100 -246 190 14 246 190 14 246 190 14 246 190 14
40101 -246 190 14 242 186 14 239 182 13 239 182 13
40102 -230 174 11 210 150 10 174 135 50 124 112 88
40103 - 82 82 82 54 54 54 34 34 34 18 18 18
40104 - 6 6 6 0 0 0 0 0 0 0 0 0
40105 - 0 0 0 0 0 0 0 0 0 0 0 0
40106 - 0 0 0 0 0 0 0 0 0 0 0 0
40107 - 0 0 0 0 0 0 6 6 6 18 18 18
40108 - 50 50 50 158 118 36 192 133 9 200 144 11
40109 -216 158 10 219 162 10 224 166 10 226 170 11
40110 -230 174 11 236 178 12 239 182 13 239 182 13
40111 -242 186 14 246 186 14 246 190 14 246 190 14
40112 -246 190 14 246 190 14 246 190 14 246 190 14
40113 -246 186 14 230 174 11 210 150 10 163 110 8
40114 -104 69 6 10 10 10 2 2 6 2 2 6
40115 - 2 2 6 2 2 6 2 2 6 2 2 6
40116 - 2 2 6 2 2 6 2 2 6 2 2 6
40117 - 2 2 6 2 2 6 2 2 6 2 2 6
40118 - 2 2 6 6 6 6 91 60 6 167 114 7
40119 -206 145 10 230 174 11 242 186 14 246 190 14
40120 -246 190 14 246 190 14 246 186 14 242 186 14
40121 -239 182 13 230 174 11 224 166 10 213 154 11
40122 -180 133 36 124 112 88 86 86 86 58 58 58
40123 - 38 38 38 22 22 22 10 10 10 6 6 6
40124 - 0 0 0 0 0 0 0 0 0 0 0 0
40125 - 0 0 0 0 0 0 0 0 0 0 0 0
40126 - 0 0 0 0 0 0 0 0 0 0 0 0
40127 - 0 0 0 0 0 0 0 0 0 14 14 14
40128 - 34 34 34 70 70 70 138 110 50 158 118 36
40129 -167 114 7 180 123 7 192 133 9 197 138 11
40130 -200 144 11 206 145 10 213 154 11 219 162 10
40131 -224 166 10 230 174 11 239 182 13 242 186 14
40132 -246 186 14 246 186 14 246 186 14 246 186 14
40133 -239 182 13 216 158 10 185 133 11 152 99 6
40134 -104 69 6 18 14 6 2 2 6 2 2 6
40135 - 2 2 6 2 2 6 2 2 6 2 2 6
40136 - 2 2 6 2 2 6 2 2 6 2 2 6
40137 - 2 2 6 2 2 6 2 2 6 2 2 6
40138 - 2 2 6 6 6 6 80 54 7 152 99 6
40139 -192 133 9 219 162 10 236 178 12 239 182 13
40140 -246 186 14 242 186 14 239 182 13 236 178 12
40141 -224 166 10 206 145 10 192 133 9 154 121 60
40142 - 94 94 94 62 62 62 42 42 42 22 22 22
40143 - 14 14 14 6 6 6 0 0 0 0 0 0
40144 - 0 0 0 0 0 0 0 0 0 0 0 0
40145 - 0 0 0 0 0 0 0 0 0 0 0 0
40146 - 0 0 0 0 0 0 0 0 0 0 0 0
40147 - 0 0 0 0 0 0 0 0 0 6 6 6
40148 - 18 18 18 34 34 34 58 58 58 78 78 78
40149 -101 98 89 124 112 88 142 110 46 156 107 11
40150 -163 110 8 167 114 7 175 118 6 180 123 7
40151 -185 133 11 197 138 11 210 150 10 219 162 10
40152 -226 170 11 236 178 12 236 178 12 234 174 13
40153 -219 162 10 197 138 11 163 110 8 130 83 6
40154 - 91 60 6 10 10 10 2 2 6 2 2 6
40155 - 18 18 18 38 38 38 38 38 38 38 38 38
40156 - 38 38 38 38 38 38 38 38 38 38 38 38
40157 - 38 38 38 38 38 38 26 26 26 2 2 6
40158 - 2 2 6 6 6 6 70 47 6 137 92 6
40159 -175 118 6 200 144 11 219 162 10 230 174 11
40160 -234 174 13 230 174 11 219 162 10 210 150 10
40161 -192 133 9 163 110 8 124 112 88 82 82 82
40162 - 50 50 50 30 30 30 14 14 14 6 6 6
40163 - 0 0 0 0 0 0 0 0 0 0 0 0
40164 - 0 0 0 0 0 0 0 0 0 0 0 0
40165 - 0 0 0 0 0 0 0 0 0 0 0 0
40166 - 0 0 0 0 0 0 0 0 0 0 0 0
40167 - 0 0 0 0 0 0 0 0 0 0 0 0
40168 - 6 6 6 14 14 14 22 22 22 34 34 34
40169 - 42 42 42 58 58 58 74 74 74 86 86 86
40170 -101 98 89 122 102 70 130 98 46 121 87 25
40171 -137 92 6 152 99 6 163 110 8 180 123 7
40172 -185 133 11 197 138 11 206 145 10 200 144 11
40173 -180 123 7 156 107 11 130 83 6 104 69 6
40174 - 50 34 6 54 54 54 110 110 110 101 98 89
40175 - 86 86 86 82 82 82 78 78 78 78 78 78
40176 - 78 78 78 78 78 78 78 78 78 78 78 78
40177 - 78 78 78 82 82 82 86 86 86 94 94 94
40178 -106 106 106 101 101 101 86 66 34 124 80 6
40179 -156 107 11 180 123 7 192 133 9 200 144 11
40180 -206 145 10 200 144 11 192 133 9 175 118 6
40181 -139 102 15 109 106 95 70 70 70 42 42 42
40182 - 22 22 22 10 10 10 0 0 0 0 0 0
40183 - 0 0 0 0 0 0 0 0 0 0 0 0
40184 - 0 0 0 0 0 0 0 0 0 0 0 0
40185 - 0 0 0 0 0 0 0 0 0 0 0 0
40186 - 0 0 0 0 0 0 0 0 0 0 0 0
40187 - 0 0 0 0 0 0 0 0 0 0 0 0
40188 - 0 0 0 0 0 0 6 6 6 10 10 10
40189 - 14 14 14 22 22 22 30 30 30 38 38 38
40190 - 50 50 50 62 62 62 74 74 74 90 90 90
40191 -101 98 89 112 100 78 121 87 25 124 80 6
40192 -137 92 6 152 99 6 152 99 6 152 99 6
40193 -138 86 6 124 80 6 98 70 6 86 66 30
40194 -101 98 89 82 82 82 58 58 58 46 46 46
40195 - 38 38 38 34 34 34 34 34 34 34 34 34
40196 - 34 34 34 34 34 34 34 34 34 34 34 34
40197 - 34 34 34 34 34 34 38 38 38 42 42 42
40198 - 54 54 54 82 82 82 94 86 76 91 60 6
40199 -134 86 6 156 107 11 167 114 7 175 118 6
40200 -175 118 6 167 114 7 152 99 6 121 87 25
40201 -101 98 89 62 62 62 34 34 34 18 18 18
40202 - 6 6 6 0 0 0 0 0 0 0 0 0
40203 - 0 0 0 0 0 0 0 0 0 0 0 0
40204 - 0 0 0 0 0 0 0 0 0 0 0 0
40205 - 0 0 0 0 0 0 0 0 0 0 0 0
40206 - 0 0 0 0 0 0 0 0 0 0 0 0
40207 - 0 0 0 0 0 0 0 0 0 0 0 0
40208 - 0 0 0 0 0 0 0 0 0 0 0 0
40209 - 0 0 0 6 6 6 6 6 6 10 10 10
40210 - 18 18 18 22 22 22 30 30 30 42 42 42
40211 - 50 50 50 66 66 66 86 86 86 101 98 89
40212 -106 86 58 98 70 6 104 69 6 104 69 6
40213 -104 69 6 91 60 6 82 62 34 90 90 90
40214 - 62 62 62 38 38 38 22 22 22 14 14 14
40215 - 10 10 10 10 10 10 10 10 10 10 10 10
40216 - 10 10 10 10 10 10 6 6 6 10 10 10
40217 - 10 10 10 10 10 10 10 10 10 14 14 14
40218 - 22 22 22 42 42 42 70 70 70 89 81 66
40219 - 80 54 7 104 69 6 124 80 6 137 92 6
40220 -134 86 6 116 81 8 100 82 52 86 86 86
40221 - 58 58 58 30 30 30 14 14 14 6 6 6
40222 - 0 0 0 0 0 0 0 0 0 0 0 0
40223 - 0 0 0 0 0 0 0 0 0 0 0 0
40224 - 0 0 0 0 0 0 0 0 0 0 0 0
40225 - 0 0 0 0 0 0 0 0 0 0 0 0
40226 - 0 0 0 0 0 0 0 0 0 0 0 0
40227 - 0 0 0 0 0 0 0 0 0 0 0 0
40228 - 0 0 0 0 0 0 0 0 0 0 0 0
40229 - 0 0 0 0 0 0 0 0 0 0 0 0
40230 - 0 0 0 6 6 6 10 10 10 14 14 14
40231 - 18 18 18 26 26 26 38 38 38 54 54 54
40232 - 70 70 70 86 86 86 94 86 76 89 81 66
40233 - 89 81 66 86 86 86 74 74 74 50 50 50
40234 - 30 30 30 14 14 14 6 6 6 0 0 0
40235 - 0 0 0 0 0 0 0 0 0 0 0 0
40236 - 0 0 0 0 0 0 0 0 0 0 0 0
40237 - 0 0 0 0 0 0 0 0 0 0 0 0
40238 - 6 6 6 18 18 18 34 34 34 58 58 58
40239 - 82 82 82 89 81 66 89 81 66 89 81 66
40240 - 94 86 66 94 86 76 74 74 74 50 50 50
40241 - 26 26 26 14 14 14 6 6 6 0 0 0
40242 - 0 0 0 0 0 0 0 0 0 0 0 0
40243 - 0 0 0 0 0 0 0 0 0 0 0 0
40244 - 0 0 0 0 0 0 0 0 0 0 0 0
40245 - 0 0 0 0 0 0 0 0 0 0 0 0
40246 - 0 0 0 0 0 0 0 0 0 0 0 0
40247 - 0 0 0 0 0 0 0 0 0 0 0 0
40248 - 0 0 0 0 0 0 0 0 0 0 0 0
40249 - 0 0 0 0 0 0 0 0 0 0 0 0
40250 - 0 0 0 0 0 0 0 0 0 0 0 0
40251 - 6 6 6 6 6 6 14 14 14 18 18 18
40252 - 30 30 30 38 38 38 46 46 46 54 54 54
40253 - 50 50 50 42 42 42 30 30 30 18 18 18
40254 - 10 10 10 0 0 0 0 0 0 0 0 0
40255 - 0 0 0 0 0 0 0 0 0 0 0 0
40256 - 0 0 0 0 0 0 0 0 0 0 0 0
40257 - 0 0 0 0 0 0 0 0 0 0 0 0
40258 - 0 0 0 6 6 6 14 14 14 26 26 26
40259 - 38 38 38 50 50 50 58 58 58 58 58 58
40260 - 54 54 54 42 42 42 30 30 30 18 18 18
40261 - 10 10 10 0 0 0 0 0 0 0 0 0
40262 - 0 0 0 0 0 0 0 0 0 0 0 0
40263 - 0 0 0 0 0 0 0 0 0 0 0 0
40264 - 0 0 0 0 0 0 0 0 0 0 0 0
40265 - 0 0 0 0 0 0 0 0 0 0 0 0
40266 - 0 0 0 0 0 0 0 0 0 0 0 0
40267 - 0 0 0 0 0 0 0 0 0 0 0 0
40268 - 0 0 0 0 0 0 0 0 0 0 0 0
40269 - 0 0 0 0 0 0 0 0 0 0 0 0
40270 - 0 0 0 0 0 0 0 0 0 0 0 0
40271 - 0 0 0 0 0 0 0 0 0 6 6 6
40272 - 6 6 6 10 10 10 14 14 14 18 18 18
40273 - 18 18 18 14 14 14 10 10 10 6 6 6
40274 - 0 0 0 0 0 0 0 0 0 0 0 0
40275 - 0 0 0 0 0 0 0 0 0 0 0 0
40276 - 0 0 0 0 0 0 0 0 0 0 0 0
40277 - 0 0 0 0 0 0 0 0 0 0 0 0
40278 - 0 0 0 0 0 0 0 0 0 6 6 6
40279 - 14 14 14 18 18 18 22 22 22 22 22 22
40280 - 18 18 18 14 14 14 10 10 10 6 6 6
40281 - 0 0 0 0 0 0 0 0 0 0 0 0
40282 - 0 0 0 0 0 0 0 0 0 0 0 0
40283 - 0 0 0 0 0 0 0 0 0 0 0 0
40284 - 0 0 0 0 0 0 0 0 0 0 0 0
40285 - 0 0 0 0 0 0 0 0 0 0 0 0
40286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299 +4 4 4 4 4 4
40300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313 +4 4 4 4 4 4
40314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327 +4 4 4 4 4 4
40328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40341 +4 4 4 4 4 4
40342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40355 +4 4 4 4 4 4
40356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40369 +4 4 4 4 4 4
40370 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40371 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40374 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40375 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40377 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40380 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40381 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40383 +4 4 4 4 4 4
40384 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40385 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40388 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40389 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40390 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40394 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40395 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40396 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40397 +4 4 4 4 4 4
40398 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40399 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40403 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40404 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40405 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40408 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40409 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40410 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40411 +4 4 4 4 4 4
40412 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40413 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40414 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40415 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40416 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40417 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40418 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40419 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40421 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40422 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40423 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40424 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40425 +4 4 4 4 4 4
40426 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40427 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40428 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40429 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40430 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40431 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40432 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40433 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40434 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40435 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40436 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40437 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40438 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40439 +4 4 4 4 4 4
40440 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40441 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40442 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40443 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40444 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40445 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40446 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40447 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40448 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40449 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40450 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40451 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40452 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40453 +4 4 4 4 4 4
40454 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40455 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40457 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40458 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40459 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40460 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40461 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40462 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40463 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40464 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40465 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40466 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40467 +4 4 4 4 4 4
40468 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40469 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40470 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40471 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40472 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40473 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40474 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40475 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40476 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40477 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40478 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40479 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40480 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40481 +4 4 4 4 4 4
40482 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40483 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40484 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40485 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40486 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40487 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40488 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40489 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40490 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40491 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40492 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40493 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40494 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40495 +4 4 4 4 4 4
40496 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40497 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40498 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40499 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40500 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40501 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40502 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40503 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40504 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40505 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40506 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40507 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40508 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40509 +4 4 4 4 4 4
40510 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40511 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40512 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40513 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40514 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40515 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40516 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40517 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40518 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40519 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40520 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40521 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40522 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40523 +4 4 4 4 4 4
40524 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40525 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40526 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40527 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40528 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40529 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40530 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40531 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40532 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40533 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40534 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40535 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40536 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40537 +0 0 0 4 4 4
40538 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40539 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40540 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40541 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40542 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40543 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40544 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40545 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40546 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40547 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40548 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40549 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40550 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40551 +2 0 0 0 0 0
40552 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40553 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40554 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40555 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40556 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40557 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40558 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40559 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40560 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40561 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40562 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40563 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40564 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40565 +37 38 37 0 0 0
40566 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40567 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40568 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40569 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40570 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40571 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40572 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40573 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40574 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40575 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40576 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40577 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40578 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40579 +85 115 134 4 0 0
40580 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40581 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40582 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40583 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40584 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40585 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40586 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40587 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40588 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40589 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40590 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40591 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40592 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40593 +60 73 81 4 0 0
40594 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40595 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40596 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40597 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40598 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40599 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40600 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40601 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40602 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40603 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40604 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40605 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40606 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40607 +16 19 21 4 0 0
40608 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40609 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40610 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40611 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40612 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40613 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40614 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40615 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40616 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40617 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40618 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40619 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40620 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40621 +4 0 0 4 3 3
40622 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40623 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40624 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40625 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40626 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40627 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40628 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40629 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40630 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40631 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40632 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40633 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40634 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40635 +3 2 2 4 4 4
40636 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40637 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40638 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40639 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40640 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40641 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40642 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40643 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40644 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40645 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40646 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40647 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40648 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40649 +4 4 4 4 4 4
40650 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40651 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40652 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40653 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40654 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40655 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40656 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40657 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40658 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40659 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40660 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40661 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40662 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40663 +4 4 4 4 4 4
40664 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40665 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40666 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40667 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40668 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40669 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40670 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40671 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40672 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40673 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40674 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40675 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40676 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40677 +5 5 5 5 5 5
40678 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40679 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40680 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40681 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40682 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40683 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40684 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40685 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40686 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40687 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40688 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40689 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40690 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40691 +5 5 5 4 4 4
40692 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40693 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40694 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40695 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40696 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40697 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40698 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40699 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40700 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40701 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40702 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40703 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40705 +4 4 4 4 4 4
40706 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40707 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40708 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40709 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40710 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40711 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40712 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40713 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40714 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40715 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40716 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40717 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40719 +4 4 4 4 4 4
40720 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40721 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40722 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40723 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40724 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40725 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40726 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40727 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40728 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40729 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40730 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40733 +4 4 4 4 4 4
40734 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40735 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40736 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40737 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40738 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40739 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40740 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40741 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40742 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40743 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40744 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40747 +4 4 4 4 4 4
40748 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40749 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40750 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40751 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40752 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40753 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40754 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40755 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40756 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40757 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40758 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40761 +4 4 4 4 4 4
40762 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40763 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40764 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40765 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40766 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40767 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40768 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40769 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40770 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40771 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40772 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40775 +4 4 4 4 4 4
40776 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40777 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40778 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40779 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40780 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40781 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40782 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40783 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40784 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40785 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40786 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40789 +4 4 4 4 4 4
40790 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40791 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40792 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40793 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40794 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40795 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40796 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40797 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40798 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40799 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40800 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40803 +4 4 4 4 4 4
40804 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40805 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40806 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40807 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40808 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40809 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40810 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40811 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40812 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40813 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40814 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40817 +4 4 4 4 4 4
40818 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40819 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40820 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40821 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40822 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40823 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40824 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40825 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40826 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40827 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40828 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831 +4 4 4 4 4 4
40832 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40833 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40834 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40835 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40836 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40837 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40838 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40839 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40840 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40841 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40842 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845 +4 4 4 4 4 4
40846 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40847 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40848 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40849 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40850 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40851 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40852 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40853 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40854 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40855 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40856 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859 +4 4 4 4 4 4
40860 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40861 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40862 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40863 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40864 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40865 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40866 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40867 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40868 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40869 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40870 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873 +4 4 4 4 4 4
40874 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40875 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40876 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40877 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40878 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40879 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40880 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40881 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40882 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40883 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40884 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887 +4 4 4 4 4 4
40888 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40889 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40890 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40891 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40892 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40893 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40894 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40895 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40896 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40897 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40898 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901 +4 4 4 4 4 4
40902 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40903 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40904 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40905 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40906 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40907 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40908 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40909 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40910 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40911 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40912 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915 +4 4 4 4 4 4
40916 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40917 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40918 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40919 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40920 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40921 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40922 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40923 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40924 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40925 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40926 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929 +4 4 4 4 4 4
40930 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40931 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40932 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40933 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40934 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40935 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40936 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40937 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40938 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40939 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40940 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943 +4 4 4 4 4 4
40944 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40945 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40946 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40947 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40948 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40949 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40950 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40951 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40952 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40953 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40954 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957 +4 4 4 4 4 4
40958 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40959 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40960 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40961 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40962 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40963 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40964 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40965 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40966 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40967 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40968 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971 +4 4 4 4 4 4
40972 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40973 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40974 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40975 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40976 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40977 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40978 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40979 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40980 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40981 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40982 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40985 +4 4 4 4 4 4
40986 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40987 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40988 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40989 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40990 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40991 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40992 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40993 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40994 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40995 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40996 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40999 +4 4 4 4 4 4
41000 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41001 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41002 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41003 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41004 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41005 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41006 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41007 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41008 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41009 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41010 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41013 +4 4 4 4 4 4
41014 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41015 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41016 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41017 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41018 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41019 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41020 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41021 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41022 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41023 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41024 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41027 +4 4 4 4 4 4
41028 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41029 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41030 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41031 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41032 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41033 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41034 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41035 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41036 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41037 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41038 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41041 +4 4 4 4 4 4
41042 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41043 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41044 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41045 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41046 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41047 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41048 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41049 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41050 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41051 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41052 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41055 +4 4 4 4 4 4
41056 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41057 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41058 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41059 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41060 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41061 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41062 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41063 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41064 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41065 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41066 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41069 +4 4 4 4 4 4
41070 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41071 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41072 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41073 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41074 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41075 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41076 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41077 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41078 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41079 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41080 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41083 +4 4 4 4 4 4
41084 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41085 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41086 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41087 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41088 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41089 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41090 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41091 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41092 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41093 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41094 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41097 +4 4 4 4 4 4
41098 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41099 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41100 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41101 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41102 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41103 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41104 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41105 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41106 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41107 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41111 +4 4 4 4 4 4
41112 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41113 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41114 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41115 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41116 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41117 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41118 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41119 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41120 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41121 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125 +4 4 4 4 4 4
41126 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41127 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41128 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41129 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41130 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41131 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41132 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41133 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41134 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41135 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139 +4 4 4 4 4 4
41140 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41141 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41142 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41143 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41144 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41145 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41146 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41147 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41148 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41149 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153 +4 4 4 4 4 4
41154 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41155 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41156 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41157 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41158 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41159 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41160 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41161 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41162 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167 +4 4 4 4 4 4
41168 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41169 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41170 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41171 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41172 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41173 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41174 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41175 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41176 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181 +4 4 4 4 4 4
41182 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41183 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41184 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41185 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41186 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41187 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41188 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41189 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41190 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195 +4 4 4 4 4 4
41196 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41197 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41198 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41199 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41200 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41201 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41202 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41203 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209 +4 4 4 4 4 4
41210 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41211 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41212 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41213 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41214 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41215 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41216 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41217 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223 +4 4 4 4 4 4
41224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41225 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41226 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41227 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41228 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41229 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41230 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41231 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237 +4 4 4 4 4 4
41238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41239 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41240 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41241 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41242 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41243 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41244 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41245 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251 +4 4 4 4 4 4
41252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41253 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41254 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41255 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41256 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41257 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41258 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41259 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265 +4 4 4 4 4 4
41266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41268 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41269 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41270 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41271 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41272 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41273 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279 +4 4 4 4 4 4
41280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41283 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41284 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41285 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41286 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293 +4 4 4 4 4 4
41294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41297 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41298 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41299 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41300 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307 +4 4 4 4 4 4
41308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41311 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41312 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41313 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41314 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321 +4 4 4 4 4 4
41322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41325 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41326 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41327 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41328 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335 +4 4 4 4 4 4
41336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41340 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41341 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41342 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349 +4 4 4 4 4 4
41350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41354 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41355 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41356 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41357 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41358 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363 +4 4 4 4 4 4
41364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41368 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41369 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41370 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41371 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41372 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41376 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41377 +4 4 4 4 4 4
41378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41381 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41382 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41383 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41384 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41385 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41386 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41390 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41391 +4 4 4 4 4 4
41392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41395 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41396 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41397 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41398 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41399 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41400 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41404 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41405 +4 4 4 4 4 4
41406 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41407 index 087fc99..f85ed76 100644
41408 --- a/drivers/video/udlfb.c
41409 +++ b/drivers/video/udlfb.c
41410 @@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41411 dlfb_urb_completion(urb);
41412
41413 error:
41414 - atomic_add(bytes_sent, &dev->bytes_sent);
41415 - atomic_add(bytes_identical, &dev->bytes_identical);
41416 - atomic_add(width*height*2, &dev->bytes_rendered);
41417 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41418 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41419 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41420 end_cycles = get_cycles();
41421 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41422 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41423 >> 10)), /* Kcycles */
41424 &dev->cpu_kcycles_used);
41425
41426 @@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41427 dlfb_urb_completion(urb);
41428
41429 error:
41430 - atomic_add(bytes_sent, &dev->bytes_sent);
41431 - atomic_add(bytes_identical, &dev->bytes_identical);
41432 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41433 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41434 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41435 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41436 end_cycles = get_cycles();
41437 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41438 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41439 >> 10)), /* Kcycles */
41440 &dev->cpu_kcycles_used);
41441 }
41442 @@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41443 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41444 struct dlfb_data *dev = fb_info->par;
41445 return snprintf(buf, PAGE_SIZE, "%u\n",
41446 - atomic_read(&dev->bytes_rendered));
41447 + atomic_read_unchecked(&dev->bytes_rendered));
41448 }
41449
41450 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41451 @@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41452 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41453 struct dlfb_data *dev = fb_info->par;
41454 return snprintf(buf, PAGE_SIZE, "%u\n",
41455 - atomic_read(&dev->bytes_identical));
41456 + atomic_read_unchecked(&dev->bytes_identical));
41457 }
41458
41459 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41460 @@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41461 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41462 struct dlfb_data *dev = fb_info->par;
41463 return snprintf(buf, PAGE_SIZE, "%u\n",
41464 - atomic_read(&dev->bytes_sent));
41465 + atomic_read_unchecked(&dev->bytes_sent));
41466 }
41467
41468 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41469 @@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41470 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41471 struct dlfb_data *dev = fb_info->par;
41472 return snprintf(buf, PAGE_SIZE, "%u\n",
41473 - atomic_read(&dev->cpu_kcycles_used));
41474 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41475 }
41476
41477 static ssize_t edid_show(
41478 @@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41479 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41480 struct dlfb_data *dev = fb_info->par;
41481
41482 - atomic_set(&dev->bytes_rendered, 0);
41483 - atomic_set(&dev->bytes_identical, 0);
41484 - atomic_set(&dev->bytes_sent, 0);
41485 - atomic_set(&dev->cpu_kcycles_used, 0);
41486 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41487 + atomic_set_unchecked(&dev->bytes_identical, 0);
41488 + atomic_set_unchecked(&dev->bytes_sent, 0);
41489 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41490
41491 return count;
41492 }
41493 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41494 index 7f8472c..9842e87 100644
41495 --- a/drivers/video/uvesafb.c
41496 +++ b/drivers/video/uvesafb.c
41497 @@ -19,6 +19,7 @@
41498 #include <linux/io.h>
41499 #include <linux/mutex.h>
41500 #include <linux/slab.h>
41501 +#include <linux/moduleloader.h>
41502 #include <video/edid.h>
41503 #include <video/uvesafb.h>
41504 #ifdef CONFIG_X86
41505 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41506 NULL,
41507 };
41508
41509 - return call_usermodehelper(v86d_path, argv, envp, 1);
41510 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41511 }
41512
41513 /*
41514 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41515 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41516 par->pmi_setpal = par->ypan = 0;
41517 } else {
41518 +
41519 +#ifdef CONFIG_PAX_KERNEXEC
41520 +#ifdef CONFIG_MODULES
41521 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41522 +#endif
41523 + if (!par->pmi_code) {
41524 + par->pmi_setpal = par->ypan = 0;
41525 + return 0;
41526 + }
41527 +#endif
41528 +
41529 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41530 + task->t.regs.edi);
41531 +
41532 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41533 + pax_open_kernel();
41534 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41535 + pax_close_kernel();
41536 +
41537 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41538 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41539 +#else
41540 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41541 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41542 +#endif
41543 +
41544 printk(KERN_INFO "uvesafb: protected mode interface info at "
41545 "%04x:%04x\n",
41546 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41547 @@ -1821,6 +1844,11 @@ out:
41548 if (par->vbe_modes)
41549 kfree(par->vbe_modes);
41550
41551 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41552 + if (par->pmi_code)
41553 + module_free_exec(NULL, par->pmi_code);
41554 +#endif
41555 +
41556 framebuffer_release(info);
41557 return err;
41558 }
41559 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
41560 kfree(par->vbe_state_orig);
41561 if (par->vbe_state_saved)
41562 kfree(par->vbe_state_saved);
41563 +
41564 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41565 + if (par->pmi_code)
41566 + module_free_exec(NULL, par->pmi_code);
41567 +#endif
41568 +
41569 }
41570
41571 framebuffer_release(info);
41572 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41573 index 501b340..86bd4cf 100644
41574 --- a/drivers/video/vesafb.c
41575 +++ b/drivers/video/vesafb.c
41576 @@ -9,6 +9,7 @@
41577 */
41578
41579 #include <linux/module.h>
41580 +#include <linux/moduleloader.h>
41581 #include <linux/kernel.h>
41582 #include <linux/errno.h>
41583 #include <linux/string.h>
41584 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41585 static int vram_total __initdata; /* Set total amount of memory */
41586 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41587 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41588 -static void (*pmi_start)(void) __read_mostly;
41589 -static void (*pmi_pal) (void) __read_mostly;
41590 +static void (*pmi_start)(void) __read_only;
41591 +static void (*pmi_pal) (void) __read_only;
41592 static int depth __read_mostly;
41593 static int vga_compat __read_mostly;
41594 /* --------------------------------------------------------------------- */
41595 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41596 unsigned int size_vmode;
41597 unsigned int size_remap;
41598 unsigned int size_total;
41599 + void *pmi_code = NULL;
41600
41601 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41602 return -ENODEV;
41603 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41604 size_remap = size_total;
41605 vesafb_fix.smem_len = size_remap;
41606
41607 -#ifndef __i386__
41608 - screen_info.vesapm_seg = 0;
41609 -#endif
41610 -
41611 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41612 printk(KERN_WARNING
41613 "vesafb: cannot reserve video memory at 0x%lx\n",
41614 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41615 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41616 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41617
41618 +#ifdef __i386__
41619 +
41620 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41621 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41622 + if (!pmi_code)
41623 +#elif !defined(CONFIG_PAX_KERNEXEC)
41624 + if (0)
41625 +#endif
41626 +
41627 +#endif
41628 + screen_info.vesapm_seg = 0;
41629 +
41630 if (screen_info.vesapm_seg) {
41631 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41632 - screen_info.vesapm_seg,screen_info.vesapm_off);
41633 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41634 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41635 }
41636
41637 if (screen_info.vesapm_seg < 0xc000)
41638 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41639
41640 if (ypan || pmi_setpal) {
41641 unsigned short *pmi_base;
41642 +
41643 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41644 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41645 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41646 +
41647 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41648 + pax_open_kernel();
41649 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41650 +#else
41651 + pmi_code = pmi_base;
41652 +#endif
41653 +
41654 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41655 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41656 +
41657 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41658 + pmi_start = ktva_ktla(pmi_start);
41659 + pmi_pal = ktva_ktla(pmi_pal);
41660 + pax_close_kernel();
41661 +#endif
41662 +
41663 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41664 if (pmi_base[3]) {
41665 printk(KERN_INFO "vesafb: pmi: ports = ");
41666 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41667 info->node, info->fix.id);
41668 return 0;
41669 err:
41670 +
41671 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41672 + module_free_exec(NULL, pmi_code);
41673 +#endif
41674 +
41675 if (info->screen_base)
41676 iounmap(info->screen_base);
41677 framebuffer_release(info);
41678 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41679 index 88714ae..16c2e11 100644
41680 --- a/drivers/video/via/via_clock.h
41681 +++ b/drivers/video/via/via_clock.h
41682 @@ -56,7 +56,7 @@ struct via_clock {
41683
41684 void (*set_engine_pll_state)(u8 state);
41685 void (*set_engine_pll)(struct via_pll_config config);
41686 -};
41687 +} __no_const;
41688
41689
41690 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41691 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
41692 index e058ace..2424d93 100644
41693 --- a/drivers/virtio/virtio_balloon.c
41694 +++ b/drivers/virtio/virtio_balloon.c
41695 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
41696 struct sysinfo i;
41697 int idx = 0;
41698
41699 + pax_track_stack();
41700 +
41701 all_vm_events(events);
41702 si_meminfo(&i);
41703
41704 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41705 index e56c934..fc22f4b 100644
41706 --- a/drivers/xen/xen-pciback/conf_space.h
41707 +++ b/drivers/xen/xen-pciback/conf_space.h
41708 @@ -44,15 +44,15 @@ struct config_field {
41709 struct {
41710 conf_dword_write write;
41711 conf_dword_read read;
41712 - } dw;
41713 + } __no_const dw;
41714 struct {
41715 conf_word_write write;
41716 conf_word_read read;
41717 - } w;
41718 + } __no_const w;
41719 struct {
41720 conf_byte_write write;
41721 conf_byte_read read;
41722 - } b;
41723 + } __no_const b;
41724 } u;
41725 struct list_head list;
41726 };
41727 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41728 index e3c03db..93b0172 100644
41729 --- a/fs/9p/vfs_inode.c
41730 +++ b/fs/9p/vfs_inode.c
41731 @@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41732 void
41733 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41734 {
41735 - char *s = nd_get_link(nd);
41736 + const char *s = nd_get_link(nd);
41737
41738 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
41739 IS_ERR(s) ? "<error>" : s);
41740 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41741 index 79e2ca7..5828ad1 100644
41742 --- a/fs/Kconfig.binfmt
41743 +++ b/fs/Kconfig.binfmt
41744 @@ -86,7 +86,7 @@ config HAVE_AOUT
41745
41746 config BINFMT_AOUT
41747 tristate "Kernel support for a.out and ECOFF binaries"
41748 - depends on HAVE_AOUT
41749 + depends on HAVE_AOUT && BROKEN
41750 ---help---
41751 A.out (Assembler.OUTput) is a set of formats for libraries and
41752 executables used in the earliest versions of UNIX. Linux used
41753 diff --git a/fs/aio.c b/fs/aio.c
41754 index e29ec48..f083e5e 100644
41755 --- a/fs/aio.c
41756 +++ b/fs/aio.c
41757 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41758 size += sizeof(struct io_event) * nr_events;
41759 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41760
41761 - if (nr_pages < 0)
41762 + if (nr_pages <= 0)
41763 return -EINVAL;
41764
41765 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41766 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ctx,
41767 struct aio_timeout to;
41768 int retry = 0;
41769
41770 + pax_track_stack();
41771 +
41772 /* needed to zero any padding within an entry (there shouldn't be
41773 * any, but C is fun!
41774 */
41775 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41776 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41777 {
41778 ssize_t ret;
41779 + struct iovec iovstack;
41780
41781 #ifdef CONFIG_COMPAT
41782 if (compat)
41783 ret = compat_rw_copy_check_uvector(type,
41784 (struct compat_iovec __user *)kiocb->ki_buf,
41785 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41786 + kiocb->ki_nbytes, 1, &iovstack,
41787 &kiocb->ki_iovec);
41788 else
41789 #endif
41790 ret = rw_copy_check_uvector(type,
41791 (struct iovec __user *)kiocb->ki_buf,
41792 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41793 + kiocb->ki_nbytes, 1, &iovstack,
41794 &kiocb->ki_iovec);
41795 if (ret < 0)
41796 goto out;
41797
41798 + if (kiocb->ki_iovec == &iovstack) {
41799 + kiocb->ki_inline_vec = iovstack;
41800 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41801 + }
41802 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41803 kiocb->ki_cur_seg = 0;
41804 /* ki_nbytes/left now reflect bytes instead of segs */
41805 diff --git a/fs/attr.c b/fs/attr.c
41806 index 538e279..046cc6d 100644
41807 --- a/fs/attr.c
41808 +++ b/fs/attr.c
41809 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41810 unsigned long limit;
41811
41812 limit = rlimit(RLIMIT_FSIZE);
41813 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41814 if (limit != RLIM_INFINITY && offset > limit)
41815 goto out_sig;
41816 if (offset > inode->i_sb->s_maxbytes)
41817 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41818 index e1fbdee..cd5ea56 100644
41819 --- a/fs/autofs4/waitq.c
41820 +++ b/fs/autofs4/waitq.c
41821 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
41822 {
41823 unsigned long sigpipe, flags;
41824 mm_segment_t fs;
41825 - const char *data = (const char *)addr;
41826 + const char __user *data = (const char __force_user *)addr;
41827 ssize_t wr = 0;
41828
41829 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
41830 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41831 index 720d885..012e7f0 100644
41832 --- a/fs/befs/linuxvfs.c
41833 +++ b/fs/befs/linuxvfs.c
41834 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41835 {
41836 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41837 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41838 - char *link = nd_get_link(nd);
41839 + const char *link = nd_get_link(nd);
41840 if (!IS_ERR(link))
41841 kfree(link);
41842 }
41843 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41844 index a6395bd..a5b24c4 100644
41845 --- a/fs/binfmt_aout.c
41846 +++ b/fs/binfmt_aout.c
41847 @@ -16,6 +16,7 @@
41848 #include <linux/string.h>
41849 #include <linux/fs.h>
41850 #include <linux/file.h>
41851 +#include <linux/security.h>
41852 #include <linux/stat.h>
41853 #include <linux/fcntl.h>
41854 #include <linux/ptrace.h>
41855 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41856 #endif
41857 # define START_STACK(u) ((void __user *)u.start_stack)
41858
41859 + memset(&dump, 0, sizeof(dump));
41860 +
41861 fs = get_fs();
41862 set_fs(KERNEL_DS);
41863 has_dumped = 1;
41864 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41865
41866 /* If the size of the dump file exceeds the rlimit, then see what would happen
41867 if we wrote the stack, but not the data area. */
41868 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41869 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41870 dump.u_dsize = 0;
41871
41872 /* Make sure we have enough room to write the stack and data areas. */
41873 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41874 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41875 dump.u_ssize = 0;
41876
41877 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41878 rlim = rlimit(RLIMIT_DATA);
41879 if (rlim >= RLIM_INFINITY)
41880 rlim = ~0;
41881 +
41882 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41883 if (ex.a_data + ex.a_bss > rlim)
41884 return -ENOMEM;
41885
41886 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41887 install_exec_creds(bprm);
41888 current->flags &= ~PF_FORKNOEXEC;
41889
41890 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41891 + current->mm->pax_flags = 0UL;
41892 +#endif
41893 +
41894 +#ifdef CONFIG_PAX_PAGEEXEC
41895 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41896 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41897 +
41898 +#ifdef CONFIG_PAX_EMUTRAMP
41899 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41900 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41901 +#endif
41902 +
41903 +#ifdef CONFIG_PAX_MPROTECT
41904 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41905 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41906 +#endif
41907 +
41908 + }
41909 +#endif
41910 +
41911 if (N_MAGIC(ex) == OMAGIC) {
41912 unsigned long text_addr, map_size;
41913 loff_t pos;
41914 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41915
41916 down_write(&current->mm->mmap_sem);
41917 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41918 - PROT_READ | PROT_WRITE | PROT_EXEC,
41919 + PROT_READ | PROT_WRITE,
41920 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41921 fd_offset + ex.a_text);
41922 up_write(&current->mm->mmap_sem);
41923 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41924 index 21ac5ee..f54fdd0 100644
41925 --- a/fs/binfmt_elf.c
41926 +++ b/fs/binfmt_elf.c
41927 @@ -32,6 +32,7 @@
41928 #include <linux/elf.h>
41929 #include <linux/utsname.h>
41930 #include <linux/coredump.h>
41931 +#include <linux/xattr.h>
41932 #include <asm/uaccess.h>
41933 #include <asm/param.h>
41934 #include <asm/page.h>
41935 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41936 #define elf_core_dump NULL
41937 #endif
41938
41939 +#ifdef CONFIG_PAX_MPROTECT
41940 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41941 +#endif
41942 +
41943 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41944 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41945 #else
41946 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
41947 .load_binary = load_elf_binary,
41948 .load_shlib = load_elf_library,
41949 .core_dump = elf_core_dump,
41950 +
41951 +#ifdef CONFIG_PAX_MPROTECT
41952 + .handle_mprotect= elf_handle_mprotect,
41953 +#endif
41954 +
41955 .min_coredump = ELF_EXEC_PAGESIZE,
41956 };
41957
41958 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
41959
41960 static int set_brk(unsigned long start, unsigned long end)
41961 {
41962 + unsigned long e = end;
41963 +
41964 start = ELF_PAGEALIGN(start);
41965 end = ELF_PAGEALIGN(end);
41966 if (end > start) {
41967 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
41968 if (BAD_ADDR(addr))
41969 return addr;
41970 }
41971 - current->mm->start_brk = current->mm->brk = end;
41972 + current->mm->start_brk = current->mm->brk = e;
41973 return 0;
41974 }
41975
41976 @@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41977 elf_addr_t __user *u_rand_bytes;
41978 const char *k_platform = ELF_PLATFORM;
41979 const char *k_base_platform = ELF_BASE_PLATFORM;
41980 - unsigned char k_rand_bytes[16];
41981 + u32 k_rand_bytes[4];
41982 int items;
41983 elf_addr_t *elf_info;
41984 int ei_index = 0;
41985 const struct cred *cred = current_cred();
41986 struct vm_area_struct *vma;
41987 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41988 +
41989 + pax_track_stack();
41990
41991 /*
41992 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41993 @@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41994 * Generate 16 random bytes for userspace PRNG seeding.
41995 */
41996 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41997 - u_rand_bytes = (elf_addr_t __user *)
41998 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41999 + srandom32(k_rand_bytes[0] ^ random32());
42000 + srandom32(k_rand_bytes[1] ^ random32());
42001 + srandom32(k_rand_bytes[2] ^ random32());
42002 + srandom32(k_rand_bytes[3] ^ random32());
42003 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
42004 + u_rand_bytes = (elf_addr_t __user *) p;
42005 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42006 return -EFAULT;
42007
42008 @@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42009 return -EFAULT;
42010 current->mm->env_end = p;
42011
42012 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42013 +
42014 /* Put the elf_info on the stack in the right place. */
42015 sp = (elf_addr_t __user *)envp + 1;
42016 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42017 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42018 return -EFAULT;
42019 return 0;
42020 }
42021 @@ -381,10 +402,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42022 {
42023 struct elf_phdr *elf_phdata;
42024 struct elf_phdr *eppnt;
42025 - unsigned long load_addr = 0;
42026 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42027 int load_addr_set = 0;
42028 unsigned long last_bss = 0, elf_bss = 0;
42029 - unsigned long error = ~0UL;
42030 + unsigned long error = -EINVAL;
42031 unsigned long total_size;
42032 int retval, i, size;
42033
42034 @@ -430,6 +451,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42035 goto out_close;
42036 }
42037
42038 +#ifdef CONFIG_PAX_SEGMEXEC
42039 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42040 + pax_task_size = SEGMEXEC_TASK_SIZE;
42041 +#endif
42042 +
42043 eppnt = elf_phdata;
42044 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42045 if (eppnt->p_type == PT_LOAD) {
42046 @@ -473,8 +499,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42047 k = load_addr + eppnt->p_vaddr;
42048 if (BAD_ADDR(k) ||
42049 eppnt->p_filesz > eppnt->p_memsz ||
42050 - eppnt->p_memsz > TASK_SIZE ||
42051 - TASK_SIZE - eppnt->p_memsz < k) {
42052 + eppnt->p_memsz > pax_task_size ||
42053 + pax_task_size - eppnt->p_memsz < k) {
42054 error = -ENOMEM;
42055 goto out_close;
42056 }
42057 @@ -528,6 +554,348 @@ out:
42058 return error;
42059 }
42060
42061 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42062 +{
42063 + unsigned long pax_flags = 0UL;
42064 +
42065 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42066 +
42067 +#ifdef CONFIG_PAX_PAGEEXEC
42068 + if (elf_phdata->p_flags & PF_PAGEEXEC)
42069 + pax_flags |= MF_PAX_PAGEEXEC;
42070 +#endif
42071 +
42072 +#ifdef CONFIG_PAX_SEGMEXEC
42073 + if (elf_phdata->p_flags & PF_SEGMEXEC)
42074 + pax_flags |= MF_PAX_SEGMEXEC;
42075 +#endif
42076 +
42077 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42078 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42079 + if ((__supported_pte_mask & _PAGE_NX))
42080 + pax_flags &= ~MF_PAX_SEGMEXEC;
42081 + else
42082 + pax_flags &= ~MF_PAX_PAGEEXEC;
42083 + }
42084 +#endif
42085 +
42086 +#ifdef CONFIG_PAX_EMUTRAMP
42087 + if (elf_phdata->p_flags & PF_EMUTRAMP)
42088 + pax_flags |= MF_PAX_EMUTRAMP;
42089 +#endif
42090 +
42091 +#ifdef CONFIG_PAX_MPROTECT
42092 + if (elf_phdata->p_flags & PF_MPROTECT)
42093 + pax_flags |= MF_PAX_MPROTECT;
42094 +#endif
42095 +
42096 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42097 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42098 + pax_flags |= MF_PAX_RANDMMAP;
42099 +#endif
42100 +
42101 +#endif
42102 +
42103 + return pax_flags;
42104 +}
42105 +
42106 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42107 +{
42108 + unsigned long pax_flags = 0UL;
42109 +
42110 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42111 +
42112 +#ifdef CONFIG_PAX_PAGEEXEC
42113 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42114 + pax_flags |= MF_PAX_PAGEEXEC;
42115 +#endif
42116 +
42117 +#ifdef CONFIG_PAX_SEGMEXEC
42118 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42119 + pax_flags |= MF_PAX_SEGMEXEC;
42120 +#endif
42121 +
42122 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42123 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42124 + if ((__supported_pte_mask & _PAGE_NX))
42125 + pax_flags &= ~MF_PAX_SEGMEXEC;
42126 + else
42127 + pax_flags &= ~MF_PAX_PAGEEXEC;
42128 + }
42129 +#endif
42130 +
42131 +#ifdef CONFIG_PAX_EMUTRAMP
42132 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42133 + pax_flags |= MF_PAX_EMUTRAMP;
42134 +#endif
42135 +
42136 +#ifdef CONFIG_PAX_MPROTECT
42137 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42138 + pax_flags |= MF_PAX_MPROTECT;
42139 +#endif
42140 +
42141 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42142 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42143 + pax_flags |= MF_PAX_RANDMMAP;
42144 +#endif
42145 +
42146 +#endif
42147 +
42148 + return pax_flags;
42149 +}
42150 +
42151 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42152 +{
42153 + unsigned long pax_flags = 0UL;
42154 +
42155 +#ifdef CONFIG_PAX_EI_PAX
42156 +
42157 +#ifdef CONFIG_PAX_PAGEEXEC
42158 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42159 + pax_flags |= MF_PAX_PAGEEXEC;
42160 +#endif
42161 +
42162 +#ifdef CONFIG_PAX_SEGMEXEC
42163 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42164 + pax_flags |= MF_PAX_SEGMEXEC;
42165 +#endif
42166 +
42167 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42168 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42169 + if ((__supported_pte_mask & _PAGE_NX))
42170 + pax_flags &= ~MF_PAX_SEGMEXEC;
42171 + else
42172 + pax_flags &= ~MF_PAX_PAGEEXEC;
42173 + }
42174 +#endif
42175 +
42176 +#ifdef CONFIG_PAX_EMUTRAMP
42177 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42178 + pax_flags |= MF_PAX_EMUTRAMP;
42179 +#endif
42180 +
42181 +#ifdef CONFIG_PAX_MPROTECT
42182 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42183 + pax_flags |= MF_PAX_MPROTECT;
42184 +#endif
42185 +
42186 +#ifdef CONFIG_PAX_ASLR
42187 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42188 + pax_flags |= MF_PAX_RANDMMAP;
42189 +#endif
42190 +
42191 +#else
42192 +
42193 +#ifdef CONFIG_PAX_PAGEEXEC
42194 + pax_flags |= MF_PAX_PAGEEXEC;
42195 +#endif
42196 +
42197 +#ifdef CONFIG_PAX_MPROTECT
42198 + pax_flags |= MF_PAX_MPROTECT;
42199 +#endif
42200 +
42201 +#ifdef CONFIG_PAX_RANDMMAP
42202 + pax_flags |= MF_PAX_RANDMMAP;
42203 +#endif
42204 +
42205 +#ifdef CONFIG_PAX_SEGMEXEC
42206 + if (!(__supported_pte_mask & _PAGE_NX)) {
42207 + pax_flags &= ~MF_PAX_PAGEEXEC;
42208 + pax_flags |= MF_PAX_SEGMEXEC;
42209 + }
42210 +#endif
42211 +
42212 +#endif
42213 +
42214 + return pax_flags;
42215 +}
42216 +
42217 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42218 +{
42219 +
42220 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42221 + unsigned long i;
42222 +
42223 + for (i = 0UL; i < elf_ex->e_phnum; i++)
42224 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42225 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42226 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42227 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42228 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42229 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42230 + return ~0UL;
42231 +
42232 +#ifdef CONFIG_PAX_SOFTMODE
42233 + if (pax_softmode)
42234 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42235 + else
42236 +#endif
42237 +
42238 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42239 + break;
42240 + }
42241 +#endif
42242 +
42243 + return ~0UL;
42244 +}
42245 +
42246 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42247 +{
42248 + unsigned long pax_flags = 0UL;
42249 +
42250 +#ifdef CONFIG_PAX_PAGEEXEC
42251 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42252 + pax_flags |= MF_PAX_PAGEEXEC;
42253 +#endif
42254 +
42255 +#ifdef CONFIG_PAX_SEGMEXEC
42256 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42257 + pax_flags |= MF_PAX_SEGMEXEC;
42258 +#endif
42259 +
42260 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42261 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42262 + if ((__supported_pte_mask & _PAGE_NX))
42263 + pax_flags &= ~MF_PAX_SEGMEXEC;
42264 + else
42265 + pax_flags &= ~MF_PAX_PAGEEXEC;
42266 + }
42267 +#endif
42268 +
42269 +#ifdef CONFIG_PAX_EMUTRAMP
42270 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42271 + pax_flags |= MF_PAX_EMUTRAMP;
42272 +#endif
42273 +
42274 +#ifdef CONFIG_PAX_MPROTECT
42275 + if (pax_flags_softmode & MF_PAX_MPROTECT)
42276 + pax_flags |= MF_PAX_MPROTECT;
42277 +#endif
42278 +
42279 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42280 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42281 + pax_flags |= MF_PAX_RANDMMAP;
42282 +#endif
42283 +
42284 + return pax_flags;
42285 +}
42286 +
42287 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42288 +{
42289 + unsigned long pax_flags = 0UL;
42290 +
42291 +#ifdef CONFIG_PAX_PAGEEXEC
42292 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42293 + pax_flags |= MF_PAX_PAGEEXEC;
42294 +#endif
42295 +
42296 +#ifdef CONFIG_PAX_SEGMEXEC
42297 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42298 + pax_flags |= MF_PAX_SEGMEXEC;
42299 +#endif
42300 +
42301 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42302 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42303 + if ((__supported_pte_mask & _PAGE_NX))
42304 + pax_flags &= ~MF_PAX_SEGMEXEC;
42305 + else
42306 + pax_flags &= ~MF_PAX_PAGEEXEC;
42307 + }
42308 +#endif
42309 +
42310 +#ifdef CONFIG_PAX_EMUTRAMP
42311 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42312 + pax_flags |= MF_PAX_EMUTRAMP;
42313 +#endif
42314 +
42315 +#ifdef CONFIG_PAX_MPROTECT
42316 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42317 + pax_flags |= MF_PAX_MPROTECT;
42318 +#endif
42319 +
42320 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42321 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42322 + pax_flags |= MF_PAX_RANDMMAP;
42323 +#endif
42324 +
42325 + return pax_flags;
42326 +}
42327 +
42328 +static unsigned long pax_parse_xattr_pax(struct file * const file)
42329 +{
42330 +
42331 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42332 + ssize_t xattr_size, i;
42333 + unsigned char xattr_value[5];
42334 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42335 +
42336 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42337 + if (xattr_size <= 0)
42338 + return ~0UL;
42339 +
42340 + for (i = 0; i < xattr_size; i++)
42341 + switch (xattr_value[i]) {
42342 + default:
42343 + return ~0UL;
42344 +
42345 +#define parse_flag(option1, option2, flag) \
42346 + case option1: \
42347 + pax_flags_hardmode |= MF_PAX_##flag; \
42348 + break; \
42349 + case option2: \
42350 + pax_flags_softmode |= MF_PAX_##flag; \
42351 + break;
42352 +
42353 + parse_flag('p', 'P', PAGEEXEC);
42354 + parse_flag('e', 'E', EMUTRAMP);
42355 + parse_flag('m', 'M', MPROTECT);
42356 + parse_flag('r', 'R', RANDMMAP);
42357 + parse_flag('s', 'S', SEGMEXEC);
42358 +
42359 +#undef parse_flag
42360 + }
42361 +
42362 + if (pax_flags_hardmode & pax_flags_softmode)
42363 + return ~0UL;
42364 +
42365 +#ifdef CONFIG_PAX_SOFTMODE
42366 + if (pax_softmode)
42367 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42368 + else
42369 +#endif
42370 +
42371 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42372 +#else
42373 + return ~0UL;
42374 +#endif
42375 +}
42376 +
42377 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42378 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42379 +{
42380 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42381 +
42382 + pax_flags = pax_parse_ei_pax(elf_ex);
42383 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42384 + xattr_pax_flags = pax_parse_xattr_pax(file);
42385 +
42386 + if (pt_pax_flags == ~0UL)
42387 + pt_pax_flags = xattr_pax_flags;
42388 + else if (xattr_pax_flags == ~0UL)
42389 + xattr_pax_flags = pt_pax_flags;
42390 + if (pt_pax_flags != xattr_pax_flags)
42391 + return -EINVAL;
42392 + if (pt_pax_flags != ~0UL)
42393 + pax_flags = pt_pax_flags;
42394 +
42395 + if (0 > pax_check_flags(&pax_flags))
42396 + return -EINVAL;
42397 +
42398 + current->mm->pax_flags = pax_flags;
42399 + return 0;
42400 +}
42401 +#endif
42402 +
42403 /*
42404 * These are the functions used to load ELF style executables and shared
42405 * libraries. There is no binary dependent code anywhere else.
42406 @@ -544,6 +912,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42407 {
42408 unsigned int random_variable = 0;
42409
42410 +#ifdef CONFIG_PAX_RANDUSTACK
42411 + if (randomize_va_space)
42412 + return stack_top - current->mm->delta_stack;
42413 +#endif
42414 +
42415 if ((current->flags & PF_RANDOMIZE) &&
42416 !(current->personality & ADDR_NO_RANDOMIZE)) {
42417 random_variable = get_random_int() & STACK_RND_MASK;
42418 @@ -562,7 +935,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42419 unsigned long load_addr = 0, load_bias = 0;
42420 int load_addr_set = 0;
42421 char * elf_interpreter = NULL;
42422 - unsigned long error;
42423 + unsigned long error = 0;
42424 struct elf_phdr *elf_ppnt, *elf_phdata;
42425 unsigned long elf_bss, elf_brk;
42426 int retval, i;
42427 @@ -572,11 +945,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42428 unsigned long start_code, end_code, start_data, end_data;
42429 unsigned long reloc_func_desc __maybe_unused = 0;
42430 int executable_stack = EXSTACK_DEFAULT;
42431 - unsigned long def_flags = 0;
42432 struct {
42433 struct elfhdr elf_ex;
42434 struct elfhdr interp_elf_ex;
42435 } *loc;
42436 + unsigned long pax_task_size = TASK_SIZE;
42437
42438 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42439 if (!loc) {
42440 @@ -713,11 +1086,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42441
42442 /* OK, This is the point of no return */
42443 current->flags &= ~PF_FORKNOEXEC;
42444 - current->mm->def_flags = def_flags;
42445 +
42446 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42447 + current->mm->pax_flags = 0UL;
42448 +#endif
42449 +
42450 +#ifdef CONFIG_PAX_DLRESOLVE
42451 + current->mm->call_dl_resolve = 0UL;
42452 +#endif
42453 +
42454 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42455 + current->mm->call_syscall = 0UL;
42456 +#endif
42457 +
42458 +#ifdef CONFIG_PAX_ASLR
42459 + current->mm->delta_mmap = 0UL;
42460 + current->mm->delta_stack = 0UL;
42461 +#endif
42462 +
42463 + current->mm->def_flags = 0;
42464 +
42465 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42466 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42467 + send_sig(SIGKILL, current, 0);
42468 + goto out_free_dentry;
42469 + }
42470 +#endif
42471 +
42472 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42473 + pax_set_initial_flags(bprm);
42474 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42475 + if (pax_set_initial_flags_func)
42476 + (pax_set_initial_flags_func)(bprm);
42477 +#endif
42478 +
42479 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42480 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42481 + current->mm->context.user_cs_limit = PAGE_SIZE;
42482 + current->mm->def_flags |= VM_PAGEEXEC;
42483 + }
42484 +#endif
42485 +
42486 +#ifdef CONFIG_PAX_SEGMEXEC
42487 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42488 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42489 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42490 + pax_task_size = SEGMEXEC_TASK_SIZE;
42491 + current->mm->def_flags |= VM_NOHUGEPAGE;
42492 + }
42493 +#endif
42494 +
42495 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42496 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42497 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42498 + put_cpu();
42499 + }
42500 +#endif
42501
42502 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42503 may depend on the personality. */
42504 SET_PERSONALITY(loc->elf_ex);
42505 +
42506 +#ifdef CONFIG_PAX_ASLR
42507 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42508 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42509 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42510 + }
42511 +#endif
42512 +
42513 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42514 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42515 + executable_stack = EXSTACK_DISABLE_X;
42516 + current->personality &= ~READ_IMPLIES_EXEC;
42517 + } else
42518 +#endif
42519 +
42520 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42521 current->personality |= READ_IMPLIES_EXEC;
42522
42523 @@ -808,6 +1251,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42524 #else
42525 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42526 #endif
42527 +
42528 +#ifdef CONFIG_PAX_RANDMMAP
42529 + /* PaX: randomize base address at the default exe base if requested */
42530 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42531 +#ifdef CONFIG_SPARC64
42532 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42533 +#else
42534 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42535 +#endif
42536 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42537 + elf_flags |= MAP_FIXED;
42538 + }
42539 +#endif
42540 +
42541 }
42542
42543 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42544 @@ -840,9 +1297,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42545 * allowed task size. Note that p_filesz must always be
42546 * <= p_memsz so it is only necessary to check p_memsz.
42547 */
42548 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42549 - elf_ppnt->p_memsz > TASK_SIZE ||
42550 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42551 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42552 + elf_ppnt->p_memsz > pax_task_size ||
42553 + pax_task_size - elf_ppnt->p_memsz < k) {
42554 /* set_brk can never work. Avoid overflows. */
42555 send_sig(SIGKILL, current, 0);
42556 retval = -EINVAL;
42557 @@ -870,6 +1327,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42558 start_data += load_bias;
42559 end_data += load_bias;
42560
42561 +#ifdef CONFIG_PAX_RANDMMAP
42562 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
42563 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
42564 +#endif
42565 +
42566 /* Calling set_brk effectively mmaps the pages that we need
42567 * for the bss and break sections. We must do this before
42568 * mapping in the interpreter, to make sure it doesn't wind
42569 @@ -881,9 +1343,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42570 goto out_free_dentry;
42571 }
42572 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42573 - send_sig(SIGSEGV, current, 0);
42574 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42575 - goto out_free_dentry;
42576 + /*
42577 + * This bss-zeroing can fail if the ELF
42578 + * file specifies odd protections. So
42579 + * we don't check the return value
42580 + */
42581 }
42582
42583 if (elf_interpreter) {
42584 @@ -1098,7 +1562,7 @@ out:
42585 * Decide what to dump of a segment, part, all or none.
42586 */
42587 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42588 - unsigned long mm_flags)
42589 + unsigned long mm_flags, long signr)
42590 {
42591 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42592
42593 @@ -1132,7 +1596,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42594 if (vma->vm_file == NULL)
42595 return 0;
42596
42597 - if (FILTER(MAPPED_PRIVATE))
42598 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42599 goto whole;
42600
42601 /*
42602 @@ -1354,9 +1818,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42603 {
42604 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42605 int i = 0;
42606 - do
42607 + do {
42608 i += 2;
42609 - while (auxv[i - 2] != AT_NULL);
42610 + } while (auxv[i - 2] != AT_NULL);
42611 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42612 }
42613
42614 @@ -1862,14 +2326,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42615 }
42616
42617 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42618 - unsigned long mm_flags)
42619 + struct coredump_params *cprm)
42620 {
42621 struct vm_area_struct *vma;
42622 size_t size = 0;
42623
42624 for (vma = first_vma(current, gate_vma); vma != NULL;
42625 vma = next_vma(vma, gate_vma))
42626 - size += vma_dump_size(vma, mm_flags);
42627 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42628 return size;
42629 }
42630
42631 @@ -1963,7 +2427,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42632
42633 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42634
42635 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42636 + offset += elf_core_vma_data_size(gate_vma, cprm);
42637 offset += elf_core_extra_data_size();
42638 e_shoff = offset;
42639
42640 @@ -1977,10 +2441,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42641 offset = dataoff;
42642
42643 size += sizeof(*elf);
42644 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42645 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42646 goto end_coredump;
42647
42648 size += sizeof(*phdr4note);
42649 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42650 if (size > cprm->limit
42651 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42652 goto end_coredump;
42653 @@ -1994,7 +2460,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42654 phdr.p_offset = offset;
42655 phdr.p_vaddr = vma->vm_start;
42656 phdr.p_paddr = 0;
42657 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42658 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42659 phdr.p_memsz = vma->vm_end - vma->vm_start;
42660 offset += phdr.p_filesz;
42661 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42662 @@ -2005,6 +2471,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42663 phdr.p_align = ELF_EXEC_PAGESIZE;
42664
42665 size += sizeof(phdr);
42666 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42667 if (size > cprm->limit
42668 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42669 goto end_coredump;
42670 @@ -2029,7 +2496,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42671 unsigned long addr;
42672 unsigned long end;
42673
42674 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42675 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42676
42677 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42678 struct page *page;
42679 @@ -2038,6 +2505,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42680 page = get_dump_page(addr);
42681 if (page) {
42682 void *kaddr = kmap(page);
42683 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42684 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42685 !dump_write(cprm->file, kaddr,
42686 PAGE_SIZE);
42687 @@ -2055,6 +2523,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42688
42689 if (e_phnum == PN_XNUM) {
42690 size += sizeof(*shdr4extnum);
42691 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42692 if (size > cprm->limit
42693 || !dump_write(cprm->file, shdr4extnum,
42694 sizeof(*shdr4extnum)))
42695 @@ -2075,6 +2544,97 @@ out:
42696
42697 #endif /* CONFIG_ELF_CORE */
42698
42699 +#ifdef CONFIG_PAX_MPROTECT
42700 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42701 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42702 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42703 + *
42704 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42705 + * basis because we want to allow the common case and not the special ones.
42706 + */
42707 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42708 +{
42709 + struct elfhdr elf_h;
42710 + struct elf_phdr elf_p;
42711 + unsigned long i;
42712 + unsigned long oldflags;
42713 + bool is_textrel_rw, is_textrel_rx, is_relro;
42714 +
42715 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42716 + return;
42717 +
42718 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42719 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42720 +
42721 +#ifdef CONFIG_PAX_ELFRELOCS
42722 + /* possible TEXTREL */
42723 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42724 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42725 +#else
42726 + is_textrel_rw = false;
42727 + is_textrel_rx = false;
42728 +#endif
42729 +
42730 + /* possible RELRO */
42731 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42732 +
42733 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42734 + return;
42735 +
42736 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42737 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42738 +
42739 +#ifdef CONFIG_PAX_ETEXECRELOCS
42740 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42741 +#else
42742 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42743 +#endif
42744 +
42745 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42746 + !elf_check_arch(&elf_h) ||
42747 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42748 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42749 + return;
42750 +
42751 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42752 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42753 + return;
42754 + switch (elf_p.p_type) {
42755 + case PT_DYNAMIC:
42756 + if (!is_textrel_rw && !is_textrel_rx)
42757 + continue;
42758 + i = 0UL;
42759 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42760 + elf_dyn dyn;
42761 +
42762 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42763 + return;
42764 + if (dyn.d_tag == DT_NULL)
42765 + return;
42766 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42767 + gr_log_textrel(vma);
42768 + if (is_textrel_rw)
42769 + vma->vm_flags |= VM_MAYWRITE;
42770 + else
42771 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42772 + vma->vm_flags &= ~VM_MAYWRITE;
42773 + return;
42774 + }
42775 + i++;
42776 + }
42777 + return;
42778 +
42779 + case PT_GNU_RELRO:
42780 + if (!is_relro)
42781 + continue;
42782 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42783 + vma->vm_flags &= ~VM_MAYWRITE;
42784 + return;
42785 + }
42786 + }
42787 +}
42788 +#endif
42789 +
42790 static int __init init_elf_binfmt(void)
42791 {
42792 return register_binfmt(&elf_format);
42793 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42794 index 1bffbe0..c8c283e 100644
42795 --- a/fs/binfmt_flat.c
42796 +++ b/fs/binfmt_flat.c
42797 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42798 realdatastart = (unsigned long) -ENOMEM;
42799 printk("Unable to allocate RAM for process data, errno %d\n",
42800 (int)-realdatastart);
42801 + down_write(&current->mm->mmap_sem);
42802 do_munmap(current->mm, textpos, text_len);
42803 + up_write(&current->mm->mmap_sem);
42804 ret = realdatastart;
42805 goto err;
42806 }
42807 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42808 }
42809 if (IS_ERR_VALUE(result)) {
42810 printk("Unable to read data+bss, errno %d\n", (int)-result);
42811 + down_write(&current->mm->mmap_sem);
42812 do_munmap(current->mm, textpos, text_len);
42813 do_munmap(current->mm, realdatastart, len);
42814 + up_write(&current->mm->mmap_sem);
42815 ret = result;
42816 goto err;
42817 }
42818 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42819 }
42820 if (IS_ERR_VALUE(result)) {
42821 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42822 + down_write(&current->mm->mmap_sem);
42823 do_munmap(current->mm, textpos, text_len + data_len + extra +
42824 MAX_SHARED_LIBS * sizeof(unsigned long));
42825 + up_write(&current->mm->mmap_sem);
42826 ret = result;
42827 goto err;
42828 }
42829 diff --git a/fs/bio.c b/fs/bio.c
42830 index 9bfade8..782f3b9 100644
42831 --- a/fs/bio.c
42832 +++ b/fs/bio.c
42833 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42834 const int read = bio_data_dir(bio) == READ;
42835 struct bio_map_data *bmd = bio->bi_private;
42836 int i;
42837 - char *p = bmd->sgvecs[0].iov_base;
42838 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42839
42840 __bio_for_each_segment(bvec, bio, i, 0) {
42841 char *addr = page_address(bvec->bv_page);
42842 diff --git a/fs/block_dev.c b/fs/block_dev.c
42843 index 1c44b8d..e2507b4 100644
42844 --- a/fs/block_dev.c
42845 +++ b/fs/block_dev.c
42846 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42847 else if (bdev->bd_contains == bdev)
42848 return true; /* is a whole device which isn't held */
42849
42850 - else if (whole->bd_holder == bd_may_claim)
42851 + else if (whole->bd_holder == (void *)bd_may_claim)
42852 return true; /* is a partition of a device that is being partitioned */
42853 else if (whole->bd_holder != NULL)
42854 return false; /* is a partition of a held device */
42855 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42856 index 011cab3..9ace713 100644
42857 --- a/fs/btrfs/ctree.c
42858 +++ b/fs/btrfs/ctree.c
42859 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42860 free_extent_buffer(buf);
42861 add_root_to_dirty_list(root);
42862 } else {
42863 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42864 - parent_start = parent->start;
42865 - else
42866 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42867 + if (parent)
42868 + parent_start = parent->start;
42869 + else
42870 + parent_start = 0;
42871 + } else
42872 parent_start = 0;
42873
42874 WARN_ON(trans->transid != btrfs_header_generation(parent));
42875 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42876 index b2d004a..6bb543d 100644
42877 --- a/fs/btrfs/inode.c
42878 +++ b/fs/btrfs/inode.c
42879 @@ -6922,7 +6922,7 @@ fail:
42880 return -ENOMEM;
42881 }
42882
42883 -static int btrfs_getattr(struct vfsmount *mnt,
42884 +int btrfs_getattr(struct vfsmount *mnt,
42885 struct dentry *dentry, struct kstat *stat)
42886 {
42887 struct inode *inode = dentry->d_inode;
42888 @@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42889 return 0;
42890 }
42891
42892 +EXPORT_SYMBOL(btrfs_getattr);
42893 +
42894 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42895 +{
42896 + return BTRFS_I(inode)->root->anon_dev;
42897 +}
42898 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42899 +
42900 /*
42901 * If a file is moved, it will inherit the cow and compression flags of the new
42902 * directory.
42903 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42904 index dae5dfe..6aa01b1 100644
42905 --- a/fs/btrfs/ioctl.c
42906 +++ b/fs/btrfs/ioctl.c
42907 @@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42908 for (i = 0; i < num_types; i++) {
42909 struct btrfs_space_info *tmp;
42910
42911 + /* Don't copy in more than we allocated */
42912 if (!slot_count)
42913 break;
42914
42915 + slot_count--;
42916 +
42917 info = NULL;
42918 rcu_read_lock();
42919 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42920 @@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42921 memcpy(dest, &space, sizeof(space));
42922 dest++;
42923 space_args.total_spaces++;
42924 - slot_count--;
42925 }
42926 - if (!slot_count)
42927 - break;
42928 }
42929 up_read(&info->groups_sem);
42930 }
42931
42932 - user_dest = (struct btrfs_ioctl_space_info *)
42933 + user_dest = (struct btrfs_ioctl_space_info __user *)
42934 (arg + sizeof(struct btrfs_ioctl_space_args));
42935
42936 if (copy_to_user(user_dest, dest_orig, alloc_size))
42937 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42938 index 59bb176..be9977d 100644
42939 --- a/fs/btrfs/relocation.c
42940 +++ b/fs/btrfs/relocation.c
42941 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42942 }
42943 spin_unlock(&rc->reloc_root_tree.lock);
42944
42945 - BUG_ON((struct btrfs_root *)node->data != root);
42946 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42947
42948 if (!del) {
42949 spin_lock(&rc->reloc_root_tree.lock);
42950 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42951 index 622f469..e8d2d55 100644
42952 --- a/fs/cachefiles/bind.c
42953 +++ b/fs/cachefiles/bind.c
42954 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42955 args);
42956
42957 /* start by checking things over */
42958 - ASSERT(cache->fstop_percent >= 0 &&
42959 - cache->fstop_percent < cache->fcull_percent &&
42960 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42961 cache->fcull_percent < cache->frun_percent &&
42962 cache->frun_percent < 100);
42963
42964 - ASSERT(cache->bstop_percent >= 0 &&
42965 - cache->bstop_percent < cache->bcull_percent &&
42966 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42967 cache->bcull_percent < cache->brun_percent &&
42968 cache->brun_percent < 100);
42969
42970 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42971 index 0a1467b..6a53245 100644
42972 --- a/fs/cachefiles/daemon.c
42973 +++ b/fs/cachefiles/daemon.c
42974 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42975 if (n > buflen)
42976 return -EMSGSIZE;
42977
42978 - if (copy_to_user(_buffer, buffer, n) != 0)
42979 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42980 return -EFAULT;
42981
42982 return n;
42983 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42984 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42985 return -EIO;
42986
42987 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42988 + if (datalen > PAGE_SIZE - 1)
42989 return -EOPNOTSUPP;
42990
42991 /* drag the command string into the kernel so we can parse it */
42992 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42993 if (args[0] != '%' || args[1] != '\0')
42994 return -EINVAL;
42995
42996 - if (fstop < 0 || fstop >= cache->fcull_percent)
42997 + if (fstop >= cache->fcull_percent)
42998 return cachefiles_daemon_range_error(cache, args);
42999
43000 cache->fstop_percent = fstop;
43001 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43002 if (args[0] != '%' || args[1] != '\0')
43003 return -EINVAL;
43004
43005 - if (bstop < 0 || bstop >= cache->bcull_percent)
43006 + if (bstop >= cache->bcull_percent)
43007 return cachefiles_daemon_range_error(cache, args);
43008
43009 cache->bstop_percent = bstop;
43010 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43011 index bd6bc1b..b627b53 100644
43012 --- a/fs/cachefiles/internal.h
43013 +++ b/fs/cachefiles/internal.h
43014 @@ -57,7 +57,7 @@ struct cachefiles_cache {
43015 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43016 struct rb_root active_nodes; /* active nodes (can't be culled) */
43017 rwlock_t active_lock; /* lock for active_nodes */
43018 - atomic_t gravecounter; /* graveyard uniquifier */
43019 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43020 unsigned frun_percent; /* when to stop culling (% files) */
43021 unsigned fcull_percent; /* when to start culling (% files) */
43022 unsigned fstop_percent; /* when to stop allocating (% files) */
43023 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43024 * proc.c
43025 */
43026 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43027 -extern atomic_t cachefiles_lookup_histogram[HZ];
43028 -extern atomic_t cachefiles_mkdir_histogram[HZ];
43029 -extern atomic_t cachefiles_create_histogram[HZ];
43030 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43031 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43032 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43033
43034 extern int __init cachefiles_proc_init(void);
43035 extern void cachefiles_proc_cleanup(void);
43036 static inline
43037 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43038 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43039 {
43040 unsigned long jif = jiffies - start_jif;
43041 if (jif >= HZ)
43042 jif = HZ - 1;
43043 - atomic_inc(&histogram[jif]);
43044 + atomic_inc_unchecked(&histogram[jif]);
43045 }
43046
43047 #else
43048 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43049 index a0358c2..d6137f2 100644
43050 --- a/fs/cachefiles/namei.c
43051 +++ b/fs/cachefiles/namei.c
43052 @@ -318,7 +318,7 @@ try_again:
43053 /* first step is to make up a grave dentry in the graveyard */
43054 sprintf(nbuffer, "%08x%08x",
43055 (uint32_t) get_seconds(),
43056 - (uint32_t) atomic_inc_return(&cache->gravecounter));
43057 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43058
43059 /* do the multiway lock magic */
43060 trap = lock_rename(cache->graveyard, dir);
43061 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43062 index eccd339..4c1d995 100644
43063 --- a/fs/cachefiles/proc.c
43064 +++ b/fs/cachefiles/proc.c
43065 @@ -14,9 +14,9 @@
43066 #include <linux/seq_file.h>
43067 #include "internal.h"
43068
43069 -atomic_t cachefiles_lookup_histogram[HZ];
43070 -atomic_t cachefiles_mkdir_histogram[HZ];
43071 -atomic_t cachefiles_create_histogram[HZ];
43072 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43073 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43074 +atomic_unchecked_t cachefiles_create_histogram[HZ];
43075
43076 /*
43077 * display the latency histogram
43078 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43079 return 0;
43080 default:
43081 index = (unsigned long) v - 3;
43082 - x = atomic_read(&cachefiles_lookup_histogram[index]);
43083 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
43084 - z = atomic_read(&cachefiles_create_histogram[index]);
43085 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43086 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43087 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43088 if (x == 0 && y == 0 && z == 0)
43089 return 0;
43090
43091 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43092 index 0e3c092..818480e 100644
43093 --- a/fs/cachefiles/rdwr.c
43094 +++ b/fs/cachefiles/rdwr.c
43095 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43096 old_fs = get_fs();
43097 set_fs(KERNEL_DS);
43098 ret = file->f_op->write(
43099 - file, (const void __user *) data, len, &pos);
43100 + file, (const void __force_user *) data, len, &pos);
43101 set_fs(old_fs);
43102 kunmap(page);
43103 if (ret != len)
43104 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43105 index 382abc9..bd89646 100644
43106 --- a/fs/ceph/dir.c
43107 +++ b/fs/ceph/dir.c
43108 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43109 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43110 struct ceph_mds_client *mdsc = fsc->mdsc;
43111 unsigned frag = fpos_frag(filp->f_pos);
43112 - int off = fpos_off(filp->f_pos);
43113 + unsigned int off = fpos_off(filp->f_pos);
43114 int err;
43115 u32 ftype;
43116 struct ceph_mds_reply_info_parsed *rinfo;
43117 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43118 index 6d40656..bc1f825 100644
43119 --- a/fs/cifs/cifs_debug.c
43120 +++ b/fs/cifs/cifs_debug.c
43121 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43122
43123 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43124 #ifdef CONFIG_CIFS_STATS2
43125 - atomic_set(&totBufAllocCount, 0);
43126 - atomic_set(&totSmBufAllocCount, 0);
43127 + atomic_set_unchecked(&totBufAllocCount, 0);
43128 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43129 #endif /* CONFIG_CIFS_STATS2 */
43130 spin_lock(&cifs_tcp_ses_lock);
43131 list_for_each(tmp1, &cifs_tcp_ses_list) {
43132 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43133 tcon = list_entry(tmp3,
43134 struct cifs_tcon,
43135 tcon_list);
43136 - atomic_set(&tcon->num_smbs_sent, 0);
43137 - atomic_set(&tcon->num_writes, 0);
43138 - atomic_set(&tcon->num_reads, 0);
43139 - atomic_set(&tcon->num_oplock_brks, 0);
43140 - atomic_set(&tcon->num_opens, 0);
43141 - atomic_set(&tcon->num_posixopens, 0);
43142 - atomic_set(&tcon->num_posixmkdirs, 0);
43143 - atomic_set(&tcon->num_closes, 0);
43144 - atomic_set(&tcon->num_deletes, 0);
43145 - atomic_set(&tcon->num_mkdirs, 0);
43146 - atomic_set(&tcon->num_rmdirs, 0);
43147 - atomic_set(&tcon->num_renames, 0);
43148 - atomic_set(&tcon->num_t2renames, 0);
43149 - atomic_set(&tcon->num_ffirst, 0);
43150 - atomic_set(&tcon->num_fnext, 0);
43151 - atomic_set(&tcon->num_fclose, 0);
43152 - atomic_set(&tcon->num_hardlinks, 0);
43153 - atomic_set(&tcon->num_symlinks, 0);
43154 - atomic_set(&tcon->num_locks, 0);
43155 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43156 + atomic_set_unchecked(&tcon->num_writes, 0);
43157 + atomic_set_unchecked(&tcon->num_reads, 0);
43158 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43159 + atomic_set_unchecked(&tcon->num_opens, 0);
43160 + atomic_set_unchecked(&tcon->num_posixopens, 0);
43161 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43162 + atomic_set_unchecked(&tcon->num_closes, 0);
43163 + atomic_set_unchecked(&tcon->num_deletes, 0);
43164 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
43165 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
43166 + atomic_set_unchecked(&tcon->num_renames, 0);
43167 + atomic_set_unchecked(&tcon->num_t2renames, 0);
43168 + atomic_set_unchecked(&tcon->num_ffirst, 0);
43169 + atomic_set_unchecked(&tcon->num_fnext, 0);
43170 + atomic_set_unchecked(&tcon->num_fclose, 0);
43171 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
43172 + atomic_set_unchecked(&tcon->num_symlinks, 0);
43173 + atomic_set_unchecked(&tcon->num_locks, 0);
43174 }
43175 }
43176 }
43177 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43178 smBufAllocCount.counter, cifs_min_small);
43179 #ifdef CONFIG_CIFS_STATS2
43180 seq_printf(m, "Total Large %d Small %d Allocations\n",
43181 - atomic_read(&totBufAllocCount),
43182 - atomic_read(&totSmBufAllocCount));
43183 + atomic_read_unchecked(&totBufAllocCount),
43184 + atomic_read_unchecked(&totSmBufAllocCount));
43185 #endif /* CONFIG_CIFS_STATS2 */
43186
43187 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43188 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43189 if (tcon->need_reconnect)
43190 seq_puts(m, "\tDISCONNECTED ");
43191 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43192 - atomic_read(&tcon->num_smbs_sent),
43193 - atomic_read(&tcon->num_oplock_brks));
43194 + atomic_read_unchecked(&tcon->num_smbs_sent),
43195 + atomic_read_unchecked(&tcon->num_oplock_brks));
43196 seq_printf(m, "\nReads: %d Bytes: %lld",
43197 - atomic_read(&tcon->num_reads),
43198 + atomic_read_unchecked(&tcon->num_reads),
43199 (long long)(tcon->bytes_read));
43200 seq_printf(m, "\nWrites: %d Bytes: %lld",
43201 - atomic_read(&tcon->num_writes),
43202 + atomic_read_unchecked(&tcon->num_writes),
43203 (long long)(tcon->bytes_written));
43204 seq_printf(m, "\nFlushes: %d",
43205 - atomic_read(&tcon->num_flushes));
43206 + atomic_read_unchecked(&tcon->num_flushes));
43207 seq_printf(m, "\nLocks: %d HardLinks: %d "
43208 "Symlinks: %d",
43209 - atomic_read(&tcon->num_locks),
43210 - atomic_read(&tcon->num_hardlinks),
43211 - atomic_read(&tcon->num_symlinks));
43212 + atomic_read_unchecked(&tcon->num_locks),
43213 + atomic_read_unchecked(&tcon->num_hardlinks),
43214 + atomic_read_unchecked(&tcon->num_symlinks));
43215 seq_printf(m, "\nOpens: %d Closes: %d "
43216 "Deletes: %d",
43217 - atomic_read(&tcon->num_opens),
43218 - atomic_read(&tcon->num_closes),
43219 - atomic_read(&tcon->num_deletes));
43220 + atomic_read_unchecked(&tcon->num_opens),
43221 + atomic_read_unchecked(&tcon->num_closes),
43222 + atomic_read_unchecked(&tcon->num_deletes));
43223 seq_printf(m, "\nPosix Opens: %d "
43224 "Posix Mkdirs: %d",
43225 - atomic_read(&tcon->num_posixopens),
43226 - atomic_read(&tcon->num_posixmkdirs));
43227 + atomic_read_unchecked(&tcon->num_posixopens),
43228 + atomic_read_unchecked(&tcon->num_posixmkdirs));
43229 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43230 - atomic_read(&tcon->num_mkdirs),
43231 - atomic_read(&tcon->num_rmdirs));
43232 + atomic_read_unchecked(&tcon->num_mkdirs),
43233 + atomic_read_unchecked(&tcon->num_rmdirs));
43234 seq_printf(m, "\nRenames: %d T2 Renames %d",
43235 - atomic_read(&tcon->num_renames),
43236 - atomic_read(&tcon->num_t2renames));
43237 + atomic_read_unchecked(&tcon->num_renames),
43238 + atomic_read_unchecked(&tcon->num_t2renames));
43239 seq_printf(m, "\nFindFirst: %d FNext %d "
43240 "FClose %d",
43241 - atomic_read(&tcon->num_ffirst),
43242 - atomic_read(&tcon->num_fnext),
43243 - atomic_read(&tcon->num_fclose));
43244 + atomic_read_unchecked(&tcon->num_ffirst),
43245 + atomic_read_unchecked(&tcon->num_fnext),
43246 + atomic_read_unchecked(&tcon->num_fclose));
43247 }
43248 }
43249 }
43250 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43251 index 54b8f1e..f6a4c00 100644
43252 --- a/fs/cifs/cifsfs.c
43253 +++ b/fs/cifs/cifsfs.c
43254 @@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
43255 cifs_req_cachep = kmem_cache_create("cifs_request",
43256 CIFSMaxBufSize +
43257 MAX_CIFS_HDR_SIZE, 0,
43258 - SLAB_HWCACHE_ALIGN, NULL);
43259 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43260 if (cifs_req_cachep == NULL)
43261 return -ENOMEM;
43262
43263 @@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
43264 efficient to alloc 1 per page off the slab compared to 17K (5page)
43265 alloc of large cifs buffers even when page debugging is on */
43266 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43267 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43268 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43269 NULL);
43270 if (cifs_sm_req_cachep == NULL) {
43271 mempool_destroy(cifs_req_poolp);
43272 @@ -1093,8 +1093,8 @@ init_cifs(void)
43273 atomic_set(&bufAllocCount, 0);
43274 atomic_set(&smBufAllocCount, 0);
43275 #ifdef CONFIG_CIFS_STATS2
43276 - atomic_set(&totBufAllocCount, 0);
43277 - atomic_set(&totSmBufAllocCount, 0);
43278 + atomic_set_unchecked(&totBufAllocCount, 0);
43279 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43280 #endif /* CONFIG_CIFS_STATS2 */
43281
43282 atomic_set(&midCount, 0);
43283 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43284 index 95dad9d..fe7af1a 100644
43285 --- a/fs/cifs/cifsglob.h
43286 +++ b/fs/cifs/cifsglob.h
43287 @@ -381,28 +381,28 @@ struct cifs_tcon {
43288 __u16 Flags; /* optional support bits */
43289 enum statusEnum tidStatus;
43290 #ifdef CONFIG_CIFS_STATS
43291 - atomic_t num_smbs_sent;
43292 - atomic_t num_writes;
43293 - atomic_t num_reads;
43294 - atomic_t num_flushes;
43295 - atomic_t num_oplock_brks;
43296 - atomic_t num_opens;
43297 - atomic_t num_closes;
43298 - atomic_t num_deletes;
43299 - atomic_t num_mkdirs;
43300 - atomic_t num_posixopens;
43301 - atomic_t num_posixmkdirs;
43302 - atomic_t num_rmdirs;
43303 - atomic_t num_renames;
43304 - atomic_t num_t2renames;
43305 - atomic_t num_ffirst;
43306 - atomic_t num_fnext;
43307 - atomic_t num_fclose;
43308 - atomic_t num_hardlinks;
43309 - atomic_t num_symlinks;
43310 - atomic_t num_locks;
43311 - atomic_t num_acl_get;
43312 - atomic_t num_acl_set;
43313 + atomic_unchecked_t num_smbs_sent;
43314 + atomic_unchecked_t num_writes;
43315 + atomic_unchecked_t num_reads;
43316 + atomic_unchecked_t num_flushes;
43317 + atomic_unchecked_t num_oplock_brks;
43318 + atomic_unchecked_t num_opens;
43319 + atomic_unchecked_t num_closes;
43320 + atomic_unchecked_t num_deletes;
43321 + atomic_unchecked_t num_mkdirs;
43322 + atomic_unchecked_t num_posixopens;
43323 + atomic_unchecked_t num_posixmkdirs;
43324 + atomic_unchecked_t num_rmdirs;
43325 + atomic_unchecked_t num_renames;
43326 + atomic_unchecked_t num_t2renames;
43327 + atomic_unchecked_t num_ffirst;
43328 + atomic_unchecked_t num_fnext;
43329 + atomic_unchecked_t num_fclose;
43330 + atomic_unchecked_t num_hardlinks;
43331 + atomic_unchecked_t num_symlinks;
43332 + atomic_unchecked_t num_locks;
43333 + atomic_unchecked_t num_acl_get;
43334 + atomic_unchecked_t num_acl_set;
43335 #ifdef CONFIG_CIFS_STATS2
43336 unsigned long long time_writes;
43337 unsigned long long time_reads;
43338 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim)
43339 }
43340
43341 #ifdef CONFIG_CIFS_STATS
43342 -#define cifs_stats_inc atomic_inc
43343 +#define cifs_stats_inc atomic_inc_unchecked
43344
43345 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43346 unsigned int bytes)
43347 @@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43348 /* Various Debug counters */
43349 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43350 #ifdef CONFIG_CIFS_STATS2
43351 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43352 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43353 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43354 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43355 #endif
43356 GLOBAL_EXTERN atomic_t smBufAllocCount;
43357 GLOBAL_EXTERN atomic_t midCount;
43358 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43359 index db3f18c..1f5955e 100644
43360 --- a/fs/cifs/link.c
43361 +++ b/fs/cifs/link.c
43362 @@ -593,7 +593,7 @@ symlink_exit:
43363
43364 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43365 {
43366 - char *p = nd_get_link(nd);
43367 + const char *p = nd_get_link(nd);
43368 if (!IS_ERR(p))
43369 kfree(p);
43370 }
43371 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43372 index 7c16933..c8212b5 100644
43373 --- a/fs/cifs/misc.c
43374 +++ b/fs/cifs/misc.c
43375 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43376 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43377 atomic_inc(&bufAllocCount);
43378 #ifdef CONFIG_CIFS_STATS2
43379 - atomic_inc(&totBufAllocCount);
43380 + atomic_inc_unchecked(&totBufAllocCount);
43381 #endif /* CONFIG_CIFS_STATS2 */
43382 }
43383
43384 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43385 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43386 atomic_inc(&smBufAllocCount);
43387 #ifdef CONFIG_CIFS_STATS2
43388 - atomic_inc(&totSmBufAllocCount);
43389 + atomic_inc_unchecked(&totSmBufAllocCount);
43390 #endif /* CONFIG_CIFS_STATS2 */
43391
43392 }
43393 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43394 index 6901578..d402eb5 100644
43395 --- a/fs/coda/cache.c
43396 +++ b/fs/coda/cache.c
43397 @@ -24,7 +24,7 @@
43398 #include "coda_linux.h"
43399 #include "coda_cache.h"
43400
43401 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43402 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43403
43404 /* replace or extend an acl cache hit */
43405 void coda_cache_enter(struct inode *inode, int mask)
43406 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43407 struct coda_inode_info *cii = ITOC(inode);
43408
43409 spin_lock(&cii->c_lock);
43410 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43411 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43412 if (cii->c_uid != current_fsuid()) {
43413 cii->c_uid = current_fsuid();
43414 cii->c_cached_perm = mask;
43415 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43416 {
43417 struct coda_inode_info *cii = ITOC(inode);
43418 spin_lock(&cii->c_lock);
43419 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43420 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43421 spin_unlock(&cii->c_lock);
43422 }
43423
43424 /* remove all acl caches */
43425 void coda_cache_clear_all(struct super_block *sb)
43426 {
43427 - atomic_inc(&permission_epoch);
43428 + atomic_inc_unchecked(&permission_epoch);
43429 }
43430
43431
43432 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43433 spin_lock(&cii->c_lock);
43434 hit = (mask & cii->c_cached_perm) == mask &&
43435 cii->c_uid == current_fsuid() &&
43436 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43437 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43438 spin_unlock(&cii->c_lock);
43439
43440 return hit;
43441 diff --git a/fs/compat.c b/fs/compat.c
43442 index 58b1da4..afcd9b8 100644
43443 --- a/fs/compat.c
43444 +++ b/fs/compat.c
43445 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
43446 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
43447 {
43448 compat_ino_t ino = stat->ino;
43449 - typeof(ubuf->st_uid) uid = 0;
43450 - typeof(ubuf->st_gid) gid = 0;
43451 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
43452 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
43453 int err;
43454
43455 SET_UID(uid, stat->uid);
43456 @@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43457
43458 set_fs(KERNEL_DS);
43459 /* The __user pointer cast is valid because of the set_fs() */
43460 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43461 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43462 set_fs(oldfs);
43463 /* truncating is ok because it's a user address */
43464 if (!ret)
43465 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43466 goto out;
43467
43468 ret = -EINVAL;
43469 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43470 + if (nr_segs > UIO_MAXIOV)
43471 goto out;
43472 if (nr_segs > fast_segs) {
43473 ret = -ENOMEM;
43474 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
43475
43476 struct compat_readdir_callback {
43477 struct compat_old_linux_dirent __user *dirent;
43478 + struct file * file;
43479 int result;
43480 };
43481
43482 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43483 buf->result = -EOVERFLOW;
43484 return -EOVERFLOW;
43485 }
43486 +
43487 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43488 + return 0;
43489 +
43490 buf->result++;
43491 dirent = buf->dirent;
43492 if (!access_ok(VERIFY_WRITE, dirent,
43493 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43494
43495 buf.result = 0;
43496 buf.dirent = dirent;
43497 + buf.file = file;
43498
43499 error = vfs_readdir(file, compat_fillonedir, &buf);
43500 if (buf.result)
43501 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
43502 struct compat_getdents_callback {
43503 struct compat_linux_dirent __user *current_dir;
43504 struct compat_linux_dirent __user *previous;
43505 + struct file * file;
43506 int count;
43507 int error;
43508 };
43509 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43510 buf->error = -EOVERFLOW;
43511 return -EOVERFLOW;
43512 }
43513 +
43514 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43515 + return 0;
43516 +
43517 dirent = buf->previous;
43518 if (dirent) {
43519 if (__put_user(offset, &dirent->d_off))
43520 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43521 buf.previous = NULL;
43522 buf.count = count;
43523 buf.error = 0;
43524 + buf.file = file;
43525
43526 error = vfs_readdir(file, compat_filldir, &buf);
43527 if (error >= 0)
43528 @@ -1006,6 +1018,7 @@ out:
43529 struct compat_getdents_callback64 {
43530 struct linux_dirent64 __user *current_dir;
43531 struct linux_dirent64 __user *previous;
43532 + struct file * file;
43533 int count;
43534 int error;
43535 };
43536 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43537 buf->error = -EINVAL; /* only used if we fail.. */
43538 if (reclen > buf->count)
43539 return -EINVAL;
43540 +
43541 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43542 + return 0;
43543 +
43544 dirent = buf->previous;
43545
43546 if (dirent) {
43547 @@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43548 buf.previous = NULL;
43549 buf.count = count;
43550 buf.error = 0;
43551 + buf.file = file;
43552
43553 error = vfs_readdir(file, compat_filldir64, &buf);
43554 if (error >= 0)
43555 error = buf.error;
43556 lastdirent = buf.previous;
43557 if (lastdirent) {
43558 - typeof(lastdirent->d_off) d_off = file->f_pos;
43559 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43560 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43561 error = -EFAULT;
43562 else
43563 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
43564 struct fdtable *fdt;
43565 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43566
43567 + pax_track_stack();
43568 +
43569 if (n < 0)
43570 goto out_nofds;
43571
43572 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43573 index 112e45a..b59845b 100644
43574 --- a/fs/compat_binfmt_elf.c
43575 +++ b/fs/compat_binfmt_elf.c
43576 @@ -30,11 +30,13 @@
43577 #undef elf_phdr
43578 #undef elf_shdr
43579 #undef elf_note
43580 +#undef elf_dyn
43581 #undef elf_addr_t
43582 #define elfhdr elf32_hdr
43583 #define elf_phdr elf32_phdr
43584 #define elf_shdr elf32_shdr
43585 #define elf_note elf32_note
43586 +#define elf_dyn Elf32_Dyn
43587 #define elf_addr_t Elf32_Addr
43588
43589 /*
43590 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43591 index 51352de..93292ff 100644
43592 --- a/fs/compat_ioctl.c
43593 +++ b/fs/compat_ioctl.c
43594 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43595
43596 err = get_user(palp, &up->palette);
43597 err |= get_user(length, &up->length);
43598 + if (err)
43599 + return -EFAULT;
43600
43601 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43602 err = put_user(compat_ptr(palp), &up_native->palette);
43603 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43604 return -EFAULT;
43605 if (__get_user(udata, &ss32->iomem_base))
43606 return -EFAULT;
43607 - ss.iomem_base = compat_ptr(udata);
43608 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43609 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43610 __get_user(ss.port_high, &ss32->port_high))
43611 return -EFAULT;
43612 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43613 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43614 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43615 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43616 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43617 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43618 return -EFAULT;
43619
43620 return ioctl_preallocate(file, p);
43621 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43622 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43623 {
43624 unsigned int a, b;
43625 - a = *(unsigned int *)p;
43626 - b = *(unsigned int *)q;
43627 + a = *(const unsigned int *)p;
43628 + b = *(const unsigned int *)q;
43629 if (a > b)
43630 return 1;
43631 if (a < b)
43632 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43633 index 9a37a9b..35792b6 100644
43634 --- a/fs/configfs/dir.c
43635 +++ b/fs/configfs/dir.c
43636 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43637 }
43638 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43639 struct configfs_dirent *next;
43640 - const char * name;
43641 + const unsigned char * name;
43642 + char d_name[sizeof(next->s_dentry->d_iname)];
43643 int len;
43644 struct inode *inode = NULL;
43645
43646 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43647 continue;
43648
43649 name = configfs_get_name(next);
43650 - len = strlen(name);
43651 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43652 + len = next->s_dentry->d_name.len;
43653 + memcpy(d_name, name, len);
43654 + name = d_name;
43655 + } else
43656 + len = strlen(name);
43657
43658 /*
43659 * We'll have a dentry and an inode for
43660 diff --git a/fs/dcache.c b/fs/dcache.c
43661 index 8b732a2..6db6c27 100644
43662 --- a/fs/dcache.c
43663 +++ b/fs/dcache.c
43664 @@ -3015,7 +3015,7 @@ void __init vfs_caches_init(unsigned long mempages)
43665 mempages -= reserve;
43666
43667 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43668 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43669 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43670
43671 dcache_init();
43672 inode_init();
43673 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43674 index 528da01..bd8c23d 100644
43675 --- a/fs/ecryptfs/inode.c
43676 +++ b/fs/ecryptfs/inode.c
43677 @@ -691,7 +691,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43678 old_fs = get_fs();
43679 set_fs(get_ds());
43680 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43681 - (char __user *)lower_buf,
43682 + (char __force_user *)lower_buf,
43683 lower_bufsiz);
43684 set_fs(old_fs);
43685 if (rc < 0)
43686 @@ -737,7 +737,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43687 }
43688 old_fs = get_fs();
43689 set_fs(get_ds());
43690 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43691 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43692 set_fs(old_fs);
43693 if (rc < 0) {
43694 kfree(buf);
43695 @@ -752,7 +752,7 @@ out:
43696 static void
43697 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43698 {
43699 - char *buf = nd_get_link(nd);
43700 + const char *buf = nd_get_link(nd);
43701 if (!IS_ERR(buf)) {
43702 /* Free the char* */
43703 kfree(buf);
43704 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43705 index 940a82e..63af89e 100644
43706 --- a/fs/ecryptfs/miscdev.c
43707 +++ b/fs/ecryptfs/miscdev.c
43708 @@ -328,7 +328,7 @@ check_list:
43709 goto out_unlock_msg_ctx;
43710 i = 5;
43711 if (msg_ctx->msg) {
43712 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43713 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43714 goto out_unlock_msg_ctx;
43715 i += packet_length_size;
43716 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43717 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43718 index 3745f7c..89cc7a3 100644
43719 --- a/fs/ecryptfs/read_write.c
43720 +++ b/fs/ecryptfs/read_write.c
43721 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43722 return -EIO;
43723 fs_save = get_fs();
43724 set_fs(get_ds());
43725 - rc = vfs_write(lower_file, data, size, &offset);
43726 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43727 set_fs(fs_save);
43728 mark_inode_dirty_sync(ecryptfs_inode);
43729 return rc;
43730 @@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43731 return -EIO;
43732 fs_save = get_fs();
43733 set_fs(get_ds());
43734 - rc = vfs_read(lower_file, data, size, &offset);
43735 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43736 set_fs(fs_save);
43737 return rc;
43738 }
43739 diff --git a/fs/exec.c b/fs/exec.c
43740 index 25dcbe5..09c172c 100644
43741 --- a/fs/exec.c
43742 +++ b/fs/exec.c
43743 @@ -55,12 +55,28 @@
43744 #include <linux/pipe_fs_i.h>
43745 #include <linux/oom.h>
43746 #include <linux/compat.h>
43747 +#include <linux/random.h>
43748 +#include <linux/seq_file.h>
43749 +
43750 +#ifdef CONFIG_PAX_REFCOUNT
43751 +#include <linux/kallsyms.h>
43752 +#include <linux/kdebug.h>
43753 +#endif
43754
43755 #include <asm/uaccess.h>
43756 #include <asm/mmu_context.h>
43757 #include <asm/tlb.h>
43758 #include "internal.h"
43759
43760 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43761 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43762 +#endif
43763 +
43764 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43765 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43766 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43767 +#endif
43768 +
43769 int core_uses_pid;
43770 char core_pattern[CORENAME_MAX_SIZE] = "core";
43771 unsigned int core_pipe_limit;
43772 @@ -70,7 +86,7 @@ struct core_name {
43773 char *corename;
43774 int used, size;
43775 };
43776 -static atomic_t call_count = ATOMIC_INIT(1);
43777 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43778
43779 /* The maximal length of core_pattern is also specified in sysctl.c */
43780
43781 @@ -188,18 +204,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43782 int write)
43783 {
43784 struct page *page;
43785 - int ret;
43786
43787 -#ifdef CONFIG_STACK_GROWSUP
43788 - if (write) {
43789 - ret = expand_downwards(bprm->vma, pos);
43790 - if (ret < 0)
43791 - return NULL;
43792 - }
43793 -#endif
43794 - ret = get_user_pages(current, bprm->mm, pos,
43795 - 1, write, 1, &page, NULL);
43796 - if (ret <= 0)
43797 + if (0 > expand_downwards(bprm->vma, pos))
43798 + return NULL;
43799 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43800 return NULL;
43801
43802 if (write) {
43803 @@ -274,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43804 vma->vm_end = STACK_TOP_MAX;
43805 vma->vm_start = vma->vm_end - PAGE_SIZE;
43806 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43807 +
43808 +#ifdef CONFIG_PAX_SEGMEXEC
43809 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43810 +#endif
43811 +
43812 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43813 INIT_LIST_HEAD(&vma->anon_vma_chain);
43814
43815 @@ -288,6 +301,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43816 mm->stack_vm = mm->total_vm = 1;
43817 up_write(&mm->mmap_sem);
43818 bprm->p = vma->vm_end - sizeof(void *);
43819 +
43820 +#ifdef CONFIG_PAX_RANDUSTACK
43821 + if (randomize_va_space)
43822 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
43823 +#endif
43824 +
43825 return 0;
43826 err:
43827 up_write(&mm->mmap_sem);
43828 @@ -396,19 +415,7 @@ err:
43829 return err;
43830 }
43831
43832 -struct user_arg_ptr {
43833 -#ifdef CONFIG_COMPAT
43834 - bool is_compat;
43835 -#endif
43836 - union {
43837 - const char __user *const __user *native;
43838 -#ifdef CONFIG_COMPAT
43839 - compat_uptr_t __user *compat;
43840 -#endif
43841 - } ptr;
43842 -};
43843 -
43844 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43845 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43846 {
43847 const char __user *native;
43848
43849 @@ -417,14 +424,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43850 compat_uptr_t compat;
43851
43852 if (get_user(compat, argv.ptr.compat + nr))
43853 - return ERR_PTR(-EFAULT);
43854 + return (const char __force_user *)ERR_PTR(-EFAULT);
43855
43856 return compat_ptr(compat);
43857 }
43858 #endif
43859
43860 if (get_user(native, argv.ptr.native + nr))
43861 - return ERR_PTR(-EFAULT);
43862 + return (const char __force_user *)ERR_PTR(-EFAULT);
43863
43864 return native;
43865 }
43866 @@ -443,7 +450,7 @@ static int count(struct user_arg_ptr argv, int max)
43867 if (!p)
43868 break;
43869
43870 - if (IS_ERR(p))
43871 + if (IS_ERR((const char __force_kernel *)p))
43872 return -EFAULT;
43873
43874 if (i++ >= max)
43875 @@ -477,7 +484,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43876
43877 ret = -EFAULT;
43878 str = get_user_arg_ptr(argv, argc);
43879 - if (IS_ERR(str))
43880 + if (IS_ERR((const char __force_kernel *)str))
43881 goto out;
43882
43883 len = strnlen_user(str, MAX_ARG_STRLEN);
43884 @@ -559,7 +566,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43885 int r;
43886 mm_segment_t oldfs = get_fs();
43887 struct user_arg_ptr argv = {
43888 - .ptr.native = (const char __user *const __user *)__argv,
43889 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43890 };
43891
43892 set_fs(KERNEL_DS);
43893 @@ -594,7 +601,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43894 unsigned long new_end = old_end - shift;
43895 struct mmu_gather tlb;
43896
43897 - BUG_ON(new_start > new_end);
43898 + if (new_start >= new_end || new_start < mmap_min_addr)
43899 + return -ENOMEM;
43900
43901 /*
43902 * ensure there are no vmas between where we want to go
43903 @@ -603,6 +611,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43904 if (vma != find_vma(mm, new_start))
43905 return -EFAULT;
43906
43907 +#ifdef CONFIG_PAX_SEGMEXEC
43908 + BUG_ON(pax_find_mirror_vma(vma));
43909 +#endif
43910 +
43911 /*
43912 * cover the whole range: [new_start, old_end)
43913 */
43914 @@ -683,10 +695,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43915 stack_top = arch_align_stack(stack_top);
43916 stack_top = PAGE_ALIGN(stack_top);
43917
43918 - if (unlikely(stack_top < mmap_min_addr) ||
43919 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43920 - return -ENOMEM;
43921 -
43922 stack_shift = vma->vm_end - stack_top;
43923
43924 bprm->p -= stack_shift;
43925 @@ -698,8 +706,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43926 bprm->exec -= stack_shift;
43927
43928 down_write(&mm->mmap_sem);
43929 +
43930 + /* Move stack pages down in memory. */
43931 + if (stack_shift) {
43932 + ret = shift_arg_pages(vma, stack_shift);
43933 + if (ret)
43934 + goto out_unlock;
43935 + }
43936 +
43937 vm_flags = VM_STACK_FLAGS;
43938
43939 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43940 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43941 + vm_flags &= ~VM_EXEC;
43942 +
43943 +#ifdef CONFIG_PAX_MPROTECT
43944 + if (mm->pax_flags & MF_PAX_MPROTECT)
43945 + vm_flags &= ~VM_MAYEXEC;
43946 +#endif
43947 +
43948 + }
43949 +#endif
43950 +
43951 /*
43952 * Adjust stack execute permissions; explicitly enable for
43953 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43954 @@ -718,13 +746,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43955 goto out_unlock;
43956 BUG_ON(prev != vma);
43957
43958 - /* Move stack pages down in memory. */
43959 - if (stack_shift) {
43960 - ret = shift_arg_pages(vma, stack_shift);
43961 - if (ret)
43962 - goto out_unlock;
43963 - }
43964 -
43965 /* mprotect_fixup is overkill to remove the temporary stack flags */
43966 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43967
43968 @@ -805,7 +826,7 @@ int kernel_read(struct file *file, loff_t offset,
43969 old_fs = get_fs();
43970 set_fs(get_ds());
43971 /* The cast to a user pointer is valid due to the set_fs() */
43972 - result = vfs_read(file, (void __user *)addr, count, &pos);
43973 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43974 set_fs(old_fs);
43975 return result;
43976 }
43977 @@ -1251,7 +1272,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
43978 }
43979 rcu_read_unlock();
43980
43981 - if (p->fs->users > n_fs) {
43982 + if (atomic_read(&p->fs->users) > n_fs) {
43983 bprm->unsafe |= LSM_UNSAFE_SHARE;
43984 } else {
43985 res = -EAGAIN;
43986 @@ -1454,6 +1475,11 @@ static int do_execve_common(const char *filename,
43987 struct user_arg_ptr envp,
43988 struct pt_regs *regs)
43989 {
43990 +#ifdef CONFIG_GRKERNSEC
43991 + struct file *old_exec_file;
43992 + struct acl_subject_label *old_acl;
43993 + struct rlimit old_rlim[RLIM_NLIMITS];
43994 +#endif
43995 struct linux_binprm *bprm;
43996 struct file *file;
43997 struct files_struct *displaced;
43998 @@ -1461,6 +1487,8 @@ static int do_execve_common(const char *filename,
43999 int retval;
44000 const struct cred *cred = current_cred();
44001
44002 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44003 +
44004 /*
44005 * We move the actual failure in case of RLIMIT_NPROC excess from
44006 * set*uid() to execve() because too many poorly written programs
44007 @@ -1507,6 +1535,16 @@ static int do_execve_common(const char *filename,
44008 bprm->filename = filename;
44009 bprm->interp = filename;
44010
44011 + if (gr_process_user_ban()) {
44012 + retval = -EPERM;
44013 + goto out_file;
44014 + }
44015 +
44016 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44017 + retval = -EACCES;
44018 + goto out_file;
44019 + }
44020 +
44021 retval = bprm_mm_init(bprm);
44022 if (retval)
44023 goto out_file;
44024 @@ -1536,9 +1574,40 @@ static int do_execve_common(const char *filename,
44025 if (retval < 0)
44026 goto out;
44027
44028 + if (!gr_tpe_allow(file)) {
44029 + retval = -EACCES;
44030 + goto out;
44031 + }
44032 +
44033 + if (gr_check_crash_exec(file)) {
44034 + retval = -EACCES;
44035 + goto out;
44036 + }
44037 +
44038 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44039 +
44040 + gr_handle_exec_args(bprm, argv);
44041 +
44042 +#ifdef CONFIG_GRKERNSEC
44043 + old_acl = current->acl;
44044 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44045 + old_exec_file = current->exec_file;
44046 + get_file(file);
44047 + current->exec_file = file;
44048 +#endif
44049 +
44050 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44051 + bprm->unsafe & LSM_UNSAFE_SHARE);
44052 + if (retval < 0)
44053 + goto out_fail;
44054 +
44055 retval = search_binary_handler(bprm,regs);
44056 if (retval < 0)
44057 - goto out;
44058 + goto out_fail;
44059 +#ifdef CONFIG_GRKERNSEC
44060 + if (old_exec_file)
44061 + fput(old_exec_file);
44062 +#endif
44063
44064 /* execve succeeded */
44065 current->fs->in_exec = 0;
44066 @@ -1549,6 +1618,14 @@ static int do_execve_common(const char *filename,
44067 put_files_struct(displaced);
44068 return retval;
44069
44070 +out_fail:
44071 +#ifdef CONFIG_GRKERNSEC
44072 + current->acl = old_acl;
44073 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44074 + fput(current->exec_file);
44075 + current->exec_file = old_exec_file;
44076 +#endif
44077 +
44078 out:
44079 if (bprm->mm) {
44080 acct_arg_size(bprm, 0);
44081 @@ -1622,7 +1699,7 @@ static int expand_corename(struct core_name *cn)
44082 {
44083 char *old_corename = cn->corename;
44084
44085 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44086 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44087 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44088
44089 if (!cn->corename) {
44090 @@ -1719,7 +1796,7 @@ static int format_corename(struct core_name *cn, long signr)
44091 int pid_in_pattern = 0;
44092 int err = 0;
44093
44094 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44095 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44096 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44097 cn->used = 0;
44098
44099 @@ -1816,6 +1893,218 @@ out:
44100 return ispipe;
44101 }
44102
44103 +int pax_check_flags(unsigned long *flags)
44104 +{
44105 + int retval = 0;
44106 +
44107 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44108 + if (*flags & MF_PAX_SEGMEXEC)
44109 + {
44110 + *flags &= ~MF_PAX_SEGMEXEC;
44111 + retval = -EINVAL;
44112 + }
44113 +#endif
44114 +
44115 + if ((*flags & MF_PAX_PAGEEXEC)
44116 +
44117 +#ifdef CONFIG_PAX_PAGEEXEC
44118 + && (*flags & MF_PAX_SEGMEXEC)
44119 +#endif
44120 +
44121 + )
44122 + {
44123 + *flags &= ~MF_PAX_PAGEEXEC;
44124 + retval = -EINVAL;
44125 + }
44126 +
44127 + if ((*flags & MF_PAX_MPROTECT)
44128 +
44129 +#ifdef CONFIG_PAX_MPROTECT
44130 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44131 +#endif
44132 +
44133 + )
44134 + {
44135 + *flags &= ~MF_PAX_MPROTECT;
44136 + retval = -EINVAL;
44137 + }
44138 +
44139 + if ((*flags & MF_PAX_EMUTRAMP)
44140 +
44141 +#ifdef CONFIG_PAX_EMUTRAMP
44142 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44143 +#endif
44144 +
44145 + )
44146 + {
44147 + *flags &= ~MF_PAX_EMUTRAMP;
44148 + retval = -EINVAL;
44149 + }
44150 +
44151 + return retval;
44152 +}
44153 +
44154 +EXPORT_SYMBOL(pax_check_flags);
44155 +
44156 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44157 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44158 +{
44159 + struct task_struct *tsk = current;
44160 + struct mm_struct *mm = current->mm;
44161 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44162 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44163 + char *path_exec = NULL;
44164 + char *path_fault = NULL;
44165 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
44166 +
44167 + if (buffer_exec && buffer_fault) {
44168 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44169 +
44170 + down_read(&mm->mmap_sem);
44171 + vma = mm->mmap;
44172 + while (vma && (!vma_exec || !vma_fault)) {
44173 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44174 + vma_exec = vma;
44175 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44176 + vma_fault = vma;
44177 + vma = vma->vm_next;
44178 + }
44179 + if (vma_exec) {
44180 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44181 + if (IS_ERR(path_exec))
44182 + path_exec = "<path too long>";
44183 + else {
44184 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44185 + if (path_exec) {
44186 + *path_exec = 0;
44187 + path_exec = buffer_exec;
44188 + } else
44189 + path_exec = "<path too long>";
44190 + }
44191 + }
44192 + if (vma_fault) {
44193 + start = vma_fault->vm_start;
44194 + end = vma_fault->vm_end;
44195 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44196 + if (vma_fault->vm_file) {
44197 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44198 + if (IS_ERR(path_fault))
44199 + path_fault = "<path too long>";
44200 + else {
44201 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44202 + if (path_fault) {
44203 + *path_fault = 0;
44204 + path_fault = buffer_fault;
44205 + } else
44206 + path_fault = "<path too long>";
44207 + }
44208 + } else
44209 + path_fault = "<anonymous mapping>";
44210 + }
44211 + up_read(&mm->mmap_sem);
44212 + }
44213 + if (tsk->signal->curr_ip)
44214 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44215 + else
44216 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44217 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44218 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44219 + task_uid(tsk), task_euid(tsk), pc, sp);
44220 + free_page((unsigned long)buffer_exec);
44221 + free_page((unsigned long)buffer_fault);
44222 + pax_report_insns(regs, pc, sp);
44223 + do_coredump(SIGKILL, SIGKILL, regs);
44224 +}
44225 +#endif
44226 +
44227 +#ifdef CONFIG_PAX_REFCOUNT
44228 +void pax_report_refcount_overflow(struct pt_regs *regs)
44229 +{
44230 + if (current->signal->curr_ip)
44231 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44232 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44233 + else
44234 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44235 + current->comm, task_pid_nr(current), current_uid(), current_euid());
44236 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44237 + show_regs(regs);
44238 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44239 +}
44240 +#endif
44241 +
44242 +#ifdef CONFIG_PAX_USERCOPY
44243 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44244 +int object_is_on_stack(const void *obj, unsigned long len)
44245 +{
44246 + const void * const stack = task_stack_page(current);
44247 + const void * const stackend = stack + THREAD_SIZE;
44248 +
44249 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44250 + const void *frame = NULL;
44251 + const void *oldframe;
44252 +#endif
44253 +
44254 + if (obj + len < obj)
44255 + return -1;
44256 +
44257 + if (obj + len <= stack || stackend <= obj)
44258 + return 0;
44259 +
44260 + if (obj < stack || stackend < obj + len)
44261 + return -1;
44262 +
44263 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44264 + oldframe = __builtin_frame_address(1);
44265 + if (oldframe)
44266 + frame = __builtin_frame_address(2);
44267 + /*
44268 + low ----------------------------------------------> high
44269 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
44270 + ^----------------^
44271 + allow copies only within here
44272 + */
44273 + while (stack <= frame && frame < stackend) {
44274 + /* if obj + len extends past the last frame, this
44275 + check won't pass and the next frame will be 0,
44276 + causing us to bail out and correctly report
44277 + the copy as invalid
44278 + */
44279 + if (obj + len <= frame)
44280 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44281 + oldframe = frame;
44282 + frame = *(const void * const *)frame;
44283 + }
44284 + return -1;
44285 +#else
44286 + return 1;
44287 +#endif
44288 +}
44289 +
44290 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44291 +{
44292 + if (current->signal->curr_ip)
44293 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44294 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44295 + else
44296 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44297 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44298 + dump_stack();
44299 + gr_handle_kernel_exploit();
44300 + do_group_exit(SIGKILL);
44301 +}
44302 +#endif
44303 +
44304 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44305 +void pax_track_stack(void)
44306 +{
44307 + unsigned long sp = (unsigned long)&sp;
44308 + if (sp < current_thread_info()->lowest_stack &&
44309 + sp > (unsigned long)task_stack_page(current))
44310 + current_thread_info()->lowest_stack = sp;
44311 +}
44312 +EXPORT_SYMBOL(pax_track_stack);
44313 +#endif
44314 +
44315 static int zap_process(struct task_struct *start, int exit_code)
44316 {
44317 struct task_struct *t;
44318 @@ -2027,17 +2316,17 @@ static void wait_for_dump_helpers(struct file *file)
44319 pipe = file->f_path.dentry->d_inode->i_pipe;
44320
44321 pipe_lock(pipe);
44322 - pipe->readers++;
44323 - pipe->writers--;
44324 + atomic_inc(&pipe->readers);
44325 + atomic_dec(&pipe->writers);
44326
44327 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44328 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44329 wake_up_interruptible_sync(&pipe->wait);
44330 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44331 pipe_wait(pipe);
44332 }
44333
44334 - pipe->readers--;
44335 - pipe->writers++;
44336 + atomic_dec(&pipe->readers);
44337 + atomic_inc(&pipe->writers);
44338 pipe_unlock(pipe);
44339
44340 }
44341 @@ -2098,7 +2387,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44342 int retval = 0;
44343 int flag = 0;
44344 int ispipe;
44345 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44346 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44347 struct coredump_params cprm = {
44348 .signr = signr,
44349 .regs = regs,
44350 @@ -2113,6 +2402,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44351
44352 audit_core_dumps(signr);
44353
44354 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44355 + gr_handle_brute_attach(current, cprm.mm_flags);
44356 +
44357 binfmt = mm->binfmt;
44358 if (!binfmt || !binfmt->core_dump)
44359 goto fail;
44360 @@ -2180,7 +2472,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44361 }
44362 cprm.limit = RLIM_INFINITY;
44363
44364 - dump_count = atomic_inc_return(&core_dump_count);
44365 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44366 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44367 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44368 task_tgid_vnr(current), current->comm);
44369 @@ -2207,6 +2499,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44370 } else {
44371 struct inode *inode;
44372
44373 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44374 +
44375 if (cprm.limit < binfmt->min_coredump)
44376 goto fail_unlock;
44377
44378 @@ -2250,7 +2544,7 @@ close_fail:
44379 filp_close(cprm.file, NULL);
44380 fail_dropcount:
44381 if (ispipe)
44382 - atomic_dec(&core_dump_count);
44383 + atomic_dec_unchecked(&core_dump_count);
44384 fail_unlock:
44385 kfree(cn.corename);
44386 fail_corename:
44387 @@ -2269,7 +2563,7 @@ fail:
44388 */
44389 int dump_write(struct file *file, const void *addr, int nr)
44390 {
44391 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44392 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44393 }
44394 EXPORT_SYMBOL(dump_write);
44395
44396 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44397 index 8f44cef..cb07120 100644
44398 --- a/fs/ext2/balloc.c
44399 +++ b/fs/ext2/balloc.c
44400 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44401
44402 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44403 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44404 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44405 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44406 sbi->s_resuid != current_fsuid() &&
44407 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44408 return 0;
44409 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44410 index 6386d76..0a266b1 100644
44411 --- a/fs/ext3/balloc.c
44412 +++ b/fs/ext3/balloc.c
44413 @@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
44414
44415 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44416 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44417 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44418 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44419 sbi->s_resuid != current_fsuid() &&
44420 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44421 return 0;
44422 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44423 index f8224ad..fbef97c 100644
44424 --- a/fs/ext4/balloc.c
44425 +++ b/fs/ext4/balloc.c
44426 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
44427 /* Hm, nope. Are (enough) root reserved blocks available? */
44428 if (sbi->s_resuid == current_fsuid() ||
44429 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44430 - capable(CAP_SYS_RESOURCE) ||
44431 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44432 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44433 + capable_nolog(CAP_SYS_RESOURCE)) {
44434
44435 if (free_blocks >= (nblocks + dirty_blocks))
44436 return 1;
44437 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44438 index 5c38120..2291d18 100644
44439 --- a/fs/ext4/ext4.h
44440 +++ b/fs/ext4/ext4.h
44441 @@ -1180,19 +1180,19 @@ struct ext4_sb_info {
44442 unsigned long s_mb_last_start;
44443
44444 /* stats for buddy allocator */
44445 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44446 - atomic_t s_bal_success; /* we found long enough chunks */
44447 - atomic_t s_bal_allocated; /* in blocks */
44448 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44449 - atomic_t s_bal_goals; /* goal hits */
44450 - atomic_t s_bal_breaks; /* too long searches */
44451 - atomic_t s_bal_2orders; /* 2^order hits */
44452 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44453 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44454 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44455 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44456 + atomic_unchecked_t s_bal_goals; /* goal hits */
44457 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44458 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44459 spinlock_t s_bal_lock;
44460 unsigned long s_mb_buddies_generated;
44461 unsigned long long s_mb_generation_time;
44462 - atomic_t s_mb_lost_chunks;
44463 - atomic_t s_mb_preallocated;
44464 - atomic_t s_mb_discarded;
44465 + atomic_unchecked_t s_mb_lost_chunks;
44466 + atomic_unchecked_t s_mb_preallocated;
44467 + atomic_unchecked_t s_mb_discarded;
44468 atomic_t s_lock_busy;
44469
44470 /* locality groups */
44471 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
44472 index e4095e9..1c006c5 100644
44473 --- a/fs/ext4/file.c
44474 +++ b/fs/ext4/file.c
44475 @@ -181,8 +181,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
44476 path.dentry = mnt->mnt_root;
44477 cp = d_path(&path, buf, sizeof(buf));
44478 if (!IS_ERR(cp)) {
44479 - memcpy(sbi->s_es->s_last_mounted, cp,
44480 - sizeof(sbi->s_es->s_last_mounted));
44481 + strlcpy(sbi->s_es->s_last_mounted, cp,
44482 + sizeof(sbi->s_es->s_last_mounted));
44483 ext4_mark_super_dirty(sb);
44484 }
44485 }
44486 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44487 index f18bfe3..43759b1 100644
44488 --- a/fs/ext4/ioctl.c
44489 +++ b/fs/ext4/ioctl.c
44490 @@ -348,7 +348,7 @@ mext_out:
44491 if (!blk_queue_discard(q))
44492 return -EOPNOTSUPP;
44493
44494 - if (copy_from_user(&range, (struct fstrim_range *)arg,
44495 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
44496 sizeof(range)))
44497 return -EFAULT;
44498
44499 @@ -358,7 +358,7 @@ mext_out:
44500 if (ret < 0)
44501 return ret;
44502
44503 - if (copy_to_user((struct fstrim_range *)arg, &range,
44504 + if (copy_to_user((struct fstrim_range __user *)arg, &range,
44505 sizeof(range)))
44506 return -EFAULT;
44507
44508 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44509 index 17a5a57..b6be3c5 100644
44510 --- a/fs/ext4/mballoc.c
44511 +++ b/fs/ext4/mballoc.c
44512 @@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44513 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44514
44515 if (EXT4_SB(sb)->s_mb_stats)
44516 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44517 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44518
44519 break;
44520 }
44521 @@ -2089,7 +2089,7 @@ repeat:
44522 ac->ac_status = AC_STATUS_CONTINUE;
44523 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44524 cr = 3;
44525 - atomic_inc(&sbi->s_mb_lost_chunks);
44526 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44527 goto repeat;
44528 }
44529 }
44530 @@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
44531 ext4_grpblk_t counters[16];
44532 } sg;
44533
44534 + pax_track_stack();
44535 +
44536 group--;
44537 if (group == 0)
44538 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
44539 @@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *sb)
44540 if (sbi->s_mb_stats) {
44541 ext4_msg(sb, KERN_INFO,
44542 "mballoc: %u blocks %u reqs (%u success)",
44543 - atomic_read(&sbi->s_bal_allocated),
44544 - atomic_read(&sbi->s_bal_reqs),
44545 - atomic_read(&sbi->s_bal_success));
44546 + atomic_read_unchecked(&sbi->s_bal_allocated),
44547 + atomic_read_unchecked(&sbi->s_bal_reqs),
44548 + atomic_read_unchecked(&sbi->s_bal_success));
44549 ext4_msg(sb, KERN_INFO,
44550 "mballoc: %u extents scanned, %u goal hits, "
44551 "%u 2^N hits, %u breaks, %u lost",
44552 - atomic_read(&sbi->s_bal_ex_scanned),
44553 - atomic_read(&sbi->s_bal_goals),
44554 - atomic_read(&sbi->s_bal_2orders),
44555 - atomic_read(&sbi->s_bal_breaks),
44556 - atomic_read(&sbi->s_mb_lost_chunks));
44557 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44558 + atomic_read_unchecked(&sbi->s_bal_goals),
44559 + atomic_read_unchecked(&sbi->s_bal_2orders),
44560 + atomic_read_unchecked(&sbi->s_bal_breaks),
44561 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44562 ext4_msg(sb, KERN_INFO,
44563 "mballoc: %lu generated and it took %Lu",
44564 sbi->s_mb_buddies_generated,
44565 sbi->s_mb_generation_time);
44566 ext4_msg(sb, KERN_INFO,
44567 "mballoc: %u preallocated, %u discarded",
44568 - atomic_read(&sbi->s_mb_preallocated),
44569 - atomic_read(&sbi->s_mb_discarded));
44570 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44571 + atomic_read_unchecked(&sbi->s_mb_discarded));
44572 }
44573
44574 free_percpu(sbi->s_locality_groups);
44575 @@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44576 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44577
44578 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44579 - atomic_inc(&sbi->s_bal_reqs);
44580 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44581 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44582 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44583 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44584 - atomic_inc(&sbi->s_bal_success);
44585 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44586 + atomic_inc_unchecked(&sbi->s_bal_success);
44587 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44588 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44589 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44590 - atomic_inc(&sbi->s_bal_goals);
44591 + atomic_inc_unchecked(&sbi->s_bal_goals);
44592 if (ac->ac_found > sbi->s_mb_max_to_scan)
44593 - atomic_inc(&sbi->s_bal_breaks);
44594 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44595 }
44596
44597 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44598 @@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44599 trace_ext4_mb_new_inode_pa(ac, pa);
44600
44601 ext4_mb_use_inode_pa(ac, pa);
44602 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44603 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44604
44605 ei = EXT4_I(ac->ac_inode);
44606 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44607 @@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44608 trace_ext4_mb_new_group_pa(ac, pa);
44609
44610 ext4_mb_use_group_pa(ac, pa);
44611 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44612 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44613
44614 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44615 lg = ac->ac_lg;
44616 @@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44617 * from the bitmap and continue.
44618 */
44619 }
44620 - atomic_add(free, &sbi->s_mb_discarded);
44621 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44622
44623 return err;
44624 }
44625 @@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44626 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44627 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44628 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44629 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44630 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44631 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44632
44633 return 0;
44634 diff --git a/fs/fcntl.c b/fs/fcntl.c
44635 index 22764c7..86372c9 100644
44636 --- a/fs/fcntl.c
44637 +++ b/fs/fcntl.c
44638 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44639 if (err)
44640 return err;
44641
44642 + if (gr_handle_chroot_fowner(pid, type))
44643 + return -ENOENT;
44644 + if (gr_check_protected_task_fowner(pid, type))
44645 + return -EACCES;
44646 +
44647 f_modown(filp, pid, type, force);
44648 return 0;
44649 }
44650 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44651
44652 static int f_setown_ex(struct file *filp, unsigned long arg)
44653 {
44654 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44655 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44656 struct f_owner_ex owner;
44657 struct pid *pid;
44658 int type;
44659 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44660
44661 static int f_getown_ex(struct file *filp, unsigned long arg)
44662 {
44663 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44664 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44665 struct f_owner_ex owner;
44666 int ret = 0;
44667
44668 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44669 switch (cmd) {
44670 case F_DUPFD:
44671 case F_DUPFD_CLOEXEC:
44672 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44673 if (arg >= rlimit(RLIMIT_NOFILE))
44674 break;
44675 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44676 diff --git a/fs/fifo.c b/fs/fifo.c
44677 index b1a524d..4ee270e 100644
44678 --- a/fs/fifo.c
44679 +++ b/fs/fifo.c
44680 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44681 */
44682 filp->f_op = &read_pipefifo_fops;
44683 pipe->r_counter++;
44684 - if (pipe->readers++ == 0)
44685 + if (atomic_inc_return(&pipe->readers) == 1)
44686 wake_up_partner(inode);
44687
44688 - if (!pipe->writers) {
44689 + if (!atomic_read(&pipe->writers)) {
44690 if ((filp->f_flags & O_NONBLOCK)) {
44691 /* suppress POLLHUP until we have
44692 * seen a writer */
44693 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44694 * errno=ENXIO when there is no process reading the FIFO.
44695 */
44696 ret = -ENXIO;
44697 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44698 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44699 goto err;
44700
44701 filp->f_op = &write_pipefifo_fops;
44702 pipe->w_counter++;
44703 - if (!pipe->writers++)
44704 + if (atomic_inc_return(&pipe->writers) == 1)
44705 wake_up_partner(inode);
44706
44707 - if (!pipe->readers) {
44708 + if (!atomic_read(&pipe->readers)) {
44709 wait_for_partner(inode, &pipe->r_counter);
44710 if (signal_pending(current))
44711 goto err_wr;
44712 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44713 */
44714 filp->f_op = &rdwr_pipefifo_fops;
44715
44716 - pipe->readers++;
44717 - pipe->writers++;
44718 + atomic_inc(&pipe->readers);
44719 + atomic_inc(&pipe->writers);
44720 pipe->r_counter++;
44721 pipe->w_counter++;
44722 - if (pipe->readers == 1 || pipe->writers == 1)
44723 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44724 wake_up_partner(inode);
44725 break;
44726
44727 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44728 return 0;
44729
44730 err_rd:
44731 - if (!--pipe->readers)
44732 + if (atomic_dec_and_test(&pipe->readers))
44733 wake_up_interruptible(&pipe->wait);
44734 ret = -ERESTARTSYS;
44735 goto err;
44736
44737 err_wr:
44738 - if (!--pipe->writers)
44739 + if (atomic_dec_and_test(&pipe->writers))
44740 wake_up_interruptible(&pipe->wait);
44741 ret = -ERESTARTSYS;
44742 goto err;
44743
44744 err:
44745 - if (!pipe->readers && !pipe->writers)
44746 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44747 free_pipe_info(inode);
44748
44749 err_nocleanup:
44750 diff --git a/fs/file.c b/fs/file.c
44751 index 4c6992d..104cdea 100644
44752 --- a/fs/file.c
44753 +++ b/fs/file.c
44754 @@ -15,6 +15,7 @@
44755 #include <linux/slab.h>
44756 #include <linux/vmalloc.h>
44757 #include <linux/file.h>
44758 +#include <linux/security.h>
44759 #include <linux/fdtable.h>
44760 #include <linux/bitops.h>
44761 #include <linux/interrupt.h>
44762 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44763 * N.B. For clone tasks sharing a files structure, this test
44764 * will limit the total number of files that can be opened.
44765 */
44766 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44767 if (nr >= rlimit(RLIMIT_NOFILE))
44768 return -EMFILE;
44769
44770 diff --git a/fs/filesystems.c b/fs/filesystems.c
44771 index 0845f84..7b4ebef 100644
44772 --- a/fs/filesystems.c
44773 +++ b/fs/filesystems.c
44774 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
44775 int len = dot ? dot - name : strlen(name);
44776
44777 fs = __get_fs_type(name, len);
44778 +
44779 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44780 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44781 +#else
44782 if (!fs && (request_module("%.*s", len, name) == 0))
44783 +#endif
44784 fs = __get_fs_type(name, len);
44785
44786 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44787 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44788 index 78b519c..212c0d0 100644
44789 --- a/fs/fs_struct.c
44790 +++ b/fs/fs_struct.c
44791 @@ -4,6 +4,7 @@
44792 #include <linux/path.h>
44793 #include <linux/slab.h>
44794 #include <linux/fs_struct.h>
44795 +#include <linux/grsecurity.h>
44796 #include "internal.h"
44797
44798 static inline void path_get_longterm(struct path *path)
44799 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44800 old_root = fs->root;
44801 fs->root = *path;
44802 path_get_longterm(path);
44803 + gr_set_chroot_entries(current, path);
44804 write_seqcount_end(&fs->seq);
44805 spin_unlock(&fs->lock);
44806 if (old_root.dentry)
44807 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44808 && fs->root.mnt == old_root->mnt) {
44809 path_get_longterm(new_root);
44810 fs->root = *new_root;
44811 + gr_set_chroot_entries(p, new_root);
44812 count++;
44813 }
44814 if (fs->pwd.dentry == old_root->dentry
44815 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44816 spin_lock(&fs->lock);
44817 write_seqcount_begin(&fs->seq);
44818 tsk->fs = NULL;
44819 - kill = !--fs->users;
44820 + gr_clear_chroot_entries(tsk);
44821 + kill = !atomic_dec_return(&fs->users);
44822 write_seqcount_end(&fs->seq);
44823 spin_unlock(&fs->lock);
44824 task_unlock(tsk);
44825 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44826 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44827 /* We don't need to lock fs - think why ;-) */
44828 if (fs) {
44829 - fs->users = 1;
44830 + atomic_set(&fs->users, 1);
44831 fs->in_exec = 0;
44832 spin_lock_init(&fs->lock);
44833 seqcount_init(&fs->seq);
44834 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44835 spin_lock(&old->lock);
44836 fs->root = old->root;
44837 path_get_longterm(&fs->root);
44838 + /* instead of calling gr_set_chroot_entries here,
44839 + we call it from every caller of this function
44840 + */
44841 fs->pwd = old->pwd;
44842 path_get_longterm(&fs->pwd);
44843 spin_unlock(&old->lock);
44844 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44845
44846 task_lock(current);
44847 spin_lock(&fs->lock);
44848 - kill = !--fs->users;
44849 + kill = !atomic_dec_return(&fs->users);
44850 current->fs = new_fs;
44851 + gr_set_chroot_entries(current, &new_fs->root);
44852 spin_unlock(&fs->lock);
44853 task_unlock(current);
44854
44855 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
44856
44857 /* to be mentioned only in INIT_TASK */
44858 struct fs_struct init_fs = {
44859 - .users = 1,
44860 + .users = ATOMIC_INIT(1),
44861 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44862 .seq = SEQCNT_ZERO,
44863 .umask = 0022,
44864 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44865 task_lock(current);
44866
44867 spin_lock(&init_fs.lock);
44868 - init_fs.users++;
44869 + atomic_inc(&init_fs.users);
44870 spin_unlock(&init_fs.lock);
44871
44872 spin_lock(&fs->lock);
44873 current->fs = &init_fs;
44874 - kill = !--fs->users;
44875 + gr_set_chroot_entries(current, &current->fs->root);
44876 + kill = !atomic_dec_return(&fs->users);
44877 spin_unlock(&fs->lock);
44878
44879 task_unlock(current);
44880 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44881 index 9905350..02eaec4 100644
44882 --- a/fs/fscache/cookie.c
44883 +++ b/fs/fscache/cookie.c
44884 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44885 parent ? (char *) parent->def->name : "<no-parent>",
44886 def->name, netfs_data);
44887
44888 - fscache_stat(&fscache_n_acquires);
44889 + fscache_stat_unchecked(&fscache_n_acquires);
44890
44891 /* if there's no parent cookie, then we don't create one here either */
44892 if (!parent) {
44893 - fscache_stat(&fscache_n_acquires_null);
44894 + fscache_stat_unchecked(&fscache_n_acquires_null);
44895 _leave(" [no parent]");
44896 return NULL;
44897 }
44898 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44899 /* allocate and initialise a cookie */
44900 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44901 if (!cookie) {
44902 - fscache_stat(&fscache_n_acquires_oom);
44903 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44904 _leave(" [ENOMEM]");
44905 return NULL;
44906 }
44907 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44908
44909 switch (cookie->def->type) {
44910 case FSCACHE_COOKIE_TYPE_INDEX:
44911 - fscache_stat(&fscache_n_cookie_index);
44912 + fscache_stat_unchecked(&fscache_n_cookie_index);
44913 break;
44914 case FSCACHE_COOKIE_TYPE_DATAFILE:
44915 - fscache_stat(&fscache_n_cookie_data);
44916 + fscache_stat_unchecked(&fscache_n_cookie_data);
44917 break;
44918 default:
44919 - fscache_stat(&fscache_n_cookie_special);
44920 + fscache_stat_unchecked(&fscache_n_cookie_special);
44921 break;
44922 }
44923
44924 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44925 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44926 atomic_dec(&parent->n_children);
44927 __fscache_cookie_put(cookie);
44928 - fscache_stat(&fscache_n_acquires_nobufs);
44929 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44930 _leave(" = NULL");
44931 return NULL;
44932 }
44933 }
44934
44935 - fscache_stat(&fscache_n_acquires_ok);
44936 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44937 _leave(" = %p", cookie);
44938 return cookie;
44939 }
44940 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44941 cache = fscache_select_cache_for_object(cookie->parent);
44942 if (!cache) {
44943 up_read(&fscache_addremove_sem);
44944 - fscache_stat(&fscache_n_acquires_no_cache);
44945 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44946 _leave(" = -ENOMEDIUM [no cache]");
44947 return -ENOMEDIUM;
44948 }
44949 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44950 object = cache->ops->alloc_object(cache, cookie);
44951 fscache_stat_d(&fscache_n_cop_alloc_object);
44952 if (IS_ERR(object)) {
44953 - fscache_stat(&fscache_n_object_no_alloc);
44954 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44955 ret = PTR_ERR(object);
44956 goto error;
44957 }
44958
44959 - fscache_stat(&fscache_n_object_alloc);
44960 + fscache_stat_unchecked(&fscache_n_object_alloc);
44961
44962 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44963
44964 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44965 struct fscache_object *object;
44966 struct hlist_node *_p;
44967
44968 - fscache_stat(&fscache_n_updates);
44969 + fscache_stat_unchecked(&fscache_n_updates);
44970
44971 if (!cookie) {
44972 - fscache_stat(&fscache_n_updates_null);
44973 + fscache_stat_unchecked(&fscache_n_updates_null);
44974 _leave(" [no cookie]");
44975 return;
44976 }
44977 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44978 struct fscache_object *object;
44979 unsigned long event;
44980
44981 - fscache_stat(&fscache_n_relinquishes);
44982 + fscache_stat_unchecked(&fscache_n_relinquishes);
44983 if (retire)
44984 - fscache_stat(&fscache_n_relinquishes_retire);
44985 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44986
44987 if (!cookie) {
44988 - fscache_stat(&fscache_n_relinquishes_null);
44989 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44990 _leave(" [no cookie]");
44991 return;
44992 }
44993 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44994
44995 /* wait for the cookie to finish being instantiated (or to fail) */
44996 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44997 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44998 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44999 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45000 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45001 }
45002 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45003 index f6aad48..88dcf26 100644
45004 --- a/fs/fscache/internal.h
45005 +++ b/fs/fscache/internal.h
45006 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45007 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45008 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45009
45010 -extern atomic_t fscache_n_op_pend;
45011 -extern atomic_t fscache_n_op_run;
45012 -extern atomic_t fscache_n_op_enqueue;
45013 -extern atomic_t fscache_n_op_deferred_release;
45014 -extern atomic_t fscache_n_op_release;
45015 -extern atomic_t fscache_n_op_gc;
45016 -extern atomic_t fscache_n_op_cancelled;
45017 -extern atomic_t fscache_n_op_rejected;
45018 +extern atomic_unchecked_t fscache_n_op_pend;
45019 +extern atomic_unchecked_t fscache_n_op_run;
45020 +extern atomic_unchecked_t fscache_n_op_enqueue;
45021 +extern atomic_unchecked_t fscache_n_op_deferred_release;
45022 +extern atomic_unchecked_t fscache_n_op_release;
45023 +extern atomic_unchecked_t fscache_n_op_gc;
45024 +extern atomic_unchecked_t fscache_n_op_cancelled;
45025 +extern atomic_unchecked_t fscache_n_op_rejected;
45026
45027 -extern atomic_t fscache_n_attr_changed;
45028 -extern atomic_t fscache_n_attr_changed_ok;
45029 -extern atomic_t fscache_n_attr_changed_nobufs;
45030 -extern atomic_t fscache_n_attr_changed_nomem;
45031 -extern atomic_t fscache_n_attr_changed_calls;
45032 +extern atomic_unchecked_t fscache_n_attr_changed;
45033 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
45034 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45035 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45036 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
45037
45038 -extern atomic_t fscache_n_allocs;
45039 -extern atomic_t fscache_n_allocs_ok;
45040 -extern atomic_t fscache_n_allocs_wait;
45041 -extern atomic_t fscache_n_allocs_nobufs;
45042 -extern atomic_t fscache_n_allocs_intr;
45043 -extern atomic_t fscache_n_allocs_object_dead;
45044 -extern atomic_t fscache_n_alloc_ops;
45045 -extern atomic_t fscache_n_alloc_op_waits;
45046 +extern atomic_unchecked_t fscache_n_allocs;
45047 +extern atomic_unchecked_t fscache_n_allocs_ok;
45048 +extern atomic_unchecked_t fscache_n_allocs_wait;
45049 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
45050 +extern atomic_unchecked_t fscache_n_allocs_intr;
45051 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
45052 +extern atomic_unchecked_t fscache_n_alloc_ops;
45053 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
45054
45055 -extern atomic_t fscache_n_retrievals;
45056 -extern atomic_t fscache_n_retrievals_ok;
45057 -extern atomic_t fscache_n_retrievals_wait;
45058 -extern atomic_t fscache_n_retrievals_nodata;
45059 -extern atomic_t fscache_n_retrievals_nobufs;
45060 -extern atomic_t fscache_n_retrievals_intr;
45061 -extern atomic_t fscache_n_retrievals_nomem;
45062 -extern atomic_t fscache_n_retrievals_object_dead;
45063 -extern atomic_t fscache_n_retrieval_ops;
45064 -extern atomic_t fscache_n_retrieval_op_waits;
45065 +extern atomic_unchecked_t fscache_n_retrievals;
45066 +extern atomic_unchecked_t fscache_n_retrievals_ok;
45067 +extern atomic_unchecked_t fscache_n_retrievals_wait;
45068 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
45069 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45070 +extern atomic_unchecked_t fscache_n_retrievals_intr;
45071 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
45072 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45073 +extern atomic_unchecked_t fscache_n_retrieval_ops;
45074 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45075
45076 -extern atomic_t fscache_n_stores;
45077 -extern atomic_t fscache_n_stores_ok;
45078 -extern atomic_t fscache_n_stores_again;
45079 -extern atomic_t fscache_n_stores_nobufs;
45080 -extern atomic_t fscache_n_stores_oom;
45081 -extern atomic_t fscache_n_store_ops;
45082 -extern atomic_t fscache_n_store_calls;
45083 -extern atomic_t fscache_n_store_pages;
45084 -extern atomic_t fscache_n_store_radix_deletes;
45085 -extern atomic_t fscache_n_store_pages_over_limit;
45086 +extern atomic_unchecked_t fscache_n_stores;
45087 +extern atomic_unchecked_t fscache_n_stores_ok;
45088 +extern atomic_unchecked_t fscache_n_stores_again;
45089 +extern atomic_unchecked_t fscache_n_stores_nobufs;
45090 +extern atomic_unchecked_t fscache_n_stores_oom;
45091 +extern atomic_unchecked_t fscache_n_store_ops;
45092 +extern atomic_unchecked_t fscache_n_store_calls;
45093 +extern atomic_unchecked_t fscache_n_store_pages;
45094 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
45095 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45096
45097 -extern atomic_t fscache_n_store_vmscan_not_storing;
45098 -extern atomic_t fscache_n_store_vmscan_gone;
45099 -extern atomic_t fscache_n_store_vmscan_busy;
45100 -extern atomic_t fscache_n_store_vmscan_cancelled;
45101 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45102 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45103 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45104 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45105
45106 -extern atomic_t fscache_n_marks;
45107 -extern atomic_t fscache_n_uncaches;
45108 +extern atomic_unchecked_t fscache_n_marks;
45109 +extern atomic_unchecked_t fscache_n_uncaches;
45110
45111 -extern atomic_t fscache_n_acquires;
45112 -extern atomic_t fscache_n_acquires_null;
45113 -extern atomic_t fscache_n_acquires_no_cache;
45114 -extern atomic_t fscache_n_acquires_ok;
45115 -extern atomic_t fscache_n_acquires_nobufs;
45116 -extern atomic_t fscache_n_acquires_oom;
45117 +extern atomic_unchecked_t fscache_n_acquires;
45118 +extern atomic_unchecked_t fscache_n_acquires_null;
45119 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
45120 +extern atomic_unchecked_t fscache_n_acquires_ok;
45121 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
45122 +extern atomic_unchecked_t fscache_n_acquires_oom;
45123
45124 -extern atomic_t fscache_n_updates;
45125 -extern atomic_t fscache_n_updates_null;
45126 -extern atomic_t fscache_n_updates_run;
45127 +extern atomic_unchecked_t fscache_n_updates;
45128 +extern atomic_unchecked_t fscache_n_updates_null;
45129 +extern atomic_unchecked_t fscache_n_updates_run;
45130
45131 -extern atomic_t fscache_n_relinquishes;
45132 -extern atomic_t fscache_n_relinquishes_null;
45133 -extern atomic_t fscache_n_relinquishes_waitcrt;
45134 -extern atomic_t fscache_n_relinquishes_retire;
45135 +extern atomic_unchecked_t fscache_n_relinquishes;
45136 +extern atomic_unchecked_t fscache_n_relinquishes_null;
45137 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45138 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
45139
45140 -extern atomic_t fscache_n_cookie_index;
45141 -extern atomic_t fscache_n_cookie_data;
45142 -extern atomic_t fscache_n_cookie_special;
45143 +extern atomic_unchecked_t fscache_n_cookie_index;
45144 +extern atomic_unchecked_t fscache_n_cookie_data;
45145 +extern atomic_unchecked_t fscache_n_cookie_special;
45146
45147 -extern atomic_t fscache_n_object_alloc;
45148 -extern atomic_t fscache_n_object_no_alloc;
45149 -extern atomic_t fscache_n_object_lookups;
45150 -extern atomic_t fscache_n_object_lookups_negative;
45151 -extern atomic_t fscache_n_object_lookups_positive;
45152 -extern atomic_t fscache_n_object_lookups_timed_out;
45153 -extern atomic_t fscache_n_object_created;
45154 -extern atomic_t fscache_n_object_avail;
45155 -extern atomic_t fscache_n_object_dead;
45156 +extern atomic_unchecked_t fscache_n_object_alloc;
45157 +extern atomic_unchecked_t fscache_n_object_no_alloc;
45158 +extern atomic_unchecked_t fscache_n_object_lookups;
45159 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
45160 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
45161 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45162 +extern atomic_unchecked_t fscache_n_object_created;
45163 +extern atomic_unchecked_t fscache_n_object_avail;
45164 +extern atomic_unchecked_t fscache_n_object_dead;
45165
45166 -extern atomic_t fscache_n_checkaux_none;
45167 -extern atomic_t fscache_n_checkaux_okay;
45168 -extern atomic_t fscache_n_checkaux_update;
45169 -extern atomic_t fscache_n_checkaux_obsolete;
45170 +extern atomic_unchecked_t fscache_n_checkaux_none;
45171 +extern atomic_unchecked_t fscache_n_checkaux_okay;
45172 +extern atomic_unchecked_t fscache_n_checkaux_update;
45173 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45174
45175 extern atomic_t fscache_n_cop_alloc_object;
45176 extern atomic_t fscache_n_cop_lookup_object;
45177 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45178 atomic_inc(stat);
45179 }
45180
45181 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45182 +{
45183 + atomic_inc_unchecked(stat);
45184 +}
45185 +
45186 static inline void fscache_stat_d(atomic_t *stat)
45187 {
45188 atomic_dec(stat);
45189 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45190
45191 #define __fscache_stat(stat) (NULL)
45192 #define fscache_stat(stat) do {} while (0)
45193 +#define fscache_stat_unchecked(stat) do {} while (0)
45194 #define fscache_stat_d(stat) do {} while (0)
45195 #endif
45196
45197 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45198 index b6b897c..0ffff9c 100644
45199 --- a/fs/fscache/object.c
45200 +++ b/fs/fscache/object.c
45201 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45202 /* update the object metadata on disk */
45203 case FSCACHE_OBJECT_UPDATING:
45204 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45205 - fscache_stat(&fscache_n_updates_run);
45206 + fscache_stat_unchecked(&fscache_n_updates_run);
45207 fscache_stat(&fscache_n_cop_update_object);
45208 object->cache->ops->update_object(object);
45209 fscache_stat_d(&fscache_n_cop_update_object);
45210 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45211 spin_lock(&object->lock);
45212 object->state = FSCACHE_OBJECT_DEAD;
45213 spin_unlock(&object->lock);
45214 - fscache_stat(&fscache_n_object_dead);
45215 + fscache_stat_unchecked(&fscache_n_object_dead);
45216 goto terminal_transit;
45217
45218 /* handle the parent cache of this object being withdrawn from
45219 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45220 spin_lock(&object->lock);
45221 object->state = FSCACHE_OBJECT_DEAD;
45222 spin_unlock(&object->lock);
45223 - fscache_stat(&fscache_n_object_dead);
45224 + fscache_stat_unchecked(&fscache_n_object_dead);
45225 goto terminal_transit;
45226
45227 /* complain about the object being woken up once it is
45228 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45229 parent->cookie->def->name, cookie->def->name,
45230 object->cache->tag->name);
45231
45232 - fscache_stat(&fscache_n_object_lookups);
45233 + fscache_stat_unchecked(&fscache_n_object_lookups);
45234 fscache_stat(&fscache_n_cop_lookup_object);
45235 ret = object->cache->ops->lookup_object(object);
45236 fscache_stat_d(&fscache_n_cop_lookup_object);
45237 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45238 if (ret == -ETIMEDOUT) {
45239 /* probably stuck behind another object, so move this one to
45240 * the back of the queue */
45241 - fscache_stat(&fscache_n_object_lookups_timed_out);
45242 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45243 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45244 }
45245
45246 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45247
45248 spin_lock(&object->lock);
45249 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45250 - fscache_stat(&fscache_n_object_lookups_negative);
45251 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45252
45253 /* transit here to allow write requests to begin stacking up
45254 * and read requests to begin returning ENODATA */
45255 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45256 * result, in which case there may be data available */
45257 spin_lock(&object->lock);
45258 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45259 - fscache_stat(&fscache_n_object_lookups_positive);
45260 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45261
45262 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45263
45264 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45265 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45266 } else {
45267 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45268 - fscache_stat(&fscache_n_object_created);
45269 + fscache_stat_unchecked(&fscache_n_object_created);
45270
45271 object->state = FSCACHE_OBJECT_AVAILABLE;
45272 spin_unlock(&object->lock);
45273 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45274 fscache_enqueue_dependents(object);
45275
45276 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45277 - fscache_stat(&fscache_n_object_avail);
45278 + fscache_stat_unchecked(&fscache_n_object_avail);
45279
45280 _leave("");
45281 }
45282 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45283 enum fscache_checkaux result;
45284
45285 if (!object->cookie->def->check_aux) {
45286 - fscache_stat(&fscache_n_checkaux_none);
45287 + fscache_stat_unchecked(&fscache_n_checkaux_none);
45288 return FSCACHE_CHECKAUX_OKAY;
45289 }
45290
45291 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45292 switch (result) {
45293 /* entry okay as is */
45294 case FSCACHE_CHECKAUX_OKAY:
45295 - fscache_stat(&fscache_n_checkaux_okay);
45296 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45297 break;
45298
45299 /* entry requires update */
45300 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45301 - fscache_stat(&fscache_n_checkaux_update);
45302 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45303 break;
45304
45305 /* entry requires deletion */
45306 case FSCACHE_CHECKAUX_OBSOLETE:
45307 - fscache_stat(&fscache_n_checkaux_obsolete);
45308 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45309 break;
45310
45311 default:
45312 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45313 index 30afdfa..2256596 100644
45314 --- a/fs/fscache/operation.c
45315 +++ b/fs/fscache/operation.c
45316 @@ -17,7 +17,7 @@
45317 #include <linux/slab.h>
45318 #include "internal.h"
45319
45320 -atomic_t fscache_op_debug_id;
45321 +atomic_unchecked_t fscache_op_debug_id;
45322 EXPORT_SYMBOL(fscache_op_debug_id);
45323
45324 /**
45325 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45326 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45327 ASSERTCMP(atomic_read(&op->usage), >, 0);
45328
45329 - fscache_stat(&fscache_n_op_enqueue);
45330 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45331 switch (op->flags & FSCACHE_OP_TYPE) {
45332 case FSCACHE_OP_ASYNC:
45333 _debug("queue async");
45334 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45335 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45336 if (op->processor)
45337 fscache_enqueue_operation(op);
45338 - fscache_stat(&fscache_n_op_run);
45339 + fscache_stat_unchecked(&fscache_n_op_run);
45340 }
45341
45342 /*
45343 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45344 if (object->n_ops > 1) {
45345 atomic_inc(&op->usage);
45346 list_add_tail(&op->pend_link, &object->pending_ops);
45347 - fscache_stat(&fscache_n_op_pend);
45348 + fscache_stat_unchecked(&fscache_n_op_pend);
45349 } else if (!list_empty(&object->pending_ops)) {
45350 atomic_inc(&op->usage);
45351 list_add_tail(&op->pend_link, &object->pending_ops);
45352 - fscache_stat(&fscache_n_op_pend);
45353 + fscache_stat_unchecked(&fscache_n_op_pend);
45354 fscache_start_operations(object);
45355 } else {
45356 ASSERTCMP(object->n_in_progress, ==, 0);
45357 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45358 object->n_exclusive++; /* reads and writes must wait */
45359 atomic_inc(&op->usage);
45360 list_add_tail(&op->pend_link, &object->pending_ops);
45361 - fscache_stat(&fscache_n_op_pend);
45362 + fscache_stat_unchecked(&fscache_n_op_pend);
45363 ret = 0;
45364 } else {
45365 /* not allowed to submit ops in any other state */
45366 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45367 if (object->n_exclusive > 0) {
45368 atomic_inc(&op->usage);
45369 list_add_tail(&op->pend_link, &object->pending_ops);
45370 - fscache_stat(&fscache_n_op_pend);
45371 + fscache_stat_unchecked(&fscache_n_op_pend);
45372 } else if (!list_empty(&object->pending_ops)) {
45373 atomic_inc(&op->usage);
45374 list_add_tail(&op->pend_link, &object->pending_ops);
45375 - fscache_stat(&fscache_n_op_pend);
45376 + fscache_stat_unchecked(&fscache_n_op_pend);
45377 fscache_start_operations(object);
45378 } else {
45379 ASSERTCMP(object->n_exclusive, ==, 0);
45380 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45381 object->n_ops++;
45382 atomic_inc(&op->usage);
45383 list_add_tail(&op->pend_link, &object->pending_ops);
45384 - fscache_stat(&fscache_n_op_pend);
45385 + fscache_stat_unchecked(&fscache_n_op_pend);
45386 ret = 0;
45387 } else if (object->state == FSCACHE_OBJECT_DYING ||
45388 object->state == FSCACHE_OBJECT_LC_DYING ||
45389 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45390 - fscache_stat(&fscache_n_op_rejected);
45391 + fscache_stat_unchecked(&fscache_n_op_rejected);
45392 ret = -ENOBUFS;
45393 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45394 fscache_report_unexpected_submission(object, op, ostate);
45395 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45396
45397 ret = -EBUSY;
45398 if (!list_empty(&op->pend_link)) {
45399 - fscache_stat(&fscache_n_op_cancelled);
45400 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45401 list_del_init(&op->pend_link);
45402 object->n_ops--;
45403 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45404 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45405 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45406 BUG();
45407
45408 - fscache_stat(&fscache_n_op_release);
45409 + fscache_stat_unchecked(&fscache_n_op_release);
45410
45411 if (op->release) {
45412 op->release(op);
45413 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45414 * lock, and defer it otherwise */
45415 if (!spin_trylock(&object->lock)) {
45416 _debug("defer put");
45417 - fscache_stat(&fscache_n_op_deferred_release);
45418 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45419
45420 cache = object->cache;
45421 spin_lock(&cache->op_gc_list_lock);
45422 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45423
45424 _debug("GC DEFERRED REL OBJ%x OP%x",
45425 object->debug_id, op->debug_id);
45426 - fscache_stat(&fscache_n_op_gc);
45427 + fscache_stat_unchecked(&fscache_n_op_gc);
45428
45429 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45430
45431 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45432 index 3f7a59b..cf196cc 100644
45433 --- a/fs/fscache/page.c
45434 +++ b/fs/fscache/page.c
45435 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45436 val = radix_tree_lookup(&cookie->stores, page->index);
45437 if (!val) {
45438 rcu_read_unlock();
45439 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45440 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45441 __fscache_uncache_page(cookie, page);
45442 return true;
45443 }
45444 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45445 spin_unlock(&cookie->stores_lock);
45446
45447 if (xpage) {
45448 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45449 - fscache_stat(&fscache_n_store_radix_deletes);
45450 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45451 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45452 ASSERTCMP(xpage, ==, page);
45453 } else {
45454 - fscache_stat(&fscache_n_store_vmscan_gone);
45455 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45456 }
45457
45458 wake_up_bit(&cookie->flags, 0);
45459 @@ -107,7 +107,7 @@ page_busy:
45460 /* we might want to wait here, but that could deadlock the allocator as
45461 * the work threads writing to the cache may all end up sleeping
45462 * on memory allocation */
45463 - fscache_stat(&fscache_n_store_vmscan_busy);
45464 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45465 return false;
45466 }
45467 EXPORT_SYMBOL(__fscache_maybe_release_page);
45468 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45469 FSCACHE_COOKIE_STORING_TAG);
45470 if (!radix_tree_tag_get(&cookie->stores, page->index,
45471 FSCACHE_COOKIE_PENDING_TAG)) {
45472 - fscache_stat(&fscache_n_store_radix_deletes);
45473 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45474 xpage = radix_tree_delete(&cookie->stores, page->index);
45475 }
45476 spin_unlock(&cookie->stores_lock);
45477 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45478
45479 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45480
45481 - fscache_stat(&fscache_n_attr_changed_calls);
45482 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45483
45484 if (fscache_object_is_active(object)) {
45485 fscache_stat(&fscache_n_cop_attr_changed);
45486 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45487
45488 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45489
45490 - fscache_stat(&fscache_n_attr_changed);
45491 + fscache_stat_unchecked(&fscache_n_attr_changed);
45492
45493 op = kzalloc(sizeof(*op), GFP_KERNEL);
45494 if (!op) {
45495 - fscache_stat(&fscache_n_attr_changed_nomem);
45496 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45497 _leave(" = -ENOMEM");
45498 return -ENOMEM;
45499 }
45500 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45501 if (fscache_submit_exclusive_op(object, op) < 0)
45502 goto nobufs;
45503 spin_unlock(&cookie->lock);
45504 - fscache_stat(&fscache_n_attr_changed_ok);
45505 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45506 fscache_put_operation(op);
45507 _leave(" = 0");
45508 return 0;
45509 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45510 nobufs:
45511 spin_unlock(&cookie->lock);
45512 kfree(op);
45513 - fscache_stat(&fscache_n_attr_changed_nobufs);
45514 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45515 _leave(" = %d", -ENOBUFS);
45516 return -ENOBUFS;
45517 }
45518 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45519 /* allocate a retrieval operation and attempt to submit it */
45520 op = kzalloc(sizeof(*op), GFP_NOIO);
45521 if (!op) {
45522 - fscache_stat(&fscache_n_retrievals_nomem);
45523 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45524 return NULL;
45525 }
45526
45527 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45528 return 0;
45529 }
45530
45531 - fscache_stat(&fscache_n_retrievals_wait);
45532 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45533
45534 jif = jiffies;
45535 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45536 fscache_wait_bit_interruptible,
45537 TASK_INTERRUPTIBLE) != 0) {
45538 - fscache_stat(&fscache_n_retrievals_intr);
45539 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45540 _leave(" = -ERESTARTSYS");
45541 return -ERESTARTSYS;
45542 }
45543 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45544 */
45545 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45546 struct fscache_retrieval *op,
45547 - atomic_t *stat_op_waits,
45548 - atomic_t *stat_object_dead)
45549 + atomic_unchecked_t *stat_op_waits,
45550 + atomic_unchecked_t *stat_object_dead)
45551 {
45552 int ret;
45553
45554 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45555 goto check_if_dead;
45556
45557 _debug(">>> WT");
45558 - fscache_stat(stat_op_waits);
45559 + fscache_stat_unchecked(stat_op_waits);
45560 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45561 fscache_wait_bit_interruptible,
45562 TASK_INTERRUPTIBLE) < 0) {
45563 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45564
45565 check_if_dead:
45566 if (unlikely(fscache_object_is_dead(object))) {
45567 - fscache_stat(stat_object_dead);
45568 + fscache_stat_unchecked(stat_object_dead);
45569 return -ENOBUFS;
45570 }
45571 return 0;
45572 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45573
45574 _enter("%p,%p,,,", cookie, page);
45575
45576 - fscache_stat(&fscache_n_retrievals);
45577 + fscache_stat_unchecked(&fscache_n_retrievals);
45578
45579 if (hlist_empty(&cookie->backing_objects))
45580 goto nobufs;
45581 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45582 goto nobufs_unlock;
45583 spin_unlock(&cookie->lock);
45584
45585 - fscache_stat(&fscache_n_retrieval_ops);
45586 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45587
45588 /* pin the netfs read context in case we need to do the actual netfs
45589 * read because we've encountered a cache read failure */
45590 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45591
45592 error:
45593 if (ret == -ENOMEM)
45594 - fscache_stat(&fscache_n_retrievals_nomem);
45595 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45596 else if (ret == -ERESTARTSYS)
45597 - fscache_stat(&fscache_n_retrievals_intr);
45598 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45599 else if (ret == -ENODATA)
45600 - fscache_stat(&fscache_n_retrievals_nodata);
45601 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45602 else if (ret < 0)
45603 - fscache_stat(&fscache_n_retrievals_nobufs);
45604 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45605 else
45606 - fscache_stat(&fscache_n_retrievals_ok);
45607 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45608
45609 fscache_put_retrieval(op);
45610 _leave(" = %d", ret);
45611 @@ -429,7 +429,7 @@ nobufs_unlock:
45612 spin_unlock(&cookie->lock);
45613 kfree(op);
45614 nobufs:
45615 - fscache_stat(&fscache_n_retrievals_nobufs);
45616 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45617 _leave(" = -ENOBUFS");
45618 return -ENOBUFS;
45619 }
45620 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45621
45622 _enter("%p,,%d,,,", cookie, *nr_pages);
45623
45624 - fscache_stat(&fscache_n_retrievals);
45625 + fscache_stat_unchecked(&fscache_n_retrievals);
45626
45627 if (hlist_empty(&cookie->backing_objects))
45628 goto nobufs;
45629 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45630 goto nobufs_unlock;
45631 spin_unlock(&cookie->lock);
45632
45633 - fscache_stat(&fscache_n_retrieval_ops);
45634 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45635
45636 /* pin the netfs read context in case we need to do the actual netfs
45637 * read because we've encountered a cache read failure */
45638 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45639
45640 error:
45641 if (ret == -ENOMEM)
45642 - fscache_stat(&fscache_n_retrievals_nomem);
45643 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45644 else if (ret == -ERESTARTSYS)
45645 - fscache_stat(&fscache_n_retrievals_intr);
45646 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45647 else if (ret == -ENODATA)
45648 - fscache_stat(&fscache_n_retrievals_nodata);
45649 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45650 else if (ret < 0)
45651 - fscache_stat(&fscache_n_retrievals_nobufs);
45652 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45653 else
45654 - fscache_stat(&fscache_n_retrievals_ok);
45655 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45656
45657 fscache_put_retrieval(op);
45658 _leave(" = %d", ret);
45659 @@ -545,7 +545,7 @@ nobufs_unlock:
45660 spin_unlock(&cookie->lock);
45661 kfree(op);
45662 nobufs:
45663 - fscache_stat(&fscache_n_retrievals_nobufs);
45664 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45665 _leave(" = -ENOBUFS");
45666 return -ENOBUFS;
45667 }
45668 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45669
45670 _enter("%p,%p,,,", cookie, page);
45671
45672 - fscache_stat(&fscache_n_allocs);
45673 + fscache_stat_unchecked(&fscache_n_allocs);
45674
45675 if (hlist_empty(&cookie->backing_objects))
45676 goto nobufs;
45677 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45678 goto nobufs_unlock;
45679 spin_unlock(&cookie->lock);
45680
45681 - fscache_stat(&fscache_n_alloc_ops);
45682 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45683
45684 ret = fscache_wait_for_retrieval_activation(
45685 object, op,
45686 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45687
45688 error:
45689 if (ret == -ERESTARTSYS)
45690 - fscache_stat(&fscache_n_allocs_intr);
45691 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45692 else if (ret < 0)
45693 - fscache_stat(&fscache_n_allocs_nobufs);
45694 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45695 else
45696 - fscache_stat(&fscache_n_allocs_ok);
45697 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45698
45699 fscache_put_retrieval(op);
45700 _leave(" = %d", ret);
45701 @@ -625,7 +625,7 @@ nobufs_unlock:
45702 spin_unlock(&cookie->lock);
45703 kfree(op);
45704 nobufs:
45705 - fscache_stat(&fscache_n_allocs_nobufs);
45706 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45707 _leave(" = -ENOBUFS");
45708 return -ENOBUFS;
45709 }
45710 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45711
45712 spin_lock(&cookie->stores_lock);
45713
45714 - fscache_stat(&fscache_n_store_calls);
45715 + fscache_stat_unchecked(&fscache_n_store_calls);
45716
45717 /* find a page to store */
45718 page = NULL;
45719 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45720 page = results[0];
45721 _debug("gang %d [%lx]", n, page->index);
45722 if (page->index > op->store_limit) {
45723 - fscache_stat(&fscache_n_store_pages_over_limit);
45724 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45725 goto superseded;
45726 }
45727
45728 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45729 spin_unlock(&cookie->stores_lock);
45730 spin_unlock(&object->lock);
45731
45732 - fscache_stat(&fscache_n_store_pages);
45733 + fscache_stat_unchecked(&fscache_n_store_pages);
45734 fscache_stat(&fscache_n_cop_write_page);
45735 ret = object->cache->ops->write_page(op, page);
45736 fscache_stat_d(&fscache_n_cop_write_page);
45737 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45738 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45739 ASSERT(PageFsCache(page));
45740
45741 - fscache_stat(&fscache_n_stores);
45742 + fscache_stat_unchecked(&fscache_n_stores);
45743
45744 op = kzalloc(sizeof(*op), GFP_NOIO);
45745 if (!op)
45746 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45747 spin_unlock(&cookie->stores_lock);
45748 spin_unlock(&object->lock);
45749
45750 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45751 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45752 op->store_limit = object->store_limit;
45753
45754 if (fscache_submit_op(object, &op->op) < 0)
45755 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45756
45757 spin_unlock(&cookie->lock);
45758 radix_tree_preload_end();
45759 - fscache_stat(&fscache_n_store_ops);
45760 - fscache_stat(&fscache_n_stores_ok);
45761 + fscache_stat_unchecked(&fscache_n_store_ops);
45762 + fscache_stat_unchecked(&fscache_n_stores_ok);
45763
45764 /* the work queue now carries its own ref on the object */
45765 fscache_put_operation(&op->op);
45766 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45767 return 0;
45768
45769 already_queued:
45770 - fscache_stat(&fscache_n_stores_again);
45771 + fscache_stat_unchecked(&fscache_n_stores_again);
45772 already_pending:
45773 spin_unlock(&cookie->stores_lock);
45774 spin_unlock(&object->lock);
45775 spin_unlock(&cookie->lock);
45776 radix_tree_preload_end();
45777 kfree(op);
45778 - fscache_stat(&fscache_n_stores_ok);
45779 + fscache_stat_unchecked(&fscache_n_stores_ok);
45780 _leave(" = 0");
45781 return 0;
45782
45783 @@ -851,14 +851,14 @@ nobufs:
45784 spin_unlock(&cookie->lock);
45785 radix_tree_preload_end();
45786 kfree(op);
45787 - fscache_stat(&fscache_n_stores_nobufs);
45788 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45789 _leave(" = -ENOBUFS");
45790 return -ENOBUFS;
45791
45792 nomem_free:
45793 kfree(op);
45794 nomem:
45795 - fscache_stat(&fscache_n_stores_oom);
45796 + fscache_stat_unchecked(&fscache_n_stores_oom);
45797 _leave(" = -ENOMEM");
45798 return -ENOMEM;
45799 }
45800 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45801 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45802 ASSERTCMP(page, !=, NULL);
45803
45804 - fscache_stat(&fscache_n_uncaches);
45805 + fscache_stat_unchecked(&fscache_n_uncaches);
45806
45807 /* cache withdrawal may beat us to it */
45808 if (!PageFsCache(page))
45809 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45810 unsigned long loop;
45811
45812 #ifdef CONFIG_FSCACHE_STATS
45813 - atomic_add(pagevec->nr, &fscache_n_marks);
45814 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45815 #endif
45816
45817 for (loop = 0; loop < pagevec->nr; loop++) {
45818 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45819 index 4765190..2a067f2 100644
45820 --- a/fs/fscache/stats.c
45821 +++ b/fs/fscache/stats.c
45822 @@ -18,95 +18,95 @@
45823 /*
45824 * operation counters
45825 */
45826 -atomic_t fscache_n_op_pend;
45827 -atomic_t fscache_n_op_run;
45828 -atomic_t fscache_n_op_enqueue;
45829 -atomic_t fscache_n_op_requeue;
45830 -atomic_t fscache_n_op_deferred_release;
45831 -atomic_t fscache_n_op_release;
45832 -atomic_t fscache_n_op_gc;
45833 -atomic_t fscache_n_op_cancelled;
45834 -atomic_t fscache_n_op_rejected;
45835 +atomic_unchecked_t fscache_n_op_pend;
45836 +atomic_unchecked_t fscache_n_op_run;
45837 +atomic_unchecked_t fscache_n_op_enqueue;
45838 +atomic_unchecked_t fscache_n_op_requeue;
45839 +atomic_unchecked_t fscache_n_op_deferred_release;
45840 +atomic_unchecked_t fscache_n_op_release;
45841 +atomic_unchecked_t fscache_n_op_gc;
45842 +atomic_unchecked_t fscache_n_op_cancelled;
45843 +atomic_unchecked_t fscache_n_op_rejected;
45844
45845 -atomic_t fscache_n_attr_changed;
45846 -atomic_t fscache_n_attr_changed_ok;
45847 -atomic_t fscache_n_attr_changed_nobufs;
45848 -atomic_t fscache_n_attr_changed_nomem;
45849 -atomic_t fscache_n_attr_changed_calls;
45850 +atomic_unchecked_t fscache_n_attr_changed;
45851 +atomic_unchecked_t fscache_n_attr_changed_ok;
45852 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45853 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45854 +atomic_unchecked_t fscache_n_attr_changed_calls;
45855
45856 -atomic_t fscache_n_allocs;
45857 -atomic_t fscache_n_allocs_ok;
45858 -atomic_t fscache_n_allocs_wait;
45859 -atomic_t fscache_n_allocs_nobufs;
45860 -atomic_t fscache_n_allocs_intr;
45861 -atomic_t fscache_n_allocs_object_dead;
45862 -atomic_t fscache_n_alloc_ops;
45863 -atomic_t fscache_n_alloc_op_waits;
45864 +atomic_unchecked_t fscache_n_allocs;
45865 +atomic_unchecked_t fscache_n_allocs_ok;
45866 +atomic_unchecked_t fscache_n_allocs_wait;
45867 +atomic_unchecked_t fscache_n_allocs_nobufs;
45868 +atomic_unchecked_t fscache_n_allocs_intr;
45869 +atomic_unchecked_t fscache_n_allocs_object_dead;
45870 +atomic_unchecked_t fscache_n_alloc_ops;
45871 +atomic_unchecked_t fscache_n_alloc_op_waits;
45872
45873 -atomic_t fscache_n_retrievals;
45874 -atomic_t fscache_n_retrievals_ok;
45875 -atomic_t fscache_n_retrievals_wait;
45876 -atomic_t fscache_n_retrievals_nodata;
45877 -atomic_t fscache_n_retrievals_nobufs;
45878 -atomic_t fscache_n_retrievals_intr;
45879 -atomic_t fscache_n_retrievals_nomem;
45880 -atomic_t fscache_n_retrievals_object_dead;
45881 -atomic_t fscache_n_retrieval_ops;
45882 -atomic_t fscache_n_retrieval_op_waits;
45883 +atomic_unchecked_t fscache_n_retrievals;
45884 +atomic_unchecked_t fscache_n_retrievals_ok;
45885 +atomic_unchecked_t fscache_n_retrievals_wait;
45886 +atomic_unchecked_t fscache_n_retrievals_nodata;
45887 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45888 +atomic_unchecked_t fscache_n_retrievals_intr;
45889 +atomic_unchecked_t fscache_n_retrievals_nomem;
45890 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45891 +atomic_unchecked_t fscache_n_retrieval_ops;
45892 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45893
45894 -atomic_t fscache_n_stores;
45895 -atomic_t fscache_n_stores_ok;
45896 -atomic_t fscache_n_stores_again;
45897 -atomic_t fscache_n_stores_nobufs;
45898 -atomic_t fscache_n_stores_oom;
45899 -atomic_t fscache_n_store_ops;
45900 -atomic_t fscache_n_store_calls;
45901 -atomic_t fscache_n_store_pages;
45902 -atomic_t fscache_n_store_radix_deletes;
45903 -atomic_t fscache_n_store_pages_over_limit;
45904 +atomic_unchecked_t fscache_n_stores;
45905 +atomic_unchecked_t fscache_n_stores_ok;
45906 +atomic_unchecked_t fscache_n_stores_again;
45907 +atomic_unchecked_t fscache_n_stores_nobufs;
45908 +atomic_unchecked_t fscache_n_stores_oom;
45909 +atomic_unchecked_t fscache_n_store_ops;
45910 +atomic_unchecked_t fscache_n_store_calls;
45911 +atomic_unchecked_t fscache_n_store_pages;
45912 +atomic_unchecked_t fscache_n_store_radix_deletes;
45913 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45914
45915 -atomic_t fscache_n_store_vmscan_not_storing;
45916 -atomic_t fscache_n_store_vmscan_gone;
45917 -atomic_t fscache_n_store_vmscan_busy;
45918 -atomic_t fscache_n_store_vmscan_cancelled;
45919 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45920 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45921 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45922 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45923
45924 -atomic_t fscache_n_marks;
45925 -atomic_t fscache_n_uncaches;
45926 +atomic_unchecked_t fscache_n_marks;
45927 +atomic_unchecked_t fscache_n_uncaches;
45928
45929 -atomic_t fscache_n_acquires;
45930 -atomic_t fscache_n_acquires_null;
45931 -atomic_t fscache_n_acquires_no_cache;
45932 -atomic_t fscache_n_acquires_ok;
45933 -atomic_t fscache_n_acquires_nobufs;
45934 -atomic_t fscache_n_acquires_oom;
45935 +atomic_unchecked_t fscache_n_acquires;
45936 +atomic_unchecked_t fscache_n_acquires_null;
45937 +atomic_unchecked_t fscache_n_acquires_no_cache;
45938 +atomic_unchecked_t fscache_n_acquires_ok;
45939 +atomic_unchecked_t fscache_n_acquires_nobufs;
45940 +atomic_unchecked_t fscache_n_acquires_oom;
45941
45942 -atomic_t fscache_n_updates;
45943 -atomic_t fscache_n_updates_null;
45944 -atomic_t fscache_n_updates_run;
45945 +atomic_unchecked_t fscache_n_updates;
45946 +atomic_unchecked_t fscache_n_updates_null;
45947 +atomic_unchecked_t fscache_n_updates_run;
45948
45949 -atomic_t fscache_n_relinquishes;
45950 -atomic_t fscache_n_relinquishes_null;
45951 -atomic_t fscache_n_relinquishes_waitcrt;
45952 -atomic_t fscache_n_relinquishes_retire;
45953 +atomic_unchecked_t fscache_n_relinquishes;
45954 +atomic_unchecked_t fscache_n_relinquishes_null;
45955 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45956 +atomic_unchecked_t fscache_n_relinquishes_retire;
45957
45958 -atomic_t fscache_n_cookie_index;
45959 -atomic_t fscache_n_cookie_data;
45960 -atomic_t fscache_n_cookie_special;
45961 +atomic_unchecked_t fscache_n_cookie_index;
45962 +atomic_unchecked_t fscache_n_cookie_data;
45963 +atomic_unchecked_t fscache_n_cookie_special;
45964
45965 -atomic_t fscache_n_object_alloc;
45966 -atomic_t fscache_n_object_no_alloc;
45967 -atomic_t fscache_n_object_lookups;
45968 -atomic_t fscache_n_object_lookups_negative;
45969 -atomic_t fscache_n_object_lookups_positive;
45970 -atomic_t fscache_n_object_lookups_timed_out;
45971 -atomic_t fscache_n_object_created;
45972 -atomic_t fscache_n_object_avail;
45973 -atomic_t fscache_n_object_dead;
45974 +atomic_unchecked_t fscache_n_object_alloc;
45975 +atomic_unchecked_t fscache_n_object_no_alloc;
45976 +atomic_unchecked_t fscache_n_object_lookups;
45977 +atomic_unchecked_t fscache_n_object_lookups_negative;
45978 +atomic_unchecked_t fscache_n_object_lookups_positive;
45979 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45980 +atomic_unchecked_t fscache_n_object_created;
45981 +atomic_unchecked_t fscache_n_object_avail;
45982 +atomic_unchecked_t fscache_n_object_dead;
45983
45984 -atomic_t fscache_n_checkaux_none;
45985 -atomic_t fscache_n_checkaux_okay;
45986 -atomic_t fscache_n_checkaux_update;
45987 -atomic_t fscache_n_checkaux_obsolete;
45988 +atomic_unchecked_t fscache_n_checkaux_none;
45989 +atomic_unchecked_t fscache_n_checkaux_okay;
45990 +atomic_unchecked_t fscache_n_checkaux_update;
45991 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45992
45993 atomic_t fscache_n_cop_alloc_object;
45994 atomic_t fscache_n_cop_lookup_object;
45995 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45996 seq_puts(m, "FS-Cache statistics\n");
45997
45998 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45999 - atomic_read(&fscache_n_cookie_index),
46000 - atomic_read(&fscache_n_cookie_data),
46001 - atomic_read(&fscache_n_cookie_special));
46002 + atomic_read_unchecked(&fscache_n_cookie_index),
46003 + atomic_read_unchecked(&fscache_n_cookie_data),
46004 + atomic_read_unchecked(&fscache_n_cookie_special));
46005
46006 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46007 - atomic_read(&fscache_n_object_alloc),
46008 - atomic_read(&fscache_n_object_no_alloc),
46009 - atomic_read(&fscache_n_object_avail),
46010 - atomic_read(&fscache_n_object_dead));
46011 + atomic_read_unchecked(&fscache_n_object_alloc),
46012 + atomic_read_unchecked(&fscache_n_object_no_alloc),
46013 + atomic_read_unchecked(&fscache_n_object_avail),
46014 + atomic_read_unchecked(&fscache_n_object_dead));
46015 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46016 - atomic_read(&fscache_n_checkaux_none),
46017 - atomic_read(&fscache_n_checkaux_okay),
46018 - atomic_read(&fscache_n_checkaux_update),
46019 - atomic_read(&fscache_n_checkaux_obsolete));
46020 + atomic_read_unchecked(&fscache_n_checkaux_none),
46021 + atomic_read_unchecked(&fscache_n_checkaux_okay),
46022 + atomic_read_unchecked(&fscache_n_checkaux_update),
46023 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46024
46025 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46026 - atomic_read(&fscache_n_marks),
46027 - atomic_read(&fscache_n_uncaches));
46028 + atomic_read_unchecked(&fscache_n_marks),
46029 + atomic_read_unchecked(&fscache_n_uncaches));
46030
46031 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46032 " oom=%u\n",
46033 - atomic_read(&fscache_n_acquires),
46034 - atomic_read(&fscache_n_acquires_null),
46035 - atomic_read(&fscache_n_acquires_no_cache),
46036 - atomic_read(&fscache_n_acquires_ok),
46037 - atomic_read(&fscache_n_acquires_nobufs),
46038 - atomic_read(&fscache_n_acquires_oom));
46039 + atomic_read_unchecked(&fscache_n_acquires),
46040 + atomic_read_unchecked(&fscache_n_acquires_null),
46041 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
46042 + atomic_read_unchecked(&fscache_n_acquires_ok),
46043 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
46044 + atomic_read_unchecked(&fscache_n_acquires_oom));
46045
46046 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46047 - atomic_read(&fscache_n_object_lookups),
46048 - atomic_read(&fscache_n_object_lookups_negative),
46049 - atomic_read(&fscache_n_object_lookups_positive),
46050 - atomic_read(&fscache_n_object_created),
46051 - atomic_read(&fscache_n_object_lookups_timed_out));
46052 + atomic_read_unchecked(&fscache_n_object_lookups),
46053 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
46054 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
46055 + atomic_read_unchecked(&fscache_n_object_created),
46056 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46057
46058 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46059 - atomic_read(&fscache_n_updates),
46060 - atomic_read(&fscache_n_updates_null),
46061 - atomic_read(&fscache_n_updates_run));
46062 + atomic_read_unchecked(&fscache_n_updates),
46063 + atomic_read_unchecked(&fscache_n_updates_null),
46064 + atomic_read_unchecked(&fscache_n_updates_run));
46065
46066 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46067 - atomic_read(&fscache_n_relinquishes),
46068 - atomic_read(&fscache_n_relinquishes_null),
46069 - atomic_read(&fscache_n_relinquishes_waitcrt),
46070 - atomic_read(&fscache_n_relinquishes_retire));
46071 + atomic_read_unchecked(&fscache_n_relinquishes),
46072 + atomic_read_unchecked(&fscache_n_relinquishes_null),
46073 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46074 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
46075
46076 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46077 - atomic_read(&fscache_n_attr_changed),
46078 - atomic_read(&fscache_n_attr_changed_ok),
46079 - atomic_read(&fscache_n_attr_changed_nobufs),
46080 - atomic_read(&fscache_n_attr_changed_nomem),
46081 - atomic_read(&fscache_n_attr_changed_calls));
46082 + atomic_read_unchecked(&fscache_n_attr_changed),
46083 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
46084 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46085 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46086 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
46087
46088 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46089 - atomic_read(&fscache_n_allocs),
46090 - atomic_read(&fscache_n_allocs_ok),
46091 - atomic_read(&fscache_n_allocs_wait),
46092 - atomic_read(&fscache_n_allocs_nobufs),
46093 - atomic_read(&fscache_n_allocs_intr));
46094 + atomic_read_unchecked(&fscache_n_allocs),
46095 + atomic_read_unchecked(&fscache_n_allocs_ok),
46096 + atomic_read_unchecked(&fscache_n_allocs_wait),
46097 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
46098 + atomic_read_unchecked(&fscache_n_allocs_intr));
46099 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46100 - atomic_read(&fscache_n_alloc_ops),
46101 - atomic_read(&fscache_n_alloc_op_waits),
46102 - atomic_read(&fscache_n_allocs_object_dead));
46103 + atomic_read_unchecked(&fscache_n_alloc_ops),
46104 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
46105 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
46106
46107 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46108 " int=%u oom=%u\n",
46109 - atomic_read(&fscache_n_retrievals),
46110 - atomic_read(&fscache_n_retrievals_ok),
46111 - atomic_read(&fscache_n_retrievals_wait),
46112 - atomic_read(&fscache_n_retrievals_nodata),
46113 - atomic_read(&fscache_n_retrievals_nobufs),
46114 - atomic_read(&fscache_n_retrievals_intr),
46115 - atomic_read(&fscache_n_retrievals_nomem));
46116 + atomic_read_unchecked(&fscache_n_retrievals),
46117 + atomic_read_unchecked(&fscache_n_retrievals_ok),
46118 + atomic_read_unchecked(&fscache_n_retrievals_wait),
46119 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
46120 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46121 + atomic_read_unchecked(&fscache_n_retrievals_intr),
46122 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
46123 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46124 - atomic_read(&fscache_n_retrieval_ops),
46125 - atomic_read(&fscache_n_retrieval_op_waits),
46126 - atomic_read(&fscache_n_retrievals_object_dead));
46127 + atomic_read_unchecked(&fscache_n_retrieval_ops),
46128 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46129 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46130
46131 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46132 - atomic_read(&fscache_n_stores),
46133 - atomic_read(&fscache_n_stores_ok),
46134 - atomic_read(&fscache_n_stores_again),
46135 - atomic_read(&fscache_n_stores_nobufs),
46136 - atomic_read(&fscache_n_stores_oom));
46137 + atomic_read_unchecked(&fscache_n_stores),
46138 + atomic_read_unchecked(&fscache_n_stores_ok),
46139 + atomic_read_unchecked(&fscache_n_stores_again),
46140 + atomic_read_unchecked(&fscache_n_stores_nobufs),
46141 + atomic_read_unchecked(&fscache_n_stores_oom));
46142 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46143 - atomic_read(&fscache_n_store_ops),
46144 - atomic_read(&fscache_n_store_calls),
46145 - atomic_read(&fscache_n_store_pages),
46146 - atomic_read(&fscache_n_store_radix_deletes),
46147 - atomic_read(&fscache_n_store_pages_over_limit));
46148 + atomic_read_unchecked(&fscache_n_store_ops),
46149 + atomic_read_unchecked(&fscache_n_store_calls),
46150 + atomic_read_unchecked(&fscache_n_store_pages),
46151 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
46152 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46153
46154 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46155 - atomic_read(&fscache_n_store_vmscan_not_storing),
46156 - atomic_read(&fscache_n_store_vmscan_gone),
46157 - atomic_read(&fscache_n_store_vmscan_busy),
46158 - atomic_read(&fscache_n_store_vmscan_cancelled));
46159 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46160 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46161 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46162 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46163
46164 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46165 - atomic_read(&fscache_n_op_pend),
46166 - atomic_read(&fscache_n_op_run),
46167 - atomic_read(&fscache_n_op_enqueue),
46168 - atomic_read(&fscache_n_op_cancelled),
46169 - atomic_read(&fscache_n_op_rejected));
46170 + atomic_read_unchecked(&fscache_n_op_pend),
46171 + atomic_read_unchecked(&fscache_n_op_run),
46172 + atomic_read_unchecked(&fscache_n_op_enqueue),
46173 + atomic_read_unchecked(&fscache_n_op_cancelled),
46174 + atomic_read_unchecked(&fscache_n_op_rejected));
46175 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46176 - atomic_read(&fscache_n_op_deferred_release),
46177 - atomic_read(&fscache_n_op_release),
46178 - atomic_read(&fscache_n_op_gc));
46179 + atomic_read_unchecked(&fscache_n_op_deferred_release),
46180 + atomic_read_unchecked(&fscache_n_op_release),
46181 + atomic_read_unchecked(&fscache_n_op_gc));
46182
46183 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46184 atomic_read(&fscache_n_cop_alloc_object),
46185 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46186 index b6cca47..ec782c3 100644
46187 --- a/fs/fuse/cuse.c
46188 +++ b/fs/fuse/cuse.c
46189 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
46190 INIT_LIST_HEAD(&cuse_conntbl[i]);
46191
46192 /* inherit and extend fuse_dev_operations */
46193 - cuse_channel_fops = fuse_dev_operations;
46194 - cuse_channel_fops.owner = THIS_MODULE;
46195 - cuse_channel_fops.open = cuse_channel_open;
46196 - cuse_channel_fops.release = cuse_channel_release;
46197 + pax_open_kernel();
46198 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46199 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46200 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
46201 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
46202 + pax_close_kernel();
46203
46204 cuse_class = class_create(THIS_MODULE, "cuse");
46205 if (IS_ERR(cuse_class))
46206 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46207 index 2aaf3ea..8e50863 100644
46208 --- a/fs/fuse/dev.c
46209 +++ b/fs/fuse/dev.c
46210 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46211 ret = 0;
46212 pipe_lock(pipe);
46213
46214 - if (!pipe->readers) {
46215 + if (!atomic_read(&pipe->readers)) {
46216 send_sig(SIGPIPE, current, 0);
46217 if (!ret)
46218 ret = -EPIPE;
46219 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46220 index 9f63e49..d8a64c0 100644
46221 --- a/fs/fuse/dir.c
46222 +++ b/fs/fuse/dir.c
46223 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
46224 return link;
46225 }
46226
46227 -static void free_link(char *link)
46228 +static void free_link(const char *link)
46229 {
46230 if (!IS_ERR(link))
46231 free_page((unsigned long) link);
46232 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46233 index 900cf98..3896726 100644
46234 --- a/fs/gfs2/inode.c
46235 +++ b/fs/gfs2/inode.c
46236 @@ -1517,7 +1517,7 @@ out:
46237
46238 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46239 {
46240 - char *s = nd_get_link(nd);
46241 + const char *s = nd_get_link(nd);
46242 if (!IS_ERR(s))
46243 kfree(s);
46244 }
46245 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
46246 index 4dfbfec..947c9c2 100644
46247 --- a/fs/hfsplus/catalog.c
46248 +++ b/fs/hfsplus/catalog.c
46249 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
46250 int err;
46251 u16 type;
46252
46253 + pax_track_stack();
46254 +
46255 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
46256 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
46257 if (err)
46258 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
46259 int entry_size;
46260 int err;
46261
46262 + pax_track_stack();
46263 +
46264 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
46265 str->name, cnid, inode->i_nlink);
46266 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
46267 @@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
46268 int entry_size, type;
46269 int err;
46270
46271 + pax_track_stack();
46272 +
46273 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
46274 cnid, src_dir->i_ino, src_name->name,
46275 dst_dir->i_ino, dst_name->name);
46276 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
46277 index 25b2443..09a3341 100644
46278 --- a/fs/hfsplus/dir.c
46279 +++ b/fs/hfsplus/dir.c
46280 @@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
46281 struct hfsplus_readdir_data *rd;
46282 u16 type;
46283
46284 + pax_track_stack();
46285 +
46286 if (filp->f_pos >= inode->i_size)
46287 return 0;
46288
46289 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
46290 index 4cc1e3a..ad0f70b 100644
46291 --- a/fs/hfsplus/inode.c
46292 +++ b/fs/hfsplus/inode.c
46293 @@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
46294 int res = 0;
46295 u16 type;
46296
46297 + pax_track_stack();
46298 +
46299 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
46300
46301 HFSPLUS_I(inode)->linkid = 0;
46302 @@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
46303 struct hfs_find_data fd;
46304 hfsplus_cat_entry entry;
46305
46306 + pax_track_stack();
46307 +
46308 if (HFSPLUS_IS_RSRC(inode))
46309 main_inode = HFSPLUS_I(inode)->rsrc_inode;
46310
46311 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
46312 index fbaa669..c548cd0 100644
46313 --- a/fs/hfsplus/ioctl.c
46314 +++ b/fs/hfsplus/ioctl.c
46315 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
46316 struct hfsplus_cat_file *file;
46317 int res;
46318
46319 + pax_track_stack();
46320 +
46321 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46322 return -EOPNOTSUPP;
46323
46324 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
46325 struct hfsplus_cat_file *file;
46326 ssize_t res = 0;
46327
46328 + pax_track_stack();
46329 +
46330 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46331 return -EOPNOTSUPP;
46332
46333 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
46334 index d24a9b6..dd9b3dd 100644
46335 --- a/fs/hfsplus/super.c
46336 +++ b/fs/hfsplus/super.c
46337 @@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
46338 u64 last_fs_block, last_fs_page;
46339 int err;
46340
46341 + pax_track_stack();
46342 +
46343 err = -EINVAL;
46344 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
46345 if (!sbi)
46346 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46347 index ec88953..cb5e98e 100644
46348 --- a/fs/hugetlbfs/inode.c
46349 +++ b/fs/hugetlbfs/inode.c
46350 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46351 .kill_sb = kill_litter_super,
46352 };
46353
46354 -static struct vfsmount *hugetlbfs_vfsmount;
46355 +struct vfsmount *hugetlbfs_vfsmount;
46356
46357 static int can_do_hugetlb_shm(void)
46358 {
46359 diff --git a/fs/inode.c b/fs/inode.c
46360 index ec79246..054c36a 100644
46361 --- a/fs/inode.c
46362 +++ b/fs/inode.c
46363 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
46364
46365 #ifdef CONFIG_SMP
46366 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46367 - static atomic_t shared_last_ino;
46368 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46369 + static atomic_unchecked_t shared_last_ino;
46370 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46371
46372 res = next - LAST_INO_BATCH;
46373 }
46374 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
46375 index f94fc48..3bb8d30 100644
46376 --- a/fs/jbd/checkpoint.c
46377 +++ b/fs/jbd/checkpoint.c
46378 @@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal)
46379 tid_t this_tid;
46380 int result;
46381
46382 + pax_track_stack();
46383 +
46384 jbd_debug(1, "Start checkpoint\n");
46385
46386 /*
46387 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
46388 index 16a5047..88ff6ca 100644
46389 --- a/fs/jffs2/compr_rtime.c
46390 +++ b/fs/jffs2/compr_rtime.c
46391 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
46392 int outpos = 0;
46393 int pos=0;
46394
46395 + pax_track_stack();
46396 +
46397 memset(positions,0,sizeof(positions));
46398
46399 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
46400 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
46401 int outpos = 0;
46402 int pos=0;
46403
46404 + pax_track_stack();
46405 +
46406 memset(positions,0,sizeof(positions));
46407
46408 while (outpos<destlen) {
46409 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
46410 index 9e7cec8..4713089 100644
46411 --- a/fs/jffs2/compr_rubin.c
46412 +++ b/fs/jffs2/compr_rubin.c
46413 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
46414 int ret;
46415 uint32_t mysrclen, mydstlen;
46416
46417 + pax_track_stack();
46418 +
46419 mysrclen = *sourcelen;
46420 mydstlen = *dstlen - 8;
46421
46422 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46423 index e513f19..2ab1351 100644
46424 --- a/fs/jffs2/erase.c
46425 +++ b/fs/jffs2/erase.c
46426 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46427 struct jffs2_unknown_node marker = {
46428 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46429 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46430 - .totlen = cpu_to_je32(c->cleanmarker_size)
46431 + .totlen = cpu_to_je32(c->cleanmarker_size),
46432 + .hdr_crc = cpu_to_je32(0)
46433 };
46434
46435 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46436 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46437 index 4515bea..178f2d6 100644
46438 --- a/fs/jffs2/wbuf.c
46439 +++ b/fs/jffs2/wbuf.c
46440 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46441 {
46442 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46443 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46444 - .totlen = constant_cpu_to_je32(8)
46445 + .totlen = constant_cpu_to_je32(8),
46446 + .hdr_crc = constant_cpu_to_je32(0)
46447 };
46448
46449 /*
46450 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
46451 index 3e93cdd..c8a80e1 100644
46452 --- a/fs/jffs2/xattr.c
46453 +++ b/fs/jffs2/xattr.c
46454 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
46455
46456 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
46457
46458 + pax_track_stack();
46459 +
46460 /* Phase.1 : Merge same xref */
46461 for (i=0; i < XREF_TMPHASH_SIZE; i++)
46462 xref_tmphash[i] = NULL;
46463 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46464 index 06c8a67..589dbbd 100644
46465 --- a/fs/jfs/super.c
46466 +++ b/fs/jfs/super.c
46467 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
46468
46469 jfs_inode_cachep =
46470 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46471 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46472 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46473 init_once);
46474 if (jfs_inode_cachep == NULL)
46475 return -ENOMEM;
46476 diff --git a/fs/libfs.c b/fs/libfs.c
46477 index c18e9a1..0b04e2c 100644
46478 --- a/fs/libfs.c
46479 +++ b/fs/libfs.c
46480 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46481
46482 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46483 struct dentry *next;
46484 + char d_name[sizeof(next->d_iname)];
46485 + const unsigned char *name;
46486 +
46487 next = list_entry(p, struct dentry, d_u.d_child);
46488 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46489 if (!simple_positive(next)) {
46490 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46491
46492 spin_unlock(&next->d_lock);
46493 spin_unlock(&dentry->d_lock);
46494 - if (filldir(dirent, next->d_name.name,
46495 + name = next->d_name.name;
46496 + if (name == next->d_iname) {
46497 + memcpy(d_name, name, next->d_name.len);
46498 + name = d_name;
46499 + }
46500 + if (filldir(dirent, name,
46501 next->d_name.len, filp->f_pos,
46502 next->d_inode->i_ino,
46503 dt_type(next->d_inode)) < 0)
46504 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46505 index 8392cb8..ae8ed40 100644
46506 --- a/fs/lockd/clntproc.c
46507 +++ b/fs/lockd/clntproc.c
46508 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46509 /*
46510 * Cookie counter for NLM requests
46511 */
46512 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46513 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46514
46515 void nlmclnt_next_cookie(struct nlm_cookie *c)
46516 {
46517 - u32 cookie = atomic_inc_return(&nlm_cookie);
46518 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46519
46520 memcpy(c->data, &cookie, 4);
46521 c->len=4;
46522 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
46523 struct nlm_rqst reqst, *req;
46524 int status;
46525
46526 + pax_track_stack();
46527 +
46528 req = &reqst;
46529 memset(req, 0, sizeof(*req));
46530 locks_init_lock(&req->a_args.lock.fl);
46531 diff --git a/fs/locks.c b/fs/locks.c
46532 index 703f545..150a552 100644
46533 --- a/fs/locks.c
46534 +++ b/fs/locks.c
46535 @@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *filp)
46536 return;
46537
46538 if (filp->f_op && filp->f_op->flock) {
46539 - struct file_lock fl = {
46540 + struct file_lock flock = {
46541 .fl_pid = current->tgid,
46542 .fl_file = filp,
46543 .fl_flags = FL_FLOCK,
46544 .fl_type = F_UNLCK,
46545 .fl_end = OFFSET_MAX,
46546 };
46547 - filp->f_op->flock(filp, F_SETLKW, &fl);
46548 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46549 - fl.fl_ops->fl_release_private(&fl);
46550 + filp->f_op->flock(filp, F_SETLKW, &flock);
46551 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46552 + flock.fl_ops->fl_release_private(&flock);
46553 }
46554
46555 lock_flocks();
46556 diff --git a/fs/logfs/super.c b/fs/logfs/super.c
46557 index ce03a18..ac8c14f 100644
46558 --- a/fs/logfs/super.c
46559 +++ b/fs/logfs/super.c
46560 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super_block *sb)
46561 struct logfs_disk_super _ds1, *ds1 = &_ds1;
46562 int err, valid0, valid1;
46563
46564 + pax_track_stack();
46565 +
46566 /* read first superblock */
46567 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
46568 if (err)
46569 diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
46570 index 3f32bcb..7c82c29 100644
46571 --- a/fs/minix/bitmap.c
46572 +++ b/fs/minix/bitmap.c
46573 @@ -20,10 +20,11 @@ static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
46574
46575 static DEFINE_SPINLOCK(bitmap_lock);
46576
46577 -static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
46578 +static unsigned long count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
46579 {
46580 unsigned i, j, sum = 0;
46581 struct buffer_head *bh;
46582 + unsigned numblocks = minix_blocks_needed(numbits, blocksize);
46583
46584 for (i=0; i<numblocks-1; i++) {
46585 if (!(bh=map[i]))
46586 @@ -105,10 +106,12 @@ int minix_new_block(struct inode * inode)
46587 return 0;
46588 }
46589
46590 -unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
46591 +unsigned long minix_count_free_blocks(struct super_block *sb)
46592 {
46593 - return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
46594 - sbi->s_nzones - sbi->s_firstdatazone + 1)
46595 + struct minix_sb_info *sbi = minix_sb(sb);
46596 + u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
46597 +
46598 + return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
46599 << sbi->s_log_zone_size);
46600 }
46601
46602 @@ -273,7 +276,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
46603 return inode;
46604 }
46605
46606 -unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
46607 +unsigned long minix_count_free_inodes(struct super_block *sb)
46608 {
46609 - return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
46610 + struct minix_sb_info *sbi = minix_sb(sb);
46611 + u32 bits = sbi->s_ninodes + 1;
46612 +
46613 + return count_free(sbi->s_imap, sb->s_blocksize, bits);
46614 }
46615 diff --git a/fs/minix/inode.c b/fs/minix/inode.c
46616 index e7d23e2..1ed1351 100644
46617 --- a/fs/minix/inode.c
46618 +++ b/fs/minix/inode.c
46619 @@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
46620 else if (sbi->s_mount_state & MINIX_ERROR_FS)
46621 printk("MINIX-fs: mounting file system with errors, "
46622 "running fsck is recommended\n");
46623 +
46624 + /* Apparently minix can create filesystems that allocate more blocks for
46625 + * the bitmaps than needed. We simply ignore that, but verify it didn't
46626 + * create one with not enough blocks and bail out if so.
46627 + */
46628 + block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
46629 + if (sbi->s_imap_blocks < block) {
46630 + printk("MINIX-fs: file system does not have enough "
46631 + "imap blocks allocated. Refusing to mount\n");
46632 + goto out_iput;
46633 + }
46634 +
46635 + block = minix_blocks_needed(
46636 + (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
46637 + s->s_blocksize);
46638 + if (sbi->s_zmap_blocks < block) {
46639 + printk("MINIX-fs: file system does not have enough "
46640 + "zmap blocks allocated. Refusing to mount.\n");
46641 + goto out_iput;
46642 + }
46643 +
46644 return 0;
46645
46646 out_iput:
46647 @@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
46648 buf->f_type = sb->s_magic;
46649 buf->f_bsize = sb->s_blocksize;
46650 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
46651 - buf->f_bfree = minix_count_free_blocks(sbi);
46652 + buf->f_bfree = minix_count_free_blocks(sb);
46653 buf->f_bavail = buf->f_bfree;
46654 buf->f_files = sbi->s_ninodes;
46655 - buf->f_ffree = minix_count_free_inodes(sbi);
46656 + buf->f_ffree = minix_count_free_inodes(sb);
46657 buf->f_namelen = sbi->s_namelen;
46658 buf->f_fsid.val[0] = (u32)id;
46659 buf->f_fsid.val[1] = (u32)(id >> 32);
46660 diff --git a/fs/minix/minix.h b/fs/minix/minix.h
46661 index 341e212..6415fe0 100644
46662 --- a/fs/minix/minix.h
46663 +++ b/fs/minix/minix.h
46664 @@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
46665 extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
46666 extern struct inode * minix_new_inode(const struct inode *, int, int *);
46667 extern void minix_free_inode(struct inode * inode);
46668 -extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
46669 +extern unsigned long minix_count_free_inodes(struct super_block *sb);
46670 extern int minix_new_block(struct inode * inode);
46671 extern void minix_free_block(struct inode *inode, unsigned long block);
46672 -extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
46673 +extern unsigned long minix_count_free_blocks(struct super_block *sb);
46674 extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
46675 extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
46676
46677 @@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
46678 return list_entry(inode, struct minix_inode_info, vfs_inode);
46679 }
46680
46681 +static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
46682 +{
46683 + return DIV_ROUND_UP(bits, blocksize * 8);
46684 +}
46685 +
46686 #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
46687 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
46688
46689 diff --git a/fs/namei.c b/fs/namei.c
46690 index 3d15072..c1ddf9c 100644
46691 --- a/fs/namei.c
46692 +++ b/fs/namei.c
46693 @@ -281,16 +281,32 @@ int generic_permission(struct inode *inode, int mask)
46694 if (ret != -EACCES)
46695 return ret;
46696
46697 +#ifdef CONFIG_GRKERNSEC
46698 + /* we'll block if we have to log due to a denied capability use */
46699 + if (mask & MAY_NOT_BLOCK)
46700 + return -ECHILD;
46701 +#endif
46702 +
46703 if (S_ISDIR(inode->i_mode)) {
46704 /* DACs are overridable for directories */
46705 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46706 - return 0;
46707 if (!(mask & MAY_WRITE))
46708 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46709 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46710 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46711 return 0;
46712 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46713 + return 0;
46714 return -EACCES;
46715 }
46716 /*
46717 + * Searching includes executable on directories, else just read.
46718 + */
46719 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46720 + if (mask == MAY_READ)
46721 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46722 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46723 + return 0;
46724 +
46725 + /*
46726 * Read/write DACs are always overridable.
46727 * Executable DACs are overridable when there is
46728 * at least one exec bit set.
46729 @@ -299,14 +315,6 @@ int generic_permission(struct inode *inode, int mask)
46730 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46731 return 0;
46732
46733 - /*
46734 - * Searching includes executable on directories, else just read.
46735 - */
46736 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46737 - if (mask == MAY_READ)
46738 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46739 - return 0;
46740 -
46741 return -EACCES;
46742 }
46743
46744 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46745 return error;
46746 }
46747
46748 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46749 + dentry->d_inode, dentry, nd->path.mnt)) {
46750 + error = -EACCES;
46751 + *p = ERR_PTR(error); /* no ->put_link(), please */
46752 + path_put(&nd->path);
46753 + return error;
46754 + }
46755 +
46756 nd->last_type = LAST_BIND;
46757 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46758 error = PTR_ERR(*p);
46759 if (!IS_ERR(*p)) {
46760 - char *s = nd_get_link(nd);
46761 + const char *s = nd_get_link(nd);
46762 error = 0;
46763 if (s)
46764 error = __vfs_follow_link(nd, s);
46765 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
46766 if (!err)
46767 err = complete_walk(nd);
46768
46769 + if (!(nd->flags & LOOKUP_PARENT)) {
46770 +#ifdef CONFIG_GRKERNSEC
46771 + if (flags & LOOKUP_RCU) {
46772 + if (!err)
46773 + path_put(&nd->path);
46774 + err = -ECHILD;
46775 + } else
46776 +#endif
46777 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46778 + if (!err)
46779 + path_put(&nd->path);
46780 + err = -ENOENT;
46781 + }
46782 + }
46783 +
46784 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46785 if (!nd->inode->i_op->lookup) {
46786 path_put(&nd->path);
46787 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
46788 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46789
46790 if (likely(!retval)) {
46791 + if (*name != '/' && nd->path.dentry && nd->inode) {
46792 +#ifdef CONFIG_GRKERNSEC
46793 + if (flags & LOOKUP_RCU)
46794 + return -ECHILD;
46795 +#endif
46796 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46797 + return -ENOENT;
46798 + }
46799 +
46800 if (unlikely(!audit_dummy_context())) {
46801 if (nd->path.dentry && nd->inode)
46802 audit_inode(name, nd->path.dentry);
46803 @@ -2049,7 +2089,27 @@ static int may_open(struct path *path, int acc_mode, int flag)
46804 /*
46805 * Ensure there are no outstanding leases on the file.
46806 */
46807 - return break_lease(inode, flag);
46808 + error = break_lease(inode, flag);
46809 +
46810 + if (error)
46811 + return error;
46812 +
46813 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
46814 + error = -EPERM;
46815 + goto exit;
46816 + }
46817 +
46818 + if (gr_handle_rawio(inode)) {
46819 + error = -EPERM;
46820 + goto exit;
46821 + }
46822 +
46823 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
46824 + error = -EACCES;
46825 + goto exit;
46826 + }
46827 +exit:
46828 + return error;
46829 }
46830
46831 static int handle_truncate(struct file *filp)
46832 @@ -2110,6 +2170,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46833 error = complete_walk(nd);
46834 if (error)
46835 return ERR_PTR(error);
46836 +#ifdef CONFIG_GRKERNSEC
46837 + if (nd->flags & LOOKUP_RCU) {
46838 + error = -ECHILD;
46839 + goto exit;
46840 + }
46841 +#endif
46842 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46843 + error = -ENOENT;
46844 + goto exit;
46845 + }
46846 audit_inode(pathname, nd->path.dentry);
46847 if (open_flag & O_CREAT) {
46848 error = -EISDIR;
46849 @@ -2120,6 +2190,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46850 error = complete_walk(nd);
46851 if (error)
46852 return ERR_PTR(error);
46853 +#ifdef CONFIG_GRKERNSEC
46854 + if (nd->flags & LOOKUP_RCU) {
46855 + error = -ECHILD;
46856 + goto exit;
46857 + }
46858 +#endif
46859 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46860 + error = -ENOENT;
46861 + goto exit;
46862 + }
46863 audit_inode(pathname, dir);
46864 goto ok;
46865 }
46866 @@ -2141,6 +2221,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46867 error = complete_walk(nd);
46868 if (error)
46869 return ERR_PTR(-ECHILD);
46870 +#ifdef CONFIG_GRKERNSEC
46871 + if (nd->flags & LOOKUP_RCU) {
46872 + error = -ECHILD;
46873 + goto exit;
46874 + }
46875 +#endif
46876 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46877 + error = -ENOENT;
46878 + goto exit;
46879 + }
46880
46881 error = -ENOTDIR;
46882 if (nd->flags & LOOKUP_DIRECTORY) {
46883 @@ -2181,6 +2271,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46884 /* Negative dentry, just create the file */
46885 if (!dentry->d_inode) {
46886 int mode = op->mode;
46887 +
46888 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46889 + error = -EACCES;
46890 + goto exit_mutex_unlock;
46891 + }
46892 +
46893 if (!IS_POSIXACL(dir->d_inode))
46894 mode &= ~current_umask();
46895 /*
46896 @@ -2204,6 +2300,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46897 error = vfs_create(dir->d_inode, dentry, mode, nd);
46898 if (error)
46899 goto exit_mutex_unlock;
46900 + else
46901 + gr_handle_create(path->dentry, path->mnt);
46902 mutex_unlock(&dir->d_inode->i_mutex);
46903 dput(nd->path.dentry);
46904 nd->path.dentry = dentry;
46905 @@ -2213,6 +2311,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46906 /*
46907 * It already exists.
46908 */
46909 +
46910 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46911 + error = -ENOENT;
46912 + goto exit_mutex_unlock;
46913 + }
46914 +
46915 + /* only check if O_CREAT is specified, all other checks need to go
46916 + into may_open */
46917 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46918 + error = -EACCES;
46919 + goto exit_mutex_unlock;
46920 + }
46921 +
46922 mutex_unlock(&dir->d_inode->i_mutex);
46923 audit_inode(pathname, path->dentry);
46924
46925 @@ -2425,6 +2536,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46926 *path = nd.path;
46927 return dentry;
46928 eexist:
46929 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46930 + dput(dentry);
46931 + dentry = ERR_PTR(-ENOENT);
46932 + goto fail;
46933 + }
46934 dput(dentry);
46935 dentry = ERR_PTR(-EEXIST);
46936 fail:
46937 @@ -2447,6 +2563,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46938 }
46939 EXPORT_SYMBOL(user_path_create);
46940
46941 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46942 +{
46943 + char *tmp = getname(pathname);
46944 + struct dentry *res;
46945 + if (IS_ERR(tmp))
46946 + return ERR_CAST(tmp);
46947 + res = kern_path_create(dfd, tmp, path, is_dir);
46948 + if (IS_ERR(res))
46949 + putname(tmp);
46950 + else
46951 + *to = tmp;
46952 + return res;
46953 +}
46954 +
46955 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
46956 {
46957 int error = may_create(dir, dentry);
46958 @@ -2514,6 +2644,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46959 error = mnt_want_write(path.mnt);
46960 if (error)
46961 goto out_dput;
46962 +
46963 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46964 + error = -EPERM;
46965 + goto out_drop_write;
46966 + }
46967 +
46968 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46969 + error = -EACCES;
46970 + goto out_drop_write;
46971 + }
46972 +
46973 error = security_path_mknod(&path, dentry, mode, dev);
46974 if (error)
46975 goto out_drop_write;
46976 @@ -2531,6 +2672,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46977 }
46978 out_drop_write:
46979 mnt_drop_write(path.mnt);
46980 +
46981 + if (!error)
46982 + gr_handle_create(dentry, path.mnt);
46983 out_dput:
46984 dput(dentry);
46985 mutex_unlock(&path.dentry->d_inode->i_mutex);
46986 @@ -2580,12 +2724,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
46987 error = mnt_want_write(path.mnt);
46988 if (error)
46989 goto out_dput;
46990 +
46991 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46992 + error = -EACCES;
46993 + goto out_drop_write;
46994 + }
46995 +
46996 error = security_path_mkdir(&path, dentry, mode);
46997 if (error)
46998 goto out_drop_write;
46999 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
47000 out_drop_write:
47001 mnt_drop_write(path.mnt);
47002 +
47003 + if (!error)
47004 + gr_handle_create(dentry, path.mnt);
47005 out_dput:
47006 dput(dentry);
47007 mutex_unlock(&path.dentry->d_inode->i_mutex);
47008 @@ -2665,6 +2818,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47009 char * name;
47010 struct dentry *dentry;
47011 struct nameidata nd;
47012 + ino_t saved_ino = 0;
47013 + dev_t saved_dev = 0;
47014
47015 error = user_path_parent(dfd, pathname, &nd, &name);
47016 if (error)
47017 @@ -2693,6 +2848,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
47018 error = -ENOENT;
47019 goto exit3;
47020 }
47021 +
47022 + saved_ino = dentry->d_inode->i_ino;
47023 + saved_dev = gr_get_dev_from_dentry(dentry);
47024 +
47025 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47026 + error = -EACCES;
47027 + goto exit3;
47028 + }
47029 +
47030 error = mnt_want_write(nd.path.mnt);
47031 if (error)
47032 goto exit3;
47033 @@ -2700,6 +2864,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47034 if (error)
47035 goto exit4;
47036 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47037 + if (!error && (saved_dev || saved_ino))
47038 + gr_handle_delete(saved_ino, saved_dev);
47039 exit4:
47040 mnt_drop_write(nd.path.mnt);
47041 exit3:
47042 @@ -2762,6 +2928,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47043 struct dentry *dentry;
47044 struct nameidata nd;
47045 struct inode *inode = NULL;
47046 + ino_t saved_ino = 0;
47047 + dev_t saved_dev = 0;
47048
47049 error = user_path_parent(dfd, pathname, &nd, &name);
47050 if (error)
47051 @@ -2784,6 +2952,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47052 if (!inode)
47053 goto slashes;
47054 ihold(inode);
47055 +
47056 + if (inode->i_nlink <= 1) {
47057 + saved_ino = inode->i_ino;
47058 + saved_dev = gr_get_dev_from_dentry(dentry);
47059 + }
47060 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47061 + error = -EACCES;
47062 + goto exit2;
47063 + }
47064 +
47065 error = mnt_want_write(nd.path.mnt);
47066 if (error)
47067 goto exit2;
47068 @@ -2791,6 +2969,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47069 if (error)
47070 goto exit3;
47071 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47072 + if (!error && (saved_ino || saved_dev))
47073 + gr_handle_delete(saved_ino, saved_dev);
47074 exit3:
47075 mnt_drop_write(nd.path.mnt);
47076 exit2:
47077 @@ -2866,10 +3046,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
47078 error = mnt_want_write(path.mnt);
47079 if (error)
47080 goto out_dput;
47081 +
47082 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
47083 + error = -EACCES;
47084 + goto out_drop_write;
47085 + }
47086 +
47087 error = security_path_symlink(&path, dentry, from);
47088 if (error)
47089 goto out_drop_write;
47090 error = vfs_symlink(path.dentry->d_inode, dentry, from);
47091 + if (!error)
47092 + gr_handle_create(dentry, path.mnt);
47093 out_drop_write:
47094 mnt_drop_write(path.mnt);
47095 out_dput:
47096 @@ -2941,6 +3129,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47097 {
47098 struct dentry *new_dentry;
47099 struct path old_path, new_path;
47100 + char *to = NULL;
47101 int how = 0;
47102 int error;
47103
47104 @@ -2964,7 +3153,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47105 if (error)
47106 return error;
47107
47108 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47109 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47110 error = PTR_ERR(new_dentry);
47111 if (IS_ERR(new_dentry))
47112 goto out;
47113 @@ -2975,13 +3164,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47114 error = mnt_want_write(new_path.mnt);
47115 if (error)
47116 goto out_dput;
47117 +
47118 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47119 + old_path.dentry->d_inode,
47120 + old_path.dentry->d_inode->i_mode, to)) {
47121 + error = -EACCES;
47122 + goto out_drop_write;
47123 + }
47124 +
47125 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47126 + old_path.dentry, old_path.mnt, to)) {
47127 + error = -EACCES;
47128 + goto out_drop_write;
47129 + }
47130 +
47131 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47132 if (error)
47133 goto out_drop_write;
47134 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47135 + if (!error)
47136 + gr_handle_create(new_dentry, new_path.mnt);
47137 out_drop_write:
47138 mnt_drop_write(new_path.mnt);
47139 out_dput:
47140 + putname(to);
47141 dput(new_dentry);
47142 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47143 path_put(&new_path);
47144 @@ -3153,6 +3359,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47145 char *to;
47146 int error;
47147
47148 + pax_track_stack();
47149 +
47150 error = user_path_parent(olddfd, oldname, &oldnd, &from);
47151 if (error)
47152 goto exit;
47153 @@ -3209,6 +3417,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47154 if (new_dentry == trap)
47155 goto exit5;
47156
47157 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47158 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
47159 + to);
47160 + if (error)
47161 + goto exit5;
47162 +
47163 error = mnt_want_write(oldnd.path.mnt);
47164 if (error)
47165 goto exit5;
47166 @@ -3218,6 +3432,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47167 goto exit6;
47168 error = vfs_rename(old_dir->d_inode, old_dentry,
47169 new_dir->d_inode, new_dentry);
47170 + if (!error)
47171 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47172 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47173 exit6:
47174 mnt_drop_write(oldnd.path.mnt);
47175 exit5:
47176 @@ -3243,6 +3460,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47177
47178 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47179 {
47180 + char tmpbuf[64];
47181 + const char *newlink;
47182 int len;
47183
47184 len = PTR_ERR(link);
47185 @@ -3252,7 +3471,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47186 len = strlen(link);
47187 if (len > (unsigned) buflen)
47188 len = buflen;
47189 - if (copy_to_user(buffer, link, len))
47190 +
47191 + if (len < sizeof(tmpbuf)) {
47192 + memcpy(tmpbuf, link, len);
47193 + newlink = tmpbuf;
47194 + } else
47195 + newlink = link;
47196 +
47197 + if (copy_to_user(buffer, newlink, len))
47198 len = -EFAULT;
47199 out:
47200 return len;
47201 diff --git a/fs/namespace.c b/fs/namespace.c
47202 index 5e7f2e9..cd13685 100644
47203 --- a/fs/namespace.c
47204 +++ b/fs/namespace.c
47205 @@ -1326,6 +1326,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
47206 if (!(sb->s_flags & MS_RDONLY))
47207 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47208 up_write(&sb->s_umount);
47209 +
47210 + gr_log_remount(mnt->mnt_devname, retval);
47211 +
47212 return retval;
47213 }
47214
47215 @@ -1345,6 +1348,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
47216 br_write_unlock(vfsmount_lock);
47217 up_write(&namespace_sem);
47218 release_mounts(&umount_list);
47219 +
47220 + gr_log_unmount(mnt->mnt_devname, retval);
47221 +
47222 return retval;
47223 }
47224
47225 @@ -2336,6 +2342,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47226 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47227 MS_STRICTATIME);
47228
47229 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47230 + retval = -EPERM;
47231 + goto dput_out;
47232 + }
47233 +
47234 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47235 + retval = -EPERM;
47236 + goto dput_out;
47237 + }
47238 +
47239 if (flags & MS_REMOUNT)
47240 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47241 data_page);
47242 @@ -2350,6 +2366,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47243 dev_name, data_page);
47244 dput_out:
47245 path_put(&path);
47246 +
47247 + gr_log_mount(dev_name, dir_name, retval);
47248 +
47249 return retval;
47250 }
47251
47252 @@ -2573,6 +2592,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47253 if (error)
47254 goto out2;
47255
47256 + if (gr_handle_chroot_pivot()) {
47257 + error = -EPERM;
47258 + goto out2;
47259 + }
47260 +
47261 get_fs_root(current->fs, &root);
47262 error = lock_mount(&old);
47263 if (error)
47264 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
47265 index 9c51f62..503b252 100644
47266 --- a/fs/ncpfs/dir.c
47267 +++ b/fs/ncpfs/dir.c
47268 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
47269 int res, val = 0, len;
47270 __u8 __name[NCP_MAXPATHLEN + 1];
47271
47272 + pax_track_stack();
47273 +
47274 if (dentry == dentry->d_sb->s_root)
47275 return 1;
47276
47277 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
47278 int error, res, len;
47279 __u8 __name[NCP_MAXPATHLEN + 1];
47280
47281 + pax_track_stack();
47282 +
47283 error = -EIO;
47284 if (!ncp_conn_valid(server))
47285 goto finished;
47286 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
47287 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
47288 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
47289
47290 + pax_track_stack();
47291 +
47292 ncp_age_dentry(server, dentry);
47293 len = sizeof(__name);
47294 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
47295 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
47296 int error, len;
47297 __u8 __name[NCP_MAXPATHLEN + 1];
47298
47299 + pax_track_stack();
47300 +
47301 DPRINTK("ncp_mkdir: making %s/%s\n",
47302 dentry->d_parent->d_name.name, dentry->d_name.name);
47303
47304 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
47305 int old_len, new_len;
47306 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
47307
47308 + pax_track_stack();
47309 +
47310 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
47311 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
47312 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
47313 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
47314 index 202f370..9d4565e 100644
47315 --- a/fs/ncpfs/inode.c
47316 +++ b/fs/ncpfs/inode.c
47317 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
47318 #endif
47319 struct ncp_entry_info finfo;
47320
47321 + pax_track_stack();
47322 +
47323 memset(&data, 0, sizeof(data));
47324 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
47325 if (!server)
47326 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
47327 index 281ae95..dd895b9 100644
47328 --- a/fs/nfs/blocklayout/blocklayout.c
47329 +++ b/fs/nfs/blocklayout/blocklayout.c
47330 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
47331 */
47332 struct parallel_io {
47333 struct kref refcnt;
47334 - struct rpc_call_ops call_ops;
47335 + rpc_call_ops_no_const call_ops;
47336 void (*pnfs_callback) (void *data);
47337 void *data;
47338 };
47339 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47340 index 679d2f5..ef1ffec 100644
47341 --- a/fs/nfs/inode.c
47342 +++ b/fs/nfs/inode.c
47343 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47344 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47345 nfsi->attrtimeo_timestamp = jiffies;
47346
47347 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47348 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47349 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47350 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47351 else
47352 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47353 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47354 }
47355
47356 -static atomic_long_t nfs_attr_generation_counter;
47357 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47358
47359 static unsigned long nfs_read_attr_generation_counter(void)
47360 {
47361 - return atomic_long_read(&nfs_attr_generation_counter);
47362 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47363 }
47364
47365 unsigned long nfs_inc_attr_generation_counter(void)
47366 {
47367 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47368 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47369 }
47370
47371 void nfs_fattr_init(struct nfs_fattr *fattr)
47372 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
47373 index 6f8bcc7..8f823c5 100644
47374 --- a/fs/nfsd/nfs4state.c
47375 +++ b/fs/nfsd/nfs4state.c
47376 @@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
47377 unsigned int strhashval;
47378 int err;
47379
47380 + pax_track_stack();
47381 +
47382 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
47383 (long long) lock->lk_offset,
47384 (long long) lock->lk_length);
47385 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
47386 index f810996..cec8977 100644
47387 --- a/fs/nfsd/nfs4xdr.c
47388 +++ b/fs/nfsd/nfs4xdr.c
47389 @@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
47390 .dentry = dentry,
47391 };
47392
47393 + pax_track_stack();
47394 +
47395 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
47396 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
47397 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
47398 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47399 index acf88ae..4fd6245 100644
47400 --- a/fs/nfsd/vfs.c
47401 +++ b/fs/nfsd/vfs.c
47402 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47403 } else {
47404 oldfs = get_fs();
47405 set_fs(KERNEL_DS);
47406 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47407 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47408 set_fs(oldfs);
47409 }
47410
47411 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47412
47413 /* Write the data. */
47414 oldfs = get_fs(); set_fs(KERNEL_DS);
47415 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47416 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47417 set_fs(oldfs);
47418 if (host_err < 0)
47419 goto out_nfserr;
47420 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47421 */
47422
47423 oldfs = get_fs(); set_fs(KERNEL_DS);
47424 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
47425 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47426 set_fs(oldfs);
47427
47428 if (host_err < 0)
47429 diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
47430 index 3e65427..ac258be 100644
47431 --- a/fs/nilfs2/ioctl.c
47432 +++ b/fs/nilfs2/ioctl.c
47433 @@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
47434 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
47435 goto out_free;
47436
47437 + if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
47438 + goto out_free;
47439 +
47440 len = argv[n].v_size * argv[n].v_nmembs;
47441 base = (void __user *)(unsigned long)argv[n].v_base;
47442 if (len == 0) {
47443 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47444 index 9fde1c0..14e8827 100644
47445 --- a/fs/notify/fanotify/fanotify_user.c
47446 +++ b/fs/notify/fanotify/fanotify_user.c
47447 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47448 goto out_close_fd;
47449
47450 ret = -EFAULT;
47451 - if (copy_to_user(buf, &fanotify_event_metadata,
47452 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47453 + copy_to_user(buf, &fanotify_event_metadata,
47454 fanotify_event_metadata.event_len))
47455 goto out_kill_access_response;
47456
47457 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47458 index ee18815..7aa5d01 100644
47459 --- a/fs/notify/notification.c
47460 +++ b/fs/notify/notification.c
47461 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47462 * get set to 0 so it will never get 'freed'
47463 */
47464 static struct fsnotify_event *q_overflow_event;
47465 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47466 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47467
47468 /**
47469 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47470 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47471 */
47472 u32 fsnotify_get_cookie(void)
47473 {
47474 - return atomic_inc_return(&fsnotify_sync_cookie);
47475 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47476 }
47477 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47478
47479 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47480 index 99e3610..02c1068 100644
47481 --- a/fs/ntfs/dir.c
47482 +++ b/fs/ntfs/dir.c
47483 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47484 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47485 ~(s64)(ndir->itype.index.block_size - 1)));
47486 /* Bounds checks. */
47487 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47488 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47489 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47490 "inode 0x%lx or driver bug.", vdir->i_ino);
47491 goto err_out;
47492 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47493 index c587e2d..3641eaa 100644
47494 --- a/fs/ntfs/file.c
47495 +++ b/fs/ntfs/file.c
47496 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47497 #endif /* NTFS_RW */
47498 };
47499
47500 -const struct file_operations ntfs_empty_file_ops = {};
47501 +const struct file_operations ntfs_empty_file_ops __read_only;
47502
47503 -const struct inode_operations ntfs_empty_inode_ops = {};
47504 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47505 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47506 index 210c352..a174f83 100644
47507 --- a/fs/ocfs2/localalloc.c
47508 +++ b/fs/ocfs2/localalloc.c
47509 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47510 goto bail;
47511 }
47512
47513 - atomic_inc(&osb->alloc_stats.moves);
47514 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47515
47516 bail:
47517 if (handle)
47518 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
47519 index 53aa41e..d7df9f1 100644
47520 --- a/fs/ocfs2/namei.c
47521 +++ b/fs/ocfs2/namei.c
47522 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *old_dir,
47523 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
47524 struct ocfs2_dir_lookup_result target_insert = { NULL, };
47525
47526 + pax_track_stack();
47527 +
47528 /* At some point it might be nice to break this function up a
47529 * bit. */
47530
47531 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47532 index 4092858..51c70ff 100644
47533 --- a/fs/ocfs2/ocfs2.h
47534 +++ b/fs/ocfs2/ocfs2.h
47535 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47536
47537 struct ocfs2_alloc_stats
47538 {
47539 - atomic_t moves;
47540 - atomic_t local_data;
47541 - atomic_t bitmap_data;
47542 - atomic_t bg_allocs;
47543 - atomic_t bg_extends;
47544 + atomic_unchecked_t moves;
47545 + atomic_unchecked_t local_data;
47546 + atomic_unchecked_t bitmap_data;
47547 + atomic_unchecked_t bg_allocs;
47548 + atomic_unchecked_t bg_extends;
47549 };
47550
47551 enum ocfs2_local_alloc_state
47552 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47553 index ba5d97e..c77db25 100644
47554 --- a/fs/ocfs2/suballoc.c
47555 +++ b/fs/ocfs2/suballoc.c
47556 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47557 mlog_errno(status);
47558 goto bail;
47559 }
47560 - atomic_inc(&osb->alloc_stats.bg_extends);
47561 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47562
47563 /* You should never ask for this much metadata */
47564 BUG_ON(bits_wanted >
47565 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47566 mlog_errno(status);
47567 goto bail;
47568 }
47569 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47570 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47571
47572 *suballoc_loc = res.sr_bg_blkno;
47573 *suballoc_bit_start = res.sr_bit_offset;
47574 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47575 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47576 res->sr_bits);
47577
47578 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47579 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47580
47581 BUG_ON(res->sr_bits != 1);
47582
47583 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47584 mlog_errno(status);
47585 goto bail;
47586 }
47587 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47588 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47589
47590 BUG_ON(res.sr_bits != 1);
47591
47592 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47593 cluster_start,
47594 num_clusters);
47595 if (!status)
47596 - atomic_inc(&osb->alloc_stats.local_data);
47597 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47598 } else {
47599 if (min_clusters > (osb->bitmap_cpg - 1)) {
47600 /* The only paths asking for contiguousness
47601 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47602 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47603 res.sr_bg_blkno,
47604 res.sr_bit_offset);
47605 - atomic_inc(&osb->alloc_stats.bitmap_data);
47606 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47607 *num_clusters = res.sr_bits;
47608 }
47609 }
47610 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47611 index 56f6102..1433c29 100644
47612 --- a/fs/ocfs2/super.c
47613 +++ b/fs/ocfs2/super.c
47614 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47615 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47616 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47617 "Stats",
47618 - atomic_read(&osb->alloc_stats.bitmap_data),
47619 - atomic_read(&osb->alloc_stats.local_data),
47620 - atomic_read(&osb->alloc_stats.bg_allocs),
47621 - atomic_read(&osb->alloc_stats.moves),
47622 - atomic_read(&osb->alloc_stats.bg_extends));
47623 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47624 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47625 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47626 + atomic_read_unchecked(&osb->alloc_stats.moves),
47627 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47628
47629 out += snprintf(buf + out, len - out,
47630 "%10s => State: %u Descriptor: %llu Size: %u bits "
47631 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47632 spin_lock_init(&osb->osb_xattr_lock);
47633 ocfs2_init_steal_slots(osb);
47634
47635 - atomic_set(&osb->alloc_stats.moves, 0);
47636 - atomic_set(&osb->alloc_stats.local_data, 0);
47637 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47638 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47639 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47640 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47641 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47642 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47643 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47644 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47645
47646 /* Copy the blockcheck stats from the superblock probe */
47647 osb->osb_ecc_stats = *stats;
47648 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47649 index 5d22872..523db20 100644
47650 --- a/fs/ocfs2/symlink.c
47651 +++ b/fs/ocfs2/symlink.c
47652 @@ -142,7 +142,7 @@ bail:
47653
47654 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47655 {
47656 - char *link = nd_get_link(nd);
47657 + const char *link = nd_get_link(nd);
47658 if (!IS_ERR(link))
47659 kfree(link);
47660 }
47661 diff --git a/fs/open.c b/fs/open.c
47662 index f711921..28d5958 100644
47663 --- a/fs/open.c
47664 +++ b/fs/open.c
47665 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47666 error = locks_verify_truncate(inode, NULL, length);
47667 if (!error)
47668 error = security_path_truncate(&path);
47669 +
47670 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47671 + error = -EACCES;
47672 +
47673 if (!error)
47674 error = do_truncate(path.dentry, length, 0, NULL);
47675
47676 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47677 if (__mnt_is_readonly(path.mnt))
47678 res = -EROFS;
47679
47680 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47681 + res = -EACCES;
47682 +
47683 out_path_release:
47684 path_put(&path);
47685 out:
47686 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47687 if (error)
47688 goto dput_and_out;
47689
47690 + gr_log_chdir(path.dentry, path.mnt);
47691 +
47692 set_fs_pwd(current->fs, &path);
47693
47694 dput_and_out:
47695 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47696 goto out_putf;
47697
47698 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47699 +
47700 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47701 + error = -EPERM;
47702 +
47703 + if (!error)
47704 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47705 +
47706 if (!error)
47707 set_fs_pwd(current->fs, &file->f_path);
47708 out_putf:
47709 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47710 if (error)
47711 goto dput_and_out;
47712
47713 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47714 + goto dput_and_out;
47715 +
47716 set_fs_root(current->fs, &path);
47717 +
47718 + gr_handle_chroot_chdir(&path);
47719 +
47720 error = 0;
47721 dput_and_out:
47722 path_put(&path);
47723 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
47724 if (error)
47725 return error;
47726 mutex_lock(&inode->i_mutex);
47727 +
47728 + if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
47729 + error = -EACCES;
47730 + goto out_unlock;
47731 + }
47732 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47733 + error = -EACCES;
47734 + goto out_unlock;
47735 + }
47736 +
47737 error = security_path_chmod(path->dentry, path->mnt, mode);
47738 if (error)
47739 goto out_unlock;
47740 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47741 int error;
47742 struct iattr newattrs;
47743
47744 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47745 + return -EACCES;
47746 +
47747 newattrs.ia_valid = ATTR_CTIME;
47748 if (user != (uid_t) -1) {
47749 newattrs.ia_valid |= ATTR_UID;
47750 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
47751 index 6296b40..417c00f 100644
47752 --- a/fs/partitions/efi.c
47753 +++ b/fs/partitions/efi.c
47754 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
47755 if (!gpt)
47756 return NULL;
47757
47758 + if (!le32_to_cpu(gpt->num_partition_entries))
47759 + return NULL;
47760 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
47761 + if (!pte)
47762 + return NULL;
47763 +
47764 count = le32_to_cpu(gpt->num_partition_entries) *
47765 le32_to_cpu(gpt->sizeof_partition_entry);
47766 - if (!count)
47767 - return NULL;
47768 - pte = kzalloc(count, GFP_KERNEL);
47769 - if (!pte)
47770 - return NULL;
47771 -
47772 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
47773 (u8 *) pte,
47774 count) < count) {
47775 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
47776 index af9fdf0..75b15c3 100644
47777 --- a/fs/partitions/ldm.c
47778 +++ b/fs/partitions/ldm.c
47779 @@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
47780 goto found;
47781 }
47782
47783 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
47784 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
47785 if (!f) {
47786 ldm_crit ("Out of memory.");
47787 return false;
47788 diff --git a/fs/pipe.c b/fs/pipe.c
47789 index 0e0be1d..f62a72d 100644
47790 --- a/fs/pipe.c
47791 +++ b/fs/pipe.c
47792 @@ -420,9 +420,9 @@ redo:
47793 }
47794 if (bufs) /* More to do? */
47795 continue;
47796 - if (!pipe->writers)
47797 + if (!atomic_read(&pipe->writers))
47798 break;
47799 - if (!pipe->waiting_writers) {
47800 + if (!atomic_read(&pipe->waiting_writers)) {
47801 /* syscall merging: Usually we must not sleep
47802 * if O_NONBLOCK is set, or if we got some data.
47803 * But if a writer sleeps in kernel space, then
47804 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47805 mutex_lock(&inode->i_mutex);
47806 pipe = inode->i_pipe;
47807
47808 - if (!pipe->readers) {
47809 + if (!atomic_read(&pipe->readers)) {
47810 send_sig(SIGPIPE, current, 0);
47811 ret = -EPIPE;
47812 goto out;
47813 @@ -530,7 +530,7 @@ redo1:
47814 for (;;) {
47815 int bufs;
47816
47817 - if (!pipe->readers) {
47818 + if (!atomic_read(&pipe->readers)) {
47819 send_sig(SIGPIPE, current, 0);
47820 if (!ret)
47821 ret = -EPIPE;
47822 @@ -616,9 +616,9 @@ redo2:
47823 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47824 do_wakeup = 0;
47825 }
47826 - pipe->waiting_writers++;
47827 + atomic_inc(&pipe->waiting_writers);
47828 pipe_wait(pipe);
47829 - pipe->waiting_writers--;
47830 + atomic_dec(&pipe->waiting_writers);
47831 }
47832 out:
47833 mutex_unlock(&inode->i_mutex);
47834 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47835 mask = 0;
47836 if (filp->f_mode & FMODE_READ) {
47837 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47838 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47839 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47840 mask |= POLLHUP;
47841 }
47842
47843 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47844 * Most Unices do not set POLLERR for FIFOs but on Linux they
47845 * behave exactly like pipes for poll().
47846 */
47847 - if (!pipe->readers)
47848 + if (!atomic_read(&pipe->readers))
47849 mask |= POLLERR;
47850 }
47851
47852 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47853
47854 mutex_lock(&inode->i_mutex);
47855 pipe = inode->i_pipe;
47856 - pipe->readers -= decr;
47857 - pipe->writers -= decw;
47858 + atomic_sub(decr, &pipe->readers);
47859 + atomic_sub(decw, &pipe->writers);
47860
47861 - if (!pipe->readers && !pipe->writers) {
47862 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47863 free_pipe_info(inode);
47864 } else {
47865 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47866 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47867
47868 if (inode->i_pipe) {
47869 ret = 0;
47870 - inode->i_pipe->readers++;
47871 + atomic_inc(&inode->i_pipe->readers);
47872 }
47873
47874 mutex_unlock(&inode->i_mutex);
47875 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47876
47877 if (inode->i_pipe) {
47878 ret = 0;
47879 - inode->i_pipe->writers++;
47880 + atomic_inc(&inode->i_pipe->writers);
47881 }
47882
47883 mutex_unlock(&inode->i_mutex);
47884 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47885 if (inode->i_pipe) {
47886 ret = 0;
47887 if (filp->f_mode & FMODE_READ)
47888 - inode->i_pipe->readers++;
47889 + atomic_inc(&inode->i_pipe->readers);
47890 if (filp->f_mode & FMODE_WRITE)
47891 - inode->i_pipe->writers++;
47892 + atomic_inc(&inode->i_pipe->writers);
47893 }
47894
47895 mutex_unlock(&inode->i_mutex);
47896 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
47897 inode->i_pipe = NULL;
47898 }
47899
47900 -static struct vfsmount *pipe_mnt __read_mostly;
47901 +struct vfsmount *pipe_mnt __read_mostly;
47902
47903 /*
47904 * pipefs_dname() is called from d_path().
47905 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
47906 goto fail_iput;
47907 inode->i_pipe = pipe;
47908
47909 - pipe->readers = pipe->writers = 1;
47910 + atomic_set(&pipe->readers, 1);
47911 + atomic_set(&pipe->writers, 1);
47912 inode->i_fop = &rdwr_pipefifo_fops;
47913
47914 /*
47915 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47916 index 15af622..0e9f4467 100644
47917 --- a/fs/proc/Kconfig
47918 +++ b/fs/proc/Kconfig
47919 @@ -30,12 +30,12 @@ config PROC_FS
47920
47921 config PROC_KCORE
47922 bool "/proc/kcore support" if !ARM
47923 - depends on PROC_FS && MMU
47924 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47925
47926 config PROC_VMCORE
47927 bool "/proc/vmcore support"
47928 - depends on PROC_FS && CRASH_DUMP
47929 - default y
47930 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47931 + default n
47932 help
47933 Exports the dump image of crashed kernel in ELF format.
47934
47935 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47936 limited in memory.
47937
47938 config PROC_PAGE_MONITOR
47939 - default y
47940 - depends on PROC_FS && MMU
47941 + default n
47942 + depends on PROC_FS && MMU && !GRKERNSEC
47943 bool "Enable /proc page monitoring" if EXPERT
47944 help
47945 Various /proc files exist to monitor process memory utilization:
47946 diff --git a/fs/proc/array.c b/fs/proc/array.c
47947 index 3a1dafd..c7fed72 100644
47948 --- a/fs/proc/array.c
47949 +++ b/fs/proc/array.c
47950 @@ -60,6 +60,7 @@
47951 #include <linux/tty.h>
47952 #include <linux/string.h>
47953 #include <linux/mman.h>
47954 +#include <linux/grsecurity.h>
47955 #include <linux/proc_fs.h>
47956 #include <linux/ioport.h>
47957 #include <linux/uaccess.h>
47958 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47959 seq_putc(m, '\n');
47960 }
47961
47962 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47963 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47964 +{
47965 + if (p->mm)
47966 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47967 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47968 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47969 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47970 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47971 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47972 + else
47973 + seq_printf(m, "PaX:\t-----\n");
47974 +}
47975 +#endif
47976 +
47977 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47978 struct pid *pid, struct task_struct *task)
47979 {
47980 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47981 task_cpus_allowed(m, task);
47982 cpuset_task_status_allowed(m, task);
47983 task_context_switch_counts(m, task);
47984 +
47985 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47986 + task_pax(m, task);
47987 +#endif
47988 +
47989 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47990 + task_grsec_rbac(m, task);
47991 +#endif
47992 +
47993 return 0;
47994 }
47995
47996 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47997 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47998 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47999 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48000 +#endif
48001 +
48002 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48003 struct pid *pid, struct task_struct *task, int whole)
48004 {
48005 @@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48006 char tcomm[sizeof(task->comm)];
48007 unsigned long flags;
48008
48009 + pax_track_stack();
48010 +
48011 state = *get_task_state(task);
48012 vsize = eip = esp = 0;
48013 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
48014 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48015 gtime = task->gtime;
48016 }
48017
48018 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48019 + if (PAX_RAND_FLAGS(mm)) {
48020 + eip = 0;
48021 + esp = 0;
48022 + wchan = 0;
48023 + }
48024 +#endif
48025 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48026 + wchan = 0;
48027 + eip =0;
48028 + esp =0;
48029 +#endif
48030 +
48031 /* scale priority and nice values from timeslices to -20..20 */
48032 /* to make it look like a "normal" Unix priority/nice value */
48033 priority = task_prio(task);
48034 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48035 vsize,
48036 mm ? get_mm_rss(mm) : 0,
48037 rsslim,
48038 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48039 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
48040 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
48041 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
48042 +#else
48043 mm ? (permitted ? mm->start_code : 1) : 0,
48044 mm ? (permitted ? mm->end_code : 1) : 0,
48045 (permitted && mm) ? mm->start_stack : 0,
48046 +#endif
48047 esp,
48048 eip,
48049 /* The signal information here is obsolete.
48050 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48051
48052 return 0;
48053 }
48054 +
48055 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48056 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48057 +{
48058 + u32 curr_ip = 0;
48059 + unsigned long flags;
48060 +
48061 + if (lock_task_sighand(task, &flags)) {
48062 + curr_ip = task->signal->curr_ip;
48063 + unlock_task_sighand(task, &flags);
48064 + }
48065 +
48066 + return sprintf(buffer, "%pI4\n", &curr_ip);
48067 +}
48068 +#endif
48069 diff --git a/fs/proc/base.c b/fs/proc/base.c
48070 index 5eb0206..fe01db4 100644
48071 --- a/fs/proc/base.c
48072 +++ b/fs/proc/base.c
48073 @@ -107,6 +107,22 @@ struct pid_entry {
48074 union proc_op op;
48075 };
48076
48077 +struct getdents_callback {
48078 + struct linux_dirent __user * current_dir;
48079 + struct linux_dirent __user * previous;
48080 + struct file * file;
48081 + int count;
48082 + int error;
48083 +};
48084 +
48085 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
48086 + loff_t offset, u64 ino, unsigned int d_type)
48087 +{
48088 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
48089 + buf->error = -EINVAL;
48090 + return 0;
48091 +}
48092 +
48093 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48094 .name = (NAME), \
48095 .len = sizeof(NAME) - 1, \
48096 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
48097 if (task == current)
48098 return mm;
48099
48100 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
48101 + return ERR_PTR(-EPERM);
48102 +
48103 /*
48104 * If current is actively ptrace'ing, and would also be
48105 * permitted to freshly attach with ptrace now, permit it.
48106 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48107 if (!mm->arg_end)
48108 goto out_mm; /* Shh! No looking before we're done */
48109
48110 + if (gr_acl_handle_procpidmem(task))
48111 + goto out_mm;
48112 +
48113 len = mm->arg_end - mm->arg_start;
48114
48115 if (len > PAGE_SIZE)
48116 @@ -309,12 +331,28 @@ out:
48117 return res;
48118 }
48119
48120 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48121 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48122 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48123 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48124 +#endif
48125 +
48126 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48127 {
48128 struct mm_struct *mm = mm_for_maps(task);
48129 int res = PTR_ERR(mm);
48130 if (mm && !IS_ERR(mm)) {
48131 unsigned int nwords = 0;
48132 +
48133 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48134 + /* allow if we're currently ptracing this task */
48135 + if (PAX_RAND_FLAGS(mm) &&
48136 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48137 + mmput(mm);
48138 + return 0;
48139 + }
48140 +#endif
48141 +
48142 do {
48143 nwords += 2;
48144 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48145 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
48146 }
48147
48148
48149 -#ifdef CONFIG_KALLSYMS
48150 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48151 /*
48152 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48153 * Returns the resolved symbol. If that fails, simply return the address.
48154 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_struct *task)
48155 mutex_unlock(&task->signal->cred_guard_mutex);
48156 }
48157
48158 -#ifdef CONFIG_STACKTRACE
48159 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48160
48161 #define MAX_STACK_TRACE_DEPTH 64
48162
48163 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
48164 return count;
48165 }
48166
48167 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48168 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48169 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48170 {
48171 long nr;
48172 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
48173 /************************************************************************/
48174
48175 /* permission checks */
48176 -static int proc_fd_access_allowed(struct inode *inode)
48177 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48178 {
48179 struct task_struct *task;
48180 int allowed = 0;
48181 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct inode *inode)
48182 */
48183 task = get_proc_task(inode);
48184 if (task) {
48185 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48186 + if (log)
48187 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
48188 + else
48189 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48190 put_task_struct(task);
48191 }
48192 return allowed;
48193 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48194 if (!task)
48195 goto out_no_task;
48196
48197 + if (gr_acl_handle_procpidmem(task))
48198 + goto out;
48199 +
48200 ret = -ENOMEM;
48201 page = (char *)__get_free_page(GFP_TEMPORARY);
48202 if (!page)
48203 @@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48204 path_put(&nd->path);
48205
48206 /* Are we allowed to snoop on the tasks file descriptors? */
48207 - if (!proc_fd_access_allowed(inode))
48208 + if (!proc_fd_access_allowed(inode,0))
48209 goto out;
48210
48211 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
48212 @@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48213 struct path path;
48214
48215 /* Are we allowed to snoop on the tasks file descriptors? */
48216 - if (!proc_fd_access_allowed(inode))
48217 - goto out;
48218 + /* logging this is needed for learning on chromium to work properly,
48219 + but we don't want to flood the logs from 'ps' which does a readlink
48220 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48221 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
48222 + */
48223 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48224 + if (!proc_fd_access_allowed(inode,0))
48225 + goto out;
48226 + } else {
48227 + if (!proc_fd_access_allowed(inode,1))
48228 + goto out;
48229 + }
48230
48231 error = PROC_I(inode)->op.proc_get_link(inode, &path);
48232 if (error)
48233 @@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48234 rcu_read_lock();
48235 cred = __task_cred(task);
48236 inode->i_uid = cred->euid;
48237 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48238 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48239 +#else
48240 inode->i_gid = cred->egid;
48241 +#endif
48242 rcu_read_unlock();
48243 }
48244 security_task_to_inode(task, inode);
48245 @@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48246 struct inode *inode = dentry->d_inode;
48247 struct task_struct *task;
48248 const struct cred *cred;
48249 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48250 + const struct cred *tmpcred = current_cred();
48251 +#endif
48252
48253 generic_fillattr(inode, stat);
48254
48255 @@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48256 stat->uid = 0;
48257 stat->gid = 0;
48258 task = pid_task(proc_pid(inode), PIDTYPE_PID);
48259 +
48260 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
48261 + rcu_read_unlock();
48262 + return -ENOENT;
48263 + }
48264 +
48265 if (task) {
48266 + cred = __task_cred(task);
48267 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48268 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48269 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48270 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48271 +#endif
48272 + ) {
48273 +#endif
48274 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48275 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48276 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48277 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48278 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48279 +#endif
48280 task_dumpable(task)) {
48281 - cred = __task_cred(task);
48282 stat->uid = cred->euid;
48283 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48284 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48285 +#else
48286 stat->gid = cred->egid;
48287 +#endif
48288 }
48289 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48290 + } else {
48291 + rcu_read_unlock();
48292 + return -ENOENT;
48293 + }
48294 +#endif
48295 }
48296 rcu_read_unlock();
48297 return 0;
48298 @@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48299
48300 if (task) {
48301 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48302 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48303 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48304 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48305 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48306 +#endif
48307 task_dumpable(task)) {
48308 rcu_read_lock();
48309 cred = __task_cred(task);
48310 inode->i_uid = cred->euid;
48311 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48312 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48313 +#else
48314 inode->i_gid = cred->egid;
48315 +#endif
48316 rcu_read_unlock();
48317 } else {
48318 inode->i_uid = 0;
48319 @@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48320 int fd = proc_fd(inode);
48321
48322 if (task) {
48323 - files = get_files_struct(task);
48324 + if (!gr_acl_handle_procpidmem(task))
48325 + files = get_files_struct(task);
48326 put_task_struct(task);
48327 }
48328 if (files) {
48329 @@ -2176,11 +2275,21 @@ static const struct file_operations proc_fd_operations = {
48330 */
48331 static int proc_fd_permission(struct inode *inode, int mask)
48332 {
48333 + struct task_struct *task;
48334 int rv = generic_permission(inode, mask);
48335 - if (rv == 0)
48336 - return 0;
48337 +
48338 if (task_pid(current) == proc_pid(inode))
48339 rv = 0;
48340 +
48341 + task = get_proc_task(inode);
48342 + if (task == NULL)
48343 + return rv;
48344 +
48345 + if (gr_acl_handle_procpidmem(task))
48346 + rv = -EACCES;
48347 +
48348 + put_task_struct(task);
48349 +
48350 return rv;
48351 }
48352
48353 @@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48354 if (!task)
48355 goto out_no_task;
48356
48357 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48358 + goto out;
48359 +
48360 /*
48361 * Yes, it does not scale. And it should not. Don't add
48362 * new entries into /proc/<tgid>/ without very good reasons.
48363 @@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct file *filp,
48364 if (!task)
48365 goto out_no_task;
48366
48367 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48368 + goto out;
48369 +
48370 ret = 0;
48371 i = filp->f_pos;
48372 switch (i) {
48373 @@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48374 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48375 void *cookie)
48376 {
48377 - char *s = nd_get_link(nd);
48378 + const char *s = nd_get_link(nd);
48379 if (!IS_ERR(s))
48380 __putname(s);
48381 }
48382 @@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48383 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48384 #endif
48385 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48386 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48387 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48388 INF("syscall", S_IRUGO, proc_pid_syscall),
48389 #endif
48390 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48391 @@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48392 #ifdef CONFIG_SECURITY
48393 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48394 #endif
48395 -#ifdef CONFIG_KALLSYMS
48396 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48397 INF("wchan", S_IRUGO, proc_pid_wchan),
48398 #endif
48399 -#ifdef CONFIG_STACKTRACE
48400 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48401 ONE("stack", S_IRUGO, proc_pid_stack),
48402 #endif
48403 #ifdef CONFIG_SCHEDSTATS
48404 @@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48405 #ifdef CONFIG_HARDWALL
48406 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48407 #endif
48408 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48409 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48410 +#endif
48411 };
48412
48413 static int proc_tgid_base_readdir(struct file * filp,
48414 @@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48415 if (!inode)
48416 goto out;
48417
48418 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48419 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48420 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48421 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48422 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48423 +#else
48424 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48425 +#endif
48426 inode->i_op = &proc_tgid_base_inode_operations;
48427 inode->i_fop = &proc_tgid_base_operations;
48428 inode->i_flags|=S_IMMUTABLE;
48429 @@ -3031,7 +3156,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48430 if (!task)
48431 goto out;
48432
48433 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48434 + goto out_put_task;
48435 +
48436 result = proc_pid_instantiate(dir, dentry, task, NULL);
48437 +out_put_task:
48438 put_task_struct(task);
48439 out:
48440 return result;
48441 @@ -3096,6 +3225,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48442 {
48443 unsigned int nr;
48444 struct task_struct *reaper;
48445 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48446 + const struct cred *tmpcred = current_cred();
48447 + const struct cred *itercred;
48448 +#endif
48449 + filldir_t __filldir = filldir;
48450 struct tgid_iter iter;
48451 struct pid_namespace *ns;
48452
48453 @@ -3119,8 +3253,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48454 for (iter = next_tgid(ns, iter);
48455 iter.task;
48456 iter.tgid += 1, iter = next_tgid(ns, iter)) {
48457 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48458 + rcu_read_lock();
48459 + itercred = __task_cred(iter.task);
48460 +#endif
48461 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
48462 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48463 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
48464 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48465 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48466 +#endif
48467 + )
48468 +#endif
48469 + )
48470 + __filldir = &gr_fake_filldir;
48471 + else
48472 + __filldir = filldir;
48473 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48474 + rcu_read_unlock();
48475 +#endif
48476 filp->f_pos = iter.tgid + TGID_OFFSET;
48477 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
48478 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
48479 put_task_struct(iter.task);
48480 goto out;
48481 }
48482 @@ -3148,7 +3301,7 @@ static const struct pid_entry tid_base_stuff[] = {
48483 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48484 #endif
48485 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48486 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48487 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48488 INF("syscall", S_IRUGO, proc_pid_syscall),
48489 #endif
48490 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48491 @@ -3172,10 +3325,10 @@ static const struct pid_entry tid_base_stuff[] = {
48492 #ifdef CONFIG_SECURITY
48493 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48494 #endif
48495 -#ifdef CONFIG_KALLSYMS
48496 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48497 INF("wchan", S_IRUGO, proc_pid_wchan),
48498 #endif
48499 -#ifdef CONFIG_STACKTRACE
48500 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48501 ONE("stack", S_IRUGO, proc_pid_stack),
48502 #endif
48503 #ifdef CONFIG_SCHEDSTATS
48504 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48505 index 82676e3..5f8518a 100644
48506 --- a/fs/proc/cmdline.c
48507 +++ b/fs/proc/cmdline.c
48508 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48509
48510 static int __init proc_cmdline_init(void)
48511 {
48512 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48513 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48514 +#else
48515 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48516 +#endif
48517 return 0;
48518 }
48519 module_init(proc_cmdline_init);
48520 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48521 index b143471..bb105e5 100644
48522 --- a/fs/proc/devices.c
48523 +++ b/fs/proc/devices.c
48524 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48525
48526 static int __init proc_devices_init(void)
48527 {
48528 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48529 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48530 +#else
48531 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48532 +#endif
48533 return 0;
48534 }
48535 module_init(proc_devices_init);
48536 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48537 index 7ed72d6..d5f061a 100644
48538 --- a/fs/proc/inode.c
48539 +++ b/fs/proc/inode.c
48540 @@ -18,12 +18,18 @@
48541 #include <linux/module.h>
48542 #include <linux/sysctl.h>
48543 #include <linux/slab.h>
48544 +#include <linux/grsecurity.h>
48545
48546 #include <asm/system.h>
48547 #include <asm/uaccess.h>
48548
48549 #include "internal.h"
48550
48551 +#ifdef CONFIG_PROC_SYSCTL
48552 +extern const struct inode_operations proc_sys_inode_operations;
48553 +extern const struct inode_operations proc_sys_dir_operations;
48554 +#endif
48555 +
48556 static void proc_evict_inode(struct inode *inode)
48557 {
48558 struct proc_dir_entry *de;
48559 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
48560 ns_ops = PROC_I(inode)->ns_ops;
48561 if (ns_ops && ns_ops->put)
48562 ns_ops->put(PROC_I(inode)->ns);
48563 +
48564 +#ifdef CONFIG_PROC_SYSCTL
48565 + if (inode->i_op == &proc_sys_inode_operations ||
48566 + inode->i_op == &proc_sys_dir_operations)
48567 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48568 +#endif
48569 +
48570 }
48571
48572 static struct kmem_cache * proc_inode_cachep;
48573 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48574 if (de->mode) {
48575 inode->i_mode = de->mode;
48576 inode->i_uid = de->uid;
48577 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48578 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48579 +#else
48580 inode->i_gid = de->gid;
48581 +#endif
48582 }
48583 if (de->size)
48584 inode->i_size = de->size;
48585 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48586 index 7838e5c..ff92cbc 100644
48587 --- a/fs/proc/internal.h
48588 +++ b/fs/proc/internal.h
48589 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48590 struct pid *pid, struct task_struct *task);
48591 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48592 struct pid *pid, struct task_struct *task);
48593 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48594 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48595 +#endif
48596 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48597
48598 extern const struct file_operations proc_maps_operations;
48599 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48600 index d245cb2..7e645bd 100644
48601 --- a/fs/proc/kcore.c
48602 +++ b/fs/proc/kcore.c
48603 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
48604 off_t offset = 0;
48605 struct kcore_list *m;
48606
48607 + pax_track_stack();
48608 +
48609 /* setup ELF header */
48610 elf = (struct elfhdr *) bufp;
48611 bufp += sizeof(struct elfhdr);
48612 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48613 * the addresses in the elf_phdr on our list.
48614 */
48615 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48616 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48617 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48618 + if (tsz > buflen)
48619 tsz = buflen;
48620 -
48621 +
48622 while (buflen) {
48623 struct kcore_list *m;
48624
48625 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48626 kfree(elf_buf);
48627 } else {
48628 if (kern_addr_valid(start)) {
48629 - unsigned long n;
48630 + char *elf_buf;
48631 + mm_segment_t oldfs;
48632
48633 - n = copy_to_user(buffer, (char *)start, tsz);
48634 - /*
48635 - * We cannot distingush between fault on source
48636 - * and fault on destination. When this happens
48637 - * we clear too and hope it will trigger the
48638 - * EFAULT again.
48639 - */
48640 - if (n) {
48641 - if (clear_user(buffer + tsz - n,
48642 - n))
48643 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48644 + if (!elf_buf)
48645 + return -ENOMEM;
48646 + oldfs = get_fs();
48647 + set_fs(KERNEL_DS);
48648 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48649 + set_fs(oldfs);
48650 + if (copy_to_user(buffer, elf_buf, tsz)) {
48651 + kfree(elf_buf);
48652 return -EFAULT;
48653 + }
48654 }
48655 + set_fs(oldfs);
48656 + kfree(elf_buf);
48657 } else {
48658 if (clear_user(buffer, tsz))
48659 return -EFAULT;
48660 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48661
48662 static int open_kcore(struct inode *inode, struct file *filp)
48663 {
48664 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48665 + return -EPERM;
48666 +#endif
48667 if (!capable(CAP_SYS_RAWIO))
48668 return -EPERM;
48669 if (kcore_need_update)
48670 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48671 index 80e4645..d2689e9 100644
48672 --- a/fs/proc/meminfo.c
48673 +++ b/fs/proc/meminfo.c
48674 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48675 unsigned long pages[NR_LRU_LISTS];
48676 int lru;
48677
48678 + pax_track_stack();
48679 +
48680 /*
48681 * display in kilobytes.
48682 */
48683 @@ -158,7 +160,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48684 vmi.used >> 10,
48685 vmi.largest_chunk >> 10
48686 #ifdef CONFIG_MEMORY_FAILURE
48687 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48688 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48689 #endif
48690 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48691 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48692 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48693 index b1822dd..df622cb 100644
48694 --- a/fs/proc/nommu.c
48695 +++ b/fs/proc/nommu.c
48696 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48697 if (len < 1)
48698 len = 1;
48699 seq_printf(m, "%*c", len, ' ');
48700 - seq_path(m, &file->f_path, "");
48701 + seq_path(m, &file->f_path, "\n\\");
48702 }
48703
48704 seq_putc(m, '\n');
48705 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48706 index f738024..876984a 100644
48707 --- a/fs/proc/proc_net.c
48708 +++ b/fs/proc/proc_net.c
48709 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48710 struct task_struct *task;
48711 struct nsproxy *ns;
48712 struct net *net = NULL;
48713 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48714 + const struct cred *cred = current_cred();
48715 +#endif
48716 +
48717 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48718 + if (cred->fsuid)
48719 + return net;
48720 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48721 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48722 + return net;
48723 +#endif
48724
48725 rcu_read_lock();
48726 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48727 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48728 index 1a77dbe..56ec911 100644
48729 --- a/fs/proc/proc_sysctl.c
48730 +++ b/fs/proc/proc_sysctl.c
48731 @@ -8,11 +8,13 @@
48732 #include <linux/namei.h>
48733 #include "internal.h"
48734
48735 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48736 +
48737 static const struct dentry_operations proc_sys_dentry_operations;
48738 static const struct file_operations proc_sys_file_operations;
48739 -static const struct inode_operations proc_sys_inode_operations;
48740 +const struct inode_operations proc_sys_inode_operations;
48741 static const struct file_operations proc_sys_dir_file_operations;
48742 -static const struct inode_operations proc_sys_dir_operations;
48743 +const struct inode_operations proc_sys_dir_operations;
48744
48745 static struct inode *proc_sys_make_inode(struct super_block *sb,
48746 struct ctl_table_header *head, struct ctl_table *table)
48747 @@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48748
48749 err = NULL;
48750 d_set_d_op(dentry, &proc_sys_dentry_operations);
48751 +
48752 + gr_handle_proc_create(dentry, inode);
48753 +
48754 d_add(dentry, inode);
48755
48756 + if (gr_handle_sysctl(p, MAY_EXEC))
48757 + err = ERR_PTR(-ENOENT);
48758 +
48759 out:
48760 sysctl_head_finish(head);
48761 return err;
48762 @@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48763 return -ENOMEM;
48764 } else {
48765 d_set_d_op(child, &proc_sys_dentry_operations);
48766 +
48767 + gr_handle_proc_create(child, inode);
48768 +
48769 d_add(child, inode);
48770 }
48771 } else {
48772 @@ -230,6 +241,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48773 if (*pos < file->f_pos)
48774 continue;
48775
48776 + if (gr_handle_sysctl(table, 0))
48777 + continue;
48778 +
48779 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
48780 if (res)
48781 return res;
48782 @@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48783 if (IS_ERR(head))
48784 return PTR_ERR(head);
48785
48786 + if (table && gr_handle_sysctl(table, MAY_EXEC))
48787 + return -ENOENT;
48788 +
48789 generic_fillattr(inode, stat);
48790 if (table)
48791 stat->mode = (stat->mode & S_IFMT) | table->mode;
48792 @@ -370,17 +387,18 @@ static const struct file_operations proc_sys_file_operations = {
48793 };
48794
48795 static const struct file_operations proc_sys_dir_file_operations = {
48796 + .read = generic_read_dir,
48797 .readdir = proc_sys_readdir,
48798 .llseek = generic_file_llseek,
48799 };
48800
48801 -static const struct inode_operations proc_sys_inode_operations = {
48802 +const struct inode_operations proc_sys_inode_operations = {
48803 .permission = proc_sys_permission,
48804 .setattr = proc_sys_setattr,
48805 .getattr = proc_sys_getattr,
48806 };
48807
48808 -static const struct inode_operations proc_sys_dir_operations = {
48809 +const struct inode_operations proc_sys_dir_operations = {
48810 .lookup = proc_sys_lookup,
48811 .permission = proc_sys_permission,
48812 .setattr = proc_sys_setattr,
48813 diff --git a/fs/proc/root.c b/fs/proc/root.c
48814 index 9a8a2b7..3018df6 100644
48815 --- a/fs/proc/root.c
48816 +++ b/fs/proc/root.c
48817 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
48818 #ifdef CONFIG_PROC_DEVICETREE
48819 proc_device_tree_init();
48820 #endif
48821 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48822 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48823 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48824 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48825 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48826 +#endif
48827 +#else
48828 proc_mkdir("bus", NULL);
48829 +#endif
48830 proc_sys_init();
48831 }
48832
48833 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48834 index c7d4ee6..41c5564 100644
48835 --- a/fs/proc/task_mmu.c
48836 +++ b/fs/proc/task_mmu.c
48837 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48838 "VmExe:\t%8lu kB\n"
48839 "VmLib:\t%8lu kB\n"
48840 "VmPTE:\t%8lu kB\n"
48841 - "VmSwap:\t%8lu kB\n",
48842 - hiwater_vm << (PAGE_SHIFT-10),
48843 + "VmSwap:\t%8lu kB\n"
48844 +
48845 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48846 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48847 +#endif
48848 +
48849 + ,hiwater_vm << (PAGE_SHIFT-10),
48850 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48851 mm->locked_vm << (PAGE_SHIFT-10),
48852 hiwater_rss << (PAGE_SHIFT-10),
48853 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48854 data << (PAGE_SHIFT-10),
48855 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48856 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48857 - swap << (PAGE_SHIFT-10));
48858 + swap << (PAGE_SHIFT-10)
48859 +
48860 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48861 + , mm->context.user_cs_base, mm->context.user_cs_limit
48862 +#endif
48863 +
48864 + );
48865 }
48866
48867 unsigned long task_vsize(struct mm_struct *mm)
48868 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
48869 return ret;
48870 }
48871
48872 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48873 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48874 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48875 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48876 +#endif
48877 +
48878 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48879 {
48880 struct mm_struct *mm = vma->vm_mm;
48881 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48882 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48883 }
48884
48885 - /* We don't show the stack guard page in /proc/maps */
48886 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48887 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48888 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48889 +#else
48890 start = vma->vm_start;
48891 - if (stack_guard_page_start(vma, start))
48892 - start += PAGE_SIZE;
48893 end = vma->vm_end;
48894 - if (stack_guard_page_end(vma, end))
48895 - end -= PAGE_SIZE;
48896 +#endif
48897
48898 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48899 start,
48900 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48901 flags & VM_WRITE ? 'w' : '-',
48902 flags & VM_EXEC ? 'x' : '-',
48903 flags & VM_MAYSHARE ? 's' : 'p',
48904 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48905 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48906 +#else
48907 pgoff,
48908 +#endif
48909 MAJOR(dev), MINOR(dev), ino, &len);
48910
48911 /*
48912 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48913 */
48914 if (file) {
48915 pad_len_spaces(m, len);
48916 - seq_path(m, &file->f_path, "\n");
48917 + seq_path(m, &file->f_path, "\n\\");
48918 } else {
48919 const char *name = arch_vma_name(vma);
48920 if (!name) {
48921 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48922 if (vma->vm_start <= mm->brk &&
48923 vma->vm_end >= mm->start_brk) {
48924 name = "[heap]";
48925 - } else if (vma->vm_start <= mm->start_stack &&
48926 - vma->vm_end >= mm->start_stack) {
48927 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48928 + (vma->vm_start <= mm->start_stack &&
48929 + vma->vm_end >= mm->start_stack)) {
48930 name = "[stack]";
48931 }
48932 } else {
48933 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m, void *v)
48934 };
48935
48936 memset(&mss, 0, sizeof mss);
48937 - mss.vma = vma;
48938 - /* mmap_sem is held in m_start */
48939 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48940 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48941 -
48942 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48943 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48944 +#endif
48945 + mss.vma = vma;
48946 + /* mmap_sem is held in m_start */
48947 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48948 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48949 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48950 + }
48951 +#endif
48952 show_map_vma(m, vma);
48953
48954 seq_printf(m,
48955 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m, void *v)
48956 "KernelPageSize: %8lu kB\n"
48957 "MMUPageSize: %8lu kB\n"
48958 "Locked: %8lu kB\n",
48959 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48960 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48961 +#else
48962 (vma->vm_end - vma->vm_start) >> 10,
48963 +#endif
48964 mss.resident >> 10,
48965 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48966 mss.shared_clean >> 10,
48967 @@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file *m, void *v)
48968
48969 if (file) {
48970 seq_printf(m, " file=");
48971 - seq_path(m, &file->f_path, "\n\t= ");
48972 + seq_path(m, &file->f_path, "\n\t\\= ");
48973 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48974 seq_printf(m, " heap");
48975 } else if (vma->vm_start <= mm->start_stack &&
48976 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48977 index 980de54..2a4db5f 100644
48978 --- a/fs/proc/task_nommu.c
48979 +++ b/fs/proc/task_nommu.c
48980 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48981 else
48982 bytes += kobjsize(mm);
48983
48984 - if (current->fs && current->fs->users > 1)
48985 + if (current->fs && atomic_read(&current->fs->users) > 1)
48986 sbytes += kobjsize(current->fs);
48987 else
48988 bytes += kobjsize(current->fs);
48989 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48990
48991 if (file) {
48992 pad_len_spaces(m, len);
48993 - seq_path(m, &file->f_path, "");
48994 + seq_path(m, &file->f_path, "\n\\");
48995 } else if (mm) {
48996 if (vma->vm_start <= mm->start_stack &&
48997 vma->vm_end >= mm->start_stack) {
48998 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48999 index d67908b..d13f6a6 100644
49000 --- a/fs/quota/netlink.c
49001 +++ b/fs/quota/netlink.c
49002 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
49003 void quota_send_warning(short type, unsigned int id, dev_t dev,
49004 const char warntype)
49005 {
49006 - static atomic_t seq;
49007 + static atomic_unchecked_t seq;
49008 struct sk_buff *skb;
49009 void *msg_head;
49010 int ret;
49011 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
49012 "VFS: Not enough memory to send quota warning.\n");
49013 return;
49014 }
49015 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49016 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49017 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49018 if (!msg_head) {
49019 printk(KERN_ERR
49020 diff --git a/fs/readdir.c b/fs/readdir.c
49021 index 356f715..c918d38 100644
49022 --- a/fs/readdir.c
49023 +++ b/fs/readdir.c
49024 @@ -17,6 +17,7 @@
49025 #include <linux/security.h>
49026 #include <linux/syscalls.h>
49027 #include <linux/unistd.h>
49028 +#include <linux/namei.h>
49029
49030 #include <asm/uaccess.h>
49031
49032 @@ -67,6 +68,7 @@ struct old_linux_dirent {
49033
49034 struct readdir_callback {
49035 struct old_linux_dirent __user * dirent;
49036 + struct file * file;
49037 int result;
49038 };
49039
49040 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49041 buf->result = -EOVERFLOW;
49042 return -EOVERFLOW;
49043 }
49044 +
49045 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49046 + return 0;
49047 +
49048 buf->result++;
49049 dirent = buf->dirent;
49050 if (!access_ok(VERIFY_WRITE, dirent,
49051 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49052
49053 buf.result = 0;
49054 buf.dirent = dirent;
49055 + buf.file = file;
49056
49057 error = vfs_readdir(file, fillonedir, &buf);
49058 if (buf.result)
49059 @@ -142,6 +149,7 @@ struct linux_dirent {
49060 struct getdents_callback {
49061 struct linux_dirent __user * current_dir;
49062 struct linux_dirent __user * previous;
49063 + struct file * file;
49064 int count;
49065 int error;
49066 };
49067 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49068 buf->error = -EOVERFLOW;
49069 return -EOVERFLOW;
49070 }
49071 +
49072 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49073 + return 0;
49074 +
49075 dirent = buf->previous;
49076 if (dirent) {
49077 if (__put_user(offset, &dirent->d_off))
49078 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49079 buf.previous = NULL;
49080 buf.count = count;
49081 buf.error = 0;
49082 + buf.file = file;
49083
49084 error = vfs_readdir(file, filldir, &buf);
49085 if (error >= 0)
49086 @@ -229,6 +242,7 @@ out:
49087 struct getdents_callback64 {
49088 struct linux_dirent64 __user * current_dir;
49089 struct linux_dirent64 __user * previous;
49090 + struct file *file;
49091 int count;
49092 int error;
49093 };
49094 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49095 buf->error = -EINVAL; /* only used if we fail.. */
49096 if (reclen > buf->count)
49097 return -EINVAL;
49098 +
49099 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49100 + return 0;
49101 +
49102 dirent = buf->previous;
49103 if (dirent) {
49104 if (__put_user(offset, &dirent->d_off))
49105 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49106
49107 buf.current_dir = dirent;
49108 buf.previous = NULL;
49109 + buf.file = file;
49110 buf.count = count;
49111 buf.error = 0;
49112
49113 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49114 error = buf.error;
49115 lastdirent = buf.previous;
49116 if (lastdirent) {
49117 - typeof(lastdirent->d_off) d_off = file->f_pos;
49118 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49119 if (__put_user(d_off, &lastdirent->d_off))
49120 error = -EFAULT;
49121 else
49122 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
49123 index 133e935..349ef18 100644
49124 --- a/fs/reiserfs/dir.c
49125 +++ b/fs/reiserfs/dir.c
49126 @@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
49127 struct reiserfs_dir_entry de;
49128 int ret = 0;
49129
49130 + pax_track_stack();
49131 +
49132 reiserfs_write_lock(inode->i_sb);
49133
49134 reiserfs_check_lock_depth(inode->i_sb, "readdir");
49135 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49136 index 60c0804..d814f98 100644
49137 --- a/fs/reiserfs/do_balan.c
49138 +++ b/fs/reiserfs/do_balan.c
49139 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
49140 return;
49141 }
49142
49143 - atomic_inc(&(fs_generation(tb->tb_sb)));
49144 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49145 do_balance_starts(tb);
49146
49147 /* balance leaf returns 0 except if combining L R and S into
49148 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
49149 index a159ba5..0396a76 100644
49150 --- a/fs/reiserfs/journal.c
49151 +++ b/fs/reiserfs/journal.c
49152 @@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
49153 struct buffer_head *bh;
49154 int i, j;
49155
49156 + pax_track_stack();
49157 +
49158 bh = __getblk(dev, block, bufsize);
49159 if (buffer_uptodate(bh))
49160 return (bh);
49161 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
49162 index ef39232..0fa91ba 100644
49163 --- a/fs/reiserfs/namei.c
49164 +++ b/fs/reiserfs/namei.c
49165 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
49166 unsigned long savelink = 1;
49167 struct timespec ctime;
49168
49169 + pax_track_stack();
49170 +
49171 /* three balancings: (1) old name removal, (2) new name insertion
49172 and (3) maybe "save" link insertion
49173 stat data updates: (1) old directory,
49174 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
49175 index 7a99811..2c9286f 100644
49176 --- a/fs/reiserfs/procfs.c
49177 +++ b/fs/reiserfs/procfs.c
49178 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
49179 "SMALL_TAILS " : "NO_TAILS ",
49180 replay_only(sb) ? "REPLAY_ONLY " : "",
49181 convert_reiserfs(sb) ? "CONV " : "",
49182 - atomic_read(&r->s_generation_counter),
49183 + atomic_read_unchecked(&r->s_generation_counter),
49184 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49185 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49186 SF(s_good_search_by_key_reada), SF(s_bmaps),
49187 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
49188 struct journal_params *jp = &rs->s_v1.s_journal;
49189 char b[BDEVNAME_SIZE];
49190
49191 + pax_track_stack();
49192 +
49193 seq_printf(m, /* on-disk fields */
49194 "jp_journal_1st_block: \t%i\n"
49195 "jp_journal_dev: \t%s[%x]\n"
49196 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
49197 index 313d39d..3a5811b 100644
49198 --- a/fs/reiserfs/stree.c
49199 +++ b/fs/reiserfs/stree.c
49200 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
49201 int iter = 0;
49202 #endif
49203
49204 + pax_track_stack();
49205 +
49206 BUG_ON(!th->t_trans_id);
49207
49208 init_tb_struct(th, &s_del_balance, sb, path,
49209 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
49210 int retval;
49211 int quota_cut_bytes = 0;
49212
49213 + pax_track_stack();
49214 +
49215 BUG_ON(!th->t_trans_id);
49216
49217 le_key2cpu_key(&cpu_key, key);
49218 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
49219 int quota_cut_bytes;
49220 loff_t tail_pos = 0;
49221
49222 + pax_track_stack();
49223 +
49224 BUG_ON(!th->t_trans_id);
49225
49226 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
49227 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
49228 int retval;
49229 int fs_gen;
49230
49231 + pax_track_stack();
49232 +
49233 BUG_ON(!th->t_trans_id);
49234
49235 fs_gen = get_generation(inode->i_sb);
49236 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
49237 int fs_gen = 0;
49238 int quota_bytes = 0;
49239
49240 + pax_track_stack();
49241 +
49242 BUG_ON(!th->t_trans_id);
49243
49244 if (inode) { /* Do we count quotas for item? */
49245 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
49246 index 14363b9..dd95a04 100644
49247 --- a/fs/reiserfs/super.c
49248 +++ b/fs/reiserfs/super.c
49249 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
49250 {.option_name = NULL}
49251 };
49252
49253 + pax_track_stack();
49254 +
49255 *blocks = 0;
49256 if (!options || !*options)
49257 /* use default configuration: create tails, journaling on, no
49258 diff --git a/fs/select.c b/fs/select.c
49259 index d33418f..f8e06bc 100644
49260 --- a/fs/select.c
49261 +++ b/fs/select.c
49262 @@ -20,6 +20,7 @@
49263 #include <linux/module.h>
49264 #include <linux/slab.h>
49265 #include <linux/poll.h>
49266 +#include <linux/security.h>
49267 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49268 #include <linux/file.h>
49269 #include <linux/fdtable.h>
49270 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
49271 int retval, i, timed_out = 0;
49272 unsigned long slack = 0;
49273
49274 + pax_track_stack();
49275 +
49276 rcu_read_lock();
49277 retval = max_select_fd(n, fds);
49278 rcu_read_unlock();
49279 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
49280 /* Allocate small arguments on the stack to save memory and be faster */
49281 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
49282
49283 + pax_track_stack();
49284 +
49285 ret = -EINVAL;
49286 if (n < 0)
49287 goto out_nofds;
49288 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49289 struct poll_list *walk = head;
49290 unsigned long todo = nfds;
49291
49292 + pax_track_stack();
49293 +
49294 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49295 if (nfds > rlimit(RLIMIT_NOFILE))
49296 return -EINVAL;
49297
49298 diff --git a/fs/seq_file.c b/fs/seq_file.c
49299 index dba43c3..a99fb63 100644
49300 --- a/fs/seq_file.c
49301 +++ b/fs/seq_file.c
49302 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
49303 return 0;
49304 }
49305 if (!m->buf) {
49306 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49307 + m->size = PAGE_SIZE;
49308 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49309 if (!m->buf)
49310 return -ENOMEM;
49311 }
49312 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
49313 Eoverflow:
49314 m->op->stop(m, p);
49315 kfree(m->buf);
49316 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49317 + m->size <<= 1;
49318 + m->buf = kmalloc(m->size, GFP_KERNEL);
49319 return !m->buf ? -ENOMEM : -EAGAIN;
49320 }
49321
49322 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49323 m->version = file->f_version;
49324 /* grab buffer if we didn't have one */
49325 if (!m->buf) {
49326 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49327 + m->size = PAGE_SIZE;
49328 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49329 if (!m->buf)
49330 goto Enomem;
49331 }
49332 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49333 goto Fill;
49334 m->op->stop(m, p);
49335 kfree(m->buf);
49336 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49337 + m->size <<= 1;
49338 + m->buf = kmalloc(m->size, GFP_KERNEL);
49339 if (!m->buf)
49340 goto Enomem;
49341 m->count = 0;
49342 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
49343 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49344 void *data)
49345 {
49346 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49347 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49348 int res = -ENOMEM;
49349
49350 if (op) {
49351 diff --git a/fs/splice.c b/fs/splice.c
49352 index fa2defa..9a697a5 100644
49353 --- a/fs/splice.c
49354 +++ b/fs/splice.c
49355 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49356 pipe_lock(pipe);
49357
49358 for (;;) {
49359 - if (!pipe->readers) {
49360 + if (!atomic_read(&pipe->readers)) {
49361 send_sig(SIGPIPE, current, 0);
49362 if (!ret)
49363 ret = -EPIPE;
49364 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49365 do_wakeup = 0;
49366 }
49367
49368 - pipe->waiting_writers++;
49369 + atomic_inc(&pipe->waiting_writers);
49370 pipe_wait(pipe);
49371 - pipe->waiting_writers--;
49372 + atomic_dec(&pipe->waiting_writers);
49373 }
49374
49375 pipe_unlock(pipe);
49376 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
49377 .spd_release = spd_release_page,
49378 };
49379
49380 + pax_track_stack();
49381 +
49382 if (splice_grow_spd(pipe, &spd))
49383 return -ENOMEM;
49384
49385 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49386 old_fs = get_fs();
49387 set_fs(get_ds());
49388 /* The cast to a user pointer is valid due to the set_fs() */
49389 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49390 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49391 set_fs(old_fs);
49392
49393 return res;
49394 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49395 old_fs = get_fs();
49396 set_fs(get_ds());
49397 /* The cast to a user pointer is valid due to the set_fs() */
49398 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49399 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49400 set_fs(old_fs);
49401
49402 return res;
49403 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49404 .spd_release = spd_release_page,
49405 };
49406
49407 + pax_track_stack();
49408 +
49409 if (splice_grow_spd(pipe, &spd))
49410 return -ENOMEM;
49411
49412 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49413 goto err;
49414
49415 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49416 - vec[i].iov_base = (void __user *) page_address(page);
49417 + vec[i].iov_base = (void __force_user *) page_address(page);
49418 vec[i].iov_len = this_len;
49419 spd.pages[i] = page;
49420 spd.nr_pages++;
49421 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49422 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49423 {
49424 while (!pipe->nrbufs) {
49425 - if (!pipe->writers)
49426 + if (!atomic_read(&pipe->writers))
49427 return 0;
49428
49429 - if (!pipe->waiting_writers && sd->num_spliced)
49430 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49431 return 0;
49432
49433 if (sd->flags & SPLICE_F_NONBLOCK)
49434 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49435 * out of the pipe right after the splice_to_pipe(). So set
49436 * PIPE_READERS appropriately.
49437 */
49438 - pipe->readers = 1;
49439 + atomic_set(&pipe->readers, 1);
49440
49441 current->splice_pipe = pipe;
49442 }
49443 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
49444 };
49445 long ret;
49446
49447 + pax_track_stack();
49448 +
49449 pipe = get_pipe_info(file);
49450 if (!pipe)
49451 return -EBADF;
49452 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49453 ret = -ERESTARTSYS;
49454 break;
49455 }
49456 - if (!pipe->writers)
49457 + if (!atomic_read(&pipe->writers))
49458 break;
49459 - if (!pipe->waiting_writers) {
49460 + if (!atomic_read(&pipe->waiting_writers)) {
49461 if (flags & SPLICE_F_NONBLOCK) {
49462 ret = -EAGAIN;
49463 break;
49464 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49465 pipe_lock(pipe);
49466
49467 while (pipe->nrbufs >= pipe->buffers) {
49468 - if (!pipe->readers) {
49469 + if (!atomic_read(&pipe->readers)) {
49470 send_sig(SIGPIPE, current, 0);
49471 ret = -EPIPE;
49472 break;
49473 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49474 ret = -ERESTARTSYS;
49475 break;
49476 }
49477 - pipe->waiting_writers++;
49478 + atomic_inc(&pipe->waiting_writers);
49479 pipe_wait(pipe);
49480 - pipe->waiting_writers--;
49481 + atomic_dec(&pipe->waiting_writers);
49482 }
49483
49484 pipe_unlock(pipe);
49485 @@ -1819,14 +1825,14 @@ retry:
49486 pipe_double_lock(ipipe, opipe);
49487
49488 do {
49489 - if (!opipe->readers) {
49490 + if (!atomic_read(&opipe->readers)) {
49491 send_sig(SIGPIPE, current, 0);
49492 if (!ret)
49493 ret = -EPIPE;
49494 break;
49495 }
49496
49497 - if (!ipipe->nrbufs && !ipipe->writers)
49498 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49499 break;
49500
49501 /*
49502 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49503 pipe_double_lock(ipipe, opipe);
49504
49505 do {
49506 - if (!opipe->readers) {
49507 + if (!atomic_read(&opipe->readers)) {
49508 send_sig(SIGPIPE, current, 0);
49509 if (!ret)
49510 ret = -EPIPE;
49511 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49512 * return EAGAIN if we have the potential of some data in the
49513 * future, otherwise just return 0
49514 */
49515 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49516 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49517 ret = -EAGAIN;
49518
49519 pipe_unlock(ipipe);
49520 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49521 index 1ad8c93..6633545 100644
49522 --- a/fs/sysfs/file.c
49523 +++ b/fs/sysfs/file.c
49524 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49525
49526 struct sysfs_open_dirent {
49527 atomic_t refcnt;
49528 - atomic_t event;
49529 + atomic_unchecked_t event;
49530 wait_queue_head_t poll;
49531 struct list_head buffers; /* goes through sysfs_buffer.list */
49532 };
49533 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49534 if (!sysfs_get_active(attr_sd))
49535 return -ENODEV;
49536
49537 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49538 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49539 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49540
49541 sysfs_put_active(attr_sd);
49542 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49543 return -ENOMEM;
49544
49545 atomic_set(&new_od->refcnt, 0);
49546 - atomic_set(&new_od->event, 1);
49547 + atomic_set_unchecked(&new_od->event, 1);
49548 init_waitqueue_head(&new_od->poll);
49549 INIT_LIST_HEAD(&new_od->buffers);
49550 goto retry;
49551 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49552
49553 sysfs_put_active(attr_sd);
49554
49555 - if (buffer->event != atomic_read(&od->event))
49556 + if (buffer->event != atomic_read_unchecked(&od->event))
49557 goto trigger;
49558
49559 return DEFAULT_POLLMASK;
49560 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49561
49562 od = sd->s_attr.open;
49563 if (od) {
49564 - atomic_inc(&od->event);
49565 + atomic_inc_unchecked(&od->event);
49566 wake_up_interruptible(&od->poll);
49567 }
49568
49569 diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
49570 index e34f0d9..740ea7b 100644
49571 --- a/fs/sysfs/mount.c
49572 +++ b/fs/sysfs/mount.c
49573 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
49574 .s_name = "",
49575 .s_count = ATOMIC_INIT(1),
49576 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
49577 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49578 + .s_mode = S_IFDIR | S_IRWXU,
49579 +#else
49580 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49581 +#endif
49582 .s_ino = 1,
49583 };
49584
49585 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49586 index a7ac78f..02158e1 100644
49587 --- a/fs/sysfs/symlink.c
49588 +++ b/fs/sysfs/symlink.c
49589 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49590
49591 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49592 {
49593 - char *page = nd_get_link(nd);
49594 + const char *page = nd_get_link(nd);
49595 if (!IS_ERR(page))
49596 free_page((unsigned long)page);
49597 }
49598 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
49599 index 1d1358e..408bedb 100644
49600 --- a/fs/udf/inode.c
49601 +++ b/fs/udf/inode.c
49602 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
49603 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
49604 int lastblock = 0;
49605
49606 + pax_track_stack();
49607 +
49608 prev_epos.offset = udf_file_entry_alloc_offset(inode);
49609 prev_epos.block = iinfo->i_location;
49610 prev_epos.bh = NULL;
49611 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49612 index 9215700..bf1f68e 100644
49613 --- a/fs/udf/misc.c
49614 +++ b/fs/udf/misc.c
49615 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49616
49617 u8 udf_tag_checksum(const struct tag *t)
49618 {
49619 - u8 *data = (u8 *)t;
49620 + const u8 *data = (const u8 *)t;
49621 u8 checksum = 0;
49622 int i;
49623 for (i = 0; i < sizeof(struct tag); ++i)
49624 diff --git a/fs/utimes.c b/fs/utimes.c
49625 index ba653f3..06ea4b1 100644
49626 --- a/fs/utimes.c
49627 +++ b/fs/utimes.c
49628 @@ -1,6 +1,7 @@
49629 #include <linux/compiler.h>
49630 #include <linux/file.h>
49631 #include <linux/fs.h>
49632 +#include <linux/security.h>
49633 #include <linux/linkage.h>
49634 #include <linux/mount.h>
49635 #include <linux/namei.h>
49636 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49637 goto mnt_drop_write_and_out;
49638 }
49639 }
49640 +
49641 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49642 + error = -EACCES;
49643 + goto mnt_drop_write_and_out;
49644 + }
49645 +
49646 mutex_lock(&inode->i_mutex);
49647 error = notify_change(path->dentry, &newattrs);
49648 mutex_unlock(&inode->i_mutex);
49649 diff --git a/fs/xattr.c b/fs/xattr.c
49650 index f060663..def7007 100644
49651 --- a/fs/xattr.c
49652 +++ b/fs/xattr.c
49653 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49654 * Extended attribute SET operations
49655 */
49656 static long
49657 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49658 +setxattr(struct path *path, const char __user *name, const void __user *value,
49659 size_t size, int flags)
49660 {
49661 int error;
49662 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49663 return PTR_ERR(kvalue);
49664 }
49665
49666 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49667 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49668 + error = -EACCES;
49669 + goto out;
49670 + }
49671 +
49672 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49673 +out:
49674 kfree(kvalue);
49675 return error;
49676 }
49677 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49678 return error;
49679 error = mnt_want_write(path.mnt);
49680 if (!error) {
49681 - error = setxattr(path.dentry, name, value, size, flags);
49682 + error = setxattr(&path, name, value, size, flags);
49683 mnt_drop_write(path.mnt);
49684 }
49685 path_put(&path);
49686 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49687 return error;
49688 error = mnt_want_write(path.mnt);
49689 if (!error) {
49690 - error = setxattr(path.dentry, name, value, size, flags);
49691 + error = setxattr(&path, name, value, size, flags);
49692 mnt_drop_write(path.mnt);
49693 }
49694 path_put(&path);
49695 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49696 const void __user *,value, size_t, size, int, flags)
49697 {
49698 struct file *f;
49699 - struct dentry *dentry;
49700 int error = -EBADF;
49701
49702 f = fget(fd);
49703 if (!f)
49704 return error;
49705 - dentry = f->f_path.dentry;
49706 - audit_inode(NULL, dentry);
49707 + audit_inode(NULL, f->f_path.dentry);
49708 error = mnt_want_write_file(f);
49709 if (!error) {
49710 - error = setxattr(dentry, name, value, size, flags);
49711 + error = setxattr(&f->f_path, name, value, size, flags);
49712 mnt_drop_write(f->f_path.mnt);
49713 }
49714 fput(f);
49715 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49716 index 8d5a506..7f62712 100644
49717 --- a/fs/xattr_acl.c
49718 +++ b/fs/xattr_acl.c
49719 @@ -17,8 +17,8 @@
49720 struct posix_acl *
49721 posix_acl_from_xattr(const void *value, size_t size)
49722 {
49723 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49724 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49725 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49726 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49727 int count;
49728 struct posix_acl *acl;
49729 struct posix_acl_entry *acl_e;
49730 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49731 index 452a291..91a95f3b 100644
49732 --- a/fs/xfs/xfs_bmap.c
49733 +++ b/fs/xfs/xfs_bmap.c
49734 @@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
49735 int nmap,
49736 int ret_nmap);
49737 #else
49738 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49739 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49740 #endif /* DEBUG */
49741
49742 STATIC int
49743 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49744 index 79d05e8..e3e5861 100644
49745 --- a/fs/xfs/xfs_dir2_sf.c
49746 +++ b/fs/xfs/xfs_dir2_sf.c
49747 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49748 }
49749
49750 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49751 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49752 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49753 + char name[sfep->namelen];
49754 + memcpy(name, sfep->name, sfep->namelen);
49755 + if (filldir(dirent, name, sfep->namelen,
49756 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49757 + *offset = off & 0x7fffffff;
49758 + return 0;
49759 + }
49760 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49761 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49762 *offset = off & 0x7fffffff;
49763 return 0;
49764 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49765 index f7ce7de..e1a5db0 100644
49766 --- a/fs/xfs/xfs_ioctl.c
49767 +++ b/fs/xfs/xfs_ioctl.c
49768 @@ -128,7 +128,7 @@ xfs_find_handle(
49769 }
49770
49771 error = -EFAULT;
49772 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49773 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49774 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49775 goto out_put;
49776
49777 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49778 index 474920b..97169a9 100644
49779 --- a/fs/xfs/xfs_iops.c
49780 +++ b/fs/xfs/xfs_iops.c
49781 @@ -446,7 +446,7 @@ xfs_vn_put_link(
49782 struct nameidata *nd,
49783 void *p)
49784 {
49785 - char *s = nd_get_link(nd);
49786 + const char *s = nd_get_link(nd);
49787
49788 if (!IS_ERR(s))
49789 kfree(s);
49790 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49791 new file mode 100644
49792 index 0000000..4639511
49793 --- /dev/null
49794 +++ b/grsecurity/Kconfig
49795 @@ -0,0 +1,1051 @@
49796 +#
49797 +# grecurity configuration
49798 +#
49799 +
49800 +menu "Grsecurity"
49801 +
49802 +config GRKERNSEC
49803 + bool "Grsecurity"
49804 + select CRYPTO
49805 + select CRYPTO_SHA256
49806 + help
49807 + If you say Y here, you will be able to configure many features
49808 + that will enhance the security of your system. It is highly
49809 + recommended that you say Y here and read through the help
49810 + for each option so that you fully understand the features and
49811 + can evaluate their usefulness for your machine.
49812 +
49813 +choice
49814 + prompt "Security Level"
49815 + depends on GRKERNSEC
49816 + default GRKERNSEC_CUSTOM
49817 +
49818 +config GRKERNSEC_LOW
49819 + bool "Low"
49820 + select GRKERNSEC_LINK
49821 + select GRKERNSEC_FIFO
49822 + select GRKERNSEC_RANDNET
49823 + select GRKERNSEC_DMESG
49824 + select GRKERNSEC_CHROOT
49825 + select GRKERNSEC_CHROOT_CHDIR
49826 +
49827 + help
49828 + If you choose this option, several of the grsecurity options will
49829 + be enabled that will give you greater protection against a number
49830 + of attacks, while assuring that none of your software will have any
49831 + conflicts with the additional security measures. If you run a lot
49832 + of unusual software, or you are having problems with the higher
49833 + security levels, you should say Y here. With this option, the
49834 + following features are enabled:
49835 +
49836 + - Linking restrictions
49837 + - FIFO restrictions
49838 + - Restricted dmesg
49839 + - Enforced chdir("/") on chroot
49840 + - Runtime module disabling
49841 +
49842 +config GRKERNSEC_MEDIUM
49843 + bool "Medium"
49844 + select PAX
49845 + select PAX_EI_PAX
49846 + select PAX_PT_PAX_FLAGS
49847 + select PAX_HAVE_ACL_FLAGS
49848 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49849 + select GRKERNSEC_CHROOT
49850 + select GRKERNSEC_CHROOT_SYSCTL
49851 + select GRKERNSEC_LINK
49852 + select GRKERNSEC_FIFO
49853 + select GRKERNSEC_DMESG
49854 + select GRKERNSEC_RANDNET
49855 + select GRKERNSEC_FORKFAIL
49856 + select GRKERNSEC_TIME
49857 + select GRKERNSEC_SIGNAL
49858 + select GRKERNSEC_CHROOT
49859 + select GRKERNSEC_CHROOT_UNIX
49860 + select GRKERNSEC_CHROOT_MOUNT
49861 + select GRKERNSEC_CHROOT_PIVOT
49862 + select GRKERNSEC_CHROOT_DOUBLE
49863 + select GRKERNSEC_CHROOT_CHDIR
49864 + select GRKERNSEC_CHROOT_MKNOD
49865 + select GRKERNSEC_PROC
49866 + select GRKERNSEC_PROC_USERGROUP
49867 + select PAX_RANDUSTACK
49868 + select PAX_ASLR
49869 + select PAX_RANDMMAP
49870 + select PAX_REFCOUNT if (X86 || SPARC64)
49871 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49872 +
49873 + help
49874 + If you say Y here, several features in addition to those included
49875 + in the low additional security level will be enabled. These
49876 + features provide even more security to your system, though in rare
49877 + cases they may be incompatible with very old or poorly written
49878 + software. If you enable this option, make sure that your auth
49879 + service (identd) is running as gid 1001. With this option,
49880 + the following features (in addition to those provided in the
49881 + low additional security level) will be enabled:
49882 +
49883 + - Failed fork logging
49884 + - Time change logging
49885 + - Signal logging
49886 + - Deny mounts in chroot
49887 + - Deny double chrooting
49888 + - Deny sysctl writes in chroot
49889 + - Deny mknod in chroot
49890 + - Deny access to abstract AF_UNIX sockets out of chroot
49891 + - Deny pivot_root in chroot
49892 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49893 + - /proc restrictions with special GID set to 10 (usually wheel)
49894 + - Address Space Layout Randomization (ASLR)
49895 + - Prevent exploitation of most refcount overflows
49896 + - Bounds checking of copying between the kernel and userland
49897 +
49898 +config GRKERNSEC_HIGH
49899 + bool "High"
49900 + select GRKERNSEC_LINK
49901 + select GRKERNSEC_FIFO
49902 + select GRKERNSEC_DMESG
49903 + select GRKERNSEC_FORKFAIL
49904 + select GRKERNSEC_TIME
49905 + select GRKERNSEC_SIGNAL
49906 + select GRKERNSEC_CHROOT
49907 + select GRKERNSEC_CHROOT_SHMAT
49908 + select GRKERNSEC_CHROOT_UNIX
49909 + select GRKERNSEC_CHROOT_MOUNT
49910 + select GRKERNSEC_CHROOT_FCHDIR
49911 + select GRKERNSEC_CHROOT_PIVOT
49912 + select GRKERNSEC_CHROOT_DOUBLE
49913 + select GRKERNSEC_CHROOT_CHDIR
49914 + select GRKERNSEC_CHROOT_MKNOD
49915 + select GRKERNSEC_CHROOT_CAPS
49916 + select GRKERNSEC_CHROOT_SYSCTL
49917 + select GRKERNSEC_CHROOT_FINDTASK
49918 + select GRKERNSEC_SYSFS_RESTRICT
49919 + select GRKERNSEC_PROC
49920 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49921 + select GRKERNSEC_HIDESYM
49922 + select GRKERNSEC_BRUTE
49923 + select GRKERNSEC_PROC_USERGROUP
49924 + select GRKERNSEC_KMEM
49925 + select GRKERNSEC_RESLOG
49926 + select GRKERNSEC_RANDNET
49927 + select GRKERNSEC_PROC_ADD
49928 + select GRKERNSEC_CHROOT_CHMOD
49929 + select GRKERNSEC_CHROOT_NICE
49930 + select GRKERNSEC_SETXID
49931 + select GRKERNSEC_AUDIT_MOUNT
49932 + select GRKERNSEC_MODHARDEN if (MODULES)
49933 + select GRKERNSEC_HARDEN_PTRACE
49934 + select GRKERNSEC_VM86 if (X86_32)
49935 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49936 + select PAX
49937 + select PAX_RANDUSTACK
49938 + select PAX_ASLR
49939 + select PAX_RANDMMAP
49940 + select PAX_NOEXEC
49941 + select PAX_MPROTECT
49942 + select PAX_EI_PAX
49943 + select PAX_PT_PAX_FLAGS
49944 + select PAX_HAVE_ACL_FLAGS
49945 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49946 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49947 + select PAX_RANDKSTACK if (X86_TSC && X86)
49948 + select PAX_SEGMEXEC if (X86_32)
49949 + select PAX_PAGEEXEC
49950 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49951 + select PAX_EMUTRAMP if (PARISC)
49952 + select PAX_EMUSIGRT if (PARISC)
49953 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49954 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49955 + select PAX_REFCOUNT if (X86 || SPARC64)
49956 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49957 + help
49958 + If you say Y here, many of the features of grsecurity will be
49959 + enabled, which will protect you against many kinds of attacks
49960 + against your system. The heightened security comes at a cost
49961 + of an increased chance of incompatibilities with rare software
49962 + on your machine. Since this security level enables PaX, you should
49963 + view <http://pax.grsecurity.net> and read about the PaX
49964 + project. While you are there, download chpax and run it on
49965 + binaries that cause problems with PaX. Also remember that
49966 + since the /proc restrictions are enabled, you must run your
49967 + identd as gid 1001. This security level enables the following
49968 + features in addition to those listed in the low and medium
49969 + security levels:
49970 +
49971 + - Additional /proc restrictions
49972 + - Chmod restrictions in chroot
49973 + - No signals, ptrace, or viewing of processes outside of chroot
49974 + - Capability restrictions in chroot
49975 + - Deny fchdir out of chroot
49976 + - Priority restrictions in chroot
49977 + - Segmentation-based implementation of PaX
49978 + - Mprotect restrictions
49979 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49980 + - Kernel stack randomization
49981 + - Mount/unmount/remount logging
49982 + - Kernel symbol hiding
49983 + - Hardening of module auto-loading
49984 + - Ptrace restrictions
49985 + - Restricted vm86 mode
49986 + - Restricted sysfs/debugfs
49987 + - Active kernel exploit response
49988 +
49989 +config GRKERNSEC_CUSTOM
49990 + bool "Custom"
49991 + help
49992 + If you say Y here, you will be able to configure every grsecurity
49993 + option, which allows you to enable many more features that aren't
49994 + covered in the basic security levels. These additional features
49995 + include TPE, socket restrictions, and the sysctl system for
49996 + grsecurity. It is advised that you read through the help for
49997 + each option to determine its usefulness in your situation.
49998 +
49999 +endchoice
50000 +
50001 +menu "Address Space Protection"
50002 +depends on GRKERNSEC
50003 +
50004 +config GRKERNSEC_KMEM
50005 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50006 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50007 + help
50008 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50009 + be written to or read from to modify or leak the contents of the running
50010 + kernel. /dev/port will also not be allowed to be opened. If you have module
50011 + support disabled, enabling this will close up four ways that are
50012 + currently used to insert malicious code into the running kernel.
50013 + Even with all these features enabled, we still highly recommend that
50014 + you use the RBAC system, as it is still possible for an attacker to
50015 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50016 + If you are not using XFree86, you may be able to stop this additional
50017 + case by enabling the 'Disable privileged I/O' option. Though nothing
50018 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50019 + but only to video memory, which is the only writing we allow in this
50020 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50021 + not be allowed to mprotect it with PROT_WRITE later.
50022 + It is highly recommended that you say Y here if you meet all the
50023 + conditions above.
50024 +
50025 +config GRKERNSEC_VM86
50026 + bool "Restrict VM86 mode"
50027 + depends on X86_32
50028 +
50029 + help
50030 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50031 + make use of a special execution mode on 32bit x86 processors called
50032 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50033 + video cards and will still work with this option enabled. The purpose
50034 + of the option is to prevent exploitation of emulation errors in
50035 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50036 + Nearly all users should be able to enable this option.
50037 +
50038 +config GRKERNSEC_IO
50039 + bool "Disable privileged I/O"
50040 + depends on X86
50041 + select RTC_CLASS
50042 + select RTC_INTF_DEV
50043 + select RTC_DRV_CMOS
50044 +
50045 + help
50046 + If you say Y here, all ioperm and iopl calls will return an error.
50047 + Ioperm and iopl can be used to modify the running kernel.
50048 + Unfortunately, some programs need this access to operate properly,
50049 + the most notable of which are XFree86 and hwclock. hwclock can be
50050 + remedied by having RTC support in the kernel, so real-time
50051 + clock support is enabled if this option is enabled, to ensure
50052 + that hwclock operates correctly. XFree86 still will not
50053 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50054 + IF YOU USE XFree86. If you use XFree86 and you still want to
50055 + protect your kernel against modification, use the RBAC system.
50056 +
50057 +config GRKERNSEC_PROC_MEMMAP
50058 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50059 + default y if (PAX_NOEXEC || PAX_ASLR)
50060 + depends on PAX_NOEXEC || PAX_ASLR
50061 + help
50062 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50063 + give no information about the addresses of their mappings if
50064 + PaX features that rely on random addresses are enabled on the task.
50065 + If you use PaX it is greatly recommended that you say Y here as it
50066 + closes up a hole that makes the full ASLR useless for suid
50067 + binaries.
50068 +
50069 +config GRKERNSEC_BRUTE
50070 + bool "Deter exploit bruteforcing"
50071 + help
50072 + If you say Y here, attempts to bruteforce exploits against forking
50073 + daemons such as apache or sshd, as well as against suid/sgid binaries
50074 + will be deterred. When a child of a forking daemon is killed by PaX
50075 + or crashes due to an illegal instruction or other suspicious signal,
50076 + the parent process will be delayed 30 seconds upon every subsequent
50077 + fork until the administrator is able to assess the situation and
50078 + restart the daemon.
50079 + In the suid/sgid case, the attempt is logged, the user has all their
50080 + processes terminated, and they are prevented from executing any further
50081 + processes for 15 minutes.
50082 + It is recommended that you also enable signal logging in the auditing
50083 + section so that logs are generated when a process triggers a suspicious
50084 + signal.
50085 + If the sysctl option is enabled, a sysctl option with name
50086 + "deter_bruteforce" is created.
50087 +
50088 +
50089 +config GRKERNSEC_MODHARDEN
50090 + bool "Harden module auto-loading"
50091 + depends on MODULES
50092 + help
50093 + If you say Y here, module auto-loading in response to use of some
50094 + feature implemented by an unloaded module will be restricted to
50095 + root users. Enabling this option helps defend against attacks
50096 + by unprivileged users who abuse the auto-loading behavior to
50097 + cause a vulnerable module to load that is then exploited.
50098 +
50099 + If this option prevents a legitimate use of auto-loading for a
50100 + non-root user, the administrator can execute modprobe manually
50101 + with the exact name of the module mentioned in the alert log.
50102 + Alternatively, the administrator can add the module to the list
50103 + of modules loaded at boot by modifying init scripts.
50104 +
50105 + Modification of init scripts will most likely be needed on
50106 + Ubuntu servers with encrypted home directory support enabled,
50107 + as the first non-root user logging in will cause the ecb(aes),
50108 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50109 +
50110 +config GRKERNSEC_HIDESYM
50111 + bool "Hide kernel symbols"
50112 + help
50113 + If you say Y here, getting information on loaded modules, and
50114 + displaying all kernel symbols through a syscall will be restricted
50115 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50116 + /proc/kallsyms will be restricted to the root user. The RBAC
50117 + system can hide that entry even from root.
50118 +
50119 + This option also prevents leaking of kernel addresses through
50120 + several /proc entries.
50121 +
50122 + Note that this option is only effective provided the following
50123 + conditions are met:
50124 + 1) The kernel using grsecurity is not precompiled by some distribution
50125 + 2) You have also enabled GRKERNSEC_DMESG
50126 + 3) You are using the RBAC system and hiding other files such as your
50127 + kernel image and System.map. Alternatively, enabling this option
50128 + causes the permissions on /boot, /lib/modules, and the kernel
50129 + source directory to change at compile time to prevent
50130 + reading by non-root users.
50131 + If the above conditions are met, this option will aid in providing a
50132 + useful protection against local kernel exploitation of overflows
50133 + and arbitrary read/write vulnerabilities.
50134 +
50135 +config GRKERNSEC_KERN_LOCKOUT
50136 + bool "Active kernel exploit response"
50137 + depends on X86 || ARM || PPC || SPARC
50138 + help
50139 + If you say Y here, when a PaX alert is triggered due to suspicious
50140 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50141 + or an OOPs occurs due to bad memory accesses, instead of just
50142 + terminating the offending process (and potentially allowing
50143 + a subsequent exploit from the same user), we will take one of two
50144 + actions:
50145 + If the user was root, we will panic the system
50146 + If the user was non-root, we will log the attempt, terminate
50147 + all processes owned by the user, then prevent them from creating
50148 + any new processes until the system is restarted
50149 + This deters repeated kernel exploitation/bruteforcing attempts
50150 + and is useful for later forensics.
50151 +
50152 +endmenu
50153 +menu "Role Based Access Control Options"
50154 +depends on GRKERNSEC
50155 +
50156 +config GRKERNSEC_RBAC_DEBUG
50157 + bool
50158 +
50159 +config GRKERNSEC_NO_RBAC
50160 + bool "Disable RBAC system"
50161 + help
50162 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50163 + preventing the RBAC system from being enabled. You should only say Y
50164 + here if you have no intention of using the RBAC system, so as to prevent
50165 + an attacker with root access from misusing the RBAC system to hide files
50166 + and processes when loadable module support and /dev/[k]mem have been
50167 + locked down.
50168 +
50169 +config GRKERNSEC_ACL_HIDEKERN
50170 + bool "Hide kernel processes"
50171 + help
50172 + If you say Y here, all kernel threads will be hidden to all
50173 + processes but those whose subject has the "view hidden processes"
50174 + flag.
50175 +
50176 +config GRKERNSEC_ACL_MAXTRIES
50177 + int "Maximum tries before password lockout"
50178 + default 3
50179 + help
50180 + This option enforces the maximum number of times a user can attempt
50181 + to authorize themselves with the grsecurity RBAC system before being
50182 + denied the ability to attempt authorization again for a specified time.
50183 + The lower the number, the harder it will be to brute-force a password.
50184 +
50185 +config GRKERNSEC_ACL_TIMEOUT
50186 + int "Time to wait after max password tries, in seconds"
50187 + default 30
50188 + help
50189 + This option specifies the time the user must wait after attempting to
50190 + authorize to the RBAC system with the maximum number of invalid
50191 + passwords. The higher the number, the harder it will be to brute-force
50192 + a password.
50193 +
50194 +endmenu
50195 +menu "Filesystem Protections"
50196 +depends on GRKERNSEC
50197 +
50198 +config GRKERNSEC_PROC
50199 + bool "Proc restrictions"
50200 + help
50201 + If you say Y here, the permissions of the /proc filesystem
50202 + will be altered to enhance system security and privacy. You MUST
50203 + choose either a user only restriction or a user and group restriction.
50204 + Depending upon the option you choose, you can either restrict users to
50205 + see only the processes they themselves run, or choose a group that can
50206 + view all processes and files normally restricted to root if you choose
50207 + the "restrict to user only" option. NOTE: If you're running identd as
50208 + a non-root user, you will have to run it as the group you specify here.
50209 +
50210 +config GRKERNSEC_PROC_USER
50211 + bool "Restrict /proc to user only"
50212 + depends on GRKERNSEC_PROC
50213 + help
50214 + If you say Y here, non-root users will only be able to view their own
50215 + processes, and restricts them from viewing network-related information,
50216 + and viewing kernel symbol and module information.
50217 +
50218 +config GRKERNSEC_PROC_USERGROUP
50219 + bool "Allow special group"
50220 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50221 + help
50222 + If you say Y here, you will be able to select a group that will be
50223 + able to view all processes and network-related information. If you've
50224 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50225 + remain hidden. This option is useful if you want to run identd as
50226 + a non-root user.
50227 +
50228 +config GRKERNSEC_PROC_GID
50229 + int "GID for special group"
50230 + depends on GRKERNSEC_PROC_USERGROUP
50231 + default 1001
50232 +
50233 +config GRKERNSEC_PROC_ADD
50234 + bool "Additional restrictions"
50235 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50236 + help
50237 + If you say Y here, additional restrictions will be placed on
50238 + /proc that keep normal users from viewing device information and
50239 + slabinfo information that could be useful for exploits.
50240 +
50241 +config GRKERNSEC_LINK
50242 + bool "Linking restrictions"
50243 + help
50244 + If you say Y here, /tmp race exploits will be prevented, since users
50245 + will no longer be able to follow symlinks owned by other users in
50246 + world-writable +t directories (e.g. /tmp), unless the owner of the
50247 + symlink is the owner of the directory. Users will also not be
50248 + able to hardlink to files they do not own. If the sysctl option is
50249 + enabled, a sysctl option with name "linking_restrictions" is created.
50250 +
50251 +config GRKERNSEC_FIFO
50252 + bool "FIFO restrictions"
50253 + help
50254 + If you say Y here, users will not be able to write to FIFOs they don't
50255 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50256 + the FIFO is the same owner of the directory it's held in. If the sysctl
50257 + option is enabled, a sysctl option with name "fifo_restrictions" is
50258 + created.
50259 +
50260 +config GRKERNSEC_SYSFS_RESTRICT
50261 + bool "Sysfs/debugfs restriction"
50262 + depends on SYSFS
50263 + help
50264 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50265 + any filesystem normally mounted under it (e.g. debugfs) will only
50266 + be accessible by root. These filesystems generally provide access
50267 + to hardware and debug information that isn't appropriate for unprivileged
50268 + users of the system. Sysfs and debugfs have also become a large source
50269 + of new vulnerabilities, ranging from infoleaks to local compromise.
50270 + There has been very little oversight with an eye toward security involved
50271 + in adding new exporters of information to these filesystems, so their
50272 + use is discouraged.
50273 + This option is equivalent to a chmod 0700 of the mount paths.
50274 +
50275 +config GRKERNSEC_ROFS
50276 + bool "Runtime read-only mount protection"
50277 + help
50278 + If you say Y here, a sysctl option with name "romount_protect" will
50279 + be created. By setting this option to 1 at runtime, filesystems
50280 + will be protected in the following ways:
50281 + * No new writable mounts will be allowed
50282 + * Existing read-only mounts won't be able to be remounted read/write
50283 + * Write operations will be denied on all block devices
50284 + This option acts independently of grsec_lock: once it is set to 1,
50285 + it cannot be turned off. Therefore, please be mindful of the resulting
50286 + behavior if this option is enabled in an init script on a read-only
50287 + filesystem. This feature is mainly intended for secure embedded systems.
50288 +
50289 +config GRKERNSEC_CHROOT
50290 + bool "Chroot jail restrictions"
50291 + help
50292 + If you say Y here, you will be able to choose several options that will
50293 + make breaking out of a chrooted jail much more difficult. If you
50294 + encounter no software incompatibilities with the following options, it
50295 + is recommended that you enable each one.
50296 +
50297 +config GRKERNSEC_CHROOT_MOUNT
50298 + bool "Deny mounts"
50299 + depends on GRKERNSEC_CHROOT
50300 + help
50301 + If you say Y here, processes inside a chroot will not be able to
50302 + mount or remount filesystems. If the sysctl option is enabled, a
50303 + sysctl option with name "chroot_deny_mount" is created.
50304 +
50305 +config GRKERNSEC_CHROOT_DOUBLE
50306 + bool "Deny double-chroots"
50307 + depends on GRKERNSEC_CHROOT
50308 + help
50309 + If you say Y here, processes inside a chroot will not be able to chroot
50310 + again outside the chroot. This is a widely used method of breaking
50311 + out of a chroot jail and should not be allowed. If the sysctl
50312 + option is enabled, a sysctl option with name
50313 + "chroot_deny_chroot" is created.
50314 +
50315 +config GRKERNSEC_CHROOT_PIVOT
50316 + bool "Deny pivot_root in chroot"
50317 + depends on GRKERNSEC_CHROOT
50318 + help
50319 + If you say Y here, processes inside a chroot will not be able to use
50320 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50321 + works similar to chroot in that it changes the root filesystem. This
50322 + function could be misused in a chrooted process to attempt to break out
50323 + of the chroot, and therefore should not be allowed. If the sysctl
50324 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50325 + created.
50326 +
50327 +config GRKERNSEC_CHROOT_CHDIR
50328 + bool "Enforce chdir(\"/\") on all chroots"
50329 + depends on GRKERNSEC_CHROOT
50330 + help
50331 + If you say Y here, the current working directory of all newly-chrooted
50332 + applications will be set to the root directory of the chroot.
50333 + The man page on chroot(2) states:
50334 + Note that this call does not change the current working
50335 + directory, so that `.' can be outside the tree rooted at
50336 + `/'. In particular, the super-user can escape from a
50337 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50338 +
50339 + It is recommended that you say Y here, since it's not known to break
50340 + any software. If the sysctl option is enabled, a sysctl option with
50341 + name "chroot_enforce_chdir" is created.
50342 +
50343 +config GRKERNSEC_CHROOT_CHMOD
50344 + bool "Deny (f)chmod +s"
50345 + depends on GRKERNSEC_CHROOT
50346 + help
50347 + If you say Y here, processes inside a chroot will not be able to chmod
50348 + or fchmod files to make them have suid or sgid bits. This protects
50349 + against another published method of breaking a chroot. If the sysctl
50350 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50351 + created.
50352 +
50353 +config GRKERNSEC_CHROOT_FCHDIR
50354 + bool "Deny fchdir out of chroot"
50355 + depends on GRKERNSEC_CHROOT
50356 + help
50357 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50358 + to a file descriptor of the chrooting process that points to a directory
50359 + outside the filesystem will be stopped. If the sysctl option
50360 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50361 +
50362 +config GRKERNSEC_CHROOT_MKNOD
50363 + bool "Deny mknod"
50364 + depends on GRKERNSEC_CHROOT
50365 + help
50366 + If you say Y here, processes inside a chroot will not be allowed to
50367 + mknod. The problem with using mknod inside a chroot is that it
50368 + would allow an attacker to create a device entry that is the same
50369 + as one on the physical root of your system, which could range from
50370 + anything from the console device to a device for your harddrive (which
50371 + they could then use to wipe the drive or steal data). It is recommended
50372 + that you say Y here, unless you run into software incompatibilities.
50373 + If the sysctl option is enabled, a sysctl option with name
50374 + "chroot_deny_mknod" is created.
50375 +
50376 +config GRKERNSEC_CHROOT_SHMAT
50377 + bool "Deny shmat() out of chroot"
50378 + depends on GRKERNSEC_CHROOT
50379 + help
50380 + If you say Y here, processes inside a chroot will not be able to attach
50381 + to shared memory segments that were created outside of the chroot jail.
50382 + It is recommended that you say Y here. If the sysctl option is enabled,
50383 + a sysctl option with name "chroot_deny_shmat" is created.
50384 +
50385 +config GRKERNSEC_CHROOT_UNIX
50386 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50387 + depends on GRKERNSEC_CHROOT
50388 + help
50389 + If you say Y here, processes inside a chroot will not be able to
50390 + connect to abstract (meaning not belonging to a filesystem) Unix
50391 + domain sockets that were bound outside of a chroot. It is recommended
50392 + that you say Y here. If the sysctl option is enabled, a sysctl option
50393 + with name "chroot_deny_unix" is created.
50394 +
50395 +config GRKERNSEC_CHROOT_FINDTASK
50396 + bool "Protect outside processes"
50397 + depends on GRKERNSEC_CHROOT
50398 + help
50399 + If you say Y here, processes inside a chroot will not be able to
50400 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50401 + getsid, or view any process outside of the chroot. If the sysctl
50402 + option is enabled, a sysctl option with name "chroot_findtask" is
50403 + created.
50404 +
50405 +config GRKERNSEC_CHROOT_NICE
50406 + bool "Restrict priority changes"
50407 + depends on GRKERNSEC_CHROOT
50408 + help
50409 + If you say Y here, processes inside a chroot will not be able to raise
50410 + the priority of processes in the chroot, or alter the priority of
50411 + processes outside the chroot. This provides more security than simply
50412 + removing CAP_SYS_NICE from the process' capability set. If the
50413 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50414 + is created.
50415 +
50416 +config GRKERNSEC_CHROOT_SYSCTL
50417 + bool "Deny sysctl writes"
50418 + depends on GRKERNSEC_CHROOT
50419 + help
50420 + If you say Y here, an attacker in a chroot will not be able to
50421 + write to sysctl entries, either by sysctl(2) or through a /proc
50422 + interface. It is strongly recommended that you say Y here. If the
50423 + sysctl option is enabled, a sysctl option with name
50424 + "chroot_deny_sysctl" is created.
50425 +
50426 +config GRKERNSEC_CHROOT_CAPS
50427 + bool "Capability restrictions"
50428 + depends on GRKERNSEC_CHROOT
50429 + help
50430 + If you say Y here, the capabilities on all processes within a
50431 + chroot jail will be lowered to stop module insertion, raw i/o,
50432 + system and net admin tasks, rebooting the system, modifying immutable
50433 + files, modifying IPC owned by another, and changing the system time.
50434 + This is left an option because it can break some apps. Disable this
50435 + if your chrooted apps are having problems performing those kinds of
50436 + tasks. If the sysctl option is enabled, a sysctl option with
50437 + name "chroot_caps" is created.
50438 +
50439 +endmenu
50440 +menu "Kernel Auditing"
50441 +depends on GRKERNSEC
50442 +
50443 +config GRKERNSEC_AUDIT_GROUP
50444 + bool "Single group for auditing"
50445 + help
50446 + If you say Y here, the exec, chdir, and (un)mount logging features
50447 + will only operate on a group you specify. This option is recommended
50448 + if you only want to watch certain users instead of having a large
50449 + amount of logs from the entire system. If the sysctl option is enabled,
50450 + a sysctl option with name "audit_group" is created.
50451 +
50452 +config GRKERNSEC_AUDIT_GID
50453 + int "GID for auditing"
50454 + depends on GRKERNSEC_AUDIT_GROUP
50455 + default 1007
50456 +
50457 +config GRKERNSEC_EXECLOG
50458 + bool "Exec logging"
50459 + help
50460 + If you say Y here, all execve() calls will be logged (since the
50461 + other exec*() calls are frontends to execve(), all execution
50462 + will be logged). Useful for shell-servers that like to keep track
50463 + of their users. If the sysctl option is enabled, a sysctl option with
50464 + name "exec_logging" is created.
50465 + WARNING: This option when enabled will produce a LOT of logs, especially
50466 + on an active system.
50467 +
50468 +config GRKERNSEC_RESLOG
50469 + bool "Resource logging"
50470 + help
50471 + If you say Y here, all attempts to overstep resource limits will
50472 + be logged with the resource name, the requested size, and the current
50473 + limit. It is highly recommended that you say Y here. If the sysctl
50474 + option is enabled, a sysctl option with name "resource_logging" is
50475 + created. If the RBAC system is enabled, the sysctl value is ignored.
50476 +
50477 +config GRKERNSEC_CHROOT_EXECLOG
50478 + bool "Log execs within chroot"
50479 + help
50480 + If you say Y here, all executions inside a chroot jail will be logged
50481 + to syslog. This can cause a large amount of logs if certain
50482 + applications (eg. djb's daemontools) are installed on the system, and
50483 + is therefore left as an option. If the sysctl option is enabled, a
50484 + sysctl option with name "chroot_execlog" is created.
50485 +
50486 +config GRKERNSEC_AUDIT_PTRACE
50487 + bool "Ptrace logging"
50488 + help
50489 + If you say Y here, all attempts to attach to a process via ptrace
50490 + will be logged. If the sysctl option is enabled, a sysctl option
50491 + with name "audit_ptrace" is created.
50492 +
50493 +config GRKERNSEC_AUDIT_CHDIR
50494 + bool "Chdir logging"
50495 + help
50496 + If you say Y here, all chdir() calls will be logged. If the sysctl
50497 + option is enabled, a sysctl option with name "audit_chdir" is created.
50498 +
50499 +config GRKERNSEC_AUDIT_MOUNT
50500 + bool "(Un)Mount logging"
50501 + help
50502 + If you say Y here, all mounts and unmounts will be logged. If the
50503 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50504 + created.
50505 +
50506 +config GRKERNSEC_SIGNAL
50507 + bool "Signal logging"
50508 + help
50509 + If you say Y here, certain important signals will be logged, such as
50510 + SIGSEGV, which will as a result inform you of when an error in a program
50511 + occurred, which in some cases could mean a possible exploit attempt.
50512 + If the sysctl option is enabled, a sysctl option with name
50513 + "signal_logging" is created.
50514 +
50515 +config GRKERNSEC_FORKFAIL
50516 + bool "Fork failure logging"
50517 + help
50518 + If you say Y here, all failed fork() attempts will be logged.
50519 + This could suggest a fork bomb, or someone attempting to overstep
50520 + their process limit. If the sysctl option is enabled, a sysctl option
50521 + with name "forkfail_logging" is created.
50522 +
50523 +config GRKERNSEC_TIME
50524 + bool "Time change logging"
50525 + help
50526 + If you say Y here, any changes of the system clock will be logged.
50527 + If the sysctl option is enabled, a sysctl option with name
50528 + "timechange_logging" is created.
50529 +
50530 +config GRKERNSEC_PROC_IPADDR
50531 + bool "/proc/<pid>/ipaddr support"
50532 + help
50533 + If you say Y here, a new entry will be added to each /proc/<pid>
50534 + directory that contains the IP address of the person using the task.
50535 + The IP is carried across local TCP and AF_UNIX stream sockets.
50536 + This information can be useful for IDS/IPSes to perform remote response
50537 + to a local attack. The entry is readable by only the owner of the
50538 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50539 + the RBAC system), and thus does not create privacy concerns.
50540 +
50541 +config GRKERNSEC_RWXMAP_LOG
50542 + bool 'Denied RWX mmap/mprotect logging'
50543 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50544 + help
50545 + If you say Y here, calls to mmap() and mprotect() with explicit
50546 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50547 + denied by the PAX_MPROTECT feature. If the sysctl option is
50548 + enabled, a sysctl option with name "rwxmap_logging" is created.
50549 +
50550 +config GRKERNSEC_AUDIT_TEXTREL
50551 + bool 'ELF text relocations logging (READ HELP)'
50552 + depends on PAX_MPROTECT
50553 + help
50554 + If you say Y here, text relocations will be logged with the filename
50555 + of the offending library or binary. The purpose of the feature is
50556 + to help Linux distribution developers get rid of libraries and
50557 + binaries that need text relocations which hinder the future progress
50558 + of PaX. Only Linux distribution developers should say Y here, and
50559 + never on a production machine, as this option creates an information
50560 + leak that could aid an attacker in defeating the randomization of
50561 + a single memory region. If the sysctl option is enabled, a sysctl
50562 + option with name "audit_textrel" is created.
50563 +
50564 +endmenu
50565 +
50566 +menu "Executable Protections"
50567 +depends on GRKERNSEC
50568 +
50569 +config GRKERNSEC_DMESG
50570 + bool "Dmesg(8) restriction"
50571 + help
50572 + If you say Y here, non-root users will not be able to use dmesg(8)
50573 + to view up to the last 4kb of messages in the kernel's log buffer.
50574 + The kernel's log buffer often contains kernel addresses and other
50575 + identifying information useful to an attacker in fingerprinting a
50576 + system for a targeted exploit.
50577 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50578 + created.
50579 +
50580 +config GRKERNSEC_HARDEN_PTRACE
50581 + bool "Deter ptrace-based process snooping"
50582 + help
50583 + If you say Y here, TTY sniffers and other malicious monitoring
50584 + programs implemented through ptrace will be defeated. If you
50585 + have been using the RBAC system, this option has already been
50586 + enabled for several years for all users, with the ability to make
50587 + fine-grained exceptions.
50588 +
50589 + This option only affects the ability of non-root users to ptrace
50590 + processes that are not a descendent of the ptracing process.
50591 + This means that strace ./binary and gdb ./binary will still work,
50592 + but attaching to arbitrary processes will not. If the sysctl
50593 + option is enabled, a sysctl option with name "harden_ptrace" is
50594 + created.
50595 +
50596 +config GRKERNSEC_SETXID
50597 + bool "Enforce consistent multithreaded privileges"
50598 + help
50599 + If you say Y here, a change from a root uid to a non-root uid
50600 + in a multithreaded application will cause the resulting uids,
50601 + gids, supplementary groups, and capabilities in that thread
50602 + to be propagated to the other threads of the process. In most
50603 + cases this is unnecessary, as glibc will emulate this behavior
50604 + on behalf of the application. Other libcs do not act in the
50605 + same way, allowing the other threads of the process to continue
50606 + running with root privileges. If the sysctl option is enabled,
50607 + a sysctl option with name "consistent_setxid" is created.
50608 +
50609 +config GRKERNSEC_TPE
50610 + bool "Trusted Path Execution (TPE)"
50611 + help
50612 + If you say Y here, you will be able to choose a gid to add to the
50613 + supplementary groups of users you want to mark as "untrusted."
50614 + These users will not be able to execute any files that are not in
50615 + root-owned directories writable only by root. If the sysctl option
50616 + is enabled, a sysctl option with name "tpe" is created.
50617 +
50618 +config GRKERNSEC_TPE_ALL
50619 + bool "Partially restrict all non-root users"
50620 + depends on GRKERNSEC_TPE
50621 + help
50622 + If you say Y here, all non-root users will be covered under
50623 + a weaker TPE restriction. This is separate from, and in addition to,
50624 + the main TPE options that you have selected elsewhere. Thus, if a
50625 + "trusted" GID is chosen, this restriction applies to even that GID.
50626 + Under this restriction, all non-root users will only be allowed to
50627 + execute files in directories they own that are not group or
50628 + world-writable, or in directories owned by root and writable only by
50629 + root. If the sysctl option is enabled, a sysctl option with name
50630 + "tpe_restrict_all" is created.
50631 +
50632 +config GRKERNSEC_TPE_INVERT
50633 + bool "Invert GID option"
50634 + depends on GRKERNSEC_TPE
50635 + help
50636 + If you say Y here, the group you specify in the TPE configuration will
50637 + decide what group TPE restrictions will be *disabled* for. This
50638 + option is useful if you want TPE restrictions to be applied to most
50639 + users on the system. If the sysctl option is enabled, a sysctl option
50640 + with name "tpe_invert" is created. Unlike other sysctl options, this
50641 + entry will default to on for backward-compatibility.
50642 +
50643 +config GRKERNSEC_TPE_GID
50644 + int "GID for untrusted users"
50645 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50646 + default 1005
50647 + help
50648 + Setting this GID determines what group TPE restrictions will be
50649 + *enabled* for. If the sysctl option is enabled, a sysctl option
50650 + with name "tpe_gid" is created.
50651 +
50652 +config GRKERNSEC_TPE_GID
50653 + int "GID for trusted users"
50654 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50655 + default 1005
50656 + help
50657 + Setting this GID determines what group TPE restrictions will be
50658 + *disabled* for. If the sysctl option is enabled, a sysctl option
50659 + with name "tpe_gid" is created.
50660 +
50661 +endmenu
50662 +menu "Network Protections"
50663 +depends on GRKERNSEC
50664 +
50665 +config GRKERNSEC_RANDNET
50666 + bool "Larger entropy pools"
50667 + help
50668 + If you say Y here, the entropy pools used for many features of Linux
50669 + and grsecurity will be doubled in size. Since several grsecurity
50670 + features use additional randomness, it is recommended that you say Y
50671 + here. Saying Y here has a similar effect as modifying
50672 + /proc/sys/kernel/random/poolsize.
50673 +
50674 +config GRKERNSEC_BLACKHOLE
50675 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50676 + depends on NET
50677 + help
50678 + If you say Y here, neither TCP resets nor ICMP
50679 + destination-unreachable packets will be sent in response to packets
50680 + sent to ports for which no associated listening process exists.
50681 + This feature supports both IPV4 and IPV6 and exempts the
50682 + loopback interface from blackholing. Enabling this feature
50683 + makes a host more resilient to DoS attacks and reduces network
50684 + visibility against scanners.
50685 +
50686 + The blackhole feature as-implemented is equivalent to the FreeBSD
50687 + blackhole feature, as it prevents RST responses to all packets, not
50688 + just SYNs. Under most application behavior this causes no
50689 + problems, but applications (like haproxy) may not close certain
50690 + connections in a way that cleanly terminates them on the remote
50691 + end, leaving the remote host in LAST_ACK state. Because of this
50692 + side-effect and to prevent intentional LAST_ACK DoSes, this
50693 + feature also adds automatic mitigation against such attacks.
50694 + The mitigation drastically reduces the amount of time a socket
50695 + can spend in LAST_ACK state. If you're using haproxy and not
50696 + all servers it connects to have this option enabled, consider
50697 + disabling this feature on the haproxy host.
50698 +
50699 + If the sysctl option is enabled, two sysctl options with names
50700 + "ip_blackhole" and "lastack_retries" will be created.
50701 + While "ip_blackhole" takes the standard zero/non-zero on/off
50702 + toggle, "lastack_retries" uses the same kinds of values as
50703 + "tcp_retries1" and "tcp_retries2". The default value of 4
50704 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50705 + state.
50706 +
50707 +config GRKERNSEC_SOCKET
50708 + bool "Socket restrictions"
50709 + depends on NET
50710 + help
50711 + If you say Y here, you will be able to choose from several options.
50712 + If you assign a GID on your system and add it to the supplementary
50713 + groups of users you want to restrict socket access to, this patch
50714 + will perform up to three things, based on the option(s) you choose.
50715 +
50716 +config GRKERNSEC_SOCKET_ALL
50717 + bool "Deny any sockets to group"
50718 + depends on GRKERNSEC_SOCKET
50719 + help
50720 + If you say Y here, you will be able to choose a GID of whose users will
50721 + be unable to connect to other hosts from your machine or run server
50722 + applications from your machine. If the sysctl option is enabled, a
50723 + sysctl option with name "socket_all" is created.
50724 +
50725 +config GRKERNSEC_SOCKET_ALL_GID
50726 + int "GID to deny all sockets for"
50727 + depends on GRKERNSEC_SOCKET_ALL
50728 + default 1004
50729 + help
50730 + Here you can choose the GID to disable socket access for. Remember to
50731 + add the users you want socket access disabled for to the GID
50732 + specified here. If the sysctl option is enabled, a sysctl option
50733 + with name "socket_all_gid" is created.
50734 +
50735 +config GRKERNSEC_SOCKET_CLIENT
50736 + bool "Deny client sockets to group"
50737 + depends on GRKERNSEC_SOCKET
50738 + help
50739 + If you say Y here, you will be able to choose a GID of whose users will
50740 + be unable to connect to other hosts from your machine, but will be
50741 + able to run servers. If this option is enabled, all users in the group
50742 + you specify will have to use passive mode when initiating ftp transfers
50743 + from the shell on your machine. If the sysctl option is enabled, a
50744 + sysctl option with name "socket_client" is created.
50745 +
50746 +config GRKERNSEC_SOCKET_CLIENT_GID
50747 + int "GID to deny client sockets for"
50748 + depends on GRKERNSEC_SOCKET_CLIENT
50749 + default 1003
50750 + help
50751 + Here you can choose the GID to disable client socket access for.
50752 + Remember to add the users you want client socket access disabled for to
50753 + the GID specified here. If the sysctl option is enabled, a sysctl
50754 + option with name "socket_client_gid" is created.
50755 +
50756 +config GRKERNSEC_SOCKET_SERVER
50757 + bool "Deny server sockets to group"
50758 + depends on GRKERNSEC_SOCKET
50759 + help
50760 + If you say Y here, you will be able to choose a GID of whose users will
50761 + be unable to run server applications from your machine. If the sysctl
50762 + option is enabled, a sysctl option with name "socket_server" is created.
50763 +
50764 +config GRKERNSEC_SOCKET_SERVER_GID
50765 + int "GID to deny server sockets for"
50766 + depends on GRKERNSEC_SOCKET_SERVER
50767 + default 1002
50768 + help
50769 + Here you can choose the GID to disable server socket access for.
50770 + Remember to add the users you want server socket access disabled for to
50771 + the GID specified here. If the sysctl option is enabled, a sysctl
50772 + option with name "socket_server_gid" is created.
50773 +
50774 +endmenu
50775 +menu "Sysctl support"
50776 +depends on GRKERNSEC && SYSCTL
50777 +
50778 +config GRKERNSEC_SYSCTL
50779 + bool "Sysctl support"
50780 + help
50781 + If you say Y here, you will be able to change the options that
50782 + grsecurity runs with at bootup, without having to recompile your
50783 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50784 + to enable (1) or disable (0) various features. All the sysctl entries
50785 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50786 + All features enabled in the kernel configuration are disabled at boot
50787 + if you do not say Y to the "Turn on features by default" option.
50788 + All options should be set at startup, and the grsec_lock entry should
50789 + be set to a non-zero value after all the options are set.
50790 + *THIS IS EXTREMELY IMPORTANT*
50791 +
50792 +config GRKERNSEC_SYSCTL_DISTRO
50793 + bool "Extra sysctl support for distro makers (READ HELP)"
50794 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50795 + help
50796 + If you say Y here, additional sysctl options will be created
50797 + for features that affect processes running as root. Therefore,
50798 + it is critical when using this option that the grsec_lock entry be
50799 + enabled after boot. Only distros with prebuilt kernel packages
50800 + with this option enabled that can ensure grsec_lock is enabled
50801 + after boot should use this option.
50802 + *Failure to set grsec_lock after boot makes all grsec features
50803 + this option covers useless*
50804 +
50805 + Currently this option creates the following sysctl entries:
50806 + "Disable Privileged I/O": "disable_priv_io"
50807 +
50808 +config GRKERNSEC_SYSCTL_ON
50809 + bool "Turn on features by default"
50810 + depends on GRKERNSEC_SYSCTL
50811 + help
50812 + If you say Y here, instead of having all features enabled in the
50813 + kernel configuration disabled at boot time, the features will be
50814 + enabled at boot time. It is recommended you say Y here unless
50815 + there is some reason you would want all sysctl-tunable features to
50816 + be disabled by default. As mentioned elsewhere, it is important
50817 + to enable the grsec_lock entry once you have finished modifying
50818 + the sysctl entries.
50819 +
50820 +endmenu
50821 +menu "Logging Options"
50822 +depends on GRKERNSEC
50823 +
50824 +config GRKERNSEC_FLOODTIME
50825 + int "Seconds in between log messages (minimum)"
50826 + default 10
50827 + help
50828 + This option allows you to enforce the number of seconds between
50829 + grsecurity log messages. The default should be suitable for most
50830 + people, however, if you choose to change it, choose a value small enough
50831 + to allow informative logs to be produced, but large enough to
50832 + prevent flooding.
50833 +
50834 +config GRKERNSEC_FLOODBURST
50835 + int "Number of messages in a burst (maximum)"
50836 + default 6
50837 + help
50838 + This option allows you to choose the maximum number of messages allowed
50839 + within the flood time interval you chose in a separate option. The
50840 + default should be suitable for most people, however if you find that
50841 + many of your logs are being interpreted as flooding, you may want to
50842 + raise this value.
50843 +
50844 +endmenu
50845 +
50846 +endmenu
50847 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50848 new file mode 100644
50849 index 0000000..be9ae3a
50850 --- /dev/null
50851 +++ b/grsecurity/Makefile
50852 @@ -0,0 +1,36 @@
50853 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50854 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50855 +# into an RBAC system
50856 +#
50857 +# All code in this directory and various hooks inserted throughout the kernel
50858 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50859 +# under the GPL v2 or higher
50860 +
50861 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50862 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50863 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50864 +
50865 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50866 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50867 + gracl_learn.o grsec_log.o
50868 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50869 +
50870 +ifdef CONFIG_NET
50871 +obj-y += grsec_sock.o
50872 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50873 +endif
50874 +
50875 +ifndef CONFIG_GRKERNSEC
50876 +obj-y += grsec_disabled.o
50877 +endif
50878 +
50879 +ifdef CONFIG_GRKERNSEC_HIDESYM
50880 +extra-y := grsec_hidesym.o
50881 +$(obj)/grsec_hidesym.o:
50882 + @-chmod -f 500 /boot
50883 + @-chmod -f 500 /lib/modules
50884 + @-chmod -f 500 /lib64/modules
50885 + @-chmod -f 500 /lib32/modules
50886 + @-chmod -f 700 .
50887 + @echo ' grsec: protected kernel image paths'
50888 +endif
50889 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50890 new file mode 100644
50891 index 0000000..09258e0
50892 --- /dev/null
50893 +++ b/grsecurity/gracl.c
50894 @@ -0,0 +1,4156 @@
50895 +#include <linux/kernel.h>
50896 +#include <linux/module.h>
50897 +#include <linux/sched.h>
50898 +#include <linux/mm.h>
50899 +#include <linux/file.h>
50900 +#include <linux/fs.h>
50901 +#include <linux/namei.h>
50902 +#include <linux/mount.h>
50903 +#include <linux/tty.h>
50904 +#include <linux/proc_fs.h>
50905 +#include <linux/lglock.h>
50906 +#include <linux/slab.h>
50907 +#include <linux/vmalloc.h>
50908 +#include <linux/types.h>
50909 +#include <linux/sysctl.h>
50910 +#include <linux/netdevice.h>
50911 +#include <linux/ptrace.h>
50912 +#include <linux/gracl.h>
50913 +#include <linux/gralloc.h>
50914 +#include <linux/grsecurity.h>
50915 +#include <linux/grinternal.h>
50916 +#include <linux/pid_namespace.h>
50917 +#include <linux/fdtable.h>
50918 +#include <linux/percpu.h>
50919 +
50920 +#include <asm/uaccess.h>
50921 +#include <asm/errno.h>
50922 +#include <asm/mman.h>
50923 +
50924 +static struct acl_role_db acl_role_set;
50925 +static struct name_db name_set;
50926 +static struct inodev_db inodev_set;
50927 +
50928 +/* for keeping track of userspace pointers used for subjects, so we
50929 + can share references in the kernel as well
50930 +*/
50931 +
50932 +static struct path real_root;
50933 +
50934 +static struct acl_subj_map_db subj_map_set;
50935 +
50936 +static struct acl_role_label *default_role;
50937 +
50938 +static struct acl_role_label *role_list;
50939 +
50940 +static u16 acl_sp_role_value;
50941 +
50942 +extern char *gr_shared_page[4];
50943 +static DEFINE_MUTEX(gr_dev_mutex);
50944 +DEFINE_RWLOCK(gr_inode_lock);
50945 +
50946 +struct gr_arg *gr_usermode;
50947 +
50948 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50949 +
50950 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50951 +extern void gr_clear_learn_entries(void);
50952 +
50953 +#ifdef CONFIG_GRKERNSEC_RESLOG
50954 +extern void gr_log_resource(const struct task_struct *task,
50955 + const int res, const unsigned long wanted, const int gt);
50956 +#endif
50957 +
50958 +unsigned char *gr_system_salt;
50959 +unsigned char *gr_system_sum;
50960 +
50961 +static struct sprole_pw **acl_special_roles = NULL;
50962 +static __u16 num_sprole_pws = 0;
50963 +
50964 +static struct acl_role_label *kernel_role = NULL;
50965 +
50966 +static unsigned int gr_auth_attempts = 0;
50967 +static unsigned long gr_auth_expires = 0UL;
50968 +
50969 +#ifdef CONFIG_NET
50970 +extern struct vfsmount *sock_mnt;
50971 +#endif
50972 +
50973 +extern struct vfsmount *pipe_mnt;
50974 +extern struct vfsmount *shm_mnt;
50975 +#ifdef CONFIG_HUGETLBFS
50976 +extern struct vfsmount *hugetlbfs_vfsmount;
50977 +#endif
50978 +
50979 +static struct acl_object_label *fakefs_obj_rw;
50980 +static struct acl_object_label *fakefs_obj_rwx;
50981 +
50982 +extern int gr_init_uidset(void);
50983 +extern void gr_free_uidset(void);
50984 +extern void gr_remove_uid(uid_t uid);
50985 +extern int gr_find_uid(uid_t uid);
50986 +
50987 +DECLARE_BRLOCK(vfsmount_lock);
50988 +
50989 +__inline__ int
50990 +gr_acl_is_enabled(void)
50991 +{
50992 + return (gr_status & GR_READY);
50993 +}
50994 +
50995 +#ifdef CONFIG_BTRFS_FS
50996 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50997 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50998 +#endif
50999 +
51000 +static inline dev_t __get_dev(const struct dentry *dentry)
51001 +{
51002 +#ifdef CONFIG_BTRFS_FS
51003 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51004 + return get_btrfs_dev_from_inode(dentry->d_inode);
51005 + else
51006 +#endif
51007 + return dentry->d_inode->i_sb->s_dev;
51008 +}
51009 +
51010 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51011 +{
51012 + return __get_dev(dentry);
51013 +}
51014 +
51015 +static char gr_task_roletype_to_char(struct task_struct *task)
51016 +{
51017 + switch (task->role->roletype &
51018 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51019 + GR_ROLE_SPECIAL)) {
51020 + case GR_ROLE_DEFAULT:
51021 + return 'D';
51022 + case GR_ROLE_USER:
51023 + return 'U';
51024 + case GR_ROLE_GROUP:
51025 + return 'G';
51026 + case GR_ROLE_SPECIAL:
51027 + return 'S';
51028 + }
51029 +
51030 + return 'X';
51031 +}
51032 +
51033 +char gr_roletype_to_char(void)
51034 +{
51035 + return gr_task_roletype_to_char(current);
51036 +}
51037 +
51038 +__inline__ int
51039 +gr_acl_tpe_check(void)
51040 +{
51041 + if (unlikely(!(gr_status & GR_READY)))
51042 + return 0;
51043 + if (current->role->roletype & GR_ROLE_TPE)
51044 + return 1;
51045 + else
51046 + return 0;
51047 +}
51048 +
51049 +int
51050 +gr_handle_rawio(const struct inode *inode)
51051 +{
51052 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51053 + if (inode && S_ISBLK(inode->i_mode) &&
51054 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51055 + !capable(CAP_SYS_RAWIO))
51056 + return 1;
51057 +#endif
51058 + return 0;
51059 +}
51060 +
51061 +static int
51062 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51063 +{
51064 + if (likely(lena != lenb))
51065 + return 0;
51066 +
51067 + return !memcmp(a, b, lena);
51068 +}
51069 +
51070 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
51071 +{
51072 + *buflen -= namelen;
51073 + if (*buflen < 0)
51074 + return -ENAMETOOLONG;
51075 + *buffer -= namelen;
51076 + memcpy(*buffer, str, namelen);
51077 + return 0;
51078 +}
51079 +
51080 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51081 +{
51082 + return prepend(buffer, buflen, name->name, name->len);
51083 +}
51084 +
51085 +static int prepend_path(const struct path *path, struct path *root,
51086 + char **buffer, int *buflen)
51087 +{
51088 + struct dentry *dentry = path->dentry;
51089 + struct vfsmount *vfsmnt = path->mnt;
51090 + bool slash = false;
51091 + int error = 0;
51092 +
51093 + while (dentry != root->dentry || vfsmnt != root->mnt) {
51094 + struct dentry * parent;
51095 +
51096 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51097 + /* Global root? */
51098 + if (vfsmnt->mnt_parent == vfsmnt) {
51099 + goto out;
51100 + }
51101 + dentry = vfsmnt->mnt_mountpoint;
51102 + vfsmnt = vfsmnt->mnt_parent;
51103 + continue;
51104 + }
51105 + parent = dentry->d_parent;
51106 + prefetch(parent);
51107 + spin_lock(&dentry->d_lock);
51108 + error = prepend_name(buffer, buflen, &dentry->d_name);
51109 + spin_unlock(&dentry->d_lock);
51110 + if (!error)
51111 + error = prepend(buffer, buflen, "/", 1);
51112 + if (error)
51113 + break;
51114 +
51115 + slash = true;
51116 + dentry = parent;
51117 + }
51118 +
51119 +out:
51120 + if (!error && !slash)
51121 + error = prepend(buffer, buflen, "/", 1);
51122 +
51123 + return error;
51124 +}
51125 +
51126 +/* this must be called with vfsmount_lock and rename_lock held */
51127 +
51128 +static char *__our_d_path(const struct path *path, struct path *root,
51129 + char *buf, int buflen)
51130 +{
51131 + char *res = buf + buflen;
51132 + int error;
51133 +
51134 + prepend(&res, &buflen, "\0", 1);
51135 + error = prepend_path(path, root, &res, &buflen);
51136 + if (error)
51137 + return ERR_PTR(error);
51138 +
51139 + return res;
51140 +}
51141 +
51142 +static char *
51143 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
51144 +{
51145 + char *retval;
51146 +
51147 + retval = __our_d_path(path, root, buf, buflen);
51148 + if (unlikely(IS_ERR(retval)))
51149 + retval = strcpy(buf, "<path too long>");
51150 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51151 + retval[1] = '\0';
51152 +
51153 + return retval;
51154 +}
51155 +
51156 +static char *
51157 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51158 + char *buf, int buflen)
51159 +{
51160 + struct path path;
51161 + char *res;
51162 +
51163 + path.dentry = (struct dentry *)dentry;
51164 + path.mnt = (struct vfsmount *)vfsmnt;
51165 +
51166 + /* we can use real_root.dentry, real_root.mnt, because this is only called
51167 + by the RBAC system */
51168 + res = gen_full_path(&path, &real_root, buf, buflen);
51169 +
51170 + return res;
51171 +}
51172 +
51173 +static char *
51174 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51175 + char *buf, int buflen)
51176 +{
51177 + char *res;
51178 + struct path path;
51179 + struct path root;
51180 + struct task_struct *reaper = &init_task;
51181 +
51182 + path.dentry = (struct dentry *)dentry;
51183 + path.mnt = (struct vfsmount *)vfsmnt;
51184 +
51185 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51186 + get_fs_root(reaper->fs, &root);
51187 +
51188 + write_seqlock(&rename_lock);
51189 + br_read_lock(vfsmount_lock);
51190 + res = gen_full_path(&path, &root, buf, buflen);
51191 + br_read_unlock(vfsmount_lock);
51192 + write_sequnlock(&rename_lock);
51193 +
51194 + path_put(&root);
51195 + return res;
51196 +}
51197 +
51198 +static char *
51199 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51200 +{
51201 + char *ret;
51202 + write_seqlock(&rename_lock);
51203 + br_read_lock(vfsmount_lock);
51204 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51205 + PAGE_SIZE);
51206 + br_read_unlock(vfsmount_lock);
51207 + write_sequnlock(&rename_lock);
51208 + return ret;
51209 +}
51210 +
51211 +static char *
51212 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51213 +{
51214 + char *ret;
51215 + char *buf;
51216 + int buflen;
51217 +
51218 + write_seqlock(&rename_lock);
51219 + br_read_lock(vfsmount_lock);
51220 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51221 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51222 + buflen = (int)(ret - buf);
51223 + if (buflen >= 5)
51224 + prepend(&ret, &buflen, "/proc", 5);
51225 + else
51226 + ret = strcpy(buf, "<path too long>");
51227 + br_read_unlock(vfsmount_lock);
51228 + write_sequnlock(&rename_lock);
51229 + return ret;
51230 +}
51231 +
51232 +char *
51233 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51234 +{
51235 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51236 + PAGE_SIZE);
51237 +}
51238 +
51239 +char *
51240 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51241 +{
51242 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51243 + PAGE_SIZE);
51244 +}
51245 +
51246 +char *
51247 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51248 +{
51249 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51250 + PAGE_SIZE);
51251 +}
51252 +
51253 +char *
51254 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51255 +{
51256 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51257 + PAGE_SIZE);
51258 +}
51259 +
51260 +char *
51261 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51262 +{
51263 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51264 + PAGE_SIZE);
51265 +}
51266 +
51267 +__inline__ __u32
51268 +to_gr_audit(const __u32 reqmode)
51269 +{
51270 + /* masks off auditable permission flags, then shifts them to create
51271 + auditing flags, and adds the special case of append auditing if
51272 + we're requesting write */
51273 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51274 +}
51275 +
51276 +struct acl_subject_label *
51277 +lookup_subject_map(const struct acl_subject_label *userp)
51278 +{
51279 + unsigned int index = shash(userp, subj_map_set.s_size);
51280 + struct subject_map *match;
51281 +
51282 + match = subj_map_set.s_hash[index];
51283 +
51284 + while (match && match->user != userp)
51285 + match = match->next;
51286 +
51287 + if (match != NULL)
51288 + return match->kernel;
51289 + else
51290 + return NULL;
51291 +}
51292 +
51293 +static void
51294 +insert_subj_map_entry(struct subject_map *subjmap)
51295 +{
51296 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51297 + struct subject_map **curr;
51298 +
51299 + subjmap->prev = NULL;
51300 +
51301 + curr = &subj_map_set.s_hash[index];
51302 + if (*curr != NULL)
51303 + (*curr)->prev = subjmap;
51304 +
51305 + subjmap->next = *curr;
51306 + *curr = subjmap;
51307 +
51308 + return;
51309 +}
51310 +
51311 +static struct acl_role_label *
51312 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51313 + const gid_t gid)
51314 +{
51315 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51316 + struct acl_role_label *match;
51317 + struct role_allowed_ip *ipp;
51318 + unsigned int x;
51319 + u32 curr_ip = task->signal->curr_ip;
51320 +
51321 + task->signal->saved_ip = curr_ip;
51322 +
51323 + match = acl_role_set.r_hash[index];
51324 +
51325 + while (match) {
51326 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51327 + for (x = 0; x < match->domain_child_num; x++) {
51328 + if (match->domain_children[x] == uid)
51329 + goto found;
51330 + }
51331 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51332 + break;
51333 + match = match->next;
51334 + }
51335 +found:
51336 + if (match == NULL) {
51337 + try_group:
51338 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51339 + match = acl_role_set.r_hash[index];
51340 +
51341 + while (match) {
51342 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51343 + for (x = 0; x < match->domain_child_num; x++) {
51344 + if (match->domain_children[x] == gid)
51345 + goto found2;
51346 + }
51347 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51348 + break;
51349 + match = match->next;
51350 + }
51351 +found2:
51352 + if (match == NULL)
51353 + match = default_role;
51354 + if (match->allowed_ips == NULL)
51355 + return match;
51356 + else {
51357 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51358 + if (likely
51359 + ((ntohl(curr_ip) & ipp->netmask) ==
51360 + (ntohl(ipp->addr) & ipp->netmask)))
51361 + return match;
51362 + }
51363 + match = default_role;
51364 + }
51365 + } else if (match->allowed_ips == NULL) {
51366 + return match;
51367 + } else {
51368 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51369 + if (likely
51370 + ((ntohl(curr_ip) & ipp->netmask) ==
51371 + (ntohl(ipp->addr) & ipp->netmask)))
51372 + return match;
51373 + }
51374 + goto try_group;
51375 + }
51376 +
51377 + return match;
51378 +}
51379 +
51380 +struct acl_subject_label *
51381 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51382 + const struct acl_role_label *role)
51383 +{
51384 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51385 + struct acl_subject_label *match;
51386 +
51387 + match = role->subj_hash[index];
51388 +
51389 + while (match && (match->inode != ino || match->device != dev ||
51390 + (match->mode & GR_DELETED))) {
51391 + match = match->next;
51392 + }
51393 +
51394 + if (match && !(match->mode & GR_DELETED))
51395 + return match;
51396 + else
51397 + return NULL;
51398 +}
51399 +
51400 +struct acl_subject_label *
51401 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51402 + const struct acl_role_label *role)
51403 +{
51404 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51405 + struct acl_subject_label *match;
51406 +
51407 + match = role->subj_hash[index];
51408 +
51409 + while (match && (match->inode != ino || match->device != dev ||
51410 + !(match->mode & GR_DELETED))) {
51411 + match = match->next;
51412 + }
51413 +
51414 + if (match && (match->mode & GR_DELETED))
51415 + return match;
51416 + else
51417 + return NULL;
51418 +}
51419 +
51420 +static struct acl_object_label *
51421 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51422 + const struct acl_subject_label *subj)
51423 +{
51424 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51425 + struct acl_object_label *match;
51426 +
51427 + match = subj->obj_hash[index];
51428 +
51429 + while (match && (match->inode != ino || match->device != dev ||
51430 + (match->mode & GR_DELETED))) {
51431 + match = match->next;
51432 + }
51433 +
51434 + if (match && !(match->mode & GR_DELETED))
51435 + return match;
51436 + else
51437 + return NULL;
51438 +}
51439 +
51440 +static struct acl_object_label *
51441 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51442 + const struct acl_subject_label *subj)
51443 +{
51444 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51445 + struct acl_object_label *match;
51446 +
51447 + match = subj->obj_hash[index];
51448 +
51449 + while (match && (match->inode != ino || match->device != dev ||
51450 + !(match->mode & GR_DELETED))) {
51451 + match = match->next;
51452 + }
51453 +
51454 + if (match && (match->mode & GR_DELETED))
51455 + return match;
51456 +
51457 + match = subj->obj_hash[index];
51458 +
51459 + while (match && (match->inode != ino || match->device != dev ||
51460 + (match->mode & GR_DELETED))) {
51461 + match = match->next;
51462 + }
51463 +
51464 + if (match && !(match->mode & GR_DELETED))
51465 + return match;
51466 + else
51467 + return NULL;
51468 +}
51469 +
51470 +static struct name_entry *
51471 +lookup_name_entry(const char *name)
51472 +{
51473 + unsigned int len = strlen(name);
51474 + unsigned int key = full_name_hash(name, len);
51475 + unsigned int index = key % name_set.n_size;
51476 + struct name_entry *match;
51477 +
51478 + match = name_set.n_hash[index];
51479 +
51480 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51481 + match = match->next;
51482 +
51483 + return match;
51484 +}
51485 +
51486 +static struct name_entry *
51487 +lookup_name_entry_create(const char *name)
51488 +{
51489 + unsigned int len = strlen(name);
51490 + unsigned int key = full_name_hash(name, len);
51491 + unsigned int index = key % name_set.n_size;
51492 + struct name_entry *match;
51493 +
51494 + match = name_set.n_hash[index];
51495 +
51496 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51497 + !match->deleted))
51498 + match = match->next;
51499 +
51500 + if (match && match->deleted)
51501 + return match;
51502 +
51503 + match = name_set.n_hash[index];
51504 +
51505 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51506 + match->deleted))
51507 + match = match->next;
51508 +
51509 + if (match && !match->deleted)
51510 + return match;
51511 + else
51512 + return NULL;
51513 +}
51514 +
51515 +static struct inodev_entry *
51516 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51517 +{
51518 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51519 + struct inodev_entry *match;
51520 +
51521 + match = inodev_set.i_hash[index];
51522 +
51523 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51524 + match = match->next;
51525 +
51526 + return match;
51527 +}
51528 +
51529 +static void
51530 +insert_inodev_entry(struct inodev_entry *entry)
51531 +{
51532 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51533 + inodev_set.i_size);
51534 + struct inodev_entry **curr;
51535 +
51536 + entry->prev = NULL;
51537 +
51538 + curr = &inodev_set.i_hash[index];
51539 + if (*curr != NULL)
51540 + (*curr)->prev = entry;
51541 +
51542 + entry->next = *curr;
51543 + *curr = entry;
51544 +
51545 + return;
51546 +}
51547 +
51548 +static void
51549 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51550 +{
51551 + unsigned int index =
51552 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51553 + struct acl_role_label **curr;
51554 + struct acl_role_label *tmp;
51555 +
51556 + curr = &acl_role_set.r_hash[index];
51557 +
51558 + /* if role was already inserted due to domains and already has
51559 + a role in the same bucket as it attached, then we need to
51560 + combine these two buckets
51561 + */
51562 + if (role->next) {
51563 + tmp = role->next;
51564 + while (tmp->next)
51565 + tmp = tmp->next;
51566 + tmp->next = *curr;
51567 + } else
51568 + role->next = *curr;
51569 + *curr = role;
51570 +
51571 + return;
51572 +}
51573 +
51574 +static void
51575 +insert_acl_role_label(struct acl_role_label *role)
51576 +{
51577 + int i;
51578 +
51579 + if (role_list == NULL) {
51580 + role_list = role;
51581 + role->prev = NULL;
51582 + } else {
51583 + role->prev = role_list;
51584 + role_list = role;
51585 + }
51586 +
51587 + /* used for hash chains */
51588 + role->next = NULL;
51589 +
51590 + if (role->roletype & GR_ROLE_DOMAIN) {
51591 + for (i = 0; i < role->domain_child_num; i++)
51592 + __insert_acl_role_label(role, role->domain_children[i]);
51593 + } else
51594 + __insert_acl_role_label(role, role->uidgid);
51595 +}
51596 +
51597 +static int
51598 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51599 +{
51600 + struct name_entry **curr, *nentry;
51601 + struct inodev_entry *ientry;
51602 + unsigned int len = strlen(name);
51603 + unsigned int key = full_name_hash(name, len);
51604 + unsigned int index = key % name_set.n_size;
51605 +
51606 + curr = &name_set.n_hash[index];
51607 +
51608 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51609 + curr = &((*curr)->next);
51610 +
51611 + if (*curr != NULL)
51612 + return 1;
51613 +
51614 + nentry = acl_alloc(sizeof (struct name_entry));
51615 + if (nentry == NULL)
51616 + return 0;
51617 + ientry = acl_alloc(sizeof (struct inodev_entry));
51618 + if (ientry == NULL)
51619 + return 0;
51620 + ientry->nentry = nentry;
51621 +
51622 + nentry->key = key;
51623 + nentry->name = name;
51624 + nentry->inode = inode;
51625 + nentry->device = device;
51626 + nentry->len = len;
51627 + nentry->deleted = deleted;
51628 +
51629 + nentry->prev = NULL;
51630 + curr = &name_set.n_hash[index];
51631 + if (*curr != NULL)
51632 + (*curr)->prev = nentry;
51633 + nentry->next = *curr;
51634 + *curr = nentry;
51635 +
51636 + /* insert us into the table searchable by inode/dev */
51637 + insert_inodev_entry(ientry);
51638 +
51639 + return 1;
51640 +}
51641 +
51642 +static void
51643 +insert_acl_obj_label(struct acl_object_label *obj,
51644 + struct acl_subject_label *subj)
51645 +{
51646 + unsigned int index =
51647 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51648 + struct acl_object_label **curr;
51649 +
51650 +
51651 + obj->prev = NULL;
51652 +
51653 + curr = &subj->obj_hash[index];
51654 + if (*curr != NULL)
51655 + (*curr)->prev = obj;
51656 +
51657 + obj->next = *curr;
51658 + *curr = obj;
51659 +
51660 + return;
51661 +}
51662 +
51663 +static void
51664 +insert_acl_subj_label(struct acl_subject_label *obj,
51665 + struct acl_role_label *role)
51666 +{
51667 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51668 + struct acl_subject_label **curr;
51669 +
51670 + obj->prev = NULL;
51671 +
51672 + curr = &role->subj_hash[index];
51673 + if (*curr != NULL)
51674 + (*curr)->prev = obj;
51675 +
51676 + obj->next = *curr;
51677 + *curr = obj;
51678 +
51679 + return;
51680 +}
51681 +
51682 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51683 +
51684 +static void *
51685 +create_table(__u32 * len, int elementsize)
51686 +{
51687 + unsigned int table_sizes[] = {
51688 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51689 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51690 + 4194301, 8388593, 16777213, 33554393, 67108859
51691 + };
51692 + void *newtable = NULL;
51693 + unsigned int pwr = 0;
51694 +
51695 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51696 + table_sizes[pwr] <= *len)
51697 + pwr++;
51698 +
51699 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51700 + return newtable;
51701 +
51702 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51703 + newtable =
51704 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51705 + else
51706 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51707 +
51708 + *len = table_sizes[pwr];
51709 +
51710 + return newtable;
51711 +}
51712 +
51713 +static int
51714 +init_variables(const struct gr_arg *arg)
51715 +{
51716 + struct task_struct *reaper = &init_task;
51717 + unsigned int stacksize;
51718 +
51719 + subj_map_set.s_size = arg->role_db.num_subjects;
51720 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51721 + name_set.n_size = arg->role_db.num_objects;
51722 + inodev_set.i_size = arg->role_db.num_objects;
51723 +
51724 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51725 + !name_set.n_size || !inodev_set.i_size)
51726 + return 1;
51727 +
51728 + if (!gr_init_uidset())
51729 + return 1;
51730 +
51731 + /* set up the stack that holds allocation info */
51732 +
51733 + stacksize = arg->role_db.num_pointers + 5;
51734 +
51735 + if (!acl_alloc_stack_init(stacksize))
51736 + return 1;
51737 +
51738 + /* grab reference for the real root dentry and vfsmount */
51739 + get_fs_root(reaper->fs, &real_root);
51740 +
51741 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51742 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51743 +#endif
51744 +
51745 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51746 + if (fakefs_obj_rw == NULL)
51747 + return 1;
51748 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51749 +
51750 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51751 + if (fakefs_obj_rwx == NULL)
51752 + return 1;
51753 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51754 +
51755 + subj_map_set.s_hash =
51756 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51757 + acl_role_set.r_hash =
51758 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51759 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51760 + inodev_set.i_hash =
51761 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51762 +
51763 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51764 + !name_set.n_hash || !inodev_set.i_hash)
51765 + return 1;
51766 +
51767 + memset(subj_map_set.s_hash, 0,
51768 + sizeof(struct subject_map *) * subj_map_set.s_size);
51769 + memset(acl_role_set.r_hash, 0,
51770 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51771 + memset(name_set.n_hash, 0,
51772 + sizeof (struct name_entry *) * name_set.n_size);
51773 + memset(inodev_set.i_hash, 0,
51774 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51775 +
51776 + return 0;
51777 +}
51778 +
51779 +/* free information not needed after startup
51780 + currently contains user->kernel pointer mappings for subjects
51781 +*/
51782 +
51783 +static void
51784 +free_init_variables(void)
51785 +{
51786 + __u32 i;
51787 +
51788 + if (subj_map_set.s_hash) {
51789 + for (i = 0; i < subj_map_set.s_size; i++) {
51790 + if (subj_map_set.s_hash[i]) {
51791 + kfree(subj_map_set.s_hash[i]);
51792 + subj_map_set.s_hash[i] = NULL;
51793 + }
51794 + }
51795 +
51796 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51797 + PAGE_SIZE)
51798 + kfree(subj_map_set.s_hash);
51799 + else
51800 + vfree(subj_map_set.s_hash);
51801 + }
51802 +
51803 + return;
51804 +}
51805 +
51806 +static void
51807 +free_variables(void)
51808 +{
51809 + struct acl_subject_label *s;
51810 + struct acl_role_label *r;
51811 + struct task_struct *task, *task2;
51812 + unsigned int x;
51813 +
51814 + gr_clear_learn_entries();
51815 +
51816 + read_lock(&tasklist_lock);
51817 + do_each_thread(task2, task) {
51818 + task->acl_sp_role = 0;
51819 + task->acl_role_id = 0;
51820 + task->acl = NULL;
51821 + task->role = NULL;
51822 + } while_each_thread(task2, task);
51823 + read_unlock(&tasklist_lock);
51824 +
51825 + /* release the reference to the real root dentry and vfsmount */
51826 + path_put(&real_root);
51827 +
51828 + /* free all object hash tables */
51829 +
51830 + FOR_EACH_ROLE_START(r)
51831 + if (r->subj_hash == NULL)
51832 + goto next_role;
51833 + FOR_EACH_SUBJECT_START(r, s, x)
51834 + if (s->obj_hash == NULL)
51835 + break;
51836 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51837 + kfree(s->obj_hash);
51838 + else
51839 + vfree(s->obj_hash);
51840 + FOR_EACH_SUBJECT_END(s, x)
51841 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51842 + if (s->obj_hash == NULL)
51843 + break;
51844 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51845 + kfree(s->obj_hash);
51846 + else
51847 + vfree(s->obj_hash);
51848 + FOR_EACH_NESTED_SUBJECT_END(s)
51849 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51850 + kfree(r->subj_hash);
51851 + else
51852 + vfree(r->subj_hash);
51853 + r->subj_hash = NULL;
51854 +next_role:
51855 + FOR_EACH_ROLE_END(r)
51856 +
51857 + acl_free_all();
51858 +
51859 + if (acl_role_set.r_hash) {
51860 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51861 + PAGE_SIZE)
51862 + kfree(acl_role_set.r_hash);
51863 + else
51864 + vfree(acl_role_set.r_hash);
51865 + }
51866 + if (name_set.n_hash) {
51867 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51868 + PAGE_SIZE)
51869 + kfree(name_set.n_hash);
51870 + else
51871 + vfree(name_set.n_hash);
51872 + }
51873 +
51874 + if (inodev_set.i_hash) {
51875 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51876 + PAGE_SIZE)
51877 + kfree(inodev_set.i_hash);
51878 + else
51879 + vfree(inodev_set.i_hash);
51880 + }
51881 +
51882 + gr_free_uidset();
51883 +
51884 + memset(&name_set, 0, sizeof (struct name_db));
51885 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51886 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51887 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51888 +
51889 + default_role = NULL;
51890 + role_list = NULL;
51891 +
51892 + return;
51893 +}
51894 +
51895 +static __u32
51896 +count_user_objs(struct acl_object_label *userp)
51897 +{
51898 + struct acl_object_label o_tmp;
51899 + __u32 num = 0;
51900 +
51901 + while (userp) {
51902 + if (copy_from_user(&o_tmp, userp,
51903 + sizeof (struct acl_object_label)))
51904 + break;
51905 +
51906 + userp = o_tmp.prev;
51907 + num++;
51908 + }
51909 +
51910 + return num;
51911 +}
51912 +
51913 +static struct acl_subject_label *
51914 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51915 +
51916 +static int
51917 +copy_user_glob(struct acl_object_label *obj)
51918 +{
51919 + struct acl_object_label *g_tmp, **guser;
51920 + unsigned int len;
51921 + char *tmp;
51922 +
51923 + if (obj->globbed == NULL)
51924 + return 0;
51925 +
51926 + guser = &obj->globbed;
51927 + while (*guser) {
51928 + g_tmp = (struct acl_object_label *)
51929 + acl_alloc(sizeof (struct acl_object_label));
51930 + if (g_tmp == NULL)
51931 + return -ENOMEM;
51932 +
51933 + if (copy_from_user(g_tmp, *guser,
51934 + sizeof (struct acl_object_label)))
51935 + return -EFAULT;
51936 +
51937 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51938 +
51939 + if (!len || len >= PATH_MAX)
51940 + return -EINVAL;
51941 +
51942 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51943 + return -ENOMEM;
51944 +
51945 + if (copy_from_user(tmp, g_tmp->filename, len))
51946 + return -EFAULT;
51947 + tmp[len-1] = '\0';
51948 + g_tmp->filename = tmp;
51949 +
51950 + *guser = g_tmp;
51951 + guser = &(g_tmp->next);
51952 + }
51953 +
51954 + return 0;
51955 +}
51956 +
51957 +static int
51958 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51959 + struct acl_role_label *role)
51960 +{
51961 + struct acl_object_label *o_tmp;
51962 + unsigned int len;
51963 + int ret;
51964 + char *tmp;
51965 +
51966 + while (userp) {
51967 + if ((o_tmp = (struct acl_object_label *)
51968 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51969 + return -ENOMEM;
51970 +
51971 + if (copy_from_user(o_tmp, userp,
51972 + sizeof (struct acl_object_label)))
51973 + return -EFAULT;
51974 +
51975 + userp = o_tmp->prev;
51976 +
51977 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51978 +
51979 + if (!len || len >= PATH_MAX)
51980 + return -EINVAL;
51981 +
51982 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51983 + return -ENOMEM;
51984 +
51985 + if (copy_from_user(tmp, o_tmp->filename, len))
51986 + return -EFAULT;
51987 + tmp[len-1] = '\0';
51988 + o_tmp->filename = tmp;
51989 +
51990 + insert_acl_obj_label(o_tmp, subj);
51991 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51992 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51993 + return -ENOMEM;
51994 +
51995 + ret = copy_user_glob(o_tmp);
51996 + if (ret)
51997 + return ret;
51998 +
51999 + if (o_tmp->nested) {
52000 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52001 + if (IS_ERR(o_tmp->nested))
52002 + return PTR_ERR(o_tmp->nested);
52003 +
52004 + /* insert into nested subject list */
52005 + o_tmp->nested->next = role->hash->first;
52006 + role->hash->first = o_tmp->nested;
52007 + }
52008 + }
52009 +
52010 + return 0;
52011 +}
52012 +
52013 +static __u32
52014 +count_user_subjs(struct acl_subject_label *userp)
52015 +{
52016 + struct acl_subject_label s_tmp;
52017 + __u32 num = 0;
52018 +
52019 + while (userp) {
52020 + if (copy_from_user(&s_tmp, userp,
52021 + sizeof (struct acl_subject_label)))
52022 + break;
52023 +
52024 + userp = s_tmp.prev;
52025 + /* do not count nested subjects against this count, since
52026 + they are not included in the hash table, but are
52027 + attached to objects. We have already counted
52028 + the subjects in userspace for the allocation
52029 + stack
52030 + */
52031 + if (!(s_tmp.mode & GR_NESTED))
52032 + num++;
52033 + }
52034 +
52035 + return num;
52036 +}
52037 +
52038 +static int
52039 +copy_user_allowedips(struct acl_role_label *rolep)
52040 +{
52041 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52042 +
52043 + ruserip = rolep->allowed_ips;
52044 +
52045 + while (ruserip) {
52046 + rlast = rtmp;
52047 +
52048 + if ((rtmp = (struct role_allowed_ip *)
52049 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52050 + return -ENOMEM;
52051 +
52052 + if (copy_from_user(rtmp, ruserip,
52053 + sizeof (struct role_allowed_ip)))
52054 + return -EFAULT;
52055 +
52056 + ruserip = rtmp->prev;
52057 +
52058 + if (!rlast) {
52059 + rtmp->prev = NULL;
52060 + rolep->allowed_ips = rtmp;
52061 + } else {
52062 + rlast->next = rtmp;
52063 + rtmp->prev = rlast;
52064 + }
52065 +
52066 + if (!ruserip)
52067 + rtmp->next = NULL;
52068 + }
52069 +
52070 + return 0;
52071 +}
52072 +
52073 +static int
52074 +copy_user_transitions(struct acl_role_label *rolep)
52075 +{
52076 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
52077 +
52078 + unsigned int len;
52079 + char *tmp;
52080 +
52081 + rusertp = rolep->transitions;
52082 +
52083 + while (rusertp) {
52084 + rlast = rtmp;
52085 +
52086 + if ((rtmp = (struct role_transition *)
52087 + acl_alloc(sizeof (struct role_transition))) == NULL)
52088 + return -ENOMEM;
52089 +
52090 + if (copy_from_user(rtmp, rusertp,
52091 + sizeof (struct role_transition)))
52092 + return -EFAULT;
52093 +
52094 + rusertp = rtmp->prev;
52095 +
52096 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52097 +
52098 + if (!len || len >= GR_SPROLE_LEN)
52099 + return -EINVAL;
52100 +
52101 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52102 + return -ENOMEM;
52103 +
52104 + if (copy_from_user(tmp, rtmp->rolename, len))
52105 + return -EFAULT;
52106 + tmp[len-1] = '\0';
52107 + rtmp->rolename = tmp;
52108 +
52109 + if (!rlast) {
52110 + rtmp->prev = NULL;
52111 + rolep->transitions = rtmp;
52112 + } else {
52113 + rlast->next = rtmp;
52114 + rtmp->prev = rlast;
52115 + }
52116 +
52117 + if (!rusertp)
52118 + rtmp->next = NULL;
52119 + }
52120 +
52121 + return 0;
52122 +}
52123 +
52124 +static struct acl_subject_label *
52125 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52126 +{
52127 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52128 + unsigned int len;
52129 + char *tmp;
52130 + __u32 num_objs;
52131 + struct acl_ip_label **i_tmp, *i_utmp2;
52132 + struct gr_hash_struct ghash;
52133 + struct subject_map *subjmap;
52134 + unsigned int i_num;
52135 + int err;
52136 +
52137 + s_tmp = lookup_subject_map(userp);
52138 +
52139 + /* we've already copied this subject into the kernel, just return
52140 + the reference to it, and don't copy it over again
52141 + */
52142 + if (s_tmp)
52143 + return(s_tmp);
52144 +
52145 + if ((s_tmp = (struct acl_subject_label *)
52146 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52147 + return ERR_PTR(-ENOMEM);
52148 +
52149 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52150 + if (subjmap == NULL)
52151 + return ERR_PTR(-ENOMEM);
52152 +
52153 + subjmap->user = userp;
52154 + subjmap->kernel = s_tmp;
52155 + insert_subj_map_entry(subjmap);
52156 +
52157 + if (copy_from_user(s_tmp, userp,
52158 + sizeof (struct acl_subject_label)))
52159 + return ERR_PTR(-EFAULT);
52160 +
52161 + len = strnlen_user(s_tmp->filename, PATH_MAX);
52162 +
52163 + if (!len || len >= PATH_MAX)
52164 + return ERR_PTR(-EINVAL);
52165 +
52166 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52167 + return ERR_PTR(-ENOMEM);
52168 +
52169 + if (copy_from_user(tmp, s_tmp->filename, len))
52170 + return ERR_PTR(-EFAULT);
52171 + tmp[len-1] = '\0';
52172 + s_tmp->filename = tmp;
52173 +
52174 + if (!strcmp(s_tmp->filename, "/"))
52175 + role->root_label = s_tmp;
52176 +
52177 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52178 + return ERR_PTR(-EFAULT);
52179 +
52180 + /* copy user and group transition tables */
52181 +
52182 + if (s_tmp->user_trans_num) {
52183 + uid_t *uidlist;
52184 +
52185 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52186 + if (uidlist == NULL)
52187 + return ERR_PTR(-ENOMEM);
52188 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52189 + return ERR_PTR(-EFAULT);
52190 +
52191 + s_tmp->user_transitions = uidlist;
52192 + }
52193 +
52194 + if (s_tmp->group_trans_num) {
52195 + gid_t *gidlist;
52196 +
52197 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52198 + if (gidlist == NULL)
52199 + return ERR_PTR(-ENOMEM);
52200 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52201 + return ERR_PTR(-EFAULT);
52202 +
52203 + s_tmp->group_transitions = gidlist;
52204 + }
52205 +
52206 + /* set up object hash table */
52207 + num_objs = count_user_objs(ghash.first);
52208 +
52209 + s_tmp->obj_hash_size = num_objs;
52210 + s_tmp->obj_hash =
52211 + (struct acl_object_label **)
52212 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52213 +
52214 + if (!s_tmp->obj_hash)
52215 + return ERR_PTR(-ENOMEM);
52216 +
52217 + memset(s_tmp->obj_hash, 0,
52218 + s_tmp->obj_hash_size *
52219 + sizeof (struct acl_object_label *));
52220 +
52221 + /* add in objects */
52222 + err = copy_user_objs(ghash.first, s_tmp, role);
52223 +
52224 + if (err)
52225 + return ERR_PTR(err);
52226 +
52227 + /* set pointer for parent subject */
52228 + if (s_tmp->parent_subject) {
52229 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52230 +
52231 + if (IS_ERR(s_tmp2))
52232 + return s_tmp2;
52233 +
52234 + s_tmp->parent_subject = s_tmp2;
52235 + }
52236 +
52237 + /* add in ip acls */
52238 +
52239 + if (!s_tmp->ip_num) {
52240 + s_tmp->ips = NULL;
52241 + goto insert;
52242 + }
52243 +
52244 + i_tmp =
52245 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52246 + sizeof (struct acl_ip_label *));
52247 +
52248 + if (!i_tmp)
52249 + return ERR_PTR(-ENOMEM);
52250 +
52251 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52252 + *(i_tmp + i_num) =
52253 + (struct acl_ip_label *)
52254 + acl_alloc(sizeof (struct acl_ip_label));
52255 + if (!*(i_tmp + i_num))
52256 + return ERR_PTR(-ENOMEM);
52257 +
52258 + if (copy_from_user
52259 + (&i_utmp2, s_tmp->ips + i_num,
52260 + sizeof (struct acl_ip_label *)))
52261 + return ERR_PTR(-EFAULT);
52262 +
52263 + if (copy_from_user
52264 + (*(i_tmp + i_num), i_utmp2,
52265 + sizeof (struct acl_ip_label)))
52266 + return ERR_PTR(-EFAULT);
52267 +
52268 + if ((*(i_tmp + i_num))->iface == NULL)
52269 + continue;
52270 +
52271 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52272 + if (!len || len >= IFNAMSIZ)
52273 + return ERR_PTR(-EINVAL);
52274 + tmp = acl_alloc(len);
52275 + if (tmp == NULL)
52276 + return ERR_PTR(-ENOMEM);
52277 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52278 + return ERR_PTR(-EFAULT);
52279 + (*(i_tmp + i_num))->iface = tmp;
52280 + }
52281 +
52282 + s_tmp->ips = i_tmp;
52283 +
52284 +insert:
52285 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52286 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52287 + return ERR_PTR(-ENOMEM);
52288 +
52289 + return s_tmp;
52290 +}
52291 +
52292 +static int
52293 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52294 +{
52295 + struct acl_subject_label s_pre;
52296 + struct acl_subject_label * ret;
52297 + int err;
52298 +
52299 + while (userp) {
52300 + if (copy_from_user(&s_pre, userp,
52301 + sizeof (struct acl_subject_label)))
52302 + return -EFAULT;
52303 +
52304 + /* do not add nested subjects here, add
52305 + while parsing objects
52306 + */
52307 +
52308 + if (s_pre.mode & GR_NESTED) {
52309 + userp = s_pre.prev;
52310 + continue;
52311 + }
52312 +
52313 + ret = do_copy_user_subj(userp, role);
52314 +
52315 + err = PTR_ERR(ret);
52316 + if (IS_ERR(ret))
52317 + return err;
52318 +
52319 + insert_acl_subj_label(ret, role);
52320 +
52321 + userp = s_pre.prev;
52322 + }
52323 +
52324 + return 0;
52325 +}
52326 +
52327 +static int
52328 +copy_user_acl(struct gr_arg *arg)
52329 +{
52330 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52331 + struct sprole_pw *sptmp;
52332 + struct gr_hash_struct *ghash;
52333 + uid_t *domainlist;
52334 + unsigned int r_num;
52335 + unsigned int len;
52336 + char *tmp;
52337 + int err = 0;
52338 + __u16 i;
52339 + __u32 num_subjs;
52340 +
52341 + /* we need a default and kernel role */
52342 + if (arg->role_db.num_roles < 2)
52343 + return -EINVAL;
52344 +
52345 + /* copy special role authentication info from userspace */
52346 +
52347 + num_sprole_pws = arg->num_sprole_pws;
52348 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52349 +
52350 + if (!acl_special_roles) {
52351 + err = -ENOMEM;
52352 + goto cleanup;
52353 + }
52354 +
52355 + for (i = 0; i < num_sprole_pws; i++) {
52356 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52357 + if (!sptmp) {
52358 + err = -ENOMEM;
52359 + goto cleanup;
52360 + }
52361 + if (copy_from_user(sptmp, arg->sprole_pws + i,
52362 + sizeof (struct sprole_pw))) {
52363 + err = -EFAULT;
52364 + goto cleanup;
52365 + }
52366 +
52367 + len =
52368 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52369 +
52370 + if (!len || len >= GR_SPROLE_LEN) {
52371 + err = -EINVAL;
52372 + goto cleanup;
52373 + }
52374 +
52375 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
52376 + err = -ENOMEM;
52377 + goto cleanup;
52378 + }
52379 +
52380 + if (copy_from_user(tmp, sptmp->rolename, len)) {
52381 + err = -EFAULT;
52382 + goto cleanup;
52383 + }
52384 + tmp[len-1] = '\0';
52385 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52386 + printk(KERN_ALERT "Copying special role %s\n", tmp);
52387 +#endif
52388 + sptmp->rolename = tmp;
52389 + acl_special_roles[i] = sptmp;
52390 + }
52391 +
52392 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52393 +
52394 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52395 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
52396 +
52397 + if (!r_tmp) {
52398 + err = -ENOMEM;
52399 + goto cleanup;
52400 + }
52401 +
52402 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
52403 + sizeof (struct acl_role_label *))) {
52404 + err = -EFAULT;
52405 + goto cleanup;
52406 + }
52407 +
52408 + if (copy_from_user(r_tmp, r_utmp2,
52409 + sizeof (struct acl_role_label))) {
52410 + err = -EFAULT;
52411 + goto cleanup;
52412 + }
52413 +
52414 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52415 +
52416 + if (!len || len >= PATH_MAX) {
52417 + err = -EINVAL;
52418 + goto cleanup;
52419 + }
52420 +
52421 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
52422 + err = -ENOMEM;
52423 + goto cleanup;
52424 + }
52425 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
52426 + err = -EFAULT;
52427 + goto cleanup;
52428 + }
52429 + tmp[len-1] = '\0';
52430 + r_tmp->rolename = tmp;
52431 +
52432 + if (!strcmp(r_tmp->rolename, "default")
52433 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52434 + default_role = r_tmp;
52435 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52436 + kernel_role = r_tmp;
52437 + }
52438 +
52439 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
52440 + err = -ENOMEM;
52441 + goto cleanup;
52442 + }
52443 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
52444 + err = -EFAULT;
52445 + goto cleanup;
52446 + }
52447 +
52448 + r_tmp->hash = ghash;
52449 +
52450 + num_subjs = count_user_subjs(r_tmp->hash->first);
52451 +
52452 + r_tmp->subj_hash_size = num_subjs;
52453 + r_tmp->subj_hash =
52454 + (struct acl_subject_label **)
52455 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52456 +
52457 + if (!r_tmp->subj_hash) {
52458 + err = -ENOMEM;
52459 + goto cleanup;
52460 + }
52461 +
52462 + err = copy_user_allowedips(r_tmp);
52463 + if (err)
52464 + goto cleanup;
52465 +
52466 + /* copy domain info */
52467 + if (r_tmp->domain_children != NULL) {
52468 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52469 + if (domainlist == NULL) {
52470 + err = -ENOMEM;
52471 + goto cleanup;
52472 + }
52473 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
52474 + err = -EFAULT;
52475 + goto cleanup;
52476 + }
52477 + r_tmp->domain_children = domainlist;
52478 + }
52479 +
52480 + err = copy_user_transitions(r_tmp);
52481 + if (err)
52482 + goto cleanup;
52483 +
52484 + memset(r_tmp->subj_hash, 0,
52485 + r_tmp->subj_hash_size *
52486 + sizeof (struct acl_subject_label *));
52487 +
52488 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52489 +
52490 + if (err)
52491 + goto cleanup;
52492 +
52493 + /* set nested subject list to null */
52494 + r_tmp->hash->first = NULL;
52495 +
52496 + insert_acl_role_label(r_tmp);
52497 + }
52498 +
52499 + goto return_err;
52500 + cleanup:
52501 + free_variables();
52502 + return_err:
52503 + return err;
52504 +
52505 +}
52506 +
52507 +static int
52508 +gracl_init(struct gr_arg *args)
52509 +{
52510 + int error = 0;
52511 +
52512 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52513 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52514 +
52515 + if (init_variables(args)) {
52516 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52517 + error = -ENOMEM;
52518 + free_variables();
52519 + goto out;
52520 + }
52521 +
52522 + error = copy_user_acl(args);
52523 + free_init_variables();
52524 + if (error) {
52525 + free_variables();
52526 + goto out;
52527 + }
52528 +
52529 + if ((error = gr_set_acls(0))) {
52530 + free_variables();
52531 + goto out;
52532 + }
52533 +
52534 + pax_open_kernel();
52535 + gr_status |= GR_READY;
52536 + pax_close_kernel();
52537 +
52538 + out:
52539 + return error;
52540 +}
52541 +
52542 +/* derived from glibc fnmatch() 0: match, 1: no match*/
52543 +
52544 +static int
52545 +glob_match(const char *p, const char *n)
52546 +{
52547 + char c;
52548 +
52549 + while ((c = *p++) != '\0') {
52550 + switch (c) {
52551 + case '?':
52552 + if (*n == '\0')
52553 + return 1;
52554 + else if (*n == '/')
52555 + return 1;
52556 + break;
52557 + case '\\':
52558 + if (*n != c)
52559 + return 1;
52560 + break;
52561 + case '*':
52562 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52563 + if (*n == '/')
52564 + return 1;
52565 + else if (c == '?') {
52566 + if (*n == '\0')
52567 + return 1;
52568 + else
52569 + ++n;
52570 + }
52571 + }
52572 + if (c == '\0') {
52573 + return 0;
52574 + } else {
52575 + const char *endp;
52576 +
52577 + if ((endp = strchr(n, '/')) == NULL)
52578 + endp = n + strlen(n);
52579 +
52580 + if (c == '[') {
52581 + for (--p; n < endp; ++n)
52582 + if (!glob_match(p, n))
52583 + return 0;
52584 + } else if (c == '/') {
52585 + while (*n != '\0' && *n != '/')
52586 + ++n;
52587 + if (*n == '/' && !glob_match(p, n + 1))
52588 + return 0;
52589 + } else {
52590 + for (--p; n < endp; ++n)
52591 + if (*n == c && !glob_match(p, n))
52592 + return 0;
52593 + }
52594 +
52595 + return 1;
52596 + }
52597 + case '[':
52598 + {
52599 + int not;
52600 + char cold;
52601 +
52602 + if (*n == '\0' || *n == '/')
52603 + return 1;
52604 +
52605 + not = (*p == '!' || *p == '^');
52606 + if (not)
52607 + ++p;
52608 +
52609 + c = *p++;
52610 + for (;;) {
52611 + unsigned char fn = (unsigned char)*n;
52612 +
52613 + if (c == '\0')
52614 + return 1;
52615 + else {
52616 + if (c == fn)
52617 + goto matched;
52618 + cold = c;
52619 + c = *p++;
52620 +
52621 + if (c == '-' && *p != ']') {
52622 + unsigned char cend = *p++;
52623 +
52624 + if (cend == '\0')
52625 + return 1;
52626 +
52627 + if (cold <= fn && fn <= cend)
52628 + goto matched;
52629 +
52630 + c = *p++;
52631 + }
52632 + }
52633 +
52634 + if (c == ']')
52635 + break;
52636 + }
52637 + if (!not)
52638 + return 1;
52639 + break;
52640 + matched:
52641 + while (c != ']') {
52642 + if (c == '\0')
52643 + return 1;
52644 +
52645 + c = *p++;
52646 + }
52647 + if (not)
52648 + return 1;
52649 + }
52650 + break;
52651 + default:
52652 + if (c != *n)
52653 + return 1;
52654 + }
52655 +
52656 + ++n;
52657 + }
52658 +
52659 + if (*n == '\0')
52660 + return 0;
52661 +
52662 + if (*n == '/')
52663 + return 0;
52664 +
52665 + return 1;
52666 +}
52667 +
52668 +static struct acl_object_label *
52669 +chk_glob_label(struct acl_object_label *globbed,
52670 + struct dentry *dentry, struct vfsmount *mnt, char **path)
52671 +{
52672 + struct acl_object_label *tmp;
52673 +
52674 + if (*path == NULL)
52675 + *path = gr_to_filename_nolock(dentry, mnt);
52676 +
52677 + tmp = globbed;
52678 +
52679 + while (tmp) {
52680 + if (!glob_match(tmp->filename, *path))
52681 + return tmp;
52682 + tmp = tmp->next;
52683 + }
52684 +
52685 + return NULL;
52686 +}
52687 +
52688 +static struct acl_object_label *
52689 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52690 + const ino_t curr_ino, const dev_t curr_dev,
52691 + const struct acl_subject_label *subj, char **path, const int checkglob)
52692 +{
52693 + struct acl_subject_label *tmpsubj;
52694 + struct acl_object_label *retval;
52695 + struct acl_object_label *retval2;
52696 +
52697 + tmpsubj = (struct acl_subject_label *) subj;
52698 + read_lock(&gr_inode_lock);
52699 + do {
52700 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52701 + if (retval) {
52702 + if (checkglob && retval->globbed) {
52703 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
52704 + (struct vfsmount *)orig_mnt, path);
52705 + if (retval2)
52706 + retval = retval2;
52707 + }
52708 + break;
52709 + }
52710 + } while ((tmpsubj = tmpsubj->parent_subject));
52711 + read_unlock(&gr_inode_lock);
52712 +
52713 + return retval;
52714 +}
52715 +
52716 +static __inline__ struct acl_object_label *
52717 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52718 + struct dentry *curr_dentry,
52719 + const struct acl_subject_label *subj, char **path, const int checkglob)
52720 +{
52721 + int newglob = checkglob;
52722 + ino_t inode;
52723 + dev_t device;
52724 +
52725 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52726 + as we don't want a / * rule to match instead of the / object
52727 + don't do this for create lookups that call this function though, since they're looking up
52728 + on the parent and thus need globbing checks on all paths
52729 + */
52730 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52731 + newglob = GR_NO_GLOB;
52732 +
52733 + spin_lock(&curr_dentry->d_lock);
52734 + inode = curr_dentry->d_inode->i_ino;
52735 + device = __get_dev(curr_dentry);
52736 + spin_unlock(&curr_dentry->d_lock);
52737 +
52738 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52739 +}
52740 +
52741 +static struct acl_object_label *
52742 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52743 + const struct acl_subject_label *subj, char *path, const int checkglob)
52744 +{
52745 + struct dentry *dentry = (struct dentry *) l_dentry;
52746 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52747 + struct acl_object_label *retval;
52748 + struct dentry *parent;
52749 +
52750 + write_seqlock(&rename_lock);
52751 + br_read_lock(vfsmount_lock);
52752 +
52753 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52754 +#ifdef CONFIG_NET
52755 + mnt == sock_mnt ||
52756 +#endif
52757 +#ifdef CONFIG_HUGETLBFS
52758 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52759 +#endif
52760 + /* ignore Eric Biederman */
52761 + IS_PRIVATE(l_dentry->d_inode))) {
52762 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52763 + goto out;
52764 + }
52765 +
52766 + for (;;) {
52767 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52768 + break;
52769 +
52770 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52771 + if (mnt->mnt_parent == mnt)
52772 + break;
52773 +
52774 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52775 + if (retval != NULL)
52776 + goto out;
52777 +
52778 + dentry = mnt->mnt_mountpoint;
52779 + mnt = mnt->mnt_parent;
52780 + continue;
52781 + }
52782 +
52783 + parent = dentry->d_parent;
52784 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52785 + if (retval != NULL)
52786 + goto out;
52787 +
52788 + dentry = parent;
52789 + }
52790 +
52791 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52792 +
52793 + /* real_root is pinned so we don't have to hold a reference */
52794 + if (retval == NULL)
52795 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52796 +out:
52797 + br_read_unlock(vfsmount_lock);
52798 + write_sequnlock(&rename_lock);
52799 +
52800 + BUG_ON(retval == NULL);
52801 +
52802 + return retval;
52803 +}
52804 +
52805 +static __inline__ struct acl_object_label *
52806 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52807 + const struct acl_subject_label *subj)
52808 +{
52809 + char *path = NULL;
52810 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52811 +}
52812 +
52813 +static __inline__ struct acl_object_label *
52814 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52815 + const struct acl_subject_label *subj)
52816 +{
52817 + char *path = NULL;
52818 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52819 +}
52820 +
52821 +static __inline__ struct acl_object_label *
52822 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52823 + const struct acl_subject_label *subj, char *path)
52824 +{
52825 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52826 +}
52827 +
52828 +static struct acl_subject_label *
52829 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52830 + const struct acl_role_label *role)
52831 +{
52832 + struct dentry *dentry = (struct dentry *) l_dentry;
52833 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52834 + struct acl_subject_label *retval;
52835 + struct dentry *parent;
52836 +
52837 + write_seqlock(&rename_lock);
52838 + br_read_lock(vfsmount_lock);
52839 +
52840 + for (;;) {
52841 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52842 + break;
52843 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52844 + if (mnt->mnt_parent == mnt)
52845 + break;
52846 +
52847 + spin_lock(&dentry->d_lock);
52848 + read_lock(&gr_inode_lock);
52849 + retval =
52850 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52851 + __get_dev(dentry), role);
52852 + read_unlock(&gr_inode_lock);
52853 + spin_unlock(&dentry->d_lock);
52854 + if (retval != NULL)
52855 + goto out;
52856 +
52857 + dentry = mnt->mnt_mountpoint;
52858 + mnt = mnt->mnt_parent;
52859 + continue;
52860 + }
52861 +
52862 + spin_lock(&dentry->d_lock);
52863 + read_lock(&gr_inode_lock);
52864 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52865 + __get_dev(dentry), role);
52866 + read_unlock(&gr_inode_lock);
52867 + parent = dentry->d_parent;
52868 + spin_unlock(&dentry->d_lock);
52869 +
52870 + if (retval != NULL)
52871 + goto out;
52872 +
52873 + dentry = parent;
52874 + }
52875 +
52876 + spin_lock(&dentry->d_lock);
52877 + read_lock(&gr_inode_lock);
52878 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52879 + __get_dev(dentry), role);
52880 + read_unlock(&gr_inode_lock);
52881 + spin_unlock(&dentry->d_lock);
52882 +
52883 + if (unlikely(retval == NULL)) {
52884 + /* real_root is pinned, we don't need to hold a reference */
52885 + read_lock(&gr_inode_lock);
52886 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52887 + __get_dev(real_root.dentry), role);
52888 + read_unlock(&gr_inode_lock);
52889 + }
52890 +out:
52891 + br_read_unlock(vfsmount_lock);
52892 + write_sequnlock(&rename_lock);
52893 +
52894 + BUG_ON(retval == NULL);
52895 +
52896 + return retval;
52897 +}
52898 +
52899 +static void
52900 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52901 +{
52902 + struct task_struct *task = current;
52903 + const struct cred *cred = current_cred();
52904 +
52905 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52906 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52907 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52908 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52909 +
52910 + return;
52911 +}
52912 +
52913 +static void
52914 +gr_log_learn_sysctl(const char *path, const __u32 mode)
52915 +{
52916 + struct task_struct *task = current;
52917 + const struct cred *cred = current_cred();
52918 +
52919 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52920 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52921 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52922 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52923 +
52924 + return;
52925 +}
52926 +
52927 +static void
52928 +gr_log_learn_id_change(const char type, const unsigned int real,
52929 + const unsigned int effective, const unsigned int fs)
52930 +{
52931 + struct task_struct *task = current;
52932 + const struct cred *cred = current_cred();
52933 +
52934 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52935 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52936 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52937 + type, real, effective, fs, &task->signal->saved_ip);
52938 +
52939 + return;
52940 +}
52941 +
52942 +__u32
52943 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52944 + const struct vfsmount * mnt)
52945 +{
52946 + __u32 retval = mode;
52947 + struct acl_subject_label *curracl;
52948 + struct acl_object_label *currobj;
52949 +
52950 + if (unlikely(!(gr_status & GR_READY)))
52951 + return (mode & ~GR_AUDITS);
52952 +
52953 + curracl = current->acl;
52954 +
52955 + currobj = chk_obj_label(dentry, mnt, curracl);
52956 + retval = currobj->mode & mode;
52957 +
52958 + /* if we're opening a specified transfer file for writing
52959 + (e.g. /dev/initctl), then transfer our role to init
52960 + */
52961 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52962 + current->role->roletype & GR_ROLE_PERSIST)) {
52963 + struct task_struct *task = init_pid_ns.child_reaper;
52964 +
52965 + if (task->role != current->role) {
52966 + task->acl_sp_role = 0;
52967 + task->acl_role_id = current->acl_role_id;
52968 + task->role = current->role;
52969 + rcu_read_lock();
52970 + read_lock(&grsec_exec_file_lock);
52971 + gr_apply_subject_to_task(task);
52972 + read_unlock(&grsec_exec_file_lock);
52973 + rcu_read_unlock();
52974 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52975 + }
52976 + }
52977 +
52978 + if (unlikely
52979 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52980 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52981 + __u32 new_mode = mode;
52982 +
52983 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52984 +
52985 + retval = new_mode;
52986 +
52987 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52988 + new_mode |= GR_INHERIT;
52989 +
52990 + if (!(mode & GR_NOLEARN))
52991 + gr_log_learn(dentry, mnt, new_mode);
52992 + }
52993 +
52994 + return retval;
52995 +}
52996 +
52997 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52998 + const struct dentry *parent,
52999 + const struct vfsmount *mnt)
53000 +{
53001 + struct name_entry *match;
53002 + struct acl_object_label *matchpo;
53003 + struct acl_subject_label *curracl;
53004 + char *path;
53005 +
53006 + if (unlikely(!(gr_status & GR_READY)))
53007 + return NULL;
53008 +
53009 + preempt_disable();
53010 + path = gr_to_filename_rbac(new_dentry, mnt);
53011 + match = lookup_name_entry_create(path);
53012 +
53013 + curracl = current->acl;
53014 +
53015 + if (match) {
53016 + read_lock(&gr_inode_lock);
53017 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53018 + read_unlock(&gr_inode_lock);
53019 +
53020 + if (matchpo) {
53021 + preempt_enable();
53022 + return matchpo;
53023 + }
53024 + }
53025 +
53026 + // lookup parent
53027 +
53028 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
53029 +
53030 + preempt_enable();
53031 + return matchpo;
53032 +}
53033 +
53034 +__u32
53035 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53036 + const struct vfsmount * mnt, const __u32 mode)
53037 +{
53038 + struct acl_object_label *matchpo;
53039 + __u32 retval;
53040 +
53041 + if (unlikely(!(gr_status & GR_READY)))
53042 + return (mode & ~GR_AUDITS);
53043 +
53044 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
53045 +
53046 + retval = matchpo->mode & mode;
53047 +
53048 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
53049 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53050 + __u32 new_mode = mode;
53051 +
53052 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53053 +
53054 + gr_log_learn(new_dentry, mnt, new_mode);
53055 + return new_mode;
53056 + }
53057 +
53058 + return retval;
53059 +}
53060 +
53061 +__u32
53062 +gr_check_link(const struct dentry * new_dentry,
53063 + const struct dentry * parent_dentry,
53064 + const struct vfsmount * parent_mnt,
53065 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53066 +{
53067 + struct acl_object_label *obj;
53068 + __u32 oldmode, newmode;
53069 + __u32 needmode;
53070 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53071 + GR_DELETE | GR_INHERIT;
53072 +
53073 + if (unlikely(!(gr_status & GR_READY)))
53074 + return (GR_CREATE | GR_LINK);
53075 +
53076 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53077 + oldmode = obj->mode;
53078 +
53079 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53080 + newmode = obj->mode;
53081 +
53082 + needmode = newmode & checkmodes;
53083 +
53084 + // old name for hardlink must have at least the permissions of the new name
53085 + if ((oldmode & needmode) != needmode)
53086 + goto bad;
53087 +
53088 + // if old name had restrictions/auditing, make sure the new name does as well
53089 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53090 +
53091 + // don't allow hardlinking of suid/sgid files without permission
53092 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53093 + needmode |= GR_SETID;
53094 +
53095 + if ((newmode & needmode) != needmode)
53096 + goto bad;
53097 +
53098 + // enforce minimum permissions
53099 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53100 + return newmode;
53101 +bad:
53102 + needmode = oldmode;
53103 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53104 + needmode |= GR_SETID;
53105 +
53106 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53107 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53108 + return (GR_CREATE | GR_LINK);
53109 + } else if (newmode & GR_SUPPRESS)
53110 + return GR_SUPPRESS;
53111 + else
53112 + return 0;
53113 +}
53114 +
53115 +int
53116 +gr_check_hidden_task(const struct task_struct *task)
53117 +{
53118 + if (unlikely(!(gr_status & GR_READY)))
53119 + return 0;
53120 +
53121 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53122 + return 1;
53123 +
53124 + return 0;
53125 +}
53126 +
53127 +int
53128 +gr_check_protected_task(const struct task_struct *task)
53129 +{
53130 + if (unlikely(!(gr_status & GR_READY) || !task))
53131 + return 0;
53132 +
53133 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53134 + task->acl != current->acl)
53135 + return 1;
53136 +
53137 + return 0;
53138 +}
53139 +
53140 +int
53141 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53142 +{
53143 + struct task_struct *p;
53144 + int ret = 0;
53145 +
53146 + if (unlikely(!(gr_status & GR_READY) || !pid))
53147 + return ret;
53148 +
53149 + read_lock(&tasklist_lock);
53150 + do_each_pid_task(pid, type, p) {
53151 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53152 + p->acl != current->acl) {
53153 + ret = 1;
53154 + goto out;
53155 + }
53156 + } while_each_pid_task(pid, type, p);
53157 +out:
53158 + read_unlock(&tasklist_lock);
53159 +
53160 + return ret;
53161 +}
53162 +
53163 +void
53164 +gr_copy_label(struct task_struct *tsk)
53165 +{
53166 + tsk->signal->used_accept = 0;
53167 + tsk->acl_sp_role = 0;
53168 + tsk->acl_role_id = current->acl_role_id;
53169 + tsk->acl = current->acl;
53170 + tsk->role = current->role;
53171 + tsk->signal->curr_ip = current->signal->curr_ip;
53172 + tsk->signal->saved_ip = current->signal->saved_ip;
53173 + if (current->exec_file)
53174 + get_file(current->exec_file);
53175 + tsk->exec_file = current->exec_file;
53176 + tsk->is_writable = current->is_writable;
53177 + if (unlikely(current->signal->used_accept)) {
53178 + current->signal->curr_ip = 0;
53179 + current->signal->saved_ip = 0;
53180 + }
53181 +
53182 + return;
53183 +}
53184 +
53185 +static void
53186 +gr_set_proc_res(struct task_struct *task)
53187 +{
53188 + struct acl_subject_label *proc;
53189 + unsigned short i;
53190 +
53191 + proc = task->acl;
53192 +
53193 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53194 + return;
53195 +
53196 + for (i = 0; i < RLIM_NLIMITS; i++) {
53197 + if (!(proc->resmask & (1 << i)))
53198 + continue;
53199 +
53200 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53201 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53202 + }
53203 +
53204 + return;
53205 +}
53206 +
53207 +extern int __gr_process_user_ban(struct user_struct *user);
53208 +
53209 +int
53210 +gr_check_user_change(int real, int effective, int fs)
53211 +{
53212 + unsigned int i;
53213 + __u16 num;
53214 + uid_t *uidlist;
53215 + int curuid;
53216 + int realok = 0;
53217 + int effectiveok = 0;
53218 + int fsok = 0;
53219 +
53220 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53221 + struct user_struct *user;
53222 +
53223 + if (real == -1)
53224 + goto skipit;
53225 +
53226 + user = find_user(real);
53227 + if (user == NULL)
53228 + goto skipit;
53229 +
53230 + if (__gr_process_user_ban(user)) {
53231 + /* for find_user */
53232 + free_uid(user);
53233 + return 1;
53234 + }
53235 +
53236 + /* for find_user */
53237 + free_uid(user);
53238 +
53239 +skipit:
53240 +#endif
53241 +
53242 + if (unlikely(!(gr_status & GR_READY)))
53243 + return 0;
53244 +
53245 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53246 + gr_log_learn_id_change('u', real, effective, fs);
53247 +
53248 + num = current->acl->user_trans_num;
53249 + uidlist = current->acl->user_transitions;
53250 +
53251 + if (uidlist == NULL)
53252 + return 0;
53253 +
53254 + if (real == -1)
53255 + realok = 1;
53256 + if (effective == -1)
53257 + effectiveok = 1;
53258 + if (fs == -1)
53259 + fsok = 1;
53260 +
53261 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
53262 + for (i = 0; i < num; i++) {
53263 + curuid = (int)uidlist[i];
53264 + if (real == curuid)
53265 + realok = 1;
53266 + if (effective == curuid)
53267 + effectiveok = 1;
53268 + if (fs == curuid)
53269 + fsok = 1;
53270 + }
53271 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
53272 + for (i = 0; i < num; i++) {
53273 + curuid = (int)uidlist[i];
53274 + if (real == curuid)
53275 + break;
53276 + if (effective == curuid)
53277 + break;
53278 + if (fs == curuid)
53279 + break;
53280 + }
53281 + /* not in deny list */
53282 + if (i == num) {
53283 + realok = 1;
53284 + effectiveok = 1;
53285 + fsok = 1;
53286 + }
53287 + }
53288 +
53289 + if (realok && effectiveok && fsok)
53290 + return 0;
53291 + else {
53292 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53293 + return 1;
53294 + }
53295 +}
53296 +
53297 +int
53298 +gr_check_group_change(int real, int effective, int fs)
53299 +{
53300 + unsigned int i;
53301 + __u16 num;
53302 + gid_t *gidlist;
53303 + int curgid;
53304 + int realok = 0;
53305 + int effectiveok = 0;
53306 + int fsok = 0;
53307 +
53308 + if (unlikely(!(gr_status & GR_READY)))
53309 + return 0;
53310 +
53311 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53312 + gr_log_learn_id_change('g', real, effective, fs);
53313 +
53314 + num = current->acl->group_trans_num;
53315 + gidlist = current->acl->group_transitions;
53316 +
53317 + if (gidlist == NULL)
53318 + return 0;
53319 +
53320 + if (real == -1)
53321 + realok = 1;
53322 + if (effective == -1)
53323 + effectiveok = 1;
53324 + if (fs == -1)
53325 + fsok = 1;
53326 +
53327 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
53328 + for (i = 0; i < num; i++) {
53329 + curgid = (int)gidlist[i];
53330 + if (real == curgid)
53331 + realok = 1;
53332 + if (effective == curgid)
53333 + effectiveok = 1;
53334 + if (fs == curgid)
53335 + fsok = 1;
53336 + }
53337 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
53338 + for (i = 0; i < num; i++) {
53339 + curgid = (int)gidlist[i];
53340 + if (real == curgid)
53341 + break;
53342 + if (effective == curgid)
53343 + break;
53344 + if (fs == curgid)
53345 + break;
53346 + }
53347 + /* not in deny list */
53348 + if (i == num) {
53349 + realok = 1;
53350 + effectiveok = 1;
53351 + fsok = 1;
53352 + }
53353 + }
53354 +
53355 + if (realok && effectiveok && fsok)
53356 + return 0;
53357 + else {
53358 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53359 + return 1;
53360 + }
53361 +}
53362 +
53363 +void
53364 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53365 +{
53366 + struct acl_role_label *role = task->role;
53367 + struct acl_subject_label *subj = NULL;
53368 + struct acl_object_label *obj;
53369 + struct file *filp;
53370 +
53371 + if (unlikely(!(gr_status & GR_READY)))
53372 + return;
53373 +
53374 + filp = task->exec_file;
53375 +
53376 + /* kernel process, we'll give them the kernel role */
53377 + if (unlikely(!filp)) {
53378 + task->role = kernel_role;
53379 + task->acl = kernel_role->root_label;
53380 + return;
53381 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53382 + role = lookup_acl_role_label(task, uid, gid);
53383 +
53384 + /* perform subject lookup in possibly new role
53385 + we can use this result below in the case where role == task->role
53386 + */
53387 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53388 +
53389 + /* if we changed uid/gid, but result in the same role
53390 + and are using inheritance, don't lose the inherited subject
53391 + if current subject is other than what normal lookup
53392 + would result in, we arrived via inheritance, don't
53393 + lose subject
53394 + */
53395 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53396 + (subj == task->acl)))
53397 + task->acl = subj;
53398 +
53399 + task->role = role;
53400 +
53401 + task->is_writable = 0;
53402 +
53403 + /* ignore additional mmap checks for processes that are writable
53404 + by the default ACL */
53405 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53406 + if (unlikely(obj->mode & GR_WRITE))
53407 + task->is_writable = 1;
53408 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53409 + if (unlikely(obj->mode & GR_WRITE))
53410 + task->is_writable = 1;
53411 +
53412 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53413 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53414 +#endif
53415 +
53416 + gr_set_proc_res(task);
53417 +
53418 + return;
53419 +}
53420 +
53421 +int
53422 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53423 + const int unsafe_share)
53424 +{
53425 + struct task_struct *task = current;
53426 + struct acl_subject_label *newacl;
53427 + struct acl_object_label *obj;
53428 + __u32 retmode;
53429 +
53430 + if (unlikely(!(gr_status & GR_READY)))
53431 + return 0;
53432 +
53433 + newacl = chk_subj_label(dentry, mnt, task->role);
53434 +
53435 + task_lock(task);
53436 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
53437 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53438 + !(task->role->roletype & GR_ROLE_GOD) &&
53439 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53440 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
53441 + task_unlock(task);
53442 + if (unsafe_share)
53443 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53444 + else
53445 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53446 + return -EACCES;
53447 + }
53448 + task_unlock(task);
53449 +
53450 + obj = chk_obj_label(dentry, mnt, task->acl);
53451 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53452 +
53453 + if (!(task->acl->mode & GR_INHERITLEARN) &&
53454 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53455 + if (obj->nested)
53456 + task->acl = obj->nested;
53457 + else
53458 + task->acl = newacl;
53459 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53460 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53461 +
53462 + task->is_writable = 0;
53463 +
53464 + /* ignore additional mmap checks for processes that are writable
53465 + by the default ACL */
53466 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53467 + if (unlikely(obj->mode & GR_WRITE))
53468 + task->is_writable = 1;
53469 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53470 + if (unlikely(obj->mode & GR_WRITE))
53471 + task->is_writable = 1;
53472 +
53473 + gr_set_proc_res(task);
53474 +
53475 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53476 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53477 +#endif
53478 + return 0;
53479 +}
53480 +
53481 +/* always called with valid inodev ptr */
53482 +static void
53483 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53484 +{
53485 + struct acl_object_label *matchpo;
53486 + struct acl_subject_label *matchps;
53487 + struct acl_subject_label *subj;
53488 + struct acl_role_label *role;
53489 + unsigned int x;
53490 +
53491 + FOR_EACH_ROLE_START(role)
53492 + FOR_EACH_SUBJECT_START(role, subj, x)
53493 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53494 + matchpo->mode |= GR_DELETED;
53495 + FOR_EACH_SUBJECT_END(subj,x)
53496 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53497 + if (subj->inode == ino && subj->device == dev)
53498 + subj->mode |= GR_DELETED;
53499 + FOR_EACH_NESTED_SUBJECT_END(subj)
53500 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53501 + matchps->mode |= GR_DELETED;
53502 + FOR_EACH_ROLE_END(role)
53503 +
53504 + inodev->nentry->deleted = 1;
53505 +
53506 + return;
53507 +}
53508 +
53509 +void
53510 +gr_handle_delete(const ino_t ino, const dev_t dev)
53511 +{
53512 + struct inodev_entry *inodev;
53513 +
53514 + if (unlikely(!(gr_status & GR_READY)))
53515 + return;
53516 +
53517 + write_lock(&gr_inode_lock);
53518 + inodev = lookup_inodev_entry(ino, dev);
53519 + if (inodev != NULL)
53520 + do_handle_delete(inodev, ino, dev);
53521 + write_unlock(&gr_inode_lock);
53522 +
53523 + return;
53524 +}
53525 +
53526 +static void
53527 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53528 + const ino_t newinode, const dev_t newdevice,
53529 + struct acl_subject_label *subj)
53530 +{
53531 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53532 + struct acl_object_label *match;
53533 +
53534 + match = subj->obj_hash[index];
53535 +
53536 + while (match && (match->inode != oldinode ||
53537 + match->device != olddevice ||
53538 + !(match->mode & GR_DELETED)))
53539 + match = match->next;
53540 +
53541 + if (match && (match->inode == oldinode)
53542 + && (match->device == olddevice)
53543 + && (match->mode & GR_DELETED)) {
53544 + if (match->prev == NULL) {
53545 + subj->obj_hash[index] = match->next;
53546 + if (match->next != NULL)
53547 + match->next->prev = NULL;
53548 + } else {
53549 + match->prev->next = match->next;
53550 + if (match->next != NULL)
53551 + match->next->prev = match->prev;
53552 + }
53553 + match->prev = NULL;
53554 + match->next = NULL;
53555 + match->inode = newinode;
53556 + match->device = newdevice;
53557 + match->mode &= ~GR_DELETED;
53558 +
53559 + insert_acl_obj_label(match, subj);
53560 + }
53561 +
53562 + return;
53563 +}
53564 +
53565 +static void
53566 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53567 + const ino_t newinode, const dev_t newdevice,
53568 + struct acl_role_label *role)
53569 +{
53570 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53571 + struct acl_subject_label *match;
53572 +
53573 + match = role->subj_hash[index];
53574 +
53575 + while (match && (match->inode != oldinode ||
53576 + match->device != olddevice ||
53577 + !(match->mode & GR_DELETED)))
53578 + match = match->next;
53579 +
53580 + if (match && (match->inode == oldinode)
53581 + && (match->device == olddevice)
53582 + && (match->mode & GR_DELETED)) {
53583 + if (match->prev == NULL) {
53584 + role->subj_hash[index] = match->next;
53585 + if (match->next != NULL)
53586 + match->next->prev = NULL;
53587 + } else {
53588 + match->prev->next = match->next;
53589 + if (match->next != NULL)
53590 + match->next->prev = match->prev;
53591 + }
53592 + match->prev = NULL;
53593 + match->next = NULL;
53594 + match->inode = newinode;
53595 + match->device = newdevice;
53596 + match->mode &= ~GR_DELETED;
53597 +
53598 + insert_acl_subj_label(match, role);
53599 + }
53600 +
53601 + return;
53602 +}
53603 +
53604 +static void
53605 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53606 + const ino_t newinode, const dev_t newdevice)
53607 +{
53608 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53609 + struct inodev_entry *match;
53610 +
53611 + match = inodev_set.i_hash[index];
53612 +
53613 + while (match && (match->nentry->inode != oldinode ||
53614 + match->nentry->device != olddevice || !match->nentry->deleted))
53615 + match = match->next;
53616 +
53617 + if (match && (match->nentry->inode == oldinode)
53618 + && (match->nentry->device == olddevice) &&
53619 + match->nentry->deleted) {
53620 + if (match->prev == NULL) {
53621 + inodev_set.i_hash[index] = match->next;
53622 + if (match->next != NULL)
53623 + match->next->prev = NULL;
53624 + } else {
53625 + match->prev->next = match->next;
53626 + if (match->next != NULL)
53627 + match->next->prev = match->prev;
53628 + }
53629 + match->prev = NULL;
53630 + match->next = NULL;
53631 + match->nentry->inode = newinode;
53632 + match->nentry->device = newdevice;
53633 + match->nentry->deleted = 0;
53634 +
53635 + insert_inodev_entry(match);
53636 + }
53637 +
53638 + return;
53639 +}
53640 +
53641 +static void
53642 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53643 +{
53644 + struct acl_subject_label *subj;
53645 + struct acl_role_label *role;
53646 + unsigned int x;
53647 +
53648 + FOR_EACH_ROLE_START(role)
53649 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53650 +
53651 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53652 + if ((subj->inode == ino) && (subj->device == dev)) {
53653 + subj->inode = ino;
53654 + subj->device = dev;
53655 + }
53656 + FOR_EACH_NESTED_SUBJECT_END(subj)
53657 + FOR_EACH_SUBJECT_START(role, subj, x)
53658 + update_acl_obj_label(matchn->inode, matchn->device,
53659 + ino, dev, subj);
53660 + FOR_EACH_SUBJECT_END(subj,x)
53661 + FOR_EACH_ROLE_END(role)
53662 +
53663 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53664 +
53665 + return;
53666 +}
53667 +
53668 +static void
53669 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53670 + const struct vfsmount *mnt)
53671 +{
53672 + ino_t ino = dentry->d_inode->i_ino;
53673 + dev_t dev = __get_dev(dentry);
53674 +
53675 + __do_handle_create(matchn, ino, dev);
53676 +
53677 + return;
53678 +}
53679 +
53680 +void
53681 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53682 +{
53683 + struct name_entry *matchn;
53684 +
53685 + if (unlikely(!(gr_status & GR_READY)))
53686 + return;
53687 +
53688 + preempt_disable();
53689 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53690 +
53691 + if (unlikely((unsigned long)matchn)) {
53692 + write_lock(&gr_inode_lock);
53693 + do_handle_create(matchn, dentry, mnt);
53694 + write_unlock(&gr_inode_lock);
53695 + }
53696 + preempt_enable();
53697 +
53698 + return;
53699 +}
53700 +
53701 +void
53702 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53703 +{
53704 + struct name_entry *matchn;
53705 +
53706 + if (unlikely(!(gr_status & GR_READY)))
53707 + return;
53708 +
53709 + preempt_disable();
53710 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53711 +
53712 + if (unlikely((unsigned long)matchn)) {
53713 + write_lock(&gr_inode_lock);
53714 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53715 + write_unlock(&gr_inode_lock);
53716 + }
53717 + preempt_enable();
53718 +
53719 + return;
53720 +}
53721 +
53722 +void
53723 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53724 + struct dentry *old_dentry,
53725 + struct dentry *new_dentry,
53726 + struct vfsmount *mnt, const __u8 replace)
53727 +{
53728 + struct name_entry *matchn;
53729 + struct inodev_entry *inodev;
53730 + struct inode *inode = new_dentry->d_inode;
53731 + ino_t old_ino = old_dentry->d_inode->i_ino;
53732 + dev_t old_dev = __get_dev(old_dentry);
53733 +
53734 + /* vfs_rename swaps the name and parent link for old_dentry and
53735 + new_dentry
53736 + at this point, old_dentry has the new name, parent link, and inode
53737 + for the renamed file
53738 + if a file is being replaced by a rename, new_dentry has the inode
53739 + and name for the replaced file
53740 + */
53741 +
53742 + if (unlikely(!(gr_status & GR_READY)))
53743 + return;
53744 +
53745 + preempt_disable();
53746 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53747 +
53748 + /* we wouldn't have to check d_inode if it weren't for
53749 + NFS silly-renaming
53750 + */
53751 +
53752 + write_lock(&gr_inode_lock);
53753 + if (unlikely(replace && inode)) {
53754 + ino_t new_ino = inode->i_ino;
53755 + dev_t new_dev = __get_dev(new_dentry);
53756 +
53757 + inodev = lookup_inodev_entry(new_ino, new_dev);
53758 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53759 + do_handle_delete(inodev, new_ino, new_dev);
53760 + }
53761 +
53762 + inodev = lookup_inodev_entry(old_ino, old_dev);
53763 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53764 + do_handle_delete(inodev, old_ino, old_dev);
53765 +
53766 + if (unlikely((unsigned long)matchn))
53767 + do_handle_create(matchn, old_dentry, mnt);
53768 +
53769 + write_unlock(&gr_inode_lock);
53770 + preempt_enable();
53771 +
53772 + return;
53773 +}
53774 +
53775 +static int
53776 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53777 + unsigned char **sum)
53778 +{
53779 + struct acl_role_label *r;
53780 + struct role_allowed_ip *ipp;
53781 + struct role_transition *trans;
53782 + unsigned int i;
53783 + int found = 0;
53784 + u32 curr_ip = current->signal->curr_ip;
53785 +
53786 + current->signal->saved_ip = curr_ip;
53787 +
53788 + /* check transition table */
53789 +
53790 + for (trans = current->role->transitions; trans; trans = trans->next) {
53791 + if (!strcmp(rolename, trans->rolename)) {
53792 + found = 1;
53793 + break;
53794 + }
53795 + }
53796 +
53797 + if (!found)
53798 + return 0;
53799 +
53800 + /* handle special roles that do not require authentication
53801 + and check ip */
53802 +
53803 + FOR_EACH_ROLE_START(r)
53804 + if (!strcmp(rolename, r->rolename) &&
53805 + (r->roletype & GR_ROLE_SPECIAL)) {
53806 + found = 0;
53807 + if (r->allowed_ips != NULL) {
53808 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53809 + if ((ntohl(curr_ip) & ipp->netmask) ==
53810 + (ntohl(ipp->addr) & ipp->netmask))
53811 + found = 1;
53812 + }
53813 + } else
53814 + found = 2;
53815 + if (!found)
53816 + return 0;
53817 +
53818 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53819 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53820 + *salt = NULL;
53821 + *sum = NULL;
53822 + return 1;
53823 + }
53824 + }
53825 + FOR_EACH_ROLE_END(r)
53826 +
53827 + for (i = 0; i < num_sprole_pws; i++) {
53828 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53829 + *salt = acl_special_roles[i]->salt;
53830 + *sum = acl_special_roles[i]->sum;
53831 + return 1;
53832 + }
53833 + }
53834 +
53835 + return 0;
53836 +}
53837 +
53838 +static void
53839 +assign_special_role(char *rolename)
53840 +{
53841 + struct acl_object_label *obj;
53842 + struct acl_role_label *r;
53843 + struct acl_role_label *assigned = NULL;
53844 + struct task_struct *tsk;
53845 + struct file *filp;
53846 +
53847 + FOR_EACH_ROLE_START(r)
53848 + if (!strcmp(rolename, r->rolename) &&
53849 + (r->roletype & GR_ROLE_SPECIAL)) {
53850 + assigned = r;
53851 + break;
53852 + }
53853 + FOR_EACH_ROLE_END(r)
53854 +
53855 + if (!assigned)
53856 + return;
53857 +
53858 + read_lock(&tasklist_lock);
53859 + read_lock(&grsec_exec_file_lock);
53860 +
53861 + tsk = current->real_parent;
53862 + if (tsk == NULL)
53863 + goto out_unlock;
53864 +
53865 + filp = tsk->exec_file;
53866 + if (filp == NULL)
53867 + goto out_unlock;
53868 +
53869 + tsk->is_writable = 0;
53870 +
53871 + tsk->acl_sp_role = 1;
53872 + tsk->acl_role_id = ++acl_sp_role_value;
53873 + tsk->role = assigned;
53874 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53875 +
53876 + /* ignore additional mmap checks for processes that are writable
53877 + by the default ACL */
53878 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53879 + if (unlikely(obj->mode & GR_WRITE))
53880 + tsk->is_writable = 1;
53881 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53882 + if (unlikely(obj->mode & GR_WRITE))
53883 + tsk->is_writable = 1;
53884 +
53885 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53886 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53887 +#endif
53888 +
53889 +out_unlock:
53890 + read_unlock(&grsec_exec_file_lock);
53891 + read_unlock(&tasklist_lock);
53892 + return;
53893 +}
53894 +
53895 +int gr_check_secure_terminal(struct task_struct *task)
53896 +{
53897 + struct task_struct *p, *p2, *p3;
53898 + struct files_struct *files;
53899 + struct fdtable *fdt;
53900 + struct file *our_file = NULL, *file;
53901 + int i;
53902 +
53903 + if (task->signal->tty == NULL)
53904 + return 1;
53905 +
53906 + files = get_files_struct(task);
53907 + if (files != NULL) {
53908 + rcu_read_lock();
53909 + fdt = files_fdtable(files);
53910 + for (i=0; i < fdt->max_fds; i++) {
53911 + file = fcheck_files(files, i);
53912 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53913 + get_file(file);
53914 + our_file = file;
53915 + }
53916 + }
53917 + rcu_read_unlock();
53918 + put_files_struct(files);
53919 + }
53920 +
53921 + if (our_file == NULL)
53922 + return 1;
53923 +
53924 + read_lock(&tasklist_lock);
53925 + do_each_thread(p2, p) {
53926 + files = get_files_struct(p);
53927 + if (files == NULL ||
53928 + (p->signal && p->signal->tty == task->signal->tty)) {
53929 + if (files != NULL)
53930 + put_files_struct(files);
53931 + continue;
53932 + }
53933 + rcu_read_lock();
53934 + fdt = files_fdtable(files);
53935 + for (i=0; i < fdt->max_fds; i++) {
53936 + file = fcheck_files(files, i);
53937 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53938 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53939 + p3 = task;
53940 + while (p3->pid > 0) {
53941 + if (p3 == p)
53942 + break;
53943 + p3 = p3->real_parent;
53944 + }
53945 + if (p3 == p)
53946 + break;
53947 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53948 + gr_handle_alertkill(p);
53949 + rcu_read_unlock();
53950 + put_files_struct(files);
53951 + read_unlock(&tasklist_lock);
53952 + fput(our_file);
53953 + return 0;
53954 + }
53955 + }
53956 + rcu_read_unlock();
53957 + put_files_struct(files);
53958 + } while_each_thread(p2, p);
53959 + read_unlock(&tasklist_lock);
53960 +
53961 + fput(our_file);
53962 + return 1;
53963 +}
53964 +
53965 +ssize_t
53966 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53967 +{
53968 + struct gr_arg_wrapper uwrap;
53969 + unsigned char *sprole_salt = NULL;
53970 + unsigned char *sprole_sum = NULL;
53971 + int error = sizeof (struct gr_arg_wrapper);
53972 + int error2 = 0;
53973 +
53974 + mutex_lock(&gr_dev_mutex);
53975 +
53976 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53977 + error = -EPERM;
53978 + goto out;
53979 + }
53980 +
53981 + if (count != sizeof (struct gr_arg_wrapper)) {
53982 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53983 + error = -EINVAL;
53984 + goto out;
53985 + }
53986 +
53987 +
53988 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53989 + gr_auth_expires = 0;
53990 + gr_auth_attempts = 0;
53991 + }
53992 +
53993 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53994 + error = -EFAULT;
53995 + goto out;
53996 + }
53997 +
53998 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53999 + error = -EINVAL;
54000 + goto out;
54001 + }
54002 +
54003 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54004 + error = -EFAULT;
54005 + goto out;
54006 + }
54007 +
54008 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54009 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54010 + time_after(gr_auth_expires, get_seconds())) {
54011 + error = -EBUSY;
54012 + goto out;
54013 + }
54014 +
54015 + /* if non-root trying to do anything other than use a special role,
54016 + do not attempt authentication, do not count towards authentication
54017 + locking
54018 + */
54019 +
54020 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54021 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54022 + current_uid()) {
54023 + error = -EPERM;
54024 + goto out;
54025 + }
54026 +
54027 + /* ensure pw and special role name are null terminated */
54028 +
54029 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54030 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54031 +
54032 + /* Okay.
54033 + * We have our enough of the argument structure..(we have yet
54034 + * to copy_from_user the tables themselves) . Copy the tables
54035 + * only if we need them, i.e. for loading operations. */
54036 +
54037 + switch (gr_usermode->mode) {
54038 + case GR_STATUS:
54039 + if (gr_status & GR_READY) {
54040 + error = 1;
54041 + if (!gr_check_secure_terminal(current))
54042 + error = 3;
54043 + } else
54044 + error = 2;
54045 + goto out;
54046 + case GR_SHUTDOWN:
54047 + if ((gr_status & GR_READY)
54048 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54049 + pax_open_kernel();
54050 + gr_status &= ~GR_READY;
54051 + pax_close_kernel();
54052 +
54053 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54054 + free_variables();
54055 + memset(gr_usermode, 0, sizeof (struct gr_arg));
54056 + memset(gr_system_salt, 0, GR_SALT_LEN);
54057 + memset(gr_system_sum, 0, GR_SHA_LEN);
54058 + } else if (gr_status & GR_READY) {
54059 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54060 + error = -EPERM;
54061 + } else {
54062 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54063 + error = -EAGAIN;
54064 + }
54065 + break;
54066 + case GR_ENABLE:
54067 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54068 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54069 + else {
54070 + if (gr_status & GR_READY)
54071 + error = -EAGAIN;
54072 + else
54073 + error = error2;
54074 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54075 + }
54076 + break;
54077 + case GR_RELOAD:
54078 + if (!(gr_status & GR_READY)) {
54079 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54080 + error = -EAGAIN;
54081 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54082 + preempt_disable();
54083 +
54084 + pax_open_kernel();
54085 + gr_status &= ~GR_READY;
54086 + pax_close_kernel();
54087 +
54088 + free_variables();
54089 + if (!(error2 = gracl_init(gr_usermode))) {
54090 + preempt_enable();
54091 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54092 + } else {
54093 + preempt_enable();
54094 + error = error2;
54095 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54096 + }
54097 + } else {
54098 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54099 + error = -EPERM;
54100 + }
54101 + break;
54102 + case GR_SEGVMOD:
54103 + if (unlikely(!(gr_status & GR_READY))) {
54104 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54105 + error = -EAGAIN;
54106 + break;
54107 + }
54108 +
54109 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54110 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54111 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54112 + struct acl_subject_label *segvacl;
54113 + segvacl =
54114 + lookup_acl_subj_label(gr_usermode->segv_inode,
54115 + gr_usermode->segv_device,
54116 + current->role);
54117 + if (segvacl) {
54118 + segvacl->crashes = 0;
54119 + segvacl->expires = 0;
54120 + }
54121 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54122 + gr_remove_uid(gr_usermode->segv_uid);
54123 + }
54124 + } else {
54125 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54126 + error = -EPERM;
54127 + }
54128 + break;
54129 + case GR_SPROLE:
54130 + case GR_SPROLEPAM:
54131 + if (unlikely(!(gr_status & GR_READY))) {
54132 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54133 + error = -EAGAIN;
54134 + break;
54135 + }
54136 +
54137 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54138 + current->role->expires = 0;
54139 + current->role->auth_attempts = 0;
54140 + }
54141 +
54142 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54143 + time_after(current->role->expires, get_seconds())) {
54144 + error = -EBUSY;
54145 + goto out;
54146 + }
54147 +
54148 + if (lookup_special_role_auth
54149 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54150 + && ((!sprole_salt && !sprole_sum)
54151 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54152 + char *p = "";
54153 + assign_special_role(gr_usermode->sp_role);
54154 + read_lock(&tasklist_lock);
54155 + if (current->real_parent)
54156 + p = current->real_parent->role->rolename;
54157 + read_unlock(&tasklist_lock);
54158 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54159 + p, acl_sp_role_value);
54160 + } else {
54161 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54162 + error = -EPERM;
54163 + if(!(current->role->auth_attempts++))
54164 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54165 +
54166 + goto out;
54167 + }
54168 + break;
54169 + case GR_UNSPROLE:
54170 + if (unlikely(!(gr_status & GR_READY))) {
54171 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54172 + error = -EAGAIN;
54173 + break;
54174 + }
54175 +
54176 + if (current->role->roletype & GR_ROLE_SPECIAL) {
54177 + char *p = "";
54178 + int i = 0;
54179 +
54180 + read_lock(&tasklist_lock);
54181 + if (current->real_parent) {
54182 + p = current->real_parent->role->rolename;
54183 + i = current->real_parent->acl_role_id;
54184 + }
54185 + read_unlock(&tasklist_lock);
54186 +
54187 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54188 + gr_set_acls(1);
54189 + } else {
54190 + error = -EPERM;
54191 + goto out;
54192 + }
54193 + break;
54194 + default:
54195 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54196 + error = -EINVAL;
54197 + break;
54198 + }
54199 +
54200 + if (error != -EPERM)
54201 + goto out;
54202 +
54203 + if(!(gr_auth_attempts++))
54204 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54205 +
54206 + out:
54207 + mutex_unlock(&gr_dev_mutex);
54208 + return error;
54209 +}
54210 +
54211 +/* must be called with
54212 + rcu_read_lock();
54213 + read_lock(&tasklist_lock);
54214 + read_lock(&grsec_exec_file_lock);
54215 +*/
54216 +int gr_apply_subject_to_task(struct task_struct *task)
54217 +{
54218 + struct acl_object_label *obj;
54219 + char *tmpname;
54220 + struct acl_subject_label *tmpsubj;
54221 + struct file *filp;
54222 + struct name_entry *nmatch;
54223 +
54224 + filp = task->exec_file;
54225 + if (filp == NULL)
54226 + return 0;
54227 +
54228 + /* the following is to apply the correct subject
54229 + on binaries running when the RBAC system
54230 + is enabled, when the binaries have been
54231 + replaced or deleted since their execution
54232 + -----
54233 + when the RBAC system starts, the inode/dev
54234 + from exec_file will be one the RBAC system
54235 + is unaware of. It only knows the inode/dev
54236 + of the present file on disk, or the absence
54237 + of it.
54238 + */
54239 + preempt_disable();
54240 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54241 +
54242 + nmatch = lookup_name_entry(tmpname);
54243 + preempt_enable();
54244 + tmpsubj = NULL;
54245 + if (nmatch) {
54246 + if (nmatch->deleted)
54247 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54248 + else
54249 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54250 + if (tmpsubj != NULL)
54251 + task->acl = tmpsubj;
54252 + }
54253 + if (tmpsubj == NULL)
54254 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54255 + task->role);
54256 + if (task->acl) {
54257 + task->is_writable = 0;
54258 + /* ignore additional mmap checks for processes that are writable
54259 + by the default ACL */
54260 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54261 + if (unlikely(obj->mode & GR_WRITE))
54262 + task->is_writable = 1;
54263 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54264 + if (unlikely(obj->mode & GR_WRITE))
54265 + task->is_writable = 1;
54266 +
54267 + gr_set_proc_res(task);
54268 +
54269 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54270 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54271 +#endif
54272 + } else {
54273 + return 1;
54274 + }
54275 +
54276 + return 0;
54277 +}
54278 +
54279 +int
54280 +gr_set_acls(const int type)
54281 +{
54282 + struct task_struct *task, *task2;
54283 + struct acl_role_label *role = current->role;
54284 + __u16 acl_role_id = current->acl_role_id;
54285 + const struct cred *cred;
54286 + int ret;
54287 +
54288 + rcu_read_lock();
54289 + read_lock(&tasklist_lock);
54290 + read_lock(&grsec_exec_file_lock);
54291 + do_each_thread(task2, task) {
54292 + /* check to see if we're called from the exit handler,
54293 + if so, only replace ACLs that have inherited the admin
54294 + ACL */
54295 +
54296 + if (type && (task->role != role ||
54297 + task->acl_role_id != acl_role_id))
54298 + continue;
54299 +
54300 + task->acl_role_id = 0;
54301 + task->acl_sp_role = 0;
54302 +
54303 + if (task->exec_file) {
54304 + cred = __task_cred(task);
54305 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54306 + ret = gr_apply_subject_to_task(task);
54307 + if (ret) {
54308 + read_unlock(&grsec_exec_file_lock);
54309 + read_unlock(&tasklist_lock);
54310 + rcu_read_unlock();
54311 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54312 + return ret;
54313 + }
54314 + } else {
54315 + // it's a kernel process
54316 + task->role = kernel_role;
54317 + task->acl = kernel_role->root_label;
54318 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54319 + task->acl->mode &= ~GR_PROCFIND;
54320 +#endif
54321 + }
54322 + } while_each_thread(task2, task);
54323 + read_unlock(&grsec_exec_file_lock);
54324 + read_unlock(&tasklist_lock);
54325 + rcu_read_unlock();
54326 +
54327 + return 0;
54328 +}
54329 +
54330 +void
54331 +gr_learn_resource(const struct task_struct *task,
54332 + const int res, const unsigned long wanted, const int gt)
54333 +{
54334 + struct acl_subject_label *acl;
54335 + const struct cred *cred;
54336 +
54337 + if (unlikely((gr_status & GR_READY) &&
54338 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54339 + goto skip_reslog;
54340 +
54341 +#ifdef CONFIG_GRKERNSEC_RESLOG
54342 + gr_log_resource(task, res, wanted, gt);
54343 +#endif
54344 + skip_reslog:
54345 +
54346 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54347 + return;
54348 +
54349 + acl = task->acl;
54350 +
54351 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54352 + !(acl->resmask & (1 << (unsigned short) res))))
54353 + return;
54354 +
54355 + if (wanted >= acl->res[res].rlim_cur) {
54356 + unsigned long res_add;
54357 +
54358 + res_add = wanted;
54359 + switch (res) {
54360 + case RLIMIT_CPU:
54361 + res_add += GR_RLIM_CPU_BUMP;
54362 + break;
54363 + case RLIMIT_FSIZE:
54364 + res_add += GR_RLIM_FSIZE_BUMP;
54365 + break;
54366 + case RLIMIT_DATA:
54367 + res_add += GR_RLIM_DATA_BUMP;
54368 + break;
54369 + case RLIMIT_STACK:
54370 + res_add += GR_RLIM_STACK_BUMP;
54371 + break;
54372 + case RLIMIT_CORE:
54373 + res_add += GR_RLIM_CORE_BUMP;
54374 + break;
54375 + case RLIMIT_RSS:
54376 + res_add += GR_RLIM_RSS_BUMP;
54377 + break;
54378 + case RLIMIT_NPROC:
54379 + res_add += GR_RLIM_NPROC_BUMP;
54380 + break;
54381 + case RLIMIT_NOFILE:
54382 + res_add += GR_RLIM_NOFILE_BUMP;
54383 + break;
54384 + case RLIMIT_MEMLOCK:
54385 + res_add += GR_RLIM_MEMLOCK_BUMP;
54386 + break;
54387 + case RLIMIT_AS:
54388 + res_add += GR_RLIM_AS_BUMP;
54389 + break;
54390 + case RLIMIT_LOCKS:
54391 + res_add += GR_RLIM_LOCKS_BUMP;
54392 + break;
54393 + case RLIMIT_SIGPENDING:
54394 + res_add += GR_RLIM_SIGPENDING_BUMP;
54395 + break;
54396 + case RLIMIT_MSGQUEUE:
54397 + res_add += GR_RLIM_MSGQUEUE_BUMP;
54398 + break;
54399 + case RLIMIT_NICE:
54400 + res_add += GR_RLIM_NICE_BUMP;
54401 + break;
54402 + case RLIMIT_RTPRIO:
54403 + res_add += GR_RLIM_RTPRIO_BUMP;
54404 + break;
54405 + case RLIMIT_RTTIME:
54406 + res_add += GR_RLIM_RTTIME_BUMP;
54407 + break;
54408 + }
54409 +
54410 + acl->res[res].rlim_cur = res_add;
54411 +
54412 + if (wanted > acl->res[res].rlim_max)
54413 + acl->res[res].rlim_max = res_add;
54414 +
54415 + /* only log the subject filename, since resource logging is supported for
54416 + single-subject learning only */
54417 + rcu_read_lock();
54418 + cred = __task_cred(task);
54419 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54420 + task->role->roletype, cred->uid, cred->gid, acl->filename,
54421 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54422 + "", (unsigned long) res, &task->signal->saved_ip);
54423 + rcu_read_unlock();
54424 + }
54425 +
54426 + return;
54427 +}
54428 +
54429 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54430 +void
54431 +pax_set_initial_flags(struct linux_binprm *bprm)
54432 +{
54433 + struct task_struct *task = current;
54434 + struct acl_subject_label *proc;
54435 + unsigned long flags;
54436 +
54437 + if (unlikely(!(gr_status & GR_READY)))
54438 + return;
54439 +
54440 + flags = pax_get_flags(task);
54441 +
54442 + proc = task->acl;
54443 +
54444 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54445 + flags &= ~MF_PAX_PAGEEXEC;
54446 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54447 + flags &= ~MF_PAX_SEGMEXEC;
54448 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54449 + flags &= ~MF_PAX_RANDMMAP;
54450 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54451 + flags &= ~MF_PAX_EMUTRAMP;
54452 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54453 + flags &= ~MF_PAX_MPROTECT;
54454 +
54455 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54456 + flags |= MF_PAX_PAGEEXEC;
54457 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54458 + flags |= MF_PAX_SEGMEXEC;
54459 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54460 + flags |= MF_PAX_RANDMMAP;
54461 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54462 + flags |= MF_PAX_EMUTRAMP;
54463 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54464 + flags |= MF_PAX_MPROTECT;
54465 +
54466 + pax_set_flags(task, flags);
54467 +
54468 + return;
54469 +}
54470 +#endif
54471 +
54472 +#ifdef CONFIG_SYSCTL
54473 +/* Eric Biederman likes breaking userland ABI and every inode-based security
54474 + system to save 35kb of memory */
54475 +
54476 +/* we modify the passed in filename, but adjust it back before returning */
54477 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54478 +{
54479 + struct name_entry *nmatch;
54480 + char *p, *lastp = NULL;
54481 + struct acl_object_label *obj = NULL, *tmp;
54482 + struct acl_subject_label *tmpsubj;
54483 + char c = '\0';
54484 +
54485 + read_lock(&gr_inode_lock);
54486 +
54487 + p = name + len - 1;
54488 + do {
54489 + nmatch = lookup_name_entry(name);
54490 + if (lastp != NULL)
54491 + *lastp = c;
54492 +
54493 + if (nmatch == NULL)
54494 + goto next_component;
54495 + tmpsubj = current->acl;
54496 + do {
54497 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54498 + if (obj != NULL) {
54499 + tmp = obj->globbed;
54500 + while (tmp) {
54501 + if (!glob_match(tmp->filename, name)) {
54502 + obj = tmp;
54503 + goto found_obj;
54504 + }
54505 + tmp = tmp->next;
54506 + }
54507 + goto found_obj;
54508 + }
54509 + } while ((tmpsubj = tmpsubj->parent_subject));
54510 +next_component:
54511 + /* end case */
54512 + if (p == name)
54513 + break;
54514 +
54515 + while (*p != '/')
54516 + p--;
54517 + if (p == name)
54518 + lastp = p + 1;
54519 + else {
54520 + lastp = p;
54521 + p--;
54522 + }
54523 + c = *lastp;
54524 + *lastp = '\0';
54525 + } while (1);
54526 +found_obj:
54527 + read_unlock(&gr_inode_lock);
54528 + /* obj returned will always be non-null */
54529 + return obj;
54530 +}
54531 +
54532 +/* returns 0 when allowing, non-zero on error
54533 + op of 0 is used for readdir, so we don't log the names of hidden files
54534 +*/
54535 +__u32
54536 +gr_handle_sysctl(const struct ctl_table *table, const int op)
54537 +{
54538 + struct ctl_table *tmp;
54539 + const char *proc_sys = "/proc/sys";
54540 + char *path;
54541 + struct acl_object_label *obj;
54542 + unsigned short len = 0, pos = 0, depth = 0, i;
54543 + __u32 err = 0;
54544 + __u32 mode = 0;
54545 +
54546 + if (unlikely(!(gr_status & GR_READY)))
54547 + return 0;
54548 +
54549 + /* for now, ignore operations on non-sysctl entries if it's not a
54550 + readdir*/
54551 + if (table->child != NULL && op != 0)
54552 + return 0;
54553 +
54554 + mode |= GR_FIND;
54555 + /* it's only a read if it's an entry, read on dirs is for readdir */
54556 + if (op & MAY_READ)
54557 + mode |= GR_READ;
54558 + if (op & MAY_WRITE)
54559 + mode |= GR_WRITE;
54560 +
54561 + preempt_disable();
54562 +
54563 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54564 +
54565 + /* it's only a read/write if it's an actual entry, not a dir
54566 + (which are opened for readdir)
54567 + */
54568 +
54569 + /* convert the requested sysctl entry into a pathname */
54570 +
54571 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54572 + len += strlen(tmp->procname);
54573 + len++;
54574 + depth++;
54575 + }
54576 +
54577 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54578 + /* deny */
54579 + goto out;
54580 + }
54581 +
54582 + memset(path, 0, PAGE_SIZE);
54583 +
54584 + memcpy(path, proc_sys, strlen(proc_sys));
54585 +
54586 + pos += strlen(proc_sys);
54587 +
54588 + for (; depth > 0; depth--) {
54589 + path[pos] = '/';
54590 + pos++;
54591 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54592 + if (depth == i) {
54593 + memcpy(path + pos, tmp->procname,
54594 + strlen(tmp->procname));
54595 + pos += strlen(tmp->procname);
54596 + }
54597 + i++;
54598 + }
54599 + }
54600 +
54601 + obj = gr_lookup_by_name(path, pos);
54602 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54603 +
54604 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54605 + ((err & mode) != mode))) {
54606 + __u32 new_mode = mode;
54607 +
54608 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54609 +
54610 + err = 0;
54611 + gr_log_learn_sysctl(path, new_mode);
54612 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54613 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54614 + err = -ENOENT;
54615 + } else if (!(err & GR_FIND)) {
54616 + err = -ENOENT;
54617 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54618 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54619 + path, (mode & GR_READ) ? " reading" : "",
54620 + (mode & GR_WRITE) ? " writing" : "");
54621 + err = -EACCES;
54622 + } else if ((err & mode) != mode) {
54623 + err = -EACCES;
54624 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54625 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54626 + path, (mode & GR_READ) ? " reading" : "",
54627 + (mode & GR_WRITE) ? " writing" : "");
54628 + err = 0;
54629 + } else
54630 + err = 0;
54631 +
54632 + out:
54633 + preempt_enable();
54634 +
54635 + return err;
54636 +}
54637 +#endif
54638 +
54639 +int
54640 +gr_handle_proc_ptrace(struct task_struct *task)
54641 +{
54642 + struct file *filp;
54643 + struct task_struct *tmp = task;
54644 + struct task_struct *curtemp = current;
54645 + __u32 retmode;
54646 +
54647 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54648 + if (unlikely(!(gr_status & GR_READY)))
54649 + return 0;
54650 +#endif
54651 +
54652 + read_lock(&tasklist_lock);
54653 + read_lock(&grsec_exec_file_lock);
54654 + filp = task->exec_file;
54655 +
54656 + while (tmp->pid > 0) {
54657 + if (tmp == curtemp)
54658 + break;
54659 + tmp = tmp->real_parent;
54660 + }
54661 +
54662 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54663 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54664 + read_unlock(&grsec_exec_file_lock);
54665 + read_unlock(&tasklist_lock);
54666 + return 1;
54667 + }
54668 +
54669 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54670 + if (!(gr_status & GR_READY)) {
54671 + read_unlock(&grsec_exec_file_lock);
54672 + read_unlock(&tasklist_lock);
54673 + return 0;
54674 + }
54675 +#endif
54676 +
54677 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54678 + read_unlock(&grsec_exec_file_lock);
54679 + read_unlock(&tasklist_lock);
54680 +
54681 + if (retmode & GR_NOPTRACE)
54682 + return 1;
54683 +
54684 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54685 + && (current->acl != task->acl || (current->acl != current->role->root_label
54686 + && current->pid != task->pid)))
54687 + return 1;
54688 +
54689 + return 0;
54690 +}
54691 +
54692 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54693 +{
54694 + if (unlikely(!(gr_status & GR_READY)))
54695 + return;
54696 +
54697 + if (!(current->role->roletype & GR_ROLE_GOD))
54698 + return;
54699 +
54700 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54701 + p->role->rolename, gr_task_roletype_to_char(p),
54702 + p->acl->filename);
54703 +}
54704 +
54705 +int
54706 +gr_handle_ptrace(struct task_struct *task, const long request)
54707 +{
54708 + struct task_struct *tmp = task;
54709 + struct task_struct *curtemp = current;
54710 + __u32 retmode;
54711 +
54712 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54713 + if (unlikely(!(gr_status & GR_READY)))
54714 + return 0;
54715 +#endif
54716 +
54717 + read_lock(&tasklist_lock);
54718 + while (tmp->pid > 0) {
54719 + if (tmp == curtemp)
54720 + break;
54721 + tmp = tmp->real_parent;
54722 + }
54723 +
54724 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54725 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54726 + read_unlock(&tasklist_lock);
54727 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54728 + return 1;
54729 + }
54730 + read_unlock(&tasklist_lock);
54731 +
54732 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54733 + if (!(gr_status & GR_READY))
54734 + return 0;
54735 +#endif
54736 +
54737 + read_lock(&grsec_exec_file_lock);
54738 + if (unlikely(!task->exec_file)) {
54739 + read_unlock(&grsec_exec_file_lock);
54740 + return 0;
54741 + }
54742 +
54743 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54744 + read_unlock(&grsec_exec_file_lock);
54745 +
54746 + if (retmode & GR_NOPTRACE) {
54747 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54748 + return 1;
54749 + }
54750 +
54751 + if (retmode & GR_PTRACERD) {
54752 + switch (request) {
54753 + case PTRACE_SEIZE:
54754 + case PTRACE_POKETEXT:
54755 + case PTRACE_POKEDATA:
54756 + case PTRACE_POKEUSR:
54757 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54758 + case PTRACE_SETREGS:
54759 + case PTRACE_SETFPREGS:
54760 +#endif
54761 +#ifdef CONFIG_X86
54762 + case PTRACE_SETFPXREGS:
54763 +#endif
54764 +#ifdef CONFIG_ALTIVEC
54765 + case PTRACE_SETVRREGS:
54766 +#endif
54767 + return 1;
54768 + default:
54769 + return 0;
54770 + }
54771 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54772 + !(current->role->roletype & GR_ROLE_GOD) &&
54773 + (current->acl != task->acl)) {
54774 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54775 + return 1;
54776 + }
54777 +
54778 + return 0;
54779 +}
54780 +
54781 +static int is_writable_mmap(const struct file *filp)
54782 +{
54783 + struct task_struct *task = current;
54784 + struct acl_object_label *obj, *obj2;
54785 +
54786 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54787 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54788 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54789 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54790 + task->role->root_label);
54791 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54792 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54793 + return 1;
54794 + }
54795 + }
54796 + return 0;
54797 +}
54798 +
54799 +int
54800 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54801 +{
54802 + __u32 mode;
54803 +
54804 + if (unlikely(!file || !(prot & PROT_EXEC)))
54805 + return 1;
54806 +
54807 + if (is_writable_mmap(file))
54808 + return 0;
54809 +
54810 + mode =
54811 + gr_search_file(file->f_path.dentry,
54812 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54813 + file->f_path.mnt);
54814 +
54815 + if (!gr_tpe_allow(file))
54816 + return 0;
54817 +
54818 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54819 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54820 + return 0;
54821 + } else if (unlikely(!(mode & GR_EXEC))) {
54822 + return 0;
54823 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54824 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54825 + return 1;
54826 + }
54827 +
54828 + return 1;
54829 +}
54830 +
54831 +int
54832 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54833 +{
54834 + __u32 mode;
54835 +
54836 + if (unlikely(!file || !(prot & PROT_EXEC)))
54837 + return 1;
54838 +
54839 + if (is_writable_mmap(file))
54840 + return 0;
54841 +
54842 + mode =
54843 + gr_search_file(file->f_path.dentry,
54844 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54845 + file->f_path.mnt);
54846 +
54847 + if (!gr_tpe_allow(file))
54848 + return 0;
54849 +
54850 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54851 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54852 + return 0;
54853 + } else if (unlikely(!(mode & GR_EXEC))) {
54854 + return 0;
54855 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54856 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54857 + return 1;
54858 + }
54859 +
54860 + return 1;
54861 +}
54862 +
54863 +void
54864 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54865 +{
54866 + unsigned long runtime;
54867 + unsigned long cputime;
54868 + unsigned int wday, cday;
54869 + __u8 whr, chr;
54870 + __u8 wmin, cmin;
54871 + __u8 wsec, csec;
54872 + struct timespec timeval;
54873 +
54874 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54875 + !(task->acl->mode & GR_PROCACCT)))
54876 + return;
54877 +
54878 + do_posix_clock_monotonic_gettime(&timeval);
54879 + runtime = timeval.tv_sec - task->start_time.tv_sec;
54880 + wday = runtime / (3600 * 24);
54881 + runtime -= wday * (3600 * 24);
54882 + whr = runtime / 3600;
54883 + runtime -= whr * 3600;
54884 + wmin = runtime / 60;
54885 + runtime -= wmin * 60;
54886 + wsec = runtime;
54887 +
54888 + cputime = (task->utime + task->stime) / HZ;
54889 + cday = cputime / (3600 * 24);
54890 + cputime -= cday * (3600 * 24);
54891 + chr = cputime / 3600;
54892 + cputime -= chr * 3600;
54893 + cmin = cputime / 60;
54894 + cputime -= cmin * 60;
54895 + csec = cputime;
54896 +
54897 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54898 +
54899 + return;
54900 +}
54901 +
54902 +void gr_set_kernel_label(struct task_struct *task)
54903 +{
54904 + if (gr_status & GR_READY) {
54905 + task->role = kernel_role;
54906 + task->acl = kernel_role->root_label;
54907 + }
54908 + return;
54909 +}
54910 +
54911 +#ifdef CONFIG_TASKSTATS
54912 +int gr_is_taskstats_denied(int pid)
54913 +{
54914 + struct task_struct *task;
54915 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54916 + const struct cred *cred;
54917 +#endif
54918 + int ret = 0;
54919 +
54920 + /* restrict taskstats viewing to un-chrooted root users
54921 + who have the 'view' subject flag if the RBAC system is enabled
54922 + */
54923 +
54924 + rcu_read_lock();
54925 + read_lock(&tasklist_lock);
54926 + task = find_task_by_vpid(pid);
54927 + if (task) {
54928 +#ifdef CONFIG_GRKERNSEC_CHROOT
54929 + if (proc_is_chrooted(task))
54930 + ret = -EACCES;
54931 +#endif
54932 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54933 + cred = __task_cred(task);
54934 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54935 + if (cred->uid != 0)
54936 + ret = -EACCES;
54937 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54938 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54939 + ret = -EACCES;
54940 +#endif
54941 +#endif
54942 + if (gr_status & GR_READY) {
54943 + if (!(task->acl->mode & GR_VIEW))
54944 + ret = -EACCES;
54945 + }
54946 + } else
54947 + ret = -ENOENT;
54948 +
54949 + read_unlock(&tasklist_lock);
54950 + rcu_read_unlock();
54951 +
54952 + return ret;
54953 +}
54954 +#endif
54955 +
54956 +/* AUXV entries are filled via a descendant of search_binary_handler
54957 + after we've already applied the subject for the target
54958 +*/
54959 +int gr_acl_enable_at_secure(void)
54960 +{
54961 + if (unlikely(!(gr_status & GR_READY)))
54962 + return 0;
54963 +
54964 + if (current->acl->mode & GR_ATSECURE)
54965 + return 1;
54966 +
54967 + return 0;
54968 +}
54969 +
54970 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54971 +{
54972 + struct task_struct *task = current;
54973 + struct dentry *dentry = file->f_path.dentry;
54974 + struct vfsmount *mnt = file->f_path.mnt;
54975 + struct acl_object_label *obj, *tmp;
54976 + struct acl_subject_label *subj;
54977 + unsigned int bufsize;
54978 + int is_not_root;
54979 + char *path;
54980 + dev_t dev = __get_dev(dentry);
54981 +
54982 + if (unlikely(!(gr_status & GR_READY)))
54983 + return 1;
54984 +
54985 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54986 + return 1;
54987 +
54988 + /* ignore Eric Biederman */
54989 + if (IS_PRIVATE(dentry->d_inode))
54990 + return 1;
54991 +
54992 + subj = task->acl;
54993 + do {
54994 + obj = lookup_acl_obj_label(ino, dev, subj);
54995 + if (obj != NULL)
54996 + return (obj->mode & GR_FIND) ? 1 : 0;
54997 + } while ((subj = subj->parent_subject));
54998 +
54999 + /* this is purely an optimization since we're looking for an object
55000 + for the directory we're doing a readdir on
55001 + if it's possible for any globbed object to match the entry we're
55002 + filling into the directory, then the object we find here will be
55003 + an anchor point with attached globbed objects
55004 + */
55005 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55006 + if (obj->globbed == NULL)
55007 + return (obj->mode & GR_FIND) ? 1 : 0;
55008 +
55009 + is_not_root = ((obj->filename[0] == '/') &&
55010 + (obj->filename[1] == '\0')) ? 0 : 1;
55011 + bufsize = PAGE_SIZE - namelen - is_not_root;
55012 +
55013 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
55014 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55015 + return 1;
55016 +
55017 + preempt_disable();
55018 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55019 + bufsize);
55020 +
55021 + bufsize = strlen(path);
55022 +
55023 + /* if base is "/", don't append an additional slash */
55024 + if (is_not_root)
55025 + *(path + bufsize) = '/';
55026 + memcpy(path + bufsize + is_not_root, name, namelen);
55027 + *(path + bufsize + namelen + is_not_root) = '\0';
55028 +
55029 + tmp = obj->globbed;
55030 + while (tmp) {
55031 + if (!glob_match(tmp->filename, path)) {
55032 + preempt_enable();
55033 + return (tmp->mode & GR_FIND) ? 1 : 0;
55034 + }
55035 + tmp = tmp->next;
55036 + }
55037 + preempt_enable();
55038 + return (obj->mode & GR_FIND) ? 1 : 0;
55039 +}
55040 +
55041 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55042 +EXPORT_SYMBOL(gr_acl_is_enabled);
55043 +#endif
55044 +EXPORT_SYMBOL(gr_learn_resource);
55045 +EXPORT_SYMBOL(gr_set_kernel_label);
55046 +#ifdef CONFIG_SECURITY
55047 +EXPORT_SYMBOL(gr_check_user_change);
55048 +EXPORT_SYMBOL(gr_check_group_change);
55049 +#endif
55050 +
55051 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55052 new file mode 100644
55053 index 0000000..34fefda
55054 --- /dev/null
55055 +++ b/grsecurity/gracl_alloc.c
55056 @@ -0,0 +1,105 @@
55057 +#include <linux/kernel.h>
55058 +#include <linux/mm.h>
55059 +#include <linux/slab.h>
55060 +#include <linux/vmalloc.h>
55061 +#include <linux/gracl.h>
55062 +#include <linux/grsecurity.h>
55063 +
55064 +static unsigned long alloc_stack_next = 1;
55065 +static unsigned long alloc_stack_size = 1;
55066 +static void **alloc_stack;
55067 +
55068 +static __inline__ int
55069 +alloc_pop(void)
55070 +{
55071 + if (alloc_stack_next == 1)
55072 + return 0;
55073 +
55074 + kfree(alloc_stack[alloc_stack_next - 2]);
55075 +
55076 + alloc_stack_next--;
55077 +
55078 + return 1;
55079 +}
55080 +
55081 +static __inline__ int
55082 +alloc_push(void *buf)
55083 +{
55084 + if (alloc_stack_next >= alloc_stack_size)
55085 + return 1;
55086 +
55087 + alloc_stack[alloc_stack_next - 1] = buf;
55088 +
55089 + alloc_stack_next++;
55090 +
55091 + return 0;
55092 +}
55093 +
55094 +void *
55095 +acl_alloc(unsigned long len)
55096 +{
55097 + void *ret = NULL;
55098 +
55099 + if (!len || len > PAGE_SIZE)
55100 + goto out;
55101 +
55102 + ret = kmalloc(len, GFP_KERNEL);
55103 +
55104 + if (ret) {
55105 + if (alloc_push(ret)) {
55106 + kfree(ret);
55107 + ret = NULL;
55108 + }
55109 + }
55110 +
55111 +out:
55112 + return ret;
55113 +}
55114 +
55115 +void *
55116 +acl_alloc_num(unsigned long num, unsigned long len)
55117 +{
55118 + if (!len || (num > (PAGE_SIZE / len)))
55119 + return NULL;
55120 +
55121 + return acl_alloc(num * len);
55122 +}
55123 +
55124 +void
55125 +acl_free_all(void)
55126 +{
55127 + if (gr_acl_is_enabled() || !alloc_stack)
55128 + return;
55129 +
55130 + while (alloc_pop()) ;
55131 +
55132 + if (alloc_stack) {
55133 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55134 + kfree(alloc_stack);
55135 + else
55136 + vfree(alloc_stack);
55137 + }
55138 +
55139 + alloc_stack = NULL;
55140 + alloc_stack_size = 1;
55141 + alloc_stack_next = 1;
55142 +
55143 + return;
55144 +}
55145 +
55146 +int
55147 +acl_alloc_stack_init(unsigned long size)
55148 +{
55149 + if ((size * sizeof (void *)) <= PAGE_SIZE)
55150 + alloc_stack =
55151 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55152 + else
55153 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
55154 +
55155 + alloc_stack_size = size;
55156 +
55157 + if (!alloc_stack)
55158 + return 0;
55159 + else
55160 + return 1;
55161 +}
55162 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55163 new file mode 100644
55164 index 0000000..955ddfb
55165 --- /dev/null
55166 +++ b/grsecurity/gracl_cap.c
55167 @@ -0,0 +1,101 @@
55168 +#include <linux/kernel.h>
55169 +#include <linux/module.h>
55170 +#include <linux/sched.h>
55171 +#include <linux/gracl.h>
55172 +#include <linux/grsecurity.h>
55173 +#include <linux/grinternal.h>
55174 +
55175 +extern const char *captab_log[];
55176 +extern int captab_log_entries;
55177 +
55178 +int
55179 +gr_acl_is_capable(const int cap)
55180 +{
55181 + struct task_struct *task = current;
55182 + const struct cred *cred = current_cred();
55183 + struct acl_subject_label *curracl;
55184 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55185 + kernel_cap_t cap_audit = __cap_empty_set;
55186 +
55187 + if (!gr_acl_is_enabled())
55188 + return 1;
55189 +
55190 + curracl = task->acl;
55191 +
55192 + cap_drop = curracl->cap_lower;
55193 + cap_mask = curracl->cap_mask;
55194 + cap_audit = curracl->cap_invert_audit;
55195 +
55196 + while ((curracl = curracl->parent_subject)) {
55197 + /* if the cap isn't specified in the current computed mask but is specified in the
55198 + current level subject, and is lowered in the current level subject, then add
55199 + it to the set of dropped capabilities
55200 + otherwise, add the current level subject's mask to the current computed mask
55201 + */
55202 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55203 + cap_raise(cap_mask, cap);
55204 + if (cap_raised(curracl->cap_lower, cap))
55205 + cap_raise(cap_drop, cap);
55206 + if (cap_raised(curracl->cap_invert_audit, cap))
55207 + cap_raise(cap_audit, cap);
55208 + }
55209 + }
55210 +
55211 + if (!cap_raised(cap_drop, cap)) {
55212 + if (cap_raised(cap_audit, cap))
55213 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55214 + return 1;
55215 + }
55216 +
55217 + curracl = task->acl;
55218 +
55219 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55220 + && cap_raised(cred->cap_effective, cap)) {
55221 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55222 + task->role->roletype, cred->uid,
55223 + cred->gid, task->exec_file ?
55224 + gr_to_filename(task->exec_file->f_path.dentry,
55225 + task->exec_file->f_path.mnt) : curracl->filename,
55226 + curracl->filename, 0UL,
55227 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55228 + return 1;
55229 + }
55230 +
55231 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55232 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55233 + return 0;
55234 +}
55235 +
55236 +int
55237 +gr_acl_is_capable_nolog(const int cap)
55238 +{
55239 + struct acl_subject_label *curracl;
55240 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55241 +
55242 + if (!gr_acl_is_enabled())
55243 + return 1;
55244 +
55245 + curracl = current->acl;
55246 +
55247 + cap_drop = curracl->cap_lower;
55248 + cap_mask = curracl->cap_mask;
55249 +
55250 + while ((curracl = curracl->parent_subject)) {
55251 + /* if the cap isn't specified in the current computed mask but is specified in the
55252 + current level subject, and is lowered in the current level subject, then add
55253 + it to the set of dropped capabilities
55254 + otherwise, add the current level subject's mask to the current computed mask
55255 + */
55256 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55257 + cap_raise(cap_mask, cap);
55258 + if (cap_raised(curracl->cap_lower, cap))
55259 + cap_raise(cap_drop, cap);
55260 + }
55261 + }
55262 +
55263 + if (!cap_raised(cap_drop, cap))
55264 + return 1;
55265 +
55266 + return 0;
55267 +}
55268 +
55269 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55270 new file mode 100644
55271 index 0000000..4eda5c3
55272 --- /dev/null
55273 +++ b/grsecurity/gracl_fs.c
55274 @@ -0,0 +1,433 @@
55275 +#include <linux/kernel.h>
55276 +#include <linux/sched.h>
55277 +#include <linux/types.h>
55278 +#include <linux/fs.h>
55279 +#include <linux/file.h>
55280 +#include <linux/stat.h>
55281 +#include <linux/grsecurity.h>
55282 +#include <linux/grinternal.h>
55283 +#include <linux/gracl.h>
55284 +
55285 +__u32
55286 +gr_acl_handle_hidden_file(const struct dentry * dentry,
55287 + const struct vfsmount * mnt)
55288 +{
55289 + __u32 mode;
55290 +
55291 + if (unlikely(!dentry->d_inode))
55292 + return GR_FIND;
55293 +
55294 + mode =
55295 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55296 +
55297 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55298 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55299 + return mode;
55300 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55301 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55302 + return 0;
55303 + } else if (unlikely(!(mode & GR_FIND)))
55304 + return 0;
55305 +
55306 + return GR_FIND;
55307 +}
55308 +
55309 +__u32
55310 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55311 + int acc_mode)
55312 +{
55313 + __u32 reqmode = GR_FIND;
55314 + __u32 mode;
55315 +
55316 + if (unlikely(!dentry->d_inode))
55317 + return reqmode;
55318 +
55319 + if (acc_mode & MAY_APPEND)
55320 + reqmode |= GR_APPEND;
55321 + else if (acc_mode & MAY_WRITE)
55322 + reqmode |= GR_WRITE;
55323 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55324 + reqmode |= GR_READ;
55325 +
55326 + mode =
55327 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55328 + mnt);
55329 +
55330 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55331 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55332 + reqmode & GR_READ ? " reading" : "",
55333 + reqmode & GR_WRITE ? " writing" : reqmode &
55334 + GR_APPEND ? " appending" : "");
55335 + return reqmode;
55336 + } else
55337 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55338 + {
55339 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55340 + reqmode & GR_READ ? " reading" : "",
55341 + reqmode & GR_WRITE ? " writing" : reqmode &
55342 + GR_APPEND ? " appending" : "");
55343 + return 0;
55344 + } else if (unlikely((mode & reqmode) != reqmode))
55345 + return 0;
55346 +
55347 + return reqmode;
55348 +}
55349 +
55350 +__u32
55351 +gr_acl_handle_creat(const struct dentry * dentry,
55352 + const struct dentry * p_dentry,
55353 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55354 + const int imode)
55355 +{
55356 + __u32 reqmode = GR_WRITE | GR_CREATE;
55357 + __u32 mode;
55358 +
55359 + if (acc_mode & MAY_APPEND)
55360 + reqmode |= GR_APPEND;
55361 + // if a directory was required or the directory already exists, then
55362 + // don't count this open as a read
55363 + if ((acc_mode & MAY_READ) &&
55364 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55365 + reqmode |= GR_READ;
55366 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55367 + reqmode |= GR_SETID;
55368 +
55369 + mode =
55370 + gr_check_create(dentry, p_dentry, p_mnt,
55371 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55372 +
55373 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55374 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55375 + reqmode & GR_READ ? " reading" : "",
55376 + reqmode & GR_WRITE ? " writing" : reqmode &
55377 + GR_APPEND ? " appending" : "");
55378 + return reqmode;
55379 + } else
55380 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55381 + {
55382 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55383 + reqmode & GR_READ ? " reading" : "",
55384 + reqmode & GR_WRITE ? " writing" : reqmode &
55385 + GR_APPEND ? " appending" : "");
55386 + return 0;
55387 + } else if (unlikely((mode & reqmode) != reqmode))
55388 + return 0;
55389 +
55390 + return reqmode;
55391 +}
55392 +
55393 +__u32
55394 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55395 + const int fmode)
55396 +{
55397 + __u32 mode, reqmode = GR_FIND;
55398 +
55399 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55400 + reqmode |= GR_EXEC;
55401 + if (fmode & S_IWOTH)
55402 + reqmode |= GR_WRITE;
55403 + if (fmode & S_IROTH)
55404 + reqmode |= GR_READ;
55405 +
55406 + mode =
55407 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55408 + mnt);
55409 +
55410 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55411 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55412 + reqmode & GR_READ ? " reading" : "",
55413 + reqmode & GR_WRITE ? " writing" : "",
55414 + reqmode & GR_EXEC ? " executing" : "");
55415 + return reqmode;
55416 + } else
55417 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55418 + {
55419 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55420 + reqmode & GR_READ ? " reading" : "",
55421 + reqmode & GR_WRITE ? " writing" : "",
55422 + reqmode & GR_EXEC ? " executing" : "");
55423 + return 0;
55424 + } else if (unlikely((mode & reqmode) != reqmode))
55425 + return 0;
55426 +
55427 + return reqmode;
55428 +}
55429 +
55430 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55431 +{
55432 + __u32 mode;
55433 +
55434 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55435 +
55436 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55437 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55438 + return mode;
55439 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55440 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55441 + return 0;
55442 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55443 + return 0;
55444 +
55445 + return (reqmode);
55446 +}
55447 +
55448 +__u32
55449 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55450 +{
55451 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55452 +}
55453 +
55454 +__u32
55455 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55456 +{
55457 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55458 +}
55459 +
55460 +__u32
55461 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55462 +{
55463 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55464 +}
55465 +
55466 +__u32
55467 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55468 +{
55469 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55470 +}
55471 +
55472 +__u32
55473 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
55474 + mode_t mode)
55475 +{
55476 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55477 + return 1;
55478 +
55479 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55480 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55481 + GR_FCHMOD_ACL_MSG);
55482 + } else {
55483 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
55484 + }
55485 +}
55486 +
55487 +__u32
55488 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55489 + mode_t mode)
55490 +{
55491 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55492 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55493 + GR_CHMOD_ACL_MSG);
55494 + } else {
55495 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55496 + }
55497 +}
55498 +
55499 +__u32
55500 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55501 +{
55502 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55503 +}
55504 +
55505 +__u32
55506 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55507 +{
55508 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55509 +}
55510 +
55511 +__u32
55512 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55513 +{
55514 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55515 +}
55516 +
55517 +__u32
55518 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55519 +{
55520 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55521 + GR_UNIXCONNECT_ACL_MSG);
55522 +}
55523 +
55524 +/* hardlinks require at minimum create and link permission,
55525 + any additional privilege required is based on the
55526 + privilege of the file being linked to
55527 +*/
55528 +__u32
55529 +gr_acl_handle_link(const struct dentry * new_dentry,
55530 + const struct dentry * parent_dentry,
55531 + const struct vfsmount * parent_mnt,
55532 + const struct dentry * old_dentry,
55533 + const struct vfsmount * old_mnt, const char *to)
55534 +{
55535 + __u32 mode;
55536 + __u32 needmode = GR_CREATE | GR_LINK;
55537 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55538 +
55539 + mode =
55540 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55541 + old_mnt);
55542 +
55543 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55544 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55545 + return mode;
55546 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55547 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55548 + return 0;
55549 + } else if (unlikely((mode & needmode) != needmode))
55550 + return 0;
55551 +
55552 + return 1;
55553 +}
55554 +
55555 +__u32
55556 +gr_acl_handle_symlink(const struct dentry * new_dentry,
55557 + const struct dentry * parent_dentry,
55558 + const struct vfsmount * parent_mnt, const char *from)
55559 +{
55560 + __u32 needmode = GR_WRITE | GR_CREATE;
55561 + __u32 mode;
55562 +
55563 + mode =
55564 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
55565 + GR_CREATE | GR_AUDIT_CREATE |
55566 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55567 +
55568 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55569 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55570 + return mode;
55571 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55572 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55573 + return 0;
55574 + } else if (unlikely((mode & needmode) != needmode))
55575 + return 0;
55576 +
55577 + return (GR_WRITE | GR_CREATE);
55578 +}
55579 +
55580 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55581 +{
55582 + __u32 mode;
55583 +
55584 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55585 +
55586 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55587 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55588 + return mode;
55589 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55590 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55591 + return 0;
55592 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55593 + return 0;
55594 +
55595 + return (reqmode);
55596 +}
55597 +
55598 +__u32
55599 +gr_acl_handle_mknod(const struct dentry * new_dentry,
55600 + const struct dentry * parent_dentry,
55601 + const struct vfsmount * parent_mnt,
55602 + const int mode)
55603 +{
55604 + __u32 reqmode = GR_WRITE | GR_CREATE;
55605 + if (unlikely(mode & (S_ISUID | S_ISGID)))
55606 + reqmode |= GR_SETID;
55607 +
55608 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55609 + reqmode, GR_MKNOD_ACL_MSG);
55610 +}
55611 +
55612 +__u32
55613 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55614 + const struct dentry *parent_dentry,
55615 + const struct vfsmount *parent_mnt)
55616 +{
55617 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55618 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55619 +}
55620 +
55621 +#define RENAME_CHECK_SUCCESS(old, new) \
55622 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55623 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55624 +
55625 +int
55626 +gr_acl_handle_rename(struct dentry *new_dentry,
55627 + struct dentry *parent_dentry,
55628 + const struct vfsmount *parent_mnt,
55629 + struct dentry *old_dentry,
55630 + struct inode *old_parent_inode,
55631 + struct vfsmount *old_mnt, const char *newname)
55632 +{
55633 + __u32 comp1, comp2;
55634 + int error = 0;
55635 +
55636 + if (unlikely(!gr_acl_is_enabled()))
55637 + return 0;
55638 +
55639 + if (!new_dentry->d_inode) {
55640 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55641 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55642 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55643 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55644 + GR_DELETE | GR_AUDIT_DELETE |
55645 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55646 + GR_SUPPRESS, old_mnt);
55647 + } else {
55648 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55649 + GR_CREATE | GR_DELETE |
55650 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55651 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55652 + GR_SUPPRESS, parent_mnt);
55653 + comp2 =
55654 + gr_search_file(old_dentry,
55655 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55656 + GR_DELETE | GR_AUDIT_DELETE |
55657 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55658 + }
55659 +
55660 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55661 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55662 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55663 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55664 + && !(comp2 & GR_SUPPRESS)) {
55665 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55666 + error = -EACCES;
55667 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55668 + error = -EACCES;
55669 +
55670 + return error;
55671 +}
55672 +
55673 +void
55674 +gr_acl_handle_exit(void)
55675 +{
55676 + u16 id;
55677 + char *rolename;
55678 + struct file *exec_file;
55679 +
55680 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55681 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55682 + id = current->acl_role_id;
55683 + rolename = current->role->rolename;
55684 + gr_set_acls(1);
55685 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55686 + }
55687 +
55688 + write_lock(&grsec_exec_file_lock);
55689 + exec_file = current->exec_file;
55690 + current->exec_file = NULL;
55691 + write_unlock(&grsec_exec_file_lock);
55692 +
55693 + if (exec_file)
55694 + fput(exec_file);
55695 +}
55696 +
55697 +int
55698 +gr_acl_handle_procpidmem(const struct task_struct *task)
55699 +{
55700 + if (unlikely(!gr_acl_is_enabled()))
55701 + return 0;
55702 +
55703 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55704 + return -EACCES;
55705 +
55706 + return 0;
55707 +}
55708 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55709 new file mode 100644
55710 index 0000000..17050ca
55711 --- /dev/null
55712 +++ b/grsecurity/gracl_ip.c
55713 @@ -0,0 +1,381 @@
55714 +#include <linux/kernel.h>
55715 +#include <asm/uaccess.h>
55716 +#include <asm/errno.h>
55717 +#include <net/sock.h>
55718 +#include <linux/file.h>
55719 +#include <linux/fs.h>
55720 +#include <linux/net.h>
55721 +#include <linux/in.h>
55722 +#include <linux/skbuff.h>
55723 +#include <linux/ip.h>
55724 +#include <linux/udp.h>
55725 +#include <linux/types.h>
55726 +#include <linux/sched.h>
55727 +#include <linux/netdevice.h>
55728 +#include <linux/inetdevice.h>
55729 +#include <linux/gracl.h>
55730 +#include <linux/grsecurity.h>
55731 +#include <linux/grinternal.h>
55732 +
55733 +#define GR_BIND 0x01
55734 +#define GR_CONNECT 0x02
55735 +#define GR_INVERT 0x04
55736 +#define GR_BINDOVERRIDE 0x08
55737 +#define GR_CONNECTOVERRIDE 0x10
55738 +#define GR_SOCK_FAMILY 0x20
55739 +
55740 +static const char * gr_protocols[IPPROTO_MAX] = {
55741 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55742 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55743 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55744 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55745 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55746 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55747 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55748 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55749 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55750 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55751 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55752 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55753 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55754 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55755 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55756 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55757 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55758 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55759 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55760 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55761 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55762 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55763 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55764 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55765 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55766 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55767 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55768 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55769 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55770 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55771 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55772 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55773 + };
55774 +
55775 +static const char * gr_socktypes[SOCK_MAX] = {
55776 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55777 + "unknown:7", "unknown:8", "unknown:9", "packet"
55778 + };
55779 +
55780 +static const char * gr_sockfamilies[AF_MAX+1] = {
55781 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55782 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55783 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55784 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55785 + };
55786 +
55787 +const char *
55788 +gr_proto_to_name(unsigned char proto)
55789 +{
55790 + return gr_protocols[proto];
55791 +}
55792 +
55793 +const char *
55794 +gr_socktype_to_name(unsigned char type)
55795 +{
55796 + return gr_socktypes[type];
55797 +}
55798 +
55799 +const char *
55800 +gr_sockfamily_to_name(unsigned char family)
55801 +{
55802 + return gr_sockfamilies[family];
55803 +}
55804 +
55805 +int
55806 +gr_search_socket(const int domain, const int type, const int protocol)
55807 +{
55808 + struct acl_subject_label *curr;
55809 + const struct cred *cred = current_cred();
55810 +
55811 + if (unlikely(!gr_acl_is_enabled()))
55812 + goto exit;
55813 +
55814 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
55815 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55816 + goto exit; // let the kernel handle it
55817 +
55818 + curr = current->acl;
55819 +
55820 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55821 + /* the family is allowed, if this is PF_INET allow it only if
55822 + the extra sock type/protocol checks pass */
55823 + if (domain == PF_INET)
55824 + goto inet_check;
55825 + goto exit;
55826 + } else {
55827 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55828 + __u32 fakeip = 0;
55829 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55830 + current->role->roletype, cred->uid,
55831 + cred->gid, current->exec_file ?
55832 + gr_to_filename(current->exec_file->f_path.dentry,
55833 + current->exec_file->f_path.mnt) :
55834 + curr->filename, curr->filename,
55835 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55836 + &current->signal->saved_ip);
55837 + goto exit;
55838 + }
55839 + goto exit_fail;
55840 + }
55841 +
55842 +inet_check:
55843 + /* the rest of this checking is for IPv4 only */
55844 + if (!curr->ips)
55845 + goto exit;
55846 +
55847 + if ((curr->ip_type & (1 << type)) &&
55848 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55849 + goto exit;
55850 +
55851 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55852 + /* we don't place acls on raw sockets , and sometimes
55853 + dgram/ip sockets are opened for ioctl and not
55854 + bind/connect, so we'll fake a bind learn log */
55855 + if (type == SOCK_RAW || type == SOCK_PACKET) {
55856 + __u32 fakeip = 0;
55857 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55858 + current->role->roletype, cred->uid,
55859 + cred->gid, current->exec_file ?
55860 + gr_to_filename(current->exec_file->f_path.dentry,
55861 + current->exec_file->f_path.mnt) :
55862 + curr->filename, curr->filename,
55863 + &fakeip, 0, type,
55864 + protocol, GR_CONNECT, &current->signal->saved_ip);
55865 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55866 + __u32 fakeip = 0;
55867 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55868 + current->role->roletype, cred->uid,
55869 + cred->gid, current->exec_file ?
55870 + gr_to_filename(current->exec_file->f_path.dentry,
55871 + current->exec_file->f_path.mnt) :
55872 + curr->filename, curr->filename,
55873 + &fakeip, 0, type,
55874 + protocol, GR_BIND, &current->signal->saved_ip);
55875 + }
55876 + /* we'll log when they use connect or bind */
55877 + goto exit;
55878 + }
55879 +
55880 +exit_fail:
55881 + if (domain == PF_INET)
55882 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55883 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
55884 + else
55885 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55886 + gr_socktype_to_name(type), protocol);
55887 +
55888 + return 0;
55889 +exit:
55890 + return 1;
55891 +}
55892 +
55893 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55894 +{
55895 + if ((ip->mode & mode) &&
55896 + (ip_port >= ip->low) &&
55897 + (ip_port <= ip->high) &&
55898 + ((ntohl(ip_addr) & our_netmask) ==
55899 + (ntohl(our_addr) & our_netmask))
55900 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55901 + && (ip->type & (1 << type))) {
55902 + if (ip->mode & GR_INVERT)
55903 + return 2; // specifically denied
55904 + else
55905 + return 1; // allowed
55906 + }
55907 +
55908 + return 0; // not specifically allowed, may continue parsing
55909 +}
55910 +
55911 +static int
55912 +gr_search_connectbind(const int full_mode, struct sock *sk,
55913 + struct sockaddr_in *addr, const int type)
55914 +{
55915 + char iface[IFNAMSIZ] = {0};
55916 + struct acl_subject_label *curr;
55917 + struct acl_ip_label *ip;
55918 + struct inet_sock *isk;
55919 + struct net_device *dev;
55920 + struct in_device *idev;
55921 + unsigned long i;
55922 + int ret;
55923 + int mode = full_mode & (GR_BIND | GR_CONNECT);
55924 + __u32 ip_addr = 0;
55925 + __u32 our_addr;
55926 + __u32 our_netmask;
55927 + char *p;
55928 + __u16 ip_port = 0;
55929 + const struct cred *cred = current_cred();
55930 +
55931 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55932 + return 0;
55933 +
55934 + curr = current->acl;
55935 + isk = inet_sk(sk);
55936 +
55937 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55938 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55939 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55940 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55941 + struct sockaddr_in saddr;
55942 + int err;
55943 +
55944 + saddr.sin_family = AF_INET;
55945 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55946 + saddr.sin_port = isk->inet_sport;
55947 +
55948 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55949 + if (err)
55950 + return err;
55951 +
55952 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55953 + if (err)
55954 + return err;
55955 + }
55956 +
55957 + if (!curr->ips)
55958 + return 0;
55959 +
55960 + ip_addr = addr->sin_addr.s_addr;
55961 + ip_port = ntohs(addr->sin_port);
55962 +
55963 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55964 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55965 + current->role->roletype, cred->uid,
55966 + cred->gid, current->exec_file ?
55967 + gr_to_filename(current->exec_file->f_path.dentry,
55968 + current->exec_file->f_path.mnt) :
55969 + curr->filename, curr->filename,
55970 + &ip_addr, ip_port, type,
55971 + sk->sk_protocol, mode, &current->signal->saved_ip);
55972 + return 0;
55973 + }
55974 +
55975 + for (i = 0; i < curr->ip_num; i++) {
55976 + ip = *(curr->ips + i);
55977 + if (ip->iface != NULL) {
55978 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55979 + p = strchr(iface, ':');
55980 + if (p != NULL)
55981 + *p = '\0';
55982 + dev = dev_get_by_name(sock_net(sk), iface);
55983 + if (dev == NULL)
55984 + continue;
55985 + idev = in_dev_get(dev);
55986 + if (idev == NULL) {
55987 + dev_put(dev);
55988 + continue;
55989 + }
55990 + rcu_read_lock();
55991 + for_ifa(idev) {
55992 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55993 + our_addr = ifa->ifa_address;
55994 + our_netmask = 0xffffffff;
55995 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55996 + if (ret == 1) {
55997 + rcu_read_unlock();
55998 + in_dev_put(idev);
55999 + dev_put(dev);
56000 + return 0;
56001 + } else if (ret == 2) {
56002 + rcu_read_unlock();
56003 + in_dev_put(idev);
56004 + dev_put(dev);
56005 + goto denied;
56006 + }
56007 + }
56008 + } endfor_ifa(idev);
56009 + rcu_read_unlock();
56010 + in_dev_put(idev);
56011 + dev_put(dev);
56012 + } else {
56013 + our_addr = ip->addr;
56014 + our_netmask = ip->netmask;
56015 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56016 + if (ret == 1)
56017 + return 0;
56018 + else if (ret == 2)
56019 + goto denied;
56020 + }
56021 + }
56022 +
56023 +denied:
56024 + if (mode == GR_BIND)
56025 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56026 + else if (mode == GR_CONNECT)
56027 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56028 +
56029 + return -EACCES;
56030 +}
56031 +
56032 +int
56033 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56034 +{
56035 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56036 +}
56037 +
56038 +int
56039 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56040 +{
56041 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56042 +}
56043 +
56044 +int gr_search_listen(struct socket *sock)
56045 +{
56046 + struct sock *sk = sock->sk;
56047 + struct sockaddr_in addr;
56048 +
56049 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56050 + addr.sin_port = inet_sk(sk)->inet_sport;
56051 +
56052 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56053 +}
56054 +
56055 +int gr_search_accept(struct socket *sock)
56056 +{
56057 + struct sock *sk = sock->sk;
56058 + struct sockaddr_in addr;
56059 +
56060 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56061 + addr.sin_port = inet_sk(sk)->inet_sport;
56062 +
56063 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56064 +}
56065 +
56066 +int
56067 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56068 +{
56069 + if (addr)
56070 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56071 + else {
56072 + struct sockaddr_in sin;
56073 + const struct inet_sock *inet = inet_sk(sk);
56074 +
56075 + sin.sin_addr.s_addr = inet->inet_daddr;
56076 + sin.sin_port = inet->inet_dport;
56077 +
56078 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56079 + }
56080 +}
56081 +
56082 +int
56083 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56084 +{
56085 + struct sockaddr_in sin;
56086 +
56087 + if (unlikely(skb->len < sizeof (struct udphdr)))
56088 + return 0; // skip this packet
56089 +
56090 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56091 + sin.sin_port = udp_hdr(skb)->source;
56092 +
56093 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56094 +}
56095 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56096 new file mode 100644
56097 index 0000000..25f54ef
56098 --- /dev/null
56099 +++ b/grsecurity/gracl_learn.c
56100 @@ -0,0 +1,207 @@
56101 +#include <linux/kernel.h>
56102 +#include <linux/mm.h>
56103 +#include <linux/sched.h>
56104 +#include <linux/poll.h>
56105 +#include <linux/string.h>
56106 +#include <linux/file.h>
56107 +#include <linux/types.h>
56108 +#include <linux/vmalloc.h>
56109 +#include <linux/grinternal.h>
56110 +
56111 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56112 + size_t count, loff_t *ppos);
56113 +extern int gr_acl_is_enabled(void);
56114 +
56115 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56116 +static int gr_learn_attached;
56117 +
56118 +/* use a 512k buffer */
56119 +#define LEARN_BUFFER_SIZE (512 * 1024)
56120 +
56121 +static DEFINE_SPINLOCK(gr_learn_lock);
56122 +static DEFINE_MUTEX(gr_learn_user_mutex);
56123 +
56124 +/* we need to maintain two buffers, so that the kernel context of grlearn
56125 + uses a semaphore around the userspace copying, and the other kernel contexts
56126 + use a spinlock when copying into the buffer, since they cannot sleep
56127 +*/
56128 +static char *learn_buffer;
56129 +static char *learn_buffer_user;
56130 +static int learn_buffer_len;
56131 +static int learn_buffer_user_len;
56132 +
56133 +static ssize_t
56134 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56135 +{
56136 + DECLARE_WAITQUEUE(wait, current);
56137 + ssize_t retval = 0;
56138 +
56139 + add_wait_queue(&learn_wait, &wait);
56140 + set_current_state(TASK_INTERRUPTIBLE);
56141 + do {
56142 + mutex_lock(&gr_learn_user_mutex);
56143 + spin_lock(&gr_learn_lock);
56144 + if (learn_buffer_len)
56145 + break;
56146 + spin_unlock(&gr_learn_lock);
56147 + mutex_unlock(&gr_learn_user_mutex);
56148 + if (file->f_flags & O_NONBLOCK) {
56149 + retval = -EAGAIN;
56150 + goto out;
56151 + }
56152 + if (signal_pending(current)) {
56153 + retval = -ERESTARTSYS;
56154 + goto out;
56155 + }
56156 +
56157 + schedule();
56158 + } while (1);
56159 +
56160 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56161 + learn_buffer_user_len = learn_buffer_len;
56162 + retval = learn_buffer_len;
56163 + learn_buffer_len = 0;
56164 +
56165 + spin_unlock(&gr_learn_lock);
56166 +
56167 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56168 + retval = -EFAULT;
56169 +
56170 + mutex_unlock(&gr_learn_user_mutex);
56171 +out:
56172 + set_current_state(TASK_RUNNING);
56173 + remove_wait_queue(&learn_wait, &wait);
56174 + return retval;
56175 +}
56176 +
56177 +static unsigned int
56178 +poll_learn(struct file * file, poll_table * wait)
56179 +{
56180 + poll_wait(file, &learn_wait, wait);
56181 +
56182 + if (learn_buffer_len)
56183 + return (POLLIN | POLLRDNORM);
56184 +
56185 + return 0;
56186 +}
56187 +
56188 +void
56189 +gr_clear_learn_entries(void)
56190 +{
56191 + char *tmp;
56192 +
56193 + mutex_lock(&gr_learn_user_mutex);
56194 + spin_lock(&gr_learn_lock);
56195 + tmp = learn_buffer;
56196 + learn_buffer = NULL;
56197 + spin_unlock(&gr_learn_lock);
56198 + if (tmp)
56199 + vfree(tmp);
56200 + if (learn_buffer_user != NULL) {
56201 + vfree(learn_buffer_user);
56202 + learn_buffer_user = NULL;
56203 + }
56204 + learn_buffer_len = 0;
56205 + mutex_unlock(&gr_learn_user_mutex);
56206 +
56207 + return;
56208 +}
56209 +
56210 +void
56211 +gr_add_learn_entry(const char *fmt, ...)
56212 +{
56213 + va_list args;
56214 + unsigned int len;
56215 +
56216 + if (!gr_learn_attached)
56217 + return;
56218 +
56219 + spin_lock(&gr_learn_lock);
56220 +
56221 + /* leave a gap at the end so we know when it's "full" but don't have to
56222 + compute the exact length of the string we're trying to append
56223 + */
56224 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56225 + spin_unlock(&gr_learn_lock);
56226 + wake_up_interruptible(&learn_wait);
56227 + return;
56228 + }
56229 + if (learn_buffer == NULL) {
56230 + spin_unlock(&gr_learn_lock);
56231 + return;
56232 + }
56233 +
56234 + va_start(args, fmt);
56235 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56236 + va_end(args);
56237 +
56238 + learn_buffer_len += len + 1;
56239 +
56240 + spin_unlock(&gr_learn_lock);
56241 + wake_up_interruptible(&learn_wait);
56242 +
56243 + return;
56244 +}
56245 +
56246 +static int
56247 +open_learn(struct inode *inode, struct file *file)
56248 +{
56249 + if (file->f_mode & FMODE_READ && gr_learn_attached)
56250 + return -EBUSY;
56251 + if (file->f_mode & FMODE_READ) {
56252 + int retval = 0;
56253 + mutex_lock(&gr_learn_user_mutex);
56254 + if (learn_buffer == NULL)
56255 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56256 + if (learn_buffer_user == NULL)
56257 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56258 + if (learn_buffer == NULL) {
56259 + retval = -ENOMEM;
56260 + goto out_error;
56261 + }
56262 + if (learn_buffer_user == NULL) {
56263 + retval = -ENOMEM;
56264 + goto out_error;
56265 + }
56266 + learn_buffer_len = 0;
56267 + learn_buffer_user_len = 0;
56268 + gr_learn_attached = 1;
56269 +out_error:
56270 + mutex_unlock(&gr_learn_user_mutex);
56271 + return retval;
56272 + }
56273 + return 0;
56274 +}
56275 +
56276 +static int
56277 +close_learn(struct inode *inode, struct file *file)
56278 +{
56279 + if (file->f_mode & FMODE_READ) {
56280 + char *tmp = NULL;
56281 + mutex_lock(&gr_learn_user_mutex);
56282 + spin_lock(&gr_learn_lock);
56283 + tmp = learn_buffer;
56284 + learn_buffer = NULL;
56285 + spin_unlock(&gr_learn_lock);
56286 + if (tmp)
56287 + vfree(tmp);
56288 + if (learn_buffer_user != NULL) {
56289 + vfree(learn_buffer_user);
56290 + learn_buffer_user = NULL;
56291 + }
56292 + learn_buffer_len = 0;
56293 + learn_buffer_user_len = 0;
56294 + gr_learn_attached = 0;
56295 + mutex_unlock(&gr_learn_user_mutex);
56296 + }
56297 +
56298 + return 0;
56299 +}
56300 +
56301 +const struct file_operations grsec_fops = {
56302 + .read = read_learn,
56303 + .write = write_grsec_handler,
56304 + .open = open_learn,
56305 + .release = close_learn,
56306 + .poll = poll_learn,
56307 +};
56308 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56309 new file mode 100644
56310 index 0000000..39645c9
56311 --- /dev/null
56312 +++ b/grsecurity/gracl_res.c
56313 @@ -0,0 +1,68 @@
56314 +#include <linux/kernel.h>
56315 +#include <linux/sched.h>
56316 +#include <linux/gracl.h>
56317 +#include <linux/grinternal.h>
56318 +
56319 +static const char *restab_log[] = {
56320 + [RLIMIT_CPU] = "RLIMIT_CPU",
56321 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56322 + [RLIMIT_DATA] = "RLIMIT_DATA",
56323 + [RLIMIT_STACK] = "RLIMIT_STACK",
56324 + [RLIMIT_CORE] = "RLIMIT_CORE",
56325 + [RLIMIT_RSS] = "RLIMIT_RSS",
56326 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
56327 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56328 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56329 + [RLIMIT_AS] = "RLIMIT_AS",
56330 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56331 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56332 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56333 + [RLIMIT_NICE] = "RLIMIT_NICE",
56334 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56335 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56336 + [GR_CRASH_RES] = "RLIMIT_CRASH"
56337 +};
56338 +
56339 +void
56340 +gr_log_resource(const struct task_struct *task,
56341 + const int res, const unsigned long wanted, const int gt)
56342 +{
56343 + const struct cred *cred;
56344 + unsigned long rlim;
56345 +
56346 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
56347 + return;
56348 +
56349 + // not yet supported resource
56350 + if (unlikely(!restab_log[res]))
56351 + return;
56352 +
56353 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56354 + rlim = task_rlimit_max(task, res);
56355 + else
56356 + rlim = task_rlimit(task, res);
56357 +
56358 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56359 + return;
56360 +
56361 + rcu_read_lock();
56362 + cred = __task_cred(task);
56363 +
56364 + if (res == RLIMIT_NPROC &&
56365 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56366 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56367 + goto out_rcu_unlock;
56368 + else if (res == RLIMIT_MEMLOCK &&
56369 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56370 + goto out_rcu_unlock;
56371 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56372 + goto out_rcu_unlock;
56373 + rcu_read_unlock();
56374 +
56375 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56376 +
56377 + return;
56378 +out_rcu_unlock:
56379 + rcu_read_unlock();
56380 + return;
56381 +}
56382 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56383 new file mode 100644
56384 index 0000000..5556be3
56385 --- /dev/null
56386 +++ b/grsecurity/gracl_segv.c
56387 @@ -0,0 +1,299 @@
56388 +#include <linux/kernel.h>
56389 +#include <linux/mm.h>
56390 +#include <asm/uaccess.h>
56391 +#include <asm/errno.h>
56392 +#include <asm/mman.h>
56393 +#include <net/sock.h>
56394 +#include <linux/file.h>
56395 +#include <linux/fs.h>
56396 +#include <linux/net.h>
56397 +#include <linux/in.h>
56398 +#include <linux/slab.h>
56399 +#include <linux/types.h>
56400 +#include <linux/sched.h>
56401 +#include <linux/timer.h>
56402 +#include <linux/gracl.h>
56403 +#include <linux/grsecurity.h>
56404 +#include <linux/grinternal.h>
56405 +
56406 +static struct crash_uid *uid_set;
56407 +static unsigned short uid_used;
56408 +static DEFINE_SPINLOCK(gr_uid_lock);
56409 +extern rwlock_t gr_inode_lock;
56410 +extern struct acl_subject_label *
56411 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56412 + struct acl_role_label *role);
56413 +
56414 +#ifdef CONFIG_BTRFS_FS
56415 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56416 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56417 +#endif
56418 +
56419 +static inline dev_t __get_dev(const struct dentry *dentry)
56420 +{
56421 +#ifdef CONFIG_BTRFS_FS
56422 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56423 + return get_btrfs_dev_from_inode(dentry->d_inode);
56424 + else
56425 +#endif
56426 + return dentry->d_inode->i_sb->s_dev;
56427 +}
56428 +
56429 +int
56430 +gr_init_uidset(void)
56431 +{
56432 + uid_set =
56433 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56434 + uid_used = 0;
56435 +
56436 + return uid_set ? 1 : 0;
56437 +}
56438 +
56439 +void
56440 +gr_free_uidset(void)
56441 +{
56442 + if (uid_set)
56443 + kfree(uid_set);
56444 +
56445 + return;
56446 +}
56447 +
56448 +int
56449 +gr_find_uid(const uid_t uid)
56450 +{
56451 + struct crash_uid *tmp = uid_set;
56452 + uid_t buid;
56453 + int low = 0, high = uid_used - 1, mid;
56454 +
56455 + while (high >= low) {
56456 + mid = (low + high) >> 1;
56457 + buid = tmp[mid].uid;
56458 + if (buid == uid)
56459 + return mid;
56460 + if (buid > uid)
56461 + high = mid - 1;
56462 + if (buid < uid)
56463 + low = mid + 1;
56464 + }
56465 +
56466 + return -1;
56467 +}
56468 +
56469 +static __inline__ void
56470 +gr_insertsort(void)
56471 +{
56472 + unsigned short i, j;
56473 + struct crash_uid index;
56474 +
56475 + for (i = 1; i < uid_used; i++) {
56476 + index = uid_set[i];
56477 + j = i;
56478 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56479 + uid_set[j] = uid_set[j - 1];
56480 + j--;
56481 + }
56482 + uid_set[j] = index;
56483 + }
56484 +
56485 + return;
56486 +}
56487 +
56488 +static __inline__ void
56489 +gr_insert_uid(const uid_t uid, const unsigned long expires)
56490 +{
56491 + int loc;
56492 +
56493 + if (uid_used == GR_UIDTABLE_MAX)
56494 + return;
56495 +
56496 + loc = gr_find_uid(uid);
56497 +
56498 + if (loc >= 0) {
56499 + uid_set[loc].expires = expires;
56500 + return;
56501 + }
56502 +
56503 + uid_set[uid_used].uid = uid;
56504 + uid_set[uid_used].expires = expires;
56505 + uid_used++;
56506 +
56507 + gr_insertsort();
56508 +
56509 + return;
56510 +}
56511 +
56512 +void
56513 +gr_remove_uid(const unsigned short loc)
56514 +{
56515 + unsigned short i;
56516 +
56517 + for (i = loc + 1; i < uid_used; i++)
56518 + uid_set[i - 1] = uid_set[i];
56519 +
56520 + uid_used--;
56521 +
56522 + return;
56523 +}
56524 +
56525 +int
56526 +gr_check_crash_uid(const uid_t uid)
56527 +{
56528 + int loc;
56529 + int ret = 0;
56530 +
56531 + if (unlikely(!gr_acl_is_enabled()))
56532 + return 0;
56533 +
56534 + spin_lock(&gr_uid_lock);
56535 + loc = gr_find_uid(uid);
56536 +
56537 + if (loc < 0)
56538 + goto out_unlock;
56539 +
56540 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
56541 + gr_remove_uid(loc);
56542 + else
56543 + ret = 1;
56544 +
56545 +out_unlock:
56546 + spin_unlock(&gr_uid_lock);
56547 + return ret;
56548 +}
56549 +
56550 +static __inline__ int
56551 +proc_is_setxid(const struct cred *cred)
56552 +{
56553 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
56554 + cred->uid != cred->fsuid)
56555 + return 1;
56556 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56557 + cred->gid != cred->fsgid)
56558 + return 1;
56559 +
56560 + return 0;
56561 +}
56562 +
56563 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
56564 +
56565 +void
56566 +gr_handle_crash(struct task_struct *task, const int sig)
56567 +{
56568 + struct acl_subject_label *curr;
56569 + struct task_struct *tsk, *tsk2;
56570 + const struct cred *cred;
56571 + const struct cred *cred2;
56572 +
56573 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56574 + return;
56575 +
56576 + if (unlikely(!gr_acl_is_enabled()))
56577 + return;
56578 +
56579 + curr = task->acl;
56580 +
56581 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
56582 + return;
56583 +
56584 + if (time_before_eq(curr->expires, get_seconds())) {
56585 + curr->expires = 0;
56586 + curr->crashes = 0;
56587 + }
56588 +
56589 + curr->crashes++;
56590 +
56591 + if (!curr->expires)
56592 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56593 +
56594 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56595 + time_after(curr->expires, get_seconds())) {
56596 + rcu_read_lock();
56597 + cred = __task_cred(task);
56598 + if (cred->uid && proc_is_setxid(cred)) {
56599 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56600 + spin_lock(&gr_uid_lock);
56601 + gr_insert_uid(cred->uid, curr->expires);
56602 + spin_unlock(&gr_uid_lock);
56603 + curr->expires = 0;
56604 + curr->crashes = 0;
56605 + read_lock(&tasklist_lock);
56606 + do_each_thread(tsk2, tsk) {
56607 + cred2 = __task_cred(tsk);
56608 + if (tsk != task && cred2->uid == cred->uid)
56609 + gr_fake_force_sig(SIGKILL, tsk);
56610 + } while_each_thread(tsk2, tsk);
56611 + read_unlock(&tasklist_lock);
56612 + } else {
56613 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56614 + read_lock(&tasklist_lock);
56615 + read_lock(&grsec_exec_file_lock);
56616 + do_each_thread(tsk2, tsk) {
56617 + if (likely(tsk != task)) {
56618 + // if this thread has the same subject as the one that triggered
56619 + // RES_CRASH and it's the same binary, kill it
56620 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56621 + gr_fake_force_sig(SIGKILL, tsk);
56622 + }
56623 + } while_each_thread(tsk2, tsk);
56624 + read_unlock(&grsec_exec_file_lock);
56625 + read_unlock(&tasklist_lock);
56626 + }
56627 + rcu_read_unlock();
56628 + }
56629 +
56630 + return;
56631 +}
56632 +
56633 +int
56634 +gr_check_crash_exec(const struct file *filp)
56635 +{
56636 + struct acl_subject_label *curr;
56637 +
56638 + if (unlikely(!gr_acl_is_enabled()))
56639 + return 0;
56640 +
56641 + read_lock(&gr_inode_lock);
56642 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56643 + __get_dev(filp->f_path.dentry),
56644 + current->role);
56645 + read_unlock(&gr_inode_lock);
56646 +
56647 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56648 + (!curr->crashes && !curr->expires))
56649 + return 0;
56650 +
56651 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56652 + time_after(curr->expires, get_seconds()))
56653 + return 1;
56654 + else if (time_before_eq(curr->expires, get_seconds())) {
56655 + curr->crashes = 0;
56656 + curr->expires = 0;
56657 + }
56658 +
56659 + return 0;
56660 +}
56661 +
56662 +void
56663 +gr_handle_alertkill(struct task_struct *task)
56664 +{
56665 + struct acl_subject_label *curracl;
56666 + __u32 curr_ip;
56667 + struct task_struct *p, *p2;
56668 +
56669 + if (unlikely(!gr_acl_is_enabled()))
56670 + return;
56671 +
56672 + curracl = task->acl;
56673 + curr_ip = task->signal->curr_ip;
56674 +
56675 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56676 + read_lock(&tasklist_lock);
56677 + do_each_thread(p2, p) {
56678 + if (p->signal->curr_ip == curr_ip)
56679 + gr_fake_force_sig(SIGKILL, p);
56680 + } while_each_thread(p2, p);
56681 + read_unlock(&tasklist_lock);
56682 + } else if (curracl->mode & GR_KILLPROC)
56683 + gr_fake_force_sig(SIGKILL, task);
56684 +
56685 + return;
56686 +}
56687 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56688 new file mode 100644
56689 index 0000000..9d83a69
56690 --- /dev/null
56691 +++ b/grsecurity/gracl_shm.c
56692 @@ -0,0 +1,40 @@
56693 +#include <linux/kernel.h>
56694 +#include <linux/mm.h>
56695 +#include <linux/sched.h>
56696 +#include <linux/file.h>
56697 +#include <linux/ipc.h>
56698 +#include <linux/gracl.h>
56699 +#include <linux/grsecurity.h>
56700 +#include <linux/grinternal.h>
56701 +
56702 +int
56703 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56704 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56705 +{
56706 + struct task_struct *task;
56707 +
56708 + if (!gr_acl_is_enabled())
56709 + return 1;
56710 +
56711 + rcu_read_lock();
56712 + read_lock(&tasklist_lock);
56713 +
56714 + task = find_task_by_vpid(shm_cprid);
56715 +
56716 + if (unlikely(!task))
56717 + task = find_task_by_vpid(shm_lapid);
56718 +
56719 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56720 + (task->pid == shm_lapid)) &&
56721 + (task->acl->mode & GR_PROTSHM) &&
56722 + (task->acl != current->acl))) {
56723 + read_unlock(&tasklist_lock);
56724 + rcu_read_unlock();
56725 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56726 + return 0;
56727 + }
56728 + read_unlock(&tasklist_lock);
56729 + rcu_read_unlock();
56730 +
56731 + return 1;
56732 +}
56733 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56734 new file mode 100644
56735 index 0000000..bc0be01
56736 --- /dev/null
56737 +++ b/grsecurity/grsec_chdir.c
56738 @@ -0,0 +1,19 @@
56739 +#include <linux/kernel.h>
56740 +#include <linux/sched.h>
56741 +#include <linux/fs.h>
56742 +#include <linux/file.h>
56743 +#include <linux/grsecurity.h>
56744 +#include <linux/grinternal.h>
56745 +
56746 +void
56747 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56748 +{
56749 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56750 + if ((grsec_enable_chdir && grsec_enable_group &&
56751 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56752 + !grsec_enable_group)) {
56753 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56754 + }
56755 +#endif
56756 + return;
56757 +}
56758 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56759 new file mode 100644
56760 index 0000000..a2dc675
56761 --- /dev/null
56762 +++ b/grsecurity/grsec_chroot.c
56763 @@ -0,0 +1,351 @@
56764 +#include <linux/kernel.h>
56765 +#include <linux/module.h>
56766 +#include <linux/sched.h>
56767 +#include <linux/file.h>
56768 +#include <linux/fs.h>
56769 +#include <linux/mount.h>
56770 +#include <linux/types.h>
56771 +#include <linux/pid_namespace.h>
56772 +#include <linux/grsecurity.h>
56773 +#include <linux/grinternal.h>
56774 +
56775 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56776 +{
56777 +#ifdef CONFIG_GRKERNSEC
56778 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56779 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
56780 + task->gr_is_chrooted = 1;
56781 + else
56782 + task->gr_is_chrooted = 0;
56783 +
56784 + task->gr_chroot_dentry = path->dentry;
56785 +#endif
56786 + return;
56787 +}
56788 +
56789 +void gr_clear_chroot_entries(struct task_struct *task)
56790 +{
56791 +#ifdef CONFIG_GRKERNSEC
56792 + task->gr_is_chrooted = 0;
56793 + task->gr_chroot_dentry = NULL;
56794 +#endif
56795 + return;
56796 +}
56797 +
56798 +int
56799 +gr_handle_chroot_unix(const pid_t pid)
56800 +{
56801 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56802 + struct task_struct *p;
56803 +
56804 + if (unlikely(!grsec_enable_chroot_unix))
56805 + return 1;
56806 +
56807 + if (likely(!proc_is_chrooted(current)))
56808 + return 1;
56809 +
56810 + rcu_read_lock();
56811 + read_lock(&tasklist_lock);
56812 + p = find_task_by_vpid_unrestricted(pid);
56813 + if (unlikely(p && !have_same_root(current, p))) {
56814 + read_unlock(&tasklist_lock);
56815 + rcu_read_unlock();
56816 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56817 + return 0;
56818 + }
56819 + read_unlock(&tasklist_lock);
56820 + rcu_read_unlock();
56821 +#endif
56822 + return 1;
56823 +}
56824 +
56825 +int
56826 +gr_handle_chroot_nice(void)
56827 +{
56828 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56829 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56830 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56831 + return -EPERM;
56832 + }
56833 +#endif
56834 + return 0;
56835 +}
56836 +
56837 +int
56838 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56839 +{
56840 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56841 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56842 + && proc_is_chrooted(current)) {
56843 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56844 + return -EACCES;
56845 + }
56846 +#endif
56847 + return 0;
56848 +}
56849 +
56850 +int
56851 +gr_handle_chroot_rawio(const struct inode *inode)
56852 +{
56853 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56854 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56855 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56856 + return 1;
56857 +#endif
56858 + return 0;
56859 +}
56860 +
56861 +int
56862 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56863 +{
56864 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56865 + struct task_struct *p;
56866 + int ret = 0;
56867 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56868 + return ret;
56869 +
56870 + read_lock(&tasklist_lock);
56871 + do_each_pid_task(pid, type, p) {
56872 + if (!have_same_root(current, p)) {
56873 + ret = 1;
56874 + goto out;
56875 + }
56876 + } while_each_pid_task(pid, type, p);
56877 +out:
56878 + read_unlock(&tasklist_lock);
56879 + return ret;
56880 +#endif
56881 + return 0;
56882 +}
56883 +
56884 +int
56885 +gr_pid_is_chrooted(struct task_struct *p)
56886 +{
56887 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56888 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56889 + return 0;
56890 +
56891 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56892 + !have_same_root(current, p)) {
56893 + return 1;
56894 + }
56895 +#endif
56896 + return 0;
56897 +}
56898 +
56899 +EXPORT_SYMBOL(gr_pid_is_chrooted);
56900 +
56901 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56902 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56903 +{
56904 + struct path path, currentroot;
56905 + int ret = 0;
56906 +
56907 + path.dentry = (struct dentry *)u_dentry;
56908 + path.mnt = (struct vfsmount *)u_mnt;
56909 + get_fs_root(current->fs, &currentroot);
56910 + if (path_is_under(&path, &currentroot))
56911 + ret = 1;
56912 + path_put(&currentroot);
56913 +
56914 + return ret;
56915 +}
56916 +#endif
56917 +
56918 +int
56919 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56920 +{
56921 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56922 + if (!grsec_enable_chroot_fchdir)
56923 + return 1;
56924 +
56925 + if (!proc_is_chrooted(current))
56926 + return 1;
56927 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56928 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56929 + return 0;
56930 + }
56931 +#endif
56932 + return 1;
56933 +}
56934 +
56935 +int
56936 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56937 + const time_t shm_createtime)
56938 +{
56939 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56940 + struct task_struct *p;
56941 + time_t starttime;
56942 +
56943 + if (unlikely(!grsec_enable_chroot_shmat))
56944 + return 1;
56945 +
56946 + if (likely(!proc_is_chrooted(current)))
56947 + return 1;
56948 +
56949 + rcu_read_lock();
56950 + read_lock(&tasklist_lock);
56951 +
56952 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56953 + starttime = p->start_time.tv_sec;
56954 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56955 + if (have_same_root(current, p)) {
56956 + goto allow;
56957 + } else {
56958 + read_unlock(&tasklist_lock);
56959 + rcu_read_unlock();
56960 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56961 + return 0;
56962 + }
56963 + }
56964 + /* creator exited, pid reuse, fall through to next check */
56965 + }
56966 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56967 + if (unlikely(!have_same_root(current, p))) {
56968 + read_unlock(&tasklist_lock);
56969 + rcu_read_unlock();
56970 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56971 + return 0;
56972 + }
56973 + }
56974 +
56975 +allow:
56976 + read_unlock(&tasklist_lock);
56977 + rcu_read_unlock();
56978 +#endif
56979 + return 1;
56980 +}
56981 +
56982 +void
56983 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56984 +{
56985 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56986 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56987 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56988 +#endif
56989 + return;
56990 +}
56991 +
56992 +int
56993 +gr_handle_chroot_mknod(const struct dentry *dentry,
56994 + const struct vfsmount *mnt, const int mode)
56995 +{
56996 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56997 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56998 + proc_is_chrooted(current)) {
56999 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57000 + return -EPERM;
57001 + }
57002 +#endif
57003 + return 0;
57004 +}
57005 +
57006 +int
57007 +gr_handle_chroot_mount(const struct dentry *dentry,
57008 + const struct vfsmount *mnt, const char *dev_name)
57009 +{
57010 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57011 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
57012 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57013 + return -EPERM;
57014 + }
57015 +#endif
57016 + return 0;
57017 +}
57018 +
57019 +int
57020 +gr_handle_chroot_pivot(void)
57021 +{
57022 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57023 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57024 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57025 + return -EPERM;
57026 + }
57027 +#endif
57028 + return 0;
57029 +}
57030 +
57031 +int
57032 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57033 +{
57034 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57035 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57036 + !gr_is_outside_chroot(dentry, mnt)) {
57037 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57038 + return -EPERM;
57039 + }
57040 +#endif
57041 + return 0;
57042 +}
57043 +
57044 +extern const char *captab_log[];
57045 +extern int captab_log_entries;
57046 +
57047 +int
57048 +gr_chroot_is_capable(const int cap)
57049 +{
57050 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57051 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
57052 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57053 + if (cap_raised(chroot_caps, cap)) {
57054 + const struct cred *creds = current_cred();
57055 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
57056 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
57057 + }
57058 + return 0;
57059 + }
57060 + }
57061 +#endif
57062 + return 1;
57063 +}
57064 +
57065 +int
57066 +gr_chroot_is_capable_nolog(const int cap)
57067 +{
57068 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57069 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
57070 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57071 + if (cap_raised(chroot_caps, cap)) {
57072 + return 0;
57073 + }
57074 + }
57075 +#endif
57076 + return 1;
57077 +}
57078 +
57079 +int
57080 +gr_handle_chroot_sysctl(const int op)
57081 +{
57082 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57083 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57084 + proc_is_chrooted(current))
57085 + return -EACCES;
57086 +#endif
57087 + return 0;
57088 +}
57089 +
57090 +void
57091 +gr_handle_chroot_chdir(struct path *path)
57092 +{
57093 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57094 + if (grsec_enable_chroot_chdir)
57095 + set_fs_pwd(current->fs, path);
57096 +#endif
57097 + return;
57098 +}
57099 +
57100 +int
57101 +gr_handle_chroot_chmod(const struct dentry *dentry,
57102 + const struct vfsmount *mnt, const int mode)
57103 +{
57104 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57105 + /* allow chmod +s on directories, but not files */
57106 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57107 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57108 + proc_is_chrooted(current)) {
57109 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57110 + return -EPERM;
57111 + }
57112 +#endif
57113 + return 0;
57114 +}
57115 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57116 new file mode 100644
57117 index 0000000..d81a586
57118 --- /dev/null
57119 +++ b/grsecurity/grsec_disabled.c
57120 @@ -0,0 +1,439 @@
57121 +#include <linux/kernel.h>
57122 +#include <linux/module.h>
57123 +#include <linux/sched.h>
57124 +#include <linux/file.h>
57125 +#include <linux/fs.h>
57126 +#include <linux/kdev_t.h>
57127 +#include <linux/net.h>
57128 +#include <linux/in.h>
57129 +#include <linux/ip.h>
57130 +#include <linux/skbuff.h>
57131 +#include <linux/sysctl.h>
57132 +
57133 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57134 +void
57135 +pax_set_initial_flags(struct linux_binprm *bprm)
57136 +{
57137 + return;
57138 +}
57139 +#endif
57140 +
57141 +#ifdef CONFIG_SYSCTL
57142 +__u32
57143 +gr_handle_sysctl(const struct ctl_table * table, const int op)
57144 +{
57145 + return 0;
57146 +}
57147 +#endif
57148 +
57149 +#ifdef CONFIG_TASKSTATS
57150 +int gr_is_taskstats_denied(int pid)
57151 +{
57152 + return 0;
57153 +}
57154 +#endif
57155 +
57156 +int
57157 +gr_acl_is_enabled(void)
57158 +{
57159 + return 0;
57160 +}
57161 +
57162 +void
57163 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57164 +{
57165 + return;
57166 +}
57167 +
57168 +int
57169 +gr_handle_rawio(const struct inode *inode)
57170 +{
57171 + return 0;
57172 +}
57173 +
57174 +void
57175 +gr_acl_handle_psacct(struct task_struct *task, const long code)
57176 +{
57177 + return;
57178 +}
57179 +
57180 +int
57181 +gr_handle_ptrace(struct task_struct *task, const long request)
57182 +{
57183 + return 0;
57184 +}
57185 +
57186 +int
57187 +gr_handle_proc_ptrace(struct task_struct *task)
57188 +{
57189 + return 0;
57190 +}
57191 +
57192 +void
57193 +gr_learn_resource(const struct task_struct *task,
57194 + const int res, const unsigned long wanted, const int gt)
57195 +{
57196 + return;
57197 +}
57198 +
57199 +int
57200 +gr_set_acls(const int type)
57201 +{
57202 + return 0;
57203 +}
57204 +
57205 +int
57206 +gr_check_hidden_task(const struct task_struct *tsk)
57207 +{
57208 + return 0;
57209 +}
57210 +
57211 +int
57212 +gr_check_protected_task(const struct task_struct *task)
57213 +{
57214 + return 0;
57215 +}
57216 +
57217 +int
57218 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57219 +{
57220 + return 0;
57221 +}
57222 +
57223 +void
57224 +gr_copy_label(struct task_struct *tsk)
57225 +{
57226 + return;
57227 +}
57228 +
57229 +void
57230 +gr_set_pax_flags(struct task_struct *task)
57231 +{
57232 + return;
57233 +}
57234 +
57235 +int
57236 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57237 + const int unsafe_share)
57238 +{
57239 + return 0;
57240 +}
57241 +
57242 +void
57243 +gr_handle_delete(const ino_t ino, const dev_t dev)
57244 +{
57245 + return;
57246 +}
57247 +
57248 +void
57249 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57250 +{
57251 + return;
57252 +}
57253 +
57254 +void
57255 +gr_handle_crash(struct task_struct *task, const int sig)
57256 +{
57257 + return;
57258 +}
57259 +
57260 +int
57261 +gr_check_crash_exec(const struct file *filp)
57262 +{
57263 + return 0;
57264 +}
57265 +
57266 +int
57267 +gr_check_crash_uid(const uid_t uid)
57268 +{
57269 + return 0;
57270 +}
57271 +
57272 +void
57273 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57274 + struct dentry *old_dentry,
57275 + struct dentry *new_dentry,
57276 + struct vfsmount *mnt, const __u8 replace)
57277 +{
57278 + return;
57279 +}
57280 +
57281 +int
57282 +gr_search_socket(const int family, const int type, const int protocol)
57283 +{
57284 + return 1;
57285 +}
57286 +
57287 +int
57288 +gr_search_connectbind(const int mode, const struct socket *sock,
57289 + const struct sockaddr_in *addr)
57290 +{
57291 + return 0;
57292 +}
57293 +
57294 +void
57295 +gr_handle_alertkill(struct task_struct *task)
57296 +{
57297 + return;
57298 +}
57299 +
57300 +__u32
57301 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57302 +{
57303 + return 1;
57304 +}
57305 +
57306 +__u32
57307 +gr_acl_handle_hidden_file(const struct dentry * dentry,
57308 + const struct vfsmount * mnt)
57309 +{
57310 + return 1;
57311 +}
57312 +
57313 +__u32
57314 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57315 + int acc_mode)
57316 +{
57317 + return 1;
57318 +}
57319 +
57320 +__u32
57321 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57322 +{
57323 + return 1;
57324 +}
57325 +
57326 +__u32
57327 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57328 +{
57329 + return 1;
57330 +}
57331 +
57332 +int
57333 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57334 + unsigned int *vm_flags)
57335 +{
57336 + return 1;
57337 +}
57338 +
57339 +__u32
57340 +gr_acl_handle_truncate(const struct dentry * dentry,
57341 + const struct vfsmount * mnt)
57342 +{
57343 + return 1;
57344 +}
57345 +
57346 +__u32
57347 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57348 +{
57349 + return 1;
57350 +}
57351 +
57352 +__u32
57353 +gr_acl_handle_access(const struct dentry * dentry,
57354 + const struct vfsmount * mnt, const int fmode)
57355 +{
57356 + return 1;
57357 +}
57358 +
57359 +__u32
57360 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
57361 + mode_t mode)
57362 +{
57363 + return 1;
57364 +}
57365 +
57366 +__u32
57367 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57368 + mode_t mode)
57369 +{
57370 + return 1;
57371 +}
57372 +
57373 +__u32
57374 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57375 +{
57376 + return 1;
57377 +}
57378 +
57379 +__u32
57380 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57381 +{
57382 + return 1;
57383 +}
57384 +
57385 +void
57386 +grsecurity_init(void)
57387 +{
57388 + return;
57389 +}
57390 +
57391 +__u32
57392 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57393 + const struct dentry * parent_dentry,
57394 + const struct vfsmount * parent_mnt,
57395 + const int mode)
57396 +{
57397 + return 1;
57398 +}
57399 +
57400 +__u32
57401 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57402 + const struct dentry * parent_dentry,
57403 + const struct vfsmount * parent_mnt)
57404 +{
57405 + return 1;
57406 +}
57407 +
57408 +__u32
57409 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57410 + const struct dentry * parent_dentry,
57411 + const struct vfsmount * parent_mnt, const char *from)
57412 +{
57413 + return 1;
57414 +}
57415 +
57416 +__u32
57417 +gr_acl_handle_link(const struct dentry * new_dentry,
57418 + const struct dentry * parent_dentry,
57419 + const struct vfsmount * parent_mnt,
57420 + const struct dentry * old_dentry,
57421 + const struct vfsmount * old_mnt, const char *to)
57422 +{
57423 + return 1;
57424 +}
57425 +
57426 +int
57427 +gr_acl_handle_rename(const struct dentry *new_dentry,
57428 + const struct dentry *parent_dentry,
57429 + const struct vfsmount *parent_mnt,
57430 + const struct dentry *old_dentry,
57431 + const struct inode *old_parent_inode,
57432 + const struct vfsmount *old_mnt, const char *newname)
57433 +{
57434 + return 0;
57435 +}
57436 +
57437 +int
57438 +gr_acl_handle_filldir(const struct file *file, const char *name,
57439 + const int namelen, const ino_t ino)
57440 +{
57441 + return 1;
57442 +}
57443 +
57444 +int
57445 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57446 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57447 +{
57448 + return 1;
57449 +}
57450 +
57451 +int
57452 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57453 +{
57454 + return 0;
57455 +}
57456 +
57457 +int
57458 +gr_search_accept(const struct socket *sock)
57459 +{
57460 + return 0;
57461 +}
57462 +
57463 +int
57464 +gr_search_listen(const struct socket *sock)
57465 +{
57466 + return 0;
57467 +}
57468 +
57469 +int
57470 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57471 +{
57472 + return 0;
57473 +}
57474 +
57475 +__u32
57476 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57477 +{
57478 + return 1;
57479 +}
57480 +
57481 +__u32
57482 +gr_acl_handle_creat(const struct dentry * dentry,
57483 + const struct dentry * p_dentry,
57484 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57485 + const int imode)
57486 +{
57487 + return 1;
57488 +}
57489 +
57490 +void
57491 +gr_acl_handle_exit(void)
57492 +{
57493 + return;
57494 +}
57495 +
57496 +int
57497 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57498 +{
57499 + return 1;
57500 +}
57501 +
57502 +void
57503 +gr_set_role_label(const uid_t uid, const gid_t gid)
57504 +{
57505 + return;
57506 +}
57507 +
57508 +int
57509 +gr_acl_handle_procpidmem(const struct task_struct *task)
57510 +{
57511 + return 0;
57512 +}
57513 +
57514 +int
57515 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57516 +{
57517 + return 0;
57518 +}
57519 +
57520 +int
57521 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57522 +{
57523 + return 0;
57524 +}
57525 +
57526 +void
57527 +gr_set_kernel_label(struct task_struct *task)
57528 +{
57529 + return;
57530 +}
57531 +
57532 +int
57533 +gr_check_user_change(int real, int effective, int fs)
57534 +{
57535 + return 0;
57536 +}
57537 +
57538 +int
57539 +gr_check_group_change(int real, int effective, int fs)
57540 +{
57541 + return 0;
57542 +}
57543 +
57544 +int gr_acl_enable_at_secure(void)
57545 +{
57546 + return 0;
57547 +}
57548 +
57549 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57550 +{
57551 + return dentry->d_inode->i_sb->s_dev;
57552 +}
57553 +
57554 +EXPORT_SYMBOL(gr_learn_resource);
57555 +EXPORT_SYMBOL(gr_set_kernel_label);
57556 +#ifdef CONFIG_SECURITY
57557 +EXPORT_SYMBOL(gr_check_user_change);
57558 +EXPORT_SYMBOL(gr_check_group_change);
57559 +#endif
57560 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57561 new file mode 100644
57562 index 0000000..2b05ada
57563 --- /dev/null
57564 +++ b/grsecurity/grsec_exec.c
57565 @@ -0,0 +1,146 @@
57566 +#include <linux/kernel.h>
57567 +#include <linux/sched.h>
57568 +#include <linux/file.h>
57569 +#include <linux/binfmts.h>
57570 +#include <linux/fs.h>
57571 +#include <linux/types.h>
57572 +#include <linux/grdefs.h>
57573 +#include <linux/grsecurity.h>
57574 +#include <linux/grinternal.h>
57575 +#include <linux/capability.h>
57576 +#include <linux/module.h>
57577 +
57578 +#include <asm/uaccess.h>
57579 +
57580 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57581 +static char gr_exec_arg_buf[132];
57582 +static DEFINE_MUTEX(gr_exec_arg_mutex);
57583 +#endif
57584 +
57585 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57586 +
57587 +void
57588 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57589 +{
57590 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57591 + char *grarg = gr_exec_arg_buf;
57592 + unsigned int i, x, execlen = 0;
57593 + char c;
57594 +
57595 + if (!((grsec_enable_execlog && grsec_enable_group &&
57596 + in_group_p(grsec_audit_gid))
57597 + || (grsec_enable_execlog && !grsec_enable_group)))
57598 + return;
57599 +
57600 + mutex_lock(&gr_exec_arg_mutex);
57601 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57602 +
57603 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57604 + const char __user *p;
57605 + unsigned int len;
57606 +
57607 + p = get_user_arg_ptr(argv, i);
57608 + if (IS_ERR(p))
57609 + goto log;
57610 +
57611 + len = strnlen_user(p, 128 - execlen);
57612 + if (len > 128 - execlen)
57613 + len = 128 - execlen;
57614 + else if (len > 0)
57615 + len--;
57616 + if (copy_from_user(grarg + execlen, p, len))
57617 + goto log;
57618 +
57619 + /* rewrite unprintable characters */
57620 + for (x = 0; x < len; x++) {
57621 + c = *(grarg + execlen + x);
57622 + if (c < 32 || c > 126)
57623 + *(grarg + execlen + x) = ' ';
57624 + }
57625 +
57626 + execlen += len;
57627 + *(grarg + execlen) = ' ';
57628 + *(grarg + execlen + 1) = '\0';
57629 + execlen++;
57630 + }
57631 +
57632 + log:
57633 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57634 + bprm->file->f_path.mnt, grarg);
57635 + mutex_unlock(&gr_exec_arg_mutex);
57636 +#endif
57637 + return;
57638 +}
57639 +
57640 +#ifdef CONFIG_GRKERNSEC
57641 +extern int gr_acl_is_capable(const int cap);
57642 +extern int gr_acl_is_capable_nolog(const int cap);
57643 +extern int gr_chroot_is_capable(const int cap);
57644 +extern int gr_chroot_is_capable_nolog(const int cap);
57645 +#endif
57646 +
57647 +const char *captab_log[] = {
57648 + "CAP_CHOWN",
57649 + "CAP_DAC_OVERRIDE",
57650 + "CAP_DAC_READ_SEARCH",
57651 + "CAP_FOWNER",
57652 + "CAP_FSETID",
57653 + "CAP_KILL",
57654 + "CAP_SETGID",
57655 + "CAP_SETUID",
57656 + "CAP_SETPCAP",
57657 + "CAP_LINUX_IMMUTABLE",
57658 + "CAP_NET_BIND_SERVICE",
57659 + "CAP_NET_BROADCAST",
57660 + "CAP_NET_ADMIN",
57661 + "CAP_NET_RAW",
57662 + "CAP_IPC_LOCK",
57663 + "CAP_IPC_OWNER",
57664 + "CAP_SYS_MODULE",
57665 + "CAP_SYS_RAWIO",
57666 + "CAP_SYS_CHROOT",
57667 + "CAP_SYS_PTRACE",
57668 + "CAP_SYS_PACCT",
57669 + "CAP_SYS_ADMIN",
57670 + "CAP_SYS_BOOT",
57671 + "CAP_SYS_NICE",
57672 + "CAP_SYS_RESOURCE",
57673 + "CAP_SYS_TIME",
57674 + "CAP_SYS_TTY_CONFIG",
57675 + "CAP_MKNOD",
57676 + "CAP_LEASE",
57677 + "CAP_AUDIT_WRITE",
57678 + "CAP_AUDIT_CONTROL",
57679 + "CAP_SETFCAP",
57680 + "CAP_MAC_OVERRIDE",
57681 + "CAP_MAC_ADMIN",
57682 + "CAP_SYSLOG",
57683 + "CAP_WAKE_ALARM"
57684 +};
57685 +
57686 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57687 +
57688 +int gr_is_capable(const int cap)
57689 +{
57690 +#ifdef CONFIG_GRKERNSEC
57691 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57692 + return 1;
57693 + return 0;
57694 +#else
57695 + return 1;
57696 +#endif
57697 +}
57698 +
57699 +int gr_is_capable_nolog(const int cap)
57700 +{
57701 +#ifdef CONFIG_GRKERNSEC
57702 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57703 + return 1;
57704 + return 0;
57705 +#else
57706 + return 1;
57707 +#endif
57708 +}
57709 +
57710 +EXPORT_SYMBOL(gr_is_capable);
57711 +EXPORT_SYMBOL(gr_is_capable_nolog);
57712 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57713 new file mode 100644
57714 index 0000000..d3ee748
57715 --- /dev/null
57716 +++ b/grsecurity/grsec_fifo.c
57717 @@ -0,0 +1,24 @@
57718 +#include <linux/kernel.h>
57719 +#include <linux/sched.h>
57720 +#include <linux/fs.h>
57721 +#include <linux/file.h>
57722 +#include <linux/grinternal.h>
57723 +
57724 +int
57725 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57726 + const struct dentry *dir, const int flag, const int acc_mode)
57727 +{
57728 +#ifdef CONFIG_GRKERNSEC_FIFO
57729 + const struct cred *cred = current_cred();
57730 +
57731 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57732 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57733 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57734 + (cred->fsuid != dentry->d_inode->i_uid)) {
57735 + if (!inode_permission(dentry->d_inode, acc_mode))
57736 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57737 + return -EACCES;
57738 + }
57739 +#endif
57740 + return 0;
57741 +}
57742 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57743 new file mode 100644
57744 index 0000000..8ca18bf
57745 --- /dev/null
57746 +++ b/grsecurity/grsec_fork.c
57747 @@ -0,0 +1,23 @@
57748 +#include <linux/kernel.h>
57749 +#include <linux/sched.h>
57750 +#include <linux/grsecurity.h>
57751 +#include <linux/grinternal.h>
57752 +#include <linux/errno.h>
57753 +
57754 +void
57755 +gr_log_forkfail(const int retval)
57756 +{
57757 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57758 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57759 + switch (retval) {
57760 + case -EAGAIN:
57761 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57762 + break;
57763 + case -ENOMEM:
57764 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57765 + break;
57766 + }
57767 + }
57768 +#endif
57769 + return;
57770 +}
57771 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57772 new file mode 100644
57773 index 0000000..cb8e5a1
57774 --- /dev/null
57775 +++ b/grsecurity/grsec_init.c
57776 @@ -0,0 +1,273 @@
57777 +#include <linux/kernel.h>
57778 +#include <linux/sched.h>
57779 +#include <linux/mm.h>
57780 +#include <linux/gracl.h>
57781 +#include <linux/slab.h>
57782 +#include <linux/vmalloc.h>
57783 +#include <linux/percpu.h>
57784 +#include <linux/module.h>
57785 +
57786 +int grsec_enable_setxid;
57787 +int grsec_enable_brute;
57788 +int grsec_enable_link;
57789 +int grsec_enable_dmesg;
57790 +int grsec_enable_harden_ptrace;
57791 +int grsec_enable_fifo;
57792 +int grsec_enable_execlog;
57793 +int grsec_enable_signal;
57794 +int grsec_enable_forkfail;
57795 +int grsec_enable_audit_ptrace;
57796 +int grsec_enable_time;
57797 +int grsec_enable_audit_textrel;
57798 +int grsec_enable_group;
57799 +int grsec_audit_gid;
57800 +int grsec_enable_chdir;
57801 +int grsec_enable_mount;
57802 +int grsec_enable_rofs;
57803 +int grsec_enable_chroot_findtask;
57804 +int grsec_enable_chroot_mount;
57805 +int grsec_enable_chroot_shmat;
57806 +int grsec_enable_chroot_fchdir;
57807 +int grsec_enable_chroot_double;
57808 +int grsec_enable_chroot_pivot;
57809 +int grsec_enable_chroot_chdir;
57810 +int grsec_enable_chroot_chmod;
57811 +int grsec_enable_chroot_mknod;
57812 +int grsec_enable_chroot_nice;
57813 +int grsec_enable_chroot_execlog;
57814 +int grsec_enable_chroot_caps;
57815 +int grsec_enable_chroot_sysctl;
57816 +int grsec_enable_chroot_unix;
57817 +int grsec_enable_tpe;
57818 +int grsec_tpe_gid;
57819 +int grsec_enable_blackhole;
57820 +#ifdef CONFIG_IPV6_MODULE
57821 +EXPORT_SYMBOL(grsec_enable_blackhole);
57822 +#endif
57823 +int grsec_lastack_retries;
57824 +int grsec_enable_tpe_all;
57825 +int grsec_enable_tpe_invert;
57826 +int grsec_enable_socket_all;
57827 +int grsec_socket_all_gid;
57828 +int grsec_enable_socket_client;
57829 +int grsec_socket_client_gid;
57830 +int grsec_enable_socket_server;
57831 +int grsec_socket_server_gid;
57832 +int grsec_resource_logging;
57833 +int grsec_disable_privio;
57834 +int grsec_enable_log_rwxmaps;
57835 +int grsec_lock;
57836 +
57837 +DEFINE_SPINLOCK(grsec_alert_lock);
57838 +unsigned long grsec_alert_wtime = 0;
57839 +unsigned long grsec_alert_fyet = 0;
57840 +
57841 +DEFINE_SPINLOCK(grsec_audit_lock);
57842 +
57843 +DEFINE_RWLOCK(grsec_exec_file_lock);
57844 +
57845 +char *gr_shared_page[4];
57846 +
57847 +char *gr_alert_log_fmt;
57848 +char *gr_audit_log_fmt;
57849 +char *gr_alert_log_buf;
57850 +char *gr_audit_log_buf;
57851 +
57852 +extern struct gr_arg *gr_usermode;
57853 +extern unsigned char *gr_system_salt;
57854 +extern unsigned char *gr_system_sum;
57855 +
57856 +void __init
57857 +grsecurity_init(void)
57858 +{
57859 + int j;
57860 + /* create the per-cpu shared pages */
57861 +
57862 +#ifdef CONFIG_X86
57863 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57864 +#endif
57865 +
57866 + for (j = 0; j < 4; j++) {
57867 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57868 + if (gr_shared_page[j] == NULL) {
57869 + panic("Unable to allocate grsecurity shared page");
57870 + return;
57871 + }
57872 + }
57873 +
57874 + /* allocate log buffers */
57875 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57876 + if (!gr_alert_log_fmt) {
57877 + panic("Unable to allocate grsecurity alert log format buffer");
57878 + return;
57879 + }
57880 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57881 + if (!gr_audit_log_fmt) {
57882 + panic("Unable to allocate grsecurity audit log format buffer");
57883 + return;
57884 + }
57885 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57886 + if (!gr_alert_log_buf) {
57887 + panic("Unable to allocate grsecurity alert log buffer");
57888 + return;
57889 + }
57890 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57891 + if (!gr_audit_log_buf) {
57892 + panic("Unable to allocate grsecurity audit log buffer");
57893 + return;
57894 + }
57895 +
57896 + /* allocate memory for authentication structure */
57897 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57898 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57899 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57900 +
57901 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57902 + panic("Unable to allocate grsecurity authentication structure");
57903 + return;
57904 + }
57905 +
57906 +
57907 +#ifdef CONFIG_GRKERNSEC_IO
57908 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57909 + grsec_disable_privio = 1;
57910 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57911 + grsec_disable_privio = 1;
57912 +#else
57913 + grsec_disable_privio = 0;
57914 +#endif
57915 +#endif
57916 +
57917 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57918 + /* for backward compatibility, tpe_invert always defaults to on if
57919 + enabled in the kernel
57920 + */
57921 + grsec_enable_tpe_invert = 1;
57922 +#endif
57923 +
57924 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57925 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57926 + grsec_lock = 1;
57927 +#endif
57928 +
57929 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57930 + grsec_enable_audit_textrel = 1;
57931 +#endif
57932 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57933 + grsec_enable_log_rwxmaps = 1;
57934 +#endif
57935 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57936 + grsec_enable_group = 1;
57937 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57938 +#endif
57939 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57940 + grsec_enable_chdir = 1;
57941 +#endif
57942 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57943 + grsec_enable_harden_ptrace = 1;
57944 +#endif
57945 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57946 + grsec_enable_mount = 1;
57947 +#endif
57948 +#ifdef CONFIG_GRKERNSEC_LINK
57949 + grsec_enable_link = 1;
57950 +#endif
57951 +#ifdef CONFIG_GRKERNSEC_BRUTE
57952 + grsec_enable_brute = 1;
57953 +#endif
57954 +#ifdef CONFIG_GRKERNSEC_DMESG
57955 + grsec_enable_dmesg = 1;
57956 +#endif
57957 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57958 + grsec_enable_blackhole = 1;
57959 + grsec_lastack_retries = 4;
57960 +#endif
57961 +#ifdef CONFIG_GRKERNSEC_FIFO
57962 + grsec_enable_fifo = 1;
57963 +#endif
57964 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57965 + grsec_enable_execlog = 1;
57966 +#endif
57967 +#ifdef CONFIG_GRKERNSEC_SETXID
57968 + grsec_enable_setxid = 1;
57969 +#endif
57970 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57971 + grsec_enable_signal = 1;
57972 +#endif
57973 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57974 + grsec_enable_forkfail = 1;
57975 +#endif
57976 +#ifdef CONFIG_GRKERNSEC_TIME
57977 + grsec_enable_time = 1;
57978 +#endif
57979 +#ifdef CONFIG_GRKERNSEC_RESLOG
57980 + grsec_resource_logging = 1;
57981 +#endif
57982 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57983 + grsec_enable_chroot_findtask = 1;
57984 +#endif
57985 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57986 + grsec_enable_chroot_unix = 1;
57987 +#endif
57988 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57989 + grsec_enable_chroot_mount = 1;
57990 +#endif
57991 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57992 + grsec_enable_chroot_fchdir = 1;
57993 +#endif
57994 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57995 + grsec_enable_chroot_shmat = 1;
57996 +#endif
57997 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57998 + grsec_enable_audit_ptrace = 1;
57999 +#endif
58000 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58001 + grsec_enable_chroot_double = 1;
58002 +#endif
58003 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58004 + grsec_enable_chroot_pivot = 1;
58005 +#endif
58006 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58007 + grsec_enable_chroot_chdir = 1;
58008 +#endif
58009 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58010 + grsec_enable_chroot_chmod = 1;
58011 +#endif
58012 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58013 + grsec_enable_chroot_mknod = 1;
58014 +#endif
58015 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58016 + grsec_enable_chroot_nice = 1;
58017 +#endif
58018 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58019 + grsec_enable_chroot_execlog = 1;
58020 +#endif
58021 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58022 + grsec_enable_chroot_caps = 1;
58023 +#endif
58024 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58025 + grsec_enable_chroot_sysctl = 1;
58026 +#endif
58027 +#ifdef CONFIG_GRKERNSEC_TPE
58028 + grsec_enable_tpe = 1;
58029 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58030 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58031 + grsec_enable_tpe_all = 1;
58032 +#endif
58033 +#endif
58034 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58035 + grsec_enable_socket_all = 1;
58036 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58037 +#endif
58038 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58039 + grsec_enable_socket_client = 1;
58040 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58041 +#endif
58042 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58043 + grsec_enable_socket_server = 1;
58044 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58045 +#endif
58046 +#endif
58047 +
58048 + return;
58049 +}
58050 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58051 new file mode 100644
58052 index 0000000..3efe141
58053 --- /dev/null
58054 +++ b/grsecurity/grsec_link.c
58055 @@ -0,0 +1,43 @@
58056 +#include <linux/kernel.h>
58057 +#include <linux/sched.h>
58058 +#include <linux/fs.h>
58059 +#include <linux/file.h>
58060 +#include <linux/grinternal.h>
58061 +
58062 +int
58063 +gr_handle_follow_link(const struct inode *parent,
58064 + const struct inode *inode,
58065 + const struct dentry *dentry, const struct vfsmount *mnt)
58066 +{
58067 +#ifdef CONFIG_GRKERNSEC_LINK
58068 + const struct cred *cred = current_cred();
58069 +
58070 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58071 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
58072 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
58073 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58074 + return -EACCES;
58075 + }
58076 +#endif
58077 + return 0;
58078 +}
58079 +
58080 +int
58081 +gr_handle_hardlink(const struct dentry *dentry,
58082 + const struct vfsmount *mnt,
58083 + struct inode *inode, const int mode, const char *to)
58084 +{
58085 +#ifdef CONFIG_GRKERNSEC_LINK
58086 + const struct cred *cred = current_cred();
58087 +
58088 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
58089 + (!S_ISREG(mode) || (mode & S_ISUID) ||
58090 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
58091 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58092 + !capable(CAP_FOWNER) && cred->uid) {
58093 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58094 + return -EPERM;
58095 + }
58096 +#endif
58097 + return 0;
58098 +}
58099 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58100 new file mode 100644
58101 index 0000000..a45d2e9
58102 --- /dev/null
58103 +++ b/grsecurity/grsec_log.c
58104 @@ -0,0 +1,322 @@
58105 +#include <linux/kernel.h>
58106 +#include <linux/sched.h>
58107 +#include <linux/file.h>
58108 +#include <linux/tty.h>
58109 +#include <linux/fs.h>
58110 +#include <linux/grinternal.h>
58111 +
58112 +#ifdef CONFIG_TREE_PREEMPT_RCU
58113 +#define DISABLE_PREEMPT() preempt_disable()
58114 +#define ENABLE_PREEMPT() preempt_enable()
58115 +#else
58116 +#define DISABLE_PREEMPT()
58117 +#define ENABLE_PREEMPT()
58118 +#endif
58119 +
58120 +#define BEGIN_LOCKS(x) \
58121 + DISABLE_PREEMPT(); \
58122 + rcu_read_lock(); \
58123 + read_lock(&tasklist_lock); \
58124 + read_lock(&grsec_exec_file_lock); \
58125 + if (x != GR_DO_AUDIT) \
58126 + spin_lock(&grsec_alert_lock); \
58127 + else \
58128 + spin_lock(&grsec_audit_lock)
58129 +
58130 +#define END_LOCKS(x) \
58131 + if (x != GR_DO_AUDIT) \
58132 + spin_unlock(&grsec_alert_lock); \
58133 + else \
58134 + spin_unlock(&grsec_audit_lock); \
58135 + read_unlock(&grsec_exec_file_lock); \
58136 + read_unlock(&tasklist_lock); \
58137 + rcu_read_unlock(); \
58138 + ENABLE_PREEMPT(); \
58139 + if (x == GR_DONT_AUDIT) \
58140 + gr_handle_alertkill(current)
58141 +
58142 +enum {
58143 + FLOODING,
58144 + NO_FLOODING
58145 +};
58146 +
58147 +extern char *gr_alert_log_fmt;
58148 +extern char *gr_audit_log_fmt;
58149 +extern char *gr_alert_log_buf;
58150 +extern char *gr_audit_log_buf;
58151 +
58152 +static int gr_log_start(int audit)
58153 +{
58154 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58155 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58156 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58157 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58158 + unsigned long curr_secs = get_seconds();
58159 +
58160 + if (audit == GR_DO_AUDIT)
58161 + goto set_fmt;
58162 +
58163 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58164 + grsec_alert_wtime = curr_secs;
58165 + grsec_alert_fyet = 0;
58166 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58167 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58168 + grsec_alert_fyet++;
58169 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
58170 + grsec_alert_wtime = curr_secs;
58171 + grsec_alert_fyet++;
58172 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58173 + return FLOODING;
58174 + }
58175 + else return FLOODING;
58176 +
58177 +set_fmt:
58178 +#endif
58179 + memset(buf, 0, PAGE_SIZE);
58180 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
58181 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58182 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58183 + } else if (current->signal->curr_ip) {
58184 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58185 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58186 + } else if (gr_acl_is_enabled()) {
58187 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58188 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58189 + } else {
58190 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
58191 + strcpy(buf, fmt);
58192 + }
58193 +
58194 + return NO_FLOODING;
58195 +}
58196 +
58197 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58198 + __attribute__ ((format (printf, 2, 0)));
58199 +
58200 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58201 +{
58202 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58203 + unsigned int len = strlen(buf);
58204 +
58205 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58206 +
58207 + return;
58208 +}
58209 +
58210 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58211 + __attribute__ ((format (printf, 2, 3)));
58212 +
58213 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58214 +{
58215 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58216 + unsigned int len = strlen(buf);
58217 + va_list ap;
58218 +
58219 + va_start(ap, msg);
58220 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58221 + va_end(ap);
58222 +
58223 + return;
58224 +}
58225 +
58226 +static void gr_log_end(int audit, int append_default)
58227 +{
58228 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58229 +
58230 + if (append_default) {
58231 + unsigned int len = strlen(buf);
58232 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58233 + }
58234 +
58235 + printk("%s\n", buf);
58236 +
58237 + return;
58238 +}
58239 +
58240 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58241 +{
58242 + int logtype;
58243 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58244 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58245 + void *voidptr = NULL;
58246 + int num1 = 0, num2 = 0;
58247 + unsigned long ulong1 = 0, ulong2 = 0;
58248 + struct dentry *dentry = NULL;
58249 + struct vfsmount *mnt = NULL;
58250 + struct file *file = NULL;
58251 + struct task_struct *task = NULL;
58252 + const struct cred *cred, *pcred;
58253 + va_list ap;
58254 +
58255 + BEGIN_LOCKS(audit);
58256 + logtype = gr_log_start(audit);
58257 + if (logtype == FLOODING) {
58258 + END_LOCKS(audit);
58259 + return;
58260 + }
58261 + va_start(ap, argtypes);
58262 + switch (argtypes) {
58263 + case GR_TTYSNIFF:
58264 + task = va_arg(ap, struct task_struct *);
58265 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58266 + break;
58267 + case GR_SYSCTL_HIDDEN:
58268 + str1 = va_arg(ap, char *);
58269 + gr_log_middle_varargs(audit, msg, result, str1);
58270 + break;
58271 + case GR_RBAC:
58272 + dentry = va_arg(ap, struct dentry *);
58273 + mnt = va_arg(ap, struct vfsmount *);
58274 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58275 + break;
58276 + case GR_RBAC_STR:
58277 + dentry = va_arg(ap, struct dentry *);
58278 + mnt = va_arg(ap, struct vfsmount *);
58279 + str1 = va_arg(ap, char *);
58280 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58281 + break;
58282 + case GR_STR_RBAC:
58283 + str1 = va_arg(ap, char *);
58284 + dentry = va_arg(ap, struct dentry *);
58285 + mnt = va_arg(ap, struct vfsmount *);
58286 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58287 + break;
58288 + case GR_RBAC_MODE2:
58289 + dentry = va_arg(ap, struct dentry *);
58290 + mnt = va_arg(ap, struct vfsmount *);
58291 + str1 = va_arg(ap, char *);
58292 + str2 = va_arg(ap, char *);
58293 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58294 + break;
58295 + case GR_RBAC_MODE3:
58296 + dentry = va_arg(ap, struct dentry *);
58297 + mnt = va_arg(ap, struct vfsmount *);
58298 + str1 = va_arg(ap, char *);
58299 + str2 = va_arg(ap, char *);
58300 + str3 = va_arg(ap, char *);
58301 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58302 + break;
58303 + case GR_FILENAME:
58304 + dentry = va_arg(ap, struct dentry *);
58305 + mnt = va_arg(ap, struct vfsmount *);
58306 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58307 + break;
58308 + case GR_STR_FILENAME:
58309 + str1 = va_arg(ap, char *);
58310 + dentry = va_arg(ap, struct dentry *);
58311 + mnt = va_arg(ap, struct vfsmount *);
58312 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58313 + break;
58314 + case GR_FILENAME_STR:
58315 + dentry = va_arg(ap, struct dentry *);
58316 + mnt = va_arg(ap, struct vfsmount *);
58317 + str1 = va_arg(ap, char *);
58318 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58319 + break;
58320 + case GR_FILENAME_TWO_INT:
58321 + dentry = va_arg(ap, struct dentry *);
58322 + mnt = va_arg(ap, struct vfsmount *);
58323 + num1 = va_arg(ap, int);
58324 + num2 = va_arg(ap, int);
58325 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58326 + break;
58327 + case GR_FILENAME_TWO_INT_STR:
58328 + dentry = va_arg(ap, struct dentry *);
58329 + mnt = va_arg(ap, struct vfsmount *);
58330 + num1 = va_arg(ap, int);
58331 + num2 = va_arg(ap, int);
58332 + str1 = va_arg(ap, char *);
58333 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58334 + break;
58335 + case GR_TEXTREL:
58336 + file = va_arg(ap, struct file *);
58337 + ulong1 = va_arg(ap, unsigned long);
58338 + ulong2 = va_arg(ap, unsigned long);
58339 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58340 + break;
58341 + case GR_PTRACE:
58342 + task = va_arg(ap, struct task_struct *);
58343 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58344 + break;
58345 + case GR_RESOURCE:
58346 + task = va_arg(ap, struct task_struct *);
58347 + cred = __task_cred(task);
58348 + pcred = __task_cred(task->real_parent);
58349 + ulong1 = va_arg(ap, unsigned long);
58350 + str1 = va_arg(ap, char *);
58351 + ulong2 = va_arg(ap, unsigned long);
58352 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58353 + break;
58354 + case GR_CAP:
58355 + task = va_arg(ap, struct task_struct *);
58356 + cred = __task_cred(task);
58357 + pcred = __task_cred(task->real_parent);
58358 + str1 = va_arg(ap, char *);
58359 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58360 + break;
58361 + case GR_SIG:
58362 + str1 = va_arg(ap, char *);
58363 + voidptr = va_arg(ap, void *);
58364 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58365 + break;
58366 + case GR_SIG2:
58367 + task = va_arg(ap, struct task_struct *);
58368 + cred = __task_cred(task);
58369 + pcred = __task_cred(task->real_parent);
58370 + num1 = va_arg(ap, int);
58371 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58372 + break;
58373 + case GR_CRASH1:
58374 + task = va_arg(ap, struct task_struct *);
58375 + cred = __task_cred(task);
58376 + pcred = __task_cred(task->real_parent);
58377 + ulong1 = va_arg(ap, unsigned long);
58378 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58379 + break;
58380 + case GR_CRASH2:
58381 + task = va_arg(ap, struct task_struct *);
58382 + cred = __task_cred(task);
58383 + pcred = __task_cred(task->real_parent);
58384 + ulong1 = va_arg(ap, unsigned long);
58385 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58386 + break;
58387 + case GR_RWXMAP:
58388 + file = va_arg(ap, struct file *);
58389 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58390 + break;
58391 + case GR_PSACCT:
58392 + {
58393 + unsigned int wday, cday;
58394 + __u8 whr, chr;
58395 + __u8 wmin, cmin;
58396 + __u8 wsec, csec;
58397 + char cur_tty[64] = { 0 };
58398 + char parent_tty[64] = { 0 };
58399 +
58400 + task = va_arg(ap, struct task_struct *);
58401 + wday = va_arg(ap, unsigned int);
58402 + cday = va_arg(ap, unsigned int);
58403 + whr = va_arg(ap, int);
58404 + chr = va_arg(ap, int);
58405 + wmin = va_arg(ap, int);
58406 + cmin = va_arg(ap, int);
58407 + wsec = va_arg(ap, int);
58408 + csec = va_arg(ap, int);
58409 + ulong1 = va_arg(ap, unsigned long);
58410 + cred = __task_cred(task);
58411 + pcred = __task_cred(task->real_parent);
58412 +
58413 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58414 + }
58415 + break;
58416 + default:
58417 + gr_log_middle(audit, msg, ap);
58418 + }
58419 + va_end(ap);
58420 + // these don't need DEFAULTSECARGS printed on the end
58421 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58422 + gr_log_end(audit, 0);
58423 + else
58424 + gr_log_end(audit, 1);
58425 + END_LOCKS(audit);
58426 +}
58427 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58428 new file mode 100644
58429 index 0000000..6c0416b
58430 --- /dev/null
58431 +++ b/grsecurity/grsec_mem.c
58432 @@ -0,0 +1,33 @@
58433 +#include <linux/kernel.h>
58434 +#include <linux/sched.h>
58435 +#include <linux/mm.h>
58436 +#include <linux/mman.h>
58437 +#include <linux/grinternal.h>
58438 +
58439 +void
58440 +gr_handle_ioperm(void)
58441 +{
58442 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58443 + return;
58444 +}
58445 +
58446 +void
58447 +gr_handle_iopl(void)
58448 +{
58449 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58450 + return;
58451 +}
58452 +
58453 +void
58454 +gr_handle_mem_readwrite(u64 from, u64 to)
58455 +{
58456 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58457 + return;
58458 +}
58459 +
58460 +void
58461 +gr_handle_vm86(void)
58462 +{
58463 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58464 + return;
58465 +}
58466 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58467 new file mode 100644
58468 index 0000000..2131422
58469 --- /dev/null
58470 +++ b/grsecurity/grsec_mount.c
58471 @@ -0,0 +1,62 @@
58472 +#include <linux/kernel.h>
58473 +#include <linux/sched.h>
58474 +#include <linux/mount.h>
58475 +#include <linux/grsecurity.h>
58476 +#include <linux/grinternal.h>
58477 +
58478 +void
58479 +gr_log_remount(const char *devname, const int retval)
58480 +{
58481 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58482 + if (grsec_enable_mount && (retval >= 0))
58483 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58484 +#endif
58485 + return;
58486 +}
58487 +
58488 +void
58489 +gr_log_unmount(const char *devname, const int retval)
58490 +{
58491 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58492 + if (grsec_enable_mount && (retval >= 0))
58493 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58494 +#endif
58495 + return;
58496 +}
58497 +
58498 +void
58499 +gr_log_mount(const char *from, const char *to, const int retval)
58500 +{
58501 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58502 + if (grsec_enable_mount && (retval >= 0))
58503 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58504 +#endif
58505 + return;
58506 +}
58507 +
58508 +int
58509 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58510 +{
58511 +#ifdef CONFIG_GRKERNSEC_ROFS
58512 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58513 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58514 + return -EPERM;
58515 + } else
58516 + return 0;
58517 +#endif
58518 + return 0;
58519 +}
58520 +
58521 +int
58522 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58523 +{
58524 +#ifdef CONFIG_GRKERNSEC_ROFS
58525 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58526 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58527 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58528 + return -EPERM;
58529 + } else
58530 + return 0;
58531 +#endif
58532 + return 0;
58533 +}
58534 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58535 new file mode 100644
58536 index 0000000..a3b12a0
58537 --- /dev/null
58538 +++ b/grsecurity/grsec_pax.c
58539 @@ -0,0 +1,36 @@
58540 +#include <linux/kernel.h>
58541 +#include <linux/sched.h>
58542 +#include <linux/mm.h>
58543 +#include <linux/file.h>
58544 +#include <linux/grinternal.h>
58545 +#include <linux/grsecurity.h>
58546 +
58547 +void
58548 +gr_log_textrel(struct vm_area_struct * vma)
58549 +{
58550 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58551 + if (grsec_enable_audit_textrel)
58552 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58553 +#endif
58554 + return;
58555 +}
58556 +
58557 +void
58558 +gr_log_rwxmmap(struct file *file)
58559 +{
58560 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58561 + if (grsec_enable_log_rwxmaps)
58562 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58563 +#endif
58564 + return;
58565 +}
58566 +
58567 +void
58568 +gr_log_rwxmprotect(struct file *file)
58569 +{
58570 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58571 + if (grsec_enable_log_rwxmaps)
58572 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58573 +#endif
58574 + return;
58575 +}
58576 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58577 new file mode 100644
58578 index 0000000..472c1d6
58579 --- /dev/null
58580 +++ b/grsecurity/grsec_ptrace.c
58581 @@ -0,0 +1,14 @@
58582 +#include <linux/kernel.h>
58583 +#include <linux/sched.h>
58584 +#include <linux/grinternal.h>
58585 +#include <linux/grsecurity.h>
58586 +
58587 +void
58588 +gr_audit_ptrace(struct task_struct *task)
58589 +{
58590 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58591 + if (grsec_enable_audit_ptrace)
58592 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58593 +#endif
58594 + return;
58595 +}
58596 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58597 new file mode 100644
58598 index 0000000..cf090b3
58599 --- /dev/null
58600 +++ b/grsecurity/grsec_sig.c
58601 @@ -0,0 +1,206 @@
58602 +#include <linux/kernel.h>
58603 +#include <linux/sched.h>
58604 +#include <linux/delay.h>
58605 +#include <linux/grsecurity.h>
58606 +#include <linux/grinternal.h>
58607 +#include <linux/hardirq.h>
58608 +
58609 +char *signames[] = {
58610 + [SIGSEGV] = "Segmentation fault",
58611 + [SIGILL] = "Illegal instruction",
58612 + [SIGABRT] = "Abort",
58613 + [SIGBUS] = "Invalid alignment/Bus error"
58614 +};
58615 +
58616 +void
58617 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58618 +{
58619 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58620 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58621 + (sig == SIGABRT) || (sig == SIGBUS))) {
58622 + if (t->pid == current->pid) {
58623 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58624 + } else {
58625 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58626 + }
58627 + }
58628 +#endif
58629 + return;
58630 +}
58631 +
58632 +int
58633 +gr_handle_signal(const struct task_struct *p, const int sig)
58634 +{
58635 +#ifdef CONFIG_GRKERNSEC
58636 + if (current->pid > 1 && gr_check_protected_task(p)) {
58637 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58638 + return -EPERM;
58639 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58640 + return -EPERM;
58641 + }
58642 +#endif
58643 + return 0;
58644 +}
58645 +
58646 +#ifdef CONFIG_GRKERNSEC
58647 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58648 +
58649 +int gr_fake_force_sig(int sig, struct task_struct *t)
58650 +{
58651 + unsigned long int flags;
58652 + int ret, blocked, ignored;
58653 + struct k_sigaction *action;
58654 +
58655 + spin_lock_irqsave(&t->sighand->siglock, flags);
58656 + action = &t->sighand->action[sig-1];
58657 + ignored = action->sa.sa_handler == SIG_IGN;
58658 + blocked = sigismember(&t->blocked, sig);
58659 + if (blocked || ignored) {
58660 + action->sa.sa_handler = SIG_DFL;
58661 + if (blocked) {
58662 + sigdelset(&t->blocked, sig);
58663 + recalc_sigpending_and_wake(t);
58664 + }
58665 + }
58666 + if (action->sa.sa_handler == SIG_DFL)
58667 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58668 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58669 +
58670 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58671 +
58672 + return ret;
58673 +}
58674 +#endif
58675 +
58676 +#ifdef CONFIG_GRKERNSEC_BRUTE
58677 +#define GR_USER_BAN_TIME (15 * 60)
58678 +
58679 +static int __get_dumpable(unsigned long mm_flags)
58680 +{
58681 + int ret;
58682 +
58683 + ret = mm_flags & MMF_DUMPABLE_MASK;
58684 + return (ret >= 2) ? 2 : ret;
58685 +}
58686 +#endif
58687 +
58688 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58689 +{
58690 +#ifdef CONFIG_GRKERNSEC_BRUTE
58691 + uid_t uid = 0;
58692 +
58693 + if (!grsec_enable_brute)
58694 + return;
58695 +
58696 + rcu_read_lock();
58697 + read_lock(&tasklist_lock);
58698 + read_lock(&grsec_exec_file_lock);
58699 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58700 + p->real_parent->brute = 1;
58701 + else {
58702 + const struct cred *cred = __task_cred(p), *cred2;
58703 + struct task_struct *tsk, *tsk2;
58704 +
58705 + if (!__get_dumpable(mm_flags) && cred->uid) {
58706 + struct user_struct *user;
58707 +
58708 + uid = cred->uid;
58709 +
58710 + /* this is put upon execution past expiration */
58711 + user = find_user(uid);
58712 + if (user == NULL)
58713 + goto unlock;
58714 + user->banned = 1;
58715 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58716 + if (user->ban_expires == ~0UL)
58717 + user->ban_expires--;
58718 +
58719 + do_each_thread(tsk2, tsk) {
58720 + cred2 = __task_cred(tsk);
58721 + if (tsk != p && cred2->uid == uid)
58722 + gr_fake_force_sig(SIGKILL, tsk);
58723 + } while_each_thread(tsk2, tsk);
58724 + }
58725 + }
58726 +unlock:
58727 + read_unlock(&grsec_exec_file_lock);
58728 + read_unlock(&tasklist_lock);
58729 + rcu_read_unlock();
58730 +
58731 + if (uid)
58732 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58733 +
58734 +#endif
58735 + return;
58736 +}
58737 +
58738 +void gr_handle_brute_check(void)
58739 +{
58740 +#ifdef CONFIG_GRKERNSEC_BRUTE
58741 + if (current->brute)
58742 + msleep(30 * 1000);
58743 +#endif
58744 + return;
58745 +}
58746 +
58747 +void gr_handle_kernel_exploit(void)
58748 +{
58749 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58750 + const struct cred *cred;
58751 + struct task_struct *tsk, *tsk2;
58752 + struct user_struct *user;
58753 + uid_t uid;
58754 +
58755 + if (in_irq() || in_serving_softirq() || in_nmi())
58756 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58757 +
58758 + uid = current_uid();
58759 +
58760 + if (uid == 0)
58761 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
58762 + else {
58763 + /* kill all the processes of this user, hold a reference
58764 + to their creds struct, and prevent them from creating
58765 + another process until system reset
58766 + */
58767 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58768 + /* we intentionally leak this ref */
58769 + user = get_uid(current->cred->user);
58770 + if (user) {
58771 + user->banned = 1;
58772 + user->ban_expires = ~0UL;
58773 + }
58774 +
58775 + read_lock(&tasklist_lock);
58776 + do_each_thread(tsk2, tsk) {
58777 + cred = __task_cred(tsk);
58778 + if (cred->uid == uid)
58779 + gr_fake_force_sig(SIGKILL, tsk);
58780 + } while_each_thread(tsk2, tsk);
58781 + read_unlock(&tasklist_lock);
58782 + }
58783 +#endif
58784 +}
58785 +
58786 +int __gr_process_user_ban(struct user_struct *user)
58787 +{
58788 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58789 + if (unlikely(user->banned)) {
58790 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58791 + user->banned = 0;
58792 + user->ban_expires = 0;
58793 + free_uid(user);
58794 + } else
58795 + return -EPERM;
58796 + }
58797 +#endif
58798 + return 0;
58799 +}
58800 +
58801 +int gr_process_user_ban(void)
58802 +{
58803 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58804 + return __gr_process_user_ban(current->cred->user);
58805 +#endif
58806 + return 0;
58807 +}
58808 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58809 new file mode 100644
58810 index 0000000..4030d57
58811 --- /dev/null
58812 +++ b/grsecurity/grsec_sock.c
58813 @@ -0,0 +1,244 @@
58814 +#include <linux/kernel.h>
58815 +#include <linux/module.h>
58816 +#include <linux/sched.h>
58817 +#include <linux/file.h>
58818 +#include <linux/net.h>
58819 +#include <linux/in.h>
58820 +#include <linux/ip.h>
58821 +#include <net/sock.h>
58822 +#include <net/inet_sock.h>
58823 +#include <linux/grsecurity.h>
58824 +#include <linux/grinternal.h>
58825 +#include <linux/gracl.h>
58826 +
58827 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58828 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58829 +
58830 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
58831 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
58832 +
58833 +#ifdef CONFIG_UNIX_MODULE
58834 +EXPORT_SYMBOL(gr_acl_handle_unix);
58835 +EXPORT_SYMBOL(gr_acl_handle_mknod);
58836 +EXPORT_SYMBOL(gr_handle_chroot_unix);
58837 +EXPORT_SYMBOL(gr_handle_create);
58838 +#endif
58839 +
58840 +#ifdef CONFIG_GRKERNSEC
58841 +#define gr_conn_table_size 32749
58842 +struct conn_table_entry {
58843 + struct conn_table_entry *next;
58844 + struct signal_struct *sig;
58845 +};
58846 +
58847 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58848 +DEFINE_SPINLOCK(gr_conn_table_lock);
58849 +
58850 +extern const char * gr_socktype_to_name(unsigned char type);
58851 +extern const char * gr_proto_to_name(unsigned char proto);
58852 +extern const char * gr_sockfamily_to_name(unsigned char family);
58853 +
58854 +static __inline__ int
58855 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58856 +{
58857 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58858 +}
58859 +
58860 +static __inline__ int
58861 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58862 + __u16 sport, __u16 dport)
58863 +{
58864 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58865 + sig->gr_sport == sport && sig->gr_dport == dport))
58866 + return 1;
58867 + else
58868 + return 0;
58869 +}
58870 +
58871 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58872 +{
58873 + struct conn_table_entry **match;
58874 + unsigned int index;
58875 +
58876 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58877 + sig->gr_sport, sig->gr_dport,
58878 + gr_conn_table_size);
58879 +
58880 + newent->sig = sig;
58881 +
58882 + match = &gr_conn_table[index];
58883 + newent->next = *match;
58884 + *match = newent;
58885 +
58886 + return;
58887 +}
58888 +
58889 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58890 +{
58891 + struct conn_table_entry *match, *last = NULL;
58892 + unsigned int index;
58893 +
58894 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58895 + sig->gr_sport, sig->gr_dport,
58896 + gr_conn_table_size);
58897 +
58898 + match = gr_conn_table[index];
58899 + while (match && !conn_match(match->sig,
58900 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58901 + sig->gr_dport)) {
58902 + last = match;
58903 + match = match->next;
58904 + }
58905 +
58906 + if (match) {
58907 + if (last)
58908 + last->next = match->next;
58909 + else
58910 + gr_conn_table[index] = NULL;
58911 + kfree(match);
58912 + }
58913 +
58914 + return;
58915 +}
58916 +
58917 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58918 + __u16 sport, __u16 dport)
58919 +{
58920 + struct conn_table_entry *match;
58921 + unsigned int index;
58922 +
58923 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58924 +
58925 + match = gr_conn_table[index];
58926 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58927 + match = match->next;
58928 +
58929 + if (match)
58930 + return match->sig;
58931 + else
58932 + return NULL;
58933 +}
58934 +
58935 +#endif
58936 +
58937 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58938 +{
58939 +#ifdef CONFIG_GRKERNSEC
58940 + struct signal_struct *sig = task->signal;
58941 + struct conn_table_entry *newent;
58942 +
58943 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58944 + if (newent == NULL)
58945 + return;
58946 + /* no bh lock needed since we are called with bh disabled */
58947 + spin_lock(&gr_conn_table_lock);
58948 + gr_del_task_from_ip_table_nolock(sig);
58949 + sig->gr_saddr = inet->inet_rcv_saddr;
58950 + sig->gr_daddr = inet->inet_daddr;
58951 + sig->gr_sport = inet->inet_sport;
58952 + sig->gr_dport = inet->inet_dport;
58953 + gr_add_to_task_ip_table_nolock(sig, newent);
58954 + spin_unlock(&gr_conn_table_lock);
58955 +#endif
58956 + return;
58957 +}
58958 +
58959 +void gr_del_task_from_ip_table(struct task_struct *task)
58960 +{
58961 +#ifdef CONFIG_GRKERNSEC
58962 + spin_lock_bh(&gr_conn_table_lock);
58963 + gr_del_task_from_ip_table_nolock(task->signal);
58964 + spin_unlock_bh(&gr_conn_table_lock);
58965 +#endif
58966 + return;
58967 +}
58968 +
58969 +void
58970 +gr_attach_curr_ip(const struct sock *sk)
58971 +{
58972 +#ifdef CONFIG_GRKERNSEC
58973 + struct signal_struct *p, *set;
58974 + const struct inet_sock *inet = inet_sk(sk);
58975 +
58976 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58977 + return;
58978 +
58979 + set = current->signal;
58980 +
58981 + spin_lock_bh(&gr_conn_table_lock);
58982 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58983 + inet->inet_dport, inet->inet_sport);
58984 + if (unlikely(p != NULL)) {
58985 + set->curr_ip = p->curr_ip;
58986 + set->used_accept = 1;
58987 + gr_del_task_from_ip_table_nolock(p);
58988 + spin_unlock_bh(&gr_conn_table_lock);
58989 + return;
58990 + }
58991 + spin_unlock_bh(&gr_conn_table_lock);
58992 +
58993 + set->curr_ip = inet->inet_daddr;
58994 + set->used_accept = 1;
58995 +#endif
58996 + return;
58997 +}
58998 +
58999 +int
59000 +gr_handle_sock_all(const int family, const int type, const int protocol)
59001 +{
59002 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59003 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
59004 + (family != AF_UNIX)) {
59005 + if (family == AF_INET)
59006 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59007 + else
59008 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
59009 + return -EACCES;
59010 + }
59011 +#endif
59012 + return 0;
59013 +}
59014 +
59015 +int
59016 +gr_handle_sock_server(const struct sockaddr *sck)
59017 +{
59018 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59019 + if (grsec_enable_socket_server &&
59020 + in_group_p(grsec_socket_server_gid) &&
59021 + sck && (sck->sa_family != AF_UNIX) &&
59022 + (sck->sa_family != AF_LOCAL)) {
59023 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59024 + return -EACCES;
59025 + }
59026 +#endif
59027 + return 0;
59028 +}
59029 +
59030 +int
59031 +gr_handle_sock_server_other(const struct sock *sck)
59032 +{
59033 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59034 + if (grsec_enable_socket_server &&
59035 + in_group_p(grsec_socket_server_gid) &&
59036 + sck && (sck->sk_family != AF_UNIX) &&
59037 + (sck->sk_family != AF_LOCAL)) {
59038 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59039 + return -EACCES;
59040 + }
59041 +#endif
59042 + return 0;
59043 +}
59044 +
59045 +int
59046 +gr_handle_sock_client(const struct sockaddr *sck)
59047 +{
59048 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59049 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59050 + sck && (sck->sa_family != AF_UNIX) &&
59051 + (sck->sa_family != AF_LOCAL)) {
59052 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59053 + return -EACCES;
59054 + }
59055 +#endif
59056 + return 0;
59057 +}
59058 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59059 new file mode 100644
59060 index 0000000..bceef2f
59061 --- /dev/null
59062 +++ b/grsecurity/grsec_sysctl.c
59063 @@ -0,0 +1,442 @@
59064 +#include <linux/kernel.h>
59065 +#include <linux/sched.h>
59066 +#include <linux/sysctl.h>
59067 +#include <linux/grsecurity.h>
59068 +#include <linux/grinternal.h>
59069 +
59070 +int
59071 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59072 +{
59073 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59074 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59075 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59076 + return -EACCES;
59077 + }
59078 +#endif
59079 + return 0;
59080 +}
59081 +
59082 +#ifdef CONFIG_GRKERNSEC_ROFS
59083 +static int __maybe_unused one = 1;
59084 +#endif
59085 +
59086 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59087 +struct ctl_table grsecurity_table[] = {
59088 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59089 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59090 +#ifdef CONFIG_GRKERNSEC_IO
59091 + {
59092 + .procname = "disable_priv_io",
59093 + .data = &grsec_disable_privio,
59094 + .maxlen = sizeof(int),
59095 + .mode = 0600,
59096 + .proc_handler = &proc_dointvec,
59097 + },
59098 +#endif
59099 +#endif
59100 +#ifdef CONFIG_GRKERNSEC_LINK
59101 + {
59102 + .procname = "linking_restrictions",
59103 + .data = &grsec_enable_link,
59104 + .maxlen = sizeof(int),
59105 + .mode = 0600,
59106 + .proc_handler = &proc_dointvec,
59107 + },
59108 +#endif
59109 +#ifdef CONFIG_GRKERNSEC_BRUTE
59110 + {
59111 + .procname = "deter_bruteforce",
59112 + .data = &grsec_enable_brute,
59113 + .maxlen = sizeof(int),
59114 + .mode = 0600,
59115 + .proc_handler = &proc_dointvec,
59116 + },
59117 +#endif
59118 +#ifdef CONFIG_GRKERNSEC_FIFO
59119 + {
59120 + .procname = "fifo_restrictions",
59121 + .data = &grsec_enable_fifo,
59122 + .maxlen = sizeof(int),
59123 + .mode = 0600,
59124 + .proc_handler = &proc_dointvec,
59125 + },
59126 +#endif
59127 +#ifdef CONFIG_GRKERNSEC_SETXID
59128 + {
59129 + .procname = "consistent_setxid",
59130 + .data = &grsec_enable_setxid,
59131 + .maxlen = sizeof(int),
59132 + .mode = 0600,
59133 + .proc_handler = &proc_dointvec,
59134 + },
59135 +#endif
59136 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59137 + {
59138 + .procname = "ip_blackhole",
59139 + .data = &grsec_enable_blackhole,
59140 + .maxlen = sizeof(int),
59141 + .mode = 0600,
59142 + .proc_handler = &proc_dointvec,
59143 + },
59144 + {
59145 + .procname = "lastack_retries",
59146 + .data = &grsec_lastack_retries,
59147 + .maxlen = sizeof(int),
59148 + .mode = 0600,
59149 + .proc_handler = &proc_dointvec,
59150 + },
59151 +#endif
59152 +#ifdef CONFIG_GRKERNSEC_EXECLOG
59153 + {
59154 + .procname = "exec_logging",
59155 + .data = &grsec_enable_execlog,
59156 + .maxlen = sizeof(int),
59157 + .mode = 0600,
59158 + .proc_handler = &proc_dointvec,
59159 + },
59160 +#endif
59161 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59162 + {
59163 + .procname = "rwxmap_logging",
59164 + .data = &grsec_enable_log_rwxmaps,
59165 + .maxlen = sizeof(int),
59166 + .mode = 0600,
59167 + .proc_handler = &proc_dointvec,
59168 + },
59169 +#endif
59170 +#ifdef CONFIG_GRKERNSEC_SIGNAL
59171 + {
59172 + .procname = "signal_logging",
59173 + .data = &grsec_enable_signal,
59174 + .maxlen = sizeof(int),
59175 + .mode = 0600,
59176 + .proc_handler = &proc_dointvec,
59177 + },
59178 +#endif
59179 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
59180 + {
59181 + .procname = "forkfail_logging",
59182 + .data = &grsec_enable_forkfail,
59183 + .maxlen = sizeof(int),
59184 + .mode = 0600,
59185 + .proc_handler = &proc_dointvec,
59186 + },
59187 +#endif
59188 +#ifdef CONFIG_GRKERNSEC_TIME
59189 + {
59190 + .procname = "timechange_logging",
59191 + .data = &grsec_enable_time,
59192 + .maxlen = sizeof(int),
59193 + .mode = 0600,
59194 + .proc_handler = &proc_dointvec,
59195 + },
59196 +#endif
59197 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59198 + {
59199 + .procname = "chroot_deny_shmat",
59200 + .data = &grsec_enable_chroot_shmat,
59201 + .maxlen = sizeof(int),
59202 + .mode = 0600,
59203 + .proc_handler = &proc_dointvec,
59204 + },
59205 +#endif
59206 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59207 + {
59208 + .procname = "chroot_deny_unix",
59209 + .data = &grsec_enable_chroot_unix,
59210 + .maxlen = sizeof(int),
59211 + .mode = 0600,
59212 + .proc_handler = &proc_dointvec,
59213 + },
59214 +#endif
59215 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59216 + {
59217 + .procname = "chroot_deny_mount",
59218 + .data = &grsec_enable_chroot_mount,
59219 + .maxlen = sizeof(int),
59220 + .mode = 0600,
59221 + .proc_handler = &proc_dointvec,
59222 + },
59223 +#endif
59224 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59225 + {
59226 + .procname = "chroot_deny_fchdir",
59227 + .data = &grsec_enable_chroot_fchdir,
59228 + .maxlen = sizeof(int),
59229 + .mode = 0600,
59230 + .proc_handler = &proc_dointvec,
59231 + },
59232 +#endif
59233 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59234 + {
59235 + .procname = "chroot_deny_chroot",
59236 + .data = &grsec_enable_chroot_double,
59237 + .maxlen = sizeof(int),
59238 + .mode = 0600,
59239 + .proc_handler = &proc_dointvec,
59240 + },
59241 +#endif
59242 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59243 + {
59244 + .procname = "chroot_deny_pivot",
59245 + .data = &grsec_enable_chroot_pivot,
59246 + .maxlen = sizeof(int),
59247 + .mode = 0600,
59248 + .proc_handler = &proc_dointvec,
59249 + },
59250 +#endif
59251 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59252 + {
59253 + .procname = "chroot_enforce_chdir",
59254 + .data = &grsec_enable_chroot_chdir,
59255 + .maxlen = sizeof(int),
59256 + .mode = 0600,
59257 + .proc_handler = &proc_dointvec,
59258 + },
59259 +#endif
59260 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59261 + {
59262 + .procname = "chroot_deny_chmod",
59263 + .data = &grsec_enable_chroot_chmod,
59264 + .maxlen = sizeof(int),
59265 + .mode = 0600,
59266 + .proc_handler = &proc_dointvec,
59267 + },
59268 +#endif
59269 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59270 + {
59271 + .procname = "chroot_deny_mknod",
59272 + .data = &grsec_enable_chroot_mknod,
59273 + .maxlen = sizeof(int),
59274 + .mode = 0600,
59275 + .proc_handler = &proc_dointvec,
59276 + },
59277 +#endif
59278 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59279 + {
59280 + .procname = "chroot_restrict_nice",
59281 + .data = &grsec_enable_chroot_nice,
59282 + .maxlen = sizeof(int),
59283 + .mode = 0600,
59284 + .proc_handler = &proc_dointvec,
59285 + },
59286 +#endif
59287 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59288 + {
59289 + .procname = "chroot_execlog",
59290 + .data = &grsec_enable_chroot_execlog,
59291 + .maxlen = sizeof(int),
59292 + .mode = 0600,
59293 + .proc_handler = &proc_dointvec,
59294 + },
59295 +#endif
59296 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59297 + {
59298 + .procname = "chroot_caps",
59299 + .data = &grsec_enable_chroot_caps,
59300 + .maxlen = sizeof(int),
59301 + .mode = 0600,
59302 + .proc_handler = &proc_dointvec,
59303 + },
59304 +#endif
59305 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59306 + {
59307 + .procname = "chroot_deny_sysctl",
59308 + .data = &grsec_enable_chroot_sysctl,
59309 + .maxlen = sizeof(int),
59310 + .mode = 0600,
59311 + .proc_handler = &proc_dointvec,
59312 + },
59313 +#endif
59314 +#ifdef CONFIG_GRKERNSEC_TPE
59315 + {
59316 + .procname = "tpe",
59317 + .data = &grsec_enable_tpe,
59318 + .maxlen = sizeof(int),
59319 + .mode = 0600,
59320 + .proc_handler = &proc_dointvec,
59321 + },
59322 + {
59323 + .procname = "tpe_gid",
59324 + .data = &grsec_tpe_gid,
59325 + .maxlen = sizeof(int),
59326 + .mode = 0600,
59327 + .proc_handler = &proc_dointvec,
59328 + },
59329 +#endif
59330 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59331 + {
59332 + .procname = "tpe_invert",
59333 + .data = &grsec_enable_tpe_invert,
59334 + .maxlen = sizeof(int),
59335 + .mode = 0600,
59336 + .proc_handler = &proc_dointvec,
59337 + },
59338 +#endif
59339 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59340 + {
59341 + .procname = "tpe_restrict_all",
59342 + .data = &grsec_enable_tpe_all,
59343 + .maxlen = sizeof(int),
59344 + .mode = 0600,
59345 + .proc_handler = &proc_dointvec,
59346 + },
59347 +#endif
59348 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59349 + {
59350 + .procname = "socket_all",
59351 + .data = &grsec_enable_socket_all,
59352 + .maxlen = sizeof(int),
59353 + .mode = 0600,
59354 + .proc_handler = &proc_dointvec,
59355 + },
59356 + {
59357 + .procname = "socket_all_gid",
59358 + .data = &grsec_socket_all_gid,
59359 + .maxlen = sizeof(int),
59360 + .mode = 0600,
59361 + .proc_handler = &proc_dointvec,
59362 + },
59363 +#endif
59364 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59365 + {
59366 + .procname = "socket_client",
59367 + .data = &grsec_enable_socket_client,
59368 + .maxlen = sizeof(int),
59369 + .mode = 0600,
59370 + .proc_handler = &proc_dointvec,
59371 + },
59372 + {
59373 + .procname = "socket_client_gid",
59374 + .data = &grsec_socket_client_gid,
59375 + .maxlen = sizeof(int),
59376 + .mode = 0600,
59377 + .proc_handler = &proc_dointvec,
59378 + },
59379 +#endif
59380 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59381 + {
59382 + .procname = "socket_server",
59383 + .data = &grsec_enable_socket_server,
59384 + .maxlen = sizeof(int),
59385 + .mode = 0600,
59386 + .proc_handler = &proc_dointvec,
59387 + },
59388 + {
59389 + .procname = "socket_server_gid",
59390 + .data = &grsec_socket_server_gid,
59391 + .maxlen = sizeof(int),
59392 + .mode = 0600,
59393 + .proc_handler = &proc_dointvec,
59394 + },
59395 +#endif
59396 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59397 + {
59398 + .procname = "audit_group",
59399 + .data = &grsec_enable_group,
59400 + .maxlen = sizeof(int),
59401 + .mode = 0600,
59402 + .proc_handler = &proc_dointvec,
59403 + },
59404 + {
59405 + .procname = "audit_gid",
59406 + .data = &grsec_audit_gid,
59407 + .maxlen = sizeof(int),
59408 + .mode = 0600,
59409 + .proc_handler = &proc_dointvec,
59410 + },
59411 +#endif
59412 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59413 + {
59414 + .procname = "audit_chdir",
59415 + .data = &grsec_enable_chdir,
59416 + .maxlen = sizeof(int),
59417 + .mode = 0600,
59418 + .proc_handler = &proc_dointvec,
59419 + },
59420 +#endif
59421 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59422 + {
59423 + .procname = "audit_mount",
59424 + .data = &grsec_enable_mount,
59425 + .maxlen = sizeof(int),
59426 + .mode = 0600,
59427 + .proc_handler = &proc_dointvec,
59428 + },
59429 +#endif
59430 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59431 + {
59432 + .procname = "audit_textrel",
59433 + .data = &grsec_enable_audit_textrel,
59434 + .maxlen = sizeof(int),
59435 + .mode = 0600,
59436 + .proc_handler = &proc_dointvec,
59437 + },
59438 +#endif
59439 +#ifdef CONFIG_GRKERNSEC_DMESG
59440 + {
59441 + .procname = "dmesg",
59442 + .data = &grsec_enable_dmesg,
59443 + .maxlen = sizeof(int),
59444 + .mode = 0600,
59445 + .proc_handler = &proc_dointvec,
59446 + },
59447 +#endif
59448 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59449 + {
59450 + .procname = "chroot_findtask",
59451 + .data = &grsec_enable_chroot_findtask,
59452 + .maxlen = sizeof(int),
59453 + .mode = 0600,
59454 + .proc_handler = &proc_dointvec,
59455 + },
59456 +#endif
59457 +#ifdef CONFIG_GRKERNSEC_RESLOG
59458 + {
59459 + .procname = "resource_logging",
59460 + .data = &grsec_resource_logging,
59461 + .maxlen = sizeof(int),
59462 + .mode = 0600,
59463 + .proc_handler = &proc_dointvec,
59464 + },
59465 +#endif
59466 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59467 + {
59468 + .procname = "audit_ptrace",
59469 + .data = &grsec_enable_audit_ptrace,
59470 + .maxlen = sizeof(int),
59471 + .mode = 0600,
59472 + .proc_handler = &proc_dointvec,
59473 + },
59474 +#endif
59475 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59476 + {
59477 + .procname = "harden_ptrace",
59478 + .data = &grsec_enable_harden_ptrace,
59479 + .maxlen = sizeof(int),
59480 + .mode = 0600,
59481 + .proc_handler = &proc_dointvec,
59482 + },
59483 +#endif
59484 + {
59485 + .procname = "grsec_lock",
59486 + .data = &grsec_lock,
59487 + .maxlen = sizeof(int),
59488 + .mode = 0600,
59489 + .proc_handler = &proc_dointvec,
59490 + },
59491 +#endif
59492 +#ifdef CONFIG_GRKERNSEC_ROFS
59493 + {
59494 + .procname = "romount_protect",
59495 + .data = &grsec_enable_rofs,
59496 + .maxlen = sizeof(int),
59497 + .mode = 0600,
59498 + .proc_handler = &proc_dointvec_minmax,
59499 + .extra1 = &one,
59500 + .extra2 = &one,
59501 + },
59502 +#endif
59503 + { }
59504 +};
59505 +#endif
59506 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59507 new file mode 100644
59508 index 0000000..0dc13c3
59509 --- /dev/null
59510 +++ b/grsecurity/grsec_time.c
59511 @@ -0,0 +1,16 @@
59512 +#include <linux/kernel.h>
59513 +#include <linux/sched.h>
59514 +#include <linux/grinternal.h>
59515 +#include <linux/module.h>
59516 +
59517 +void
59518 +gr_log_timechange(void)
59519 +{
59520 +#ifdef CONFIG_GRKERNSEC_TIME
59521 + if (grsec_enable_time)
59522 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59523 +#endif
59524 + return;
59525 +}
59526 +
59527 +EXPORT_SYMBOL(gr_log_timechange);
59528 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59529 new file mode 100644
59530 index 0000000..4a78774
59531 --- /dev/null
59532 +++ b/grsecurity/grsec_tpe.c
59533 @@ -0,0 +1,39 @@
59534 +#include <linux/kernel.h>
59535 +#include <linux/sched.h>
59536 +#include <linux/file.h>
59537 +#include <linux/fs.h>
59538 +#include <linux/grinternal.h>
59539 +
59540 +extern int gr_acl_tpe_check(void);
59541 +
59542 +int
59543 +gr_tpe_allow(const struct file *file)
59544 +{
59545 +#ifdef CONFIG_GRKERNSEC
59546 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59547 + const struct cred *cred = current_cred();
59548 +
59549 + if (cred->uid && ((grsec_enable_tpe &&
59550 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59551 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
59552 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
59553 +#else
59554 + in_group_p(grsec_tpe_gid)
59555 +#endif
59556 + ) || gr_acl_tpe_check()) &&
59557 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
59558 + (inode->i_mode & S_IWOTH))))) {
59559 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59560 + return 0;
59561 + }
59562 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59563 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
59564 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
59565 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
59566 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59567 + return 0;
59568 + }
59569 +#endif
59570 +#endif
59571 + return 1;
59572 +}
59573 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59574 new file mode 100644
59575 index 0000000..9f7b1ac
59576 --- /dev/null
59577 +++ b/grsecurity/grsum.c
59578 @@ -0,0 +1,61 @@
59579 +#include <linux/err.h>
59580 +#include <linux/kernel.h>
59581 +#include <linux/sched.h>
59582 +#include <linux/mm.h>
59583 +#include <linux/scatterlist.h>
59584 +#include <linux/crypto.h>
59585 +#include <linux/gracl.h>
59586 +
59587 +
59588 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59589 +#error "crypto and sha256 must be built into the kernel"
59590 +#endif
59591 +
59592 +int
59593 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59594 +{
59595 + char *p;
59596 + struct crypto_hash *tfm;
59597 + struct hash_desc desc;
59598 + struct scatterlist sg;
59599 + unsigned char temp_sum[GR_SHA_LEN];
59600 + volatile int retval = 0;
59601 + volatile int dummy = 0;
59602 + unsigned int i;
59603 +
59604 + sg_init_table(&sg, 1);
59605 +
59606 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59607 + if (IS_ERR(tfm)) {
59608 + /* should never happen, since sha256 should be built in */
59609 + return 1;
59610 + }
59611 +
59612 + desc.tfm = tfm;
59613 + desc.flags = 0;
59614 +
59615 + crypto_hash_init(&desc);
59616 +
59617 + p = salt;
59618 + sg_set_buf(&sg, p, GR_SALT_LEN);
59619 + crypto_hash_update(&desc, &sg, sg.length);
59620 +
59621 + p = entry->pw;
59622 + sg_set_buf(&sg, p, strlen(p));
59623 +
59624 + crypto_hash_update(&desc, &sg, sg.length);
59625 +
59626 + crypto_hash_final(&desc, temp_sum);
59627 +
59628 + memset(entry->pw, 0, GR_PW_LEN);
59629 +
59630 + for (i = 0; i < GR_SHA_LEN; i++)
59631 + if (sum[i] != temp_sum[i])
59632 + retval = 1;
59633 + else
59634 + dummy = 1; // waste a cycle
59635 +
59636 + crypto_free_hash(tfm);
59637 +
59638 + return retval;
59639 +}
59640 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59641 index 6cd5b64..f620d2d 100644
59642 --- a/include/acpi/acpi_bus.h
59643 +++ b/include/acpi/acpi_bus.h
59644 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59645 acpi_op_bind bind;
59646 acpi_op_unbind unbind;
59647 acpi_op_notify notify;
59648 -};
59649 +} __no_const;
59650
59651 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59652
59653 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59654 index b7babf0..71e4e74 100644
59655 --- a/include/asm-generic/atomic-long.h
59656 +++ b/include/asm-generic/atomic-long.h
59657 @@ -22,6 +22,12 @@
59658
59659 typedef atomic64_t atomic_long_t;
59660
59661 +#ifdef CONFIG_PAX_REFCOUNT
59662 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59663 +#else
59664 +typedef atomic64_t atomic_long_unchecked_t;
59665 +#endif
59666 +
59667 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59668
59669 static inline long atomic_long_read(atomic_long_t *l)
59670 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59671 return (long)atomic64_read(v);
59672 }
59673
59674 +#ifdef CONFIG_PAX_REFCOUNT
59675 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59676 +{
59677 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59678 +
59679 + return (long)atomic64_read_unchecked(v);
59680 +}
59681 +#endif
59682 +
59683 static inline void atomic_long_set(atomic_long_t *l, long i)
59684 {
59685 atomic64_t *v = (atomic64_t *)l;
59686 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59687 atomic64_set(v, i);
59688 }
59689
59690 +#ifdef CONFIG_PAX_REFCOUNT
59691 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59692 +{
59693 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59694 +
59695 + atomic64_set_unchecked(v, i);
59696 +}
59697 +#endif
59698 +
59699 static inline void atomic_long_inc(atomic_long_t *l)
59700 {
59701 atomic64_t *v = (atomic64_t *)l;
59702 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59703 atomic64_inc(v);
59704 }
59705
59706 +#ifdef CONFIG_PAX_REFCOUNT
59707 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59708 +{
59709 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59710 +
59711 + atomic64_inc_unchecked(v);
59712 +}
59713 +#endif
59714 +
59715 static inline void atomic_long_dec(atomic_long_t *l)
59716 {
59717 atomic64_t *v = (atomic64_t *)l;
59718 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59719 atomic64_dec(v);
59720 }
59721
59722 +#ifdef CONFIG_PAX_REFCOUNT
59723 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59724 +{
59725 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59726 +
59727 + atomic64_dec_unchecked(v);
59728 +}
59729 +#endif
59730 +
59731 static inline void atomic_long_add(long i, atomic_long_t *l)
59732 {
59733 atomic64_t *v = (atomic64_t *)l;
59734 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59735 atomic64_add(i, v);
59736 }
59737
59738 +#ifdef CONFIG_PAX_REFCOUNT
59739 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59740 +{
59741 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59742 +
59743 + atomic64_add_unchecked(i, v);
59744 +}
59745 +#endif
59746 +
59747 static inline void atomic_long_sub(long i, atomic_long_t *l)
59748 {
59749 atomic64_t *v = (atomic64_t *)l;
59750 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59751 atomic64_sub(i, v);
59752 }
59753
59754 +#ifdef CONFIG_PAX_REFCOUNT
59755 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59756 +{
59757 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59758 +
59759 + atomic64_sub_unchecked(i, v);
59760 +}
59761 +#endif
59762 +
59763 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59764 {
59765 atomic64_t *v = (atomic64_t *)l;
59766 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59767 return (long)atomic64_inc_return(v);
59768 }
59769
59770 +#ifdef CONFIG_PAX_REFCOUNT
59771 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59772 +{
59773 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59774 +
59775 + return (long)atomic64_inc_return_unchecked(v);
59776 +}
59777 +#endif
59778 +
59779 static inline long atomic_long_dec_return(atomic_long_t *l)
59780 {
59781 atomic64_t *v = (atomic64_t *)l;
59782 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59783
59784 typedef atomic_t atomic_long_t;
59785
59786 +#ifdef CONFIG_PAX_REFCOUNT
59787 +typedef atomic_unchecked_t atomic_long_unchecked_t;
59788 +#else
59789 +typedef atomic_t atomic_long_unchecked_t;
59790 +#endif
59791 +
59792 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59793 static inline long atomic_long_read(atomic_long_t *l)
59794 {
59795 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59796 return (long)atomic_read(v);
59797 }
59798
59799 +#ifdef CONFIG_PAX_REFCOUNT
59800 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59801 +{
59802 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59803 +
59804 + return (long)atomic_read_unchecked(v);
59805 +}
59806 +#endif
59807 +
59808 static inline void atomic_long_set(atomic_long_t *l, long i)
59809 {
59810 atomic_t *v = (atomic_t *)l;
59811 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59812 atomic_set(v, i);
59813 }
59814
59815 +#ifdef CONFIG_PAX_REFCOUNT
59816 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59817 +{
59818 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59819 +
59820 + atomic_set_unchecked(v, i);
59821 +}
59822 +#endif
59823 +
59824 static inline void atomic_long_inc(atomic_long_t *l)
59825 {
59826 atomic_t *v = (atomic_t *)l;
59827 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59828 atomic_inc(v);
59829 }
59830
59831 +#ifdef CONFIG_PAX_REFCOUNT
59832 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59833 +{
59834 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59835 +
59836 + atomic_inc_unchecked(v);
59837 +}
59838 +#endif
59839 +
59840 static inline void atomic_long_dec(atomic_long_t *l)
59841 {
59842 atomic_t *v = (atomic_t *)l;
59843 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59844 atomic_dec(v);
59845 }
59846
59847 +#ifdef CONFIG_PAX_REFCOUNT
59848 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59849 +{
59850 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59851 +
59852 + atomic_dec_unchecked(v);
59853 +}
59854 +#endif
59855 +
59856 static inline void atomic_long_add(long i, atomic_long_t *l)
59857 {
59858 atomic_t *v = (atomic_t *)l;
59859 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59860 atomic_add(i, v);
59861 }
59862
59863 +#ifdef CONFIG_PAX_REFCOUNT
59864 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59865 +{
59866 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59867 +
59868 + atomic_add_unchecked(i, v);
59869 +}
59870 +#endif
59871 +
59872 static inline void atomic_long_sub(long i, atomic_long_t *l)
59873 {
59874 atomic_t *v = (atomic_t *)l;
59875 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59876 atomic_sub(i, v);
59877 }
59878
59879 +#ifdef CONFIG_PAX_REFCOUNT
59880 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59881 +{
59882 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59883 +
59884 + atomic_sub_unchecked(i, v);
59885 +}
59886 +#endif
59887 +
59888 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59889 {
59890 atomic_t *v = (atomic_t *)l;
59891 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59892 return (long)atomic_inc_return(v);
59893 }
59894
59895 +#ifdef CONFIG_PAX_REFCOUNT
59896 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59897 +{
59898 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59899 +
59900 + return (long)atomic_inc_return_unchecked(v);
59901 +}
59902 +#endif
59903 +
59904 static inline long atomic_long_dec_return(atomic_long_t *l)
59905 {
59906 atomic_t *v = (atomic_t *)l;
59907 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59908
59909 #endif /* BITS_PER_LONG == 64 */
59910
59911 +#ifdef CONFIG_PAX_REFCOUNT
59912 +static inline void pax_refcount_needs_these_functions(void)
59913 +{
59914 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59915 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59916 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59917 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59918 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59919 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59920 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59921 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59922 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59923 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59924 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59925 +
59926 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59927 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59928 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59929 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59930 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59931 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59932 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59933 +}
59934 +#else
59935 +#define atomic_read_unchecked(v) atomic_read(v)
59936 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59937 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59938 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59939 +#define atomic_inc_unchecked(v) atomic_inc(v)
59940 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59941 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59942 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59943 +#define atomic_dec_unchecked(v) atomic_dec(v)
59944 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59945 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59946 +
59947 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59948 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59949 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59950 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59951 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59952 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59953 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59954 +#endif
59955 +
59956 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59957 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59958 index b18ce4f..2ee2843 100644
59959 --- a/include/asm-generic/atomic64.h
59960 +++ b/include/asm-generic/atomic64.h
59961 @@ -16,6 +16,8 @@ typedef struct {
59962 long long counter;
59963 } atomic64_t;
59964
59965 +typedef atomic64_t atomic64_unchecked_t;
59966 +
59967 #define ATOMIC64_INIT(i) { (i) }
59968
59969 extern long long atomic64_read(const atomic64_t *v);
59970 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59971 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59972 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59973
59974 +#define atomic64_read_unchecked(v) atomic64_read(v)
59975 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59976 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59977 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59978 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59979 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59980 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59981 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59982 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59983 +
59984 #endif /* _ASM_GENERIC_ATOMIC64_H */
59985 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59986 index 1bfcfe5..e04c5c9 100644
59987 --- a/include/asm-generic/cache.h
59988 +++ b/include/asm-generic/cache.h
59989 @@ -6,7 +6,7 @@
59990 * cache lines need to provide their own cache.h.
59991 */
59992
59993 -#define L1_CACHE_SHIFT 5
59994 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59995 +#define L1_CACHE_SHIFT 5UL
59996 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59997
59998 #endif /* __ASM_GENERIC_CACHE_H */
59999 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
60000 index 1ca3efc..e3dc852 100644
60001 --- a/include/asm-generic/int-l64.h
60002 +++ b/include/asm-generic/int-l64.h
60003 @@ -46,6 +46,8 @@ typedef unsigned int u32;
60004 typedef signed long s64;
60005 typedef unsigned long u64;
60006
60007 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
60008 +
60009 #define S8_C(x) x
60010 #define U8_C(x) x ## U
60011 #define S16_C(x) x
60012 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
60013 index f394147..b6152b9 100644
60014 --- a/include/asm-generic/int-ll64.h
60015 +++ b/include/asm-generic/int-ll64.h
60016 @@ -51,6 +51,8 @@ typedef unsigned int u32;
60017 typedef signed long long s64;
60018 typedef unsigned long long u64;
60019
60020 +typedef unsigned long long intoverflow_t;
60021 +
60022 #define S8_C(x) x
60023 #define U8_C(x) x ## U
60024 #define S16_C(x) x
60025 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60026 index 0232ccb..13d9165 100644
60027 --- a/include/asm-generic/kmap_types.h
60028 +++ b/include/asm-generic/kmap_types.h
60029 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
60030 KMAP_D(17) KM_NMI,
60031 KMAP_D(18) KM_NMI_PTE,
60032 KMAP_D(19) KM_KDB,
60033 +KMAP_D(20) KM_CLEARPAGE,
60034 /*
60035 * Remember to update debug_kmap_atomic() when adding new kmap types!
60036 */
60037 -KMAP_D(20) KM_TYPE_NR
60038 +KMAP_D(21) KM_TYPE_NR
60039 };
60040
60041 #undef KMAP_D
60042 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60043 index 725612b..9cc513a 100644
60044 --- a/include/asm-generic/pgtable-nopmd.h
60045 +++ b/include/asm-generic/pgtable-nopmd.h
60046 @@ -1,14 +1,19 @@
60047 #ifndef _PGTABLE_NOPMD_H
60048 #define _PGTABLE_NOPMD_H
60049
60050 -#ifndef __ASSEMBLY__
60051 -
60052 #include <asm-generic/pgtable-nopud.h>
60053
60054 -struct mm_struct;
60055 -
60056 #define __PAGETABLE_PMD_FOLDED
60057
60058 +#define PMD_SHIFT PUD_SHIFT
60059 +#define PTRS_PER_PMD 1
60060 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60061 +#define PMD_MASK (~(PMD_SIZE-1))
60062 +
60063 +#ifndef __ASSEMBLY__
60064 +
60065 +struct mm_struct;
60066 +
60067 /*
60068 * Having the pmd type consist of a pud gets the size right, and allows
60069 * us to conceptually access the pud entry that this pmd is folded into
60070 @@ -16,11 +21,6 @@ struct mm_struct;
60071 */
60072 typedef struct { pud_t pud; } pmd_t;
60073
60074 -#define PMD_SHIFT PUD_SHIFT
60075 -#define PTRS_PER_PMD 1
60076 -#define PMD_SIZE (1UL << PMD_SHIFT)
60077 -#define PMD_MASK (~(PMD_SIZE-1))
60078 -
60079 /*
60080 * The "pud_xxx()" functions here are trivial for a folded two-level
60081 * setup: the pmd is never bad, and a pmd always exists (as it's folded
60082 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60083 index 810431d..ccc3638 100644
60084 --- a/include/asm-generic/pgtable-nopud.h
60085 +++ b/include/asm-generic/pgtable-nopud.h
60086 @@ -1,10 +1,15 @@
60087 #ifndef _PGTABLE_NOPUD_H
60088 #define _PGTABLE_NOPUD_H
60089
60090 -#ifndef __ASSEMBLY__
60091 -
60092 #define __PAGETABLE_PUD_FOLDED
60093
60094 +#define PUD_SHIFT PGDIR_SHIFT
60095 +#define PTRS_PER_PUD 1
60096 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60097 +#define PUD_MASK (~(PUD_SIZE-1))
60098 +
60099 +#ifndef __ASSEMBLY__
60100 +
60101 /*
60102 * Having the pud type consist of a pgd gets the size right, and allows
60103 * us to conceptually access the pgd entry that this pud is folded into
60104 @@ -12,11 +17,6 @@
60105 */
60106 typedef struct { pgd_t pgd; } pud_t;
60107
60108 -#define PUD_SHIFT PGDIR_SHIFT
60109 -#define PTRS_PER_PUD 1
60110 -#define PUD_SIZE (1UL << PUD_SHIFT)
60111 -#define PUD_MASK (~(PUD_SIZE-1))
60112 -
60113 /*
60114 * The "pgd_xxx()" functions here are trivial for a folded two-level
60115 * setup: the pud is never bad, and a pud always exists (as it's folded
60116 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60117 index 76bff2b..c7a14e2 100644
60118 --- a/include/asm-generic/pgtable.h
60119 +++ b/include/asm-generic/pgtable.h
60120 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
60121 #endif /* __HAVE_ARCH_PMD_WRITE */
60122 #endif
60123
60124 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60125 +static inline unsigned long pax_open_kernel(void) { return 0; }
60126 +#endif
60127 +
60128 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60129 +static inline unsigned long pax_close_kernel(void) { return 0; }
60130 +#endif
60131 +
60132 #endif /* !__ASSEMBLY__ */
60133
60134 #endif /* _ASM_GENERIC_PGTABLE_H */
60135 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60136 index db22d13..1f2e3e1 100644
60137 --- a/include/asm-generic/vmlinux.lds.h
60138 +++ b/include/asm-generic/vmlinux.lds.h
60139 @@ -217,6 +217,7 @@
60140 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60141 VMLINUX_SYMBOL(__start_rodata) = .; \
60142 *(.rodata) *(.rodata.*) \
60143 + *(.data..read_only) \
60144 *(__vermagic) /* Kernel version magic */ \
60145 . = ALIGN(8); \
60146 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60147 @@ -723,17 +724,18 @@
60148 * section in the linker script will go there too. @phdr should have
60149 * a leading colon.
60150 *
60151 - * Note that this macros defines __per_cpu_load as an absolute symbol.
60152 + * Note that this macros defines per_cpu_load as an absolute symbol.
60153 * If there is no need to put the percpu section at a predetermined
60154 * address, use PERCPU_SECTION.
60155 */
60156 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60157 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
60158 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60159 + per_cpu_load = .; \
60160 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60161 - LOAD_OFFSET) { \
60162 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60163 PERCPU_INPUT(cacheline) \
60164 } phdr \
60165 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60166 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60167
60168 /**
60169 * PERCPU_SECTION - define output section for percpu area, simple version
60170 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60171 index 9b7c2bb..76b7d1e 100644
60172 --- a/include/drm/drmP.h
60173 +++ b/include/drm/drmP.h
60174 @@ -73,6 +73,7 @@
60175 #include <linux/workqueue.h>
60176 #include <linux/poll.h>
60177 #include <asm/pgalloc.h>
60178 +#include <asm/local.h>
60179 #include "drm.h"
60180
60181 #include <linux/idr.h>
60182 @@ -1035,7 +1036,7 @@ struct drm_device {
60183
60184 /** \name Usage Counters */
60185 /*@{ */
60186 - int open_count; /**< Outstanding files open */
60187 + local_t open_count; /**< Outstanding files open */
60188 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60189 atomic_t vma_count; /**< Outstanding vma areas open */
60190 int buf_use; /**< Buffers in use -- cannot alloc */
60191 @@ -1046,7 +1047,7 @@ struct drm_device {
60192 /*@{ */
60193 unsigned long counters;
60194 enum drm_stat_type types[15];
60195 - atomic_t counts[15];
60196 + atomic_unchecked_t counts[15];
60197 /*@} */
60198
60199 struct list_head filelist;
60200 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60201 index 73b0712..0b7ef2f 100644
60202 --- a/include/drm/drm_crtc_helper.h
60203 +++ b/include/drm/drm_crtc_helper.h
60204 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60205
60206 /* disable crtc when not in use - more explicit than dpms off */
60207 void (*disable)(struct drm_crtc *crtc);
60208 -};
60209 +} __no_const;
60210
60211 struct drm_encoder_helper_funcs {
60212 void (*dpms)(struct drm_encoder *encoder, int mode);
60213 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60214 struct drm_connector *connector);
60215 /* disable encoder when not in use - more explicit than dpms off */
60216 void (*disable)(struct drm_encoder *encoder);
60217 -};
60218 +} __no_const;
60219
60220 struct drm_connector_helper_funcs {
60221 int (*get_modes)(struct drm_connector *connector);
60222 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60223 index 26c1f78..6722682 100644
60224 --- a/include/drm/ttm/ttm_memory.h
60225 +++ b/include/drm/ttm/ttm_memory.h
60226 @@ -47,7 +47,7 @@
60227
60228 struct ttm_mem_shrink {
60229 int (*do_shrink) (struct ttm_mem_shrink *);
60230 -};
60231 +} __no_const;
60232
60233 /**
60234 * struct ttm_mem_global - Global memory accounting structure.
60235 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60236 index e86dfca..40cc55f 100644
60237 --- a/include/linux/a.out.h
60238 +++ b/include/linux/a.out.h
60239 @@ -39,6 +39,14 @@ enum machine_type {
60240 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60241 };
60242
60243 +/* Constants for the N_FLAGS field */
60244 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60245 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60246 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60247 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60248 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60249 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60250 +
60251 #if !defined (N_MAGIC)
60252 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60253 #endif
60254 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60255 index 49a83ca..df96b54 100644
60256 --- a/include/linux/atmdev.h
60257 +++ b/include/linux/atmdev.h
60258 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60259 #endif
60260
60261 struct k_atm_aal_stats {
60262 -#define __HANDLE_ITEM(i) atomic_t i
60263 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60264 __AAL_STAT_ITEMS
60265 #undef __HANDLE_ITEM
60266 };
60267 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60268 index fd88a39..f4d0bad 100644
60269 --- a/include/linux/binfmts.h
60270 +++ b/include/linux/binfmts.h
60271 @@ -88,6 +88,7 @@ struct linux_binfmt {
60272 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60273 int (*load_shlib)(struct file *);
60274 int (*core_dump)(struct coredump_params *cprm);
60275 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60276 unsigned long min_coredump; /* minimal dump size */
60277 };
60278
60279 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60280 index 5e30b45..5fdcf66 100644
60281 --- a/include/linux/blkdev.h
60282 +++ b/include/linux/blkdev.h
60283 @@ -1318,7 +1318,7 @@ struct block_device_operations {
60284 /* this callback is with swap_lock and sometimes page table lock held */
60285 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60286 struct module *owner;
60287 -};
60288 +} __do_const;
60289
60290 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60291 unsigned long);
60292 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60293 index 8e9e4bc..88bd457 100644
60294 --- a/include/linux/blktrace_api.h
60295 +++ b/include/linux/blktrace_api.h
60296 @@ -162,7 +162,7 @@ struct blk_trace {
60297 struct dentry *dir;
60298 struct dentry *dropped_file;
60299 struct dentry *msg_file;
60300 - atomic_t dropped;
60301 + atomic_unchecked_t dropped;
60302 };
60303
60304 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60305 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60306 index 83195fb..0b0f77d 100644
60307 --- a/include/linux/byteorder/little_endian.h
60308 +++ b/include/linux/byteorder/little_endian.h
60309 @@ -42,51 +42,51 @@
60310
60311 static inline __le64 __cpu_to_le64p(const __u64 *p)
60312 {
60313 - return (__force __le64)*p;
60314 + return (__force const __le64)*p;
60315 }
60316 static inline __u64 __le64_to_cpup(const __le64 *p)
60317 {
60318 - return (__force __u64)*p;
60319 + return (__force const __u64)*p;
60320 }
60321 static inline __le32 __cpu_to_le32p(const __u32 *p)
60322 {
60323 - return (__force __le32)*p;
60324 + return (__force const __le32)*p;
60325 }
60326 static inline __u32 __le32_to_cpup(const __le32 *p)
60327 {
60328 - return (__force __u32)*p;
60329 + return (__force const __u32)*p;
60330 }
60331 static inline __le16 __cpu_to_le16p(const __u16 *p)
60332 {
60333 - return (__force __le16)*p;
60334 + return (__force const __le16)*p;
60335 }
60336 static inline __u16 __le16_to_cpup(const __le16 *p)
60337 {
60338 - return (__force __u16)*p;
60339 + return (__force const __u16)*p;
60340 }
60341 static inline __be64 __cpu_to_be64p(const __u64 *p)
60342 {
60343 - return (__force __be64)__swab64p(p);
60344 + return (__force const __be64)__swab64p(p);
60345 }
60346 static inline __u64 __be64_to_cpup(const __be64 *p)
60347 {
60348 - return __swab64p((__u64 *)p);
60349 + return __swab64p((const __u64 *)p);
60350 }
60351 static inline __be32 __cpu_to_be32p(const __u32 *p)
60352 {
60353 - return (__force __be32)__swab32p(p);
60354 + return (__force const __be32)__swab32p(p);
60355 }
60356 static inline __u32 __be32_to_cpup(const __be32 *p)
60357 {
60358 - return __swab32p((__u32 *)p);
60359 + return __swab32p((const __u32 *)p);
60360 }
60361 static inline __be16 __cpu_to_be16p(const __u16 *p)
60362 {
60363 - return (__force __be16)__swab16p(p);
60364 + return (__force const __be16)__swab16p(p);
60365 }
60366 static inline __u16 __be16_to_cpup(const __be16 *p)
60367 {
60368 - return __swab16p((__u16 *)p);
60369 + return __swab16p((const __u16 *)p);
60370 }
60371 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60372 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60373 diff --git a/include/linux/cache.h b/include/linux/cache.h
60374 index 4c57065..4307975 100644
60375 --- a/include/linux/cache.h
60376 +++ b/include/linux/cache.h
60377 @@ -16,6 +16,10 @@
60378 #define __read_mostly
60379 #endif
60380
60381 +#ifndef __read_only
60382 +#define __read_only __read_mostly
60383 +#endif
60384 +
60385 #ifndef ____cacheline_aligned
60386 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60387 #endif
60388 diff --git a/include/linux/capability.h b/include/linux/capability.h
60389 index c421123..e343179 100644
60390 --- a/include/linux/capability.h
60391 +++ b/include/linux/capability.h
60392 @@ -547,6 +547,9 @@ extern bool capable(int cap);
60393 extern bool ns_capable(struct user_namespace *ns, int cap);
60394 extern bool task_ns_capable(struct task_struct *t, int cap);
60395 extern bool nsown_capable(int cap);
60396 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
60397 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60398 +extern bool capable_nolog(int cap);
60399
60400 /* audit system wants to get cap info from files as well */
60401 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60402 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60403 index 04ffb2e..6799180 100644
60404 --- a/include/linux/cleancache.h
60405 +++ b/include/linux/cleancache.h
60406 @@ -31,7 +31,7 @@ struct cleancache_ops {
60407 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60408 void (*flush_inode)(int, struct cleancache_filekey);
60409 void (*flush_fs)(int);
60410 -};
60411 +} __no_const;
60412
60413 extern struct cleancache_ops
60414 cleancache_register_ops(struct cleancache_ops *ops);
60415 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60416 index dfadc96..c0e70c1 100644
60417 --- a/include/linux/compiler-gcc4.h
60418 +++ b/include/linux/compiler-gcc4.h
60419 @@ -31,6 +31,12 @@
60420
60421
60422 #if __GNUC_MINOR__ >= 5
60423 +
60424 +#ifdef CONSTIFY_PLUGIN
60425 +#define __no_const __attribute__((no_const))
60426 +#define __do_const __attribute__((do_const))
60427 +#endif
60428 +
60429 /*
60430 * Mark a position in code as unreachable. This can be used to
60431 * suppress control flow warnings after asm blocks that transfer
60432 @@ -46,6 +52,11 @@
60433 #define __noclone __attribute__((__noclone__))
60434
60435 #endif
60436 +
60437 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60438 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60439 +#define __bos0(ptr) __bos((ptr), 0)
60440 +#define __bos1(ptr) __bos((ptr), 1)
60441 #endif
60442
60443 #if __GNUC_MINOR__ > 0
60444 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60445 index 320d6c9..8573a1c 100644
60446 --- a/include/linux/compiler.h
60447 +++ b/include/linux/compiler.h
60448 @@ -5,31 +5,62 @@
60449
60450 #ifdef __CHECKER__
60451 # define __user __attribute__((noderef, address_space(1)))
60452 +# define __force_user __force __user
60453 # define __kernel __attribute__((address_space(0)))
60454 +# define __force_kernel __force __kernel
60455 # define __safe __attribute__((safe))
60456 # define __force __attribute__((force))
60457 # define __nocast __attribute__((nocast))
60458 # define __iomem __attribute__((noderef, address_space(2)))
60459 +# define __force_iomem __force __iomem
60460 # define __acquires(x) __attribute__((context(x,0,1)))
60461 # define __releases(x) __attribute__((context(x,1,0)))
60462 # define __acquire(x) __context__(x,1)
60463 # define __release(x) __context__(x,-1)
60464 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60465 # define __percpu __attribute__((noderef, address_space(3)))
60466 +# define __force_percpu __force __percpu
60467 #ifdef CONFIG_SPARSE_RCU_POINTER
60468 # define __rcu __attribute__((noderef, address_space(4)))
60469 +# define __force_rcu __force __rcu
60470 #else
60471 # define __rcu
60472 +# define __force_rcu
60473 #endif
60474 extern void __chk_user_ptr(const volatile void __user *);
60475 extern void __chk_io_ptr(const volatile void __iomem *);
60476 +#elif defined(CHECKER_PLUGIN)
60477 +//# define __user
60478 +//# define __force_user
60479 +//# define __kernel
60480 +//# define __force_kernel
60481 +# define __safe
60482 +# define __force
60483 +# define __nocast
60484 +# define __iomem
60485 +# define __force_iomem
60486 +# define __chk_user_ptr(x) (void)0
60487 +# define __chk_io_ptr(x) (void)0
60488 +# define __builtin_warning(x, y...) (1)
60489 +# define __acquires(x)
60490 +# define __releases(x)
60491 +# define __acquire(x) (void)0
60492 +# define __release(x) (void)0
60493 +# define __cond_lock(x,c) (c)
60494 +# define __percpu
60495 +# define __force_percpu
60496 +# define __rcu
60497 +# define __force_rcu
60498 #else
60499 # define __user
60500 +# define __force_user
60501 # define __kernel
60502 +# define __force_kernel
60503 # define __safe
60504 # define __force
60505 # define __nocast
60506 # define __iomem
60507 +# define __force_iomem
60508 # define __chk_user_ptr(x) (void)0
60509 # define __chk_io_ptr(x) (void)0
60510 # define __builtin_warning(x, y...) (1)
60511 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60512 # define __release(x) (void)0
60513 # define __cond_lock(x,c) (c)
60514 # define __percpu
60515 +# define __force_percpu
60516 # define __rcu
60517 +# define __force_rcu
60518 #endif
60519
60520 #ifdef __KERNEL__
60521 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60522 # define __attribute_const__ /* unimplemented */
60523 #endif
60524
60525 +#ifndef __no_const
60526 +# define __no_const
60527 +#endif
60528 +
60529 +#ifndef __do_const
60530 +# define __do_const
60531 +#endif
60532 +
60533 /*
60534 * Tell gcc if a function is cold. The compiler will assume any path
60535 * directly leading to the call is unlikely.
60536 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60537 #define __cold
60538 #endif
60539
60540 +#ifndef __alloc_size
60541 +#define __alloc_size(...)
60542 +#endif
60543 +
60544 +#ifndef __bos
60545 +#define __bos(ptr, arg)
60546 +#endif
60547 +
60548 +#ifndef __bos0
60549 +#define __bos0(ptr)
60550 +#endif
60551 +
60552 +#ifndef __bos1
60553 +#define __bos1(ptr)
60554 +#endif
60555 +
60556 /* Simple shorthand for a section definition */
60557 #ifndef __section
60558 # define __section(S) __attribute__ ((__section__(#S)))
60559 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60560 * use is to mediate communication between process-level code and irq/NMI
60561 * handlers, all running on the same CPU.
60562 */
60563 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60564 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60565 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60566
60567 #endif /* __LINUX_COMPILER_H */
60568 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60569 index e9eaec5..bfeb9bb 100644
60570 --- a/include/linux/cpuset.h
60571 +++ b/include/linux/cpuset.h
60572 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
60573 * nodemask.
60574 */
60575 smp_mb();
60576 - --ACCESS_ONCE(current->mems_allowed_change_disable);
60577 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60578 }
60579
60580 static inline void set_mems_allowed(nodemask_t nodemask)
60581 diff --git a/include/linux/cred.h b/include/linux/cred.h
60582 index 4030896..a5c9f09 100644
60583 --- a/include/linux/cred.h
60584 +++ b/include/linux/cred.h
60585 @@ -196,6 +196,11 @@ do { \
60586 __validate_process_creds(current, __FILE__, __LINE__); \
60587 } while(0)
60588
60589 +#define validate_task_creds(task) \
60590 +do { \
60591 + __validate_process_creds((task), __FILE__, __LINE__); \
60592 +} while(0)
60593 +
60594 extern void validate_creds_for_do_exit(struct task_struct *);
60595 #else
60596 static inline void validate_creds(const struct cred *cred)
60597 @@ -207,6 +212,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60598 static inline void validate_process_creds(void)
60599 {
60600 }
60601 +static inline void validate_task_creds(struct task_struct *task)
60602 +{
60603 +}
60604 #endif
60605
60606 /**
60607 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60608 index e5e468e..f079672 100644
60609 --- a/include/linux/crypto.h
60610 +++ b/include/linux/crypto.h
60611 @@ -361,7 +361,7 @@ struct cipher_tfm {
60612 const u8 *key, unsigned int keylen);
60613 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60614 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60615 -};
60616 +} __no_const;
60617
60618 struct hash_tfm {
60619 int (*init)(struct hash_desc *desc);
60620 @@ -382,13 +382,13 @@ struct compress_tfm {
60621 int (*cot_decompress)(struct crypto_tfm *tfm,
60622 const u8 *src, unsigned int slen,
60623 u8 *dst, unsigned int *dlen);
60624 -};
60625 +} __no_const;
60626
60627 struct rng_tfm {
60628 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60629 unsigned int dlen);
60630 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60631 -};
60632 +} __no_const;
60633
60634 #define crt_ablkcipher crt_u.ablkcipher
60635 #define crt_aead crt_u.aead
60636 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60637 index 7925bf0..d5143d2 100644
60638 --- a/include/linux/decompress/mm.h
60639 +++ b/include/linux/decompress/mm.h
60640 @@ -77,7 +77,7 @@ static void free(void *where)
60641 * warnings when not needed (indeed large_malloc / large_free are not
60642 * needed by inflate */
60643
60644 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60645 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60646 #define free(a) kfree(a)
60647
60648 #define large_malloc(a) vmalloc(a)
60649 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60650 index 347fdc3..cd01657 100644
60651 --- a/include/linux/dma-mapping.h
60652 +++ b/include/linux/dma-mapping.h
60653 @@ -42,7 +42,7 @@ struct dma_map_ops {
60654 int (*dma_supported)(struct device *dev, u64 mask);
60655 int (*set_dma_mask)(struct device *dev, u64 mask);
60656 int is_phys;
60657 -};
60658 +} __do_const;
60659
60660 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60661
60662 diff --git a/include/linux/efi.h b/include/linux/efi.h
60663 index 2362a0b..cfaf8fcc 100644
60664 --- a/include/linux/efi.h
60665 +++ b/include/linux/efi.h
60666 @@ -446,7 +446,7 @@ struct efivar_operations {
60667 efi_get_variable_t *get_variable;
60668 efi_get_next_variable_t *get_next_variable;
60669 efi_set_variable_t *set_variable;
60670 -};
60671 +} __no_const;
60672
60673 struct efivars {
60674 /*
60675 diff --git a/include/linux/elf.h b/include/linux/elf.h
60676 index 110821c..cb14c08 100644
60677 --- a/include/linux/elf.h
60678 +++ b/include/linux/elf.h
60679 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
60680 #define PT_GNU_EH_FRAME 0x6474e550
60681
60682 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60683 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60684 +
60685 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60686 +
60687 +/* Constants for the e_flags field */
60688 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60689 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60690 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60691 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60692 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60693 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60694
60695 /*
60696 * Extended Numbering
60697 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
60698 #define DT_DEBUG 21
60699 #define DT_TEXTREL 22
60700 #define DT_JMPREL 23
60701 +#define DT_FLAGS 30
60702 + #define DF_TEXTREL 0x00000004
60703 #define DT_ENCODING 32
60704 #define OLD_DT_LOOS 0x60000000
60705 #define DT_LOOS 0x6000000d
60706 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
60707 #define PF_W 0x2
60708 #define PF_X 0x1
60709
60710 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60711 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60712 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60713 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60714 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60715 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60716 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60717 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60718 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60719 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60720 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60721 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60722 +
60723 typedef struct elf32_phdr{
60724 Elf32_Word p_type;
60725 Elf32_Off p_offset;
60726 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
60727 #define EI_OSABI 7
60728 #define EI_PAD 8
60729
60730 +#define EI_PAX 14
60731 +
60732 #define ELFMAG0 0x7f /* EI_MAG */
60733 #define ELFMAG1 'E'
60734 #define ELFMAG2 'L'
60735 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
60736 #define elf_note elf32_note
60737 #define elf_addr_t Elf32_Off
60738 #define Elf_Half Elf32_Half
60739 +#define elf_dyn Elf32_Dyn
60740
60741 #else
60742
60743 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
60744 #define elf_note elf64_note
60745 #define elf_addr_t Elf64_Off
60746 #define Elf_Half Elf64_Half
60747 +#define elf_dyn Elf64_Dyn
60748
60749 #endif
60750
60751 diff --git a/include/linux/filter.h b/include/linux/filter.h
60752 index 741956f..f02f482 100644
60753 --- a/include/linux/filter.h
60754 +++ b/include/linux/filter.h
60755 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60756
60757 struct sk_buff;
60758 struct sock;
60759 +struct bpf_jit_work;
60760
60761 struct sk_filter
60762 {
60763 @@ -141,6 +142,9 @@ struct sk_filter
60764 unsigned int len; /* Number of filter blocks */
60765 unsigned int (*bpf_func)(const struct sk_buff *skb,
60766 const struct sock_filter *filter);
60767 +#ifdef CONFIG_BPF_JIT
60768 + struct bpf_jit_work *work;
60769 +#endif
60770 struct rcu_head rcu;
60771 struct sock_filter insns[0];
60772 };
60773 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60774 index 84ccf8e..2e9b14c 100644
60775 --- a/include/linux/firewire.h
60776 +++ b/include/linux/firewire.h
60777 @@ -428,7 +428,7 @@ struct fw_iso_context {
60778 union {
60779 fw_iso_callback_t sc;
60780 fw_iso_mc_callback_t mc;
60781 - } callback;
60782 + } __no_const callback;
60783 void *callback_data;
60784 };
60785
60786 diff --git a/include/linux/fs.h b/include/linux/fs.h
60787 index cf7bc25..0d2babf 100644
60788 --- a/include/linux/fs.h
60789 +++ b/include/linux/fs.h
60790 @@ -1588,7 +1588,8 @@ struct file_operations {
60791 int (*setlease)(struct file *, long, struct file_lock **);
60792 long (*fallocate)(struct file *file, int mode, loff_t offset,
60793 loff_t len);
60794 -};
60795 +} __do_const;
60796 +typedef struct file_operations __no_const file_operations_no_const;
60797
60798 struct inode_operations {
60799 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60800 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60801 index 003dc0f..3c4ea97 100644
60802 --- a/include/linux/fs_struct.h
60803 +++ b/include/linux/fs_struct.h
60804 @@ -6,7 +6,7 @@
60805 #include <linux/seqlock.h>
60806
60807 struct fs_struct {
60808 - int users;
60809 + atomic_t users;
60810 spinlock_t lock;
60811 seqcount_t seq;
60812 int umask;
60813 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60814 index af095b5..cf1220c 100644
60815 --- a/include/linux/fscache-cache.h
60816 +++ b/include/linux/fscache-cache.h
60817 @@ -102,7 +102,7 @@ struct fscache_operation {
60818 fscache_operation_release_t release;
60819 };
60820
60821 -extern atomic_t fscache_op_debug_id;
60822 +extern atomic_unchecked_t fscache_op_debug_id;
60823 extern void fscache_op_work_func(struct work_struct *work);
60824
60825 extern void fscache_enqueue_operation(struct fscache_operation *);
60826 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60827 {
60828 INIT_WORK(&op->work, fscache_op_work_func);
60829 atomic_set(&op->usage, 1);
60830 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60831 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60832 op->processor = processor;
60833 op->release = release;
60834 INIT_LIST_HEAD(&op->pend_link);
60835 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60836 index 2a53f10..0187fdf 100644
60837 --- a/include/linux/fsnotify.h
60838 +++ b/include/linux/fsnotify.h
60839 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60840 */
60841 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60842 {
60843 - return kstrdup(name, GFP_KERNEL);
60844 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60845 }
60846
60847 /*
60848 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60849 index 91d0e0a3..035666b 100644
60850 --- a/include/linux/fsnotify_backend.h
60851 +++ b/include/linux/fsnotify_backend.h
60852 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60853 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60854 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60855 };
60856 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60857
60858 /*
60859 * A group is a "thing" that wants to receive notification about filesystem
60860 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60861 index 96efa67..1261547 100644
60862 --- a/include/linux/ftrace_event.h
60863 +++ b/include/linux/ftrace_event.h
60864 @@ -97,7 +97,7 @@ struct trace_event_functions {
60865 trace_print_func raw;
60866 trace_print_func hex;
60867 trace_print_func binary;
60868 -};
60869 +} __no_const;
60870
60871 struct trace_event {
60872 struct hlist_node node;
60873 @@ -252,7 +252,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60874 extern int trace_add_event_call(struct ftrace_event_call *call);
60875 extern void trace_remove_event_call(struct ftrace_event_call *call);
60876
60877 -#define is_signed_type(type) (((type)(-1)) < 0)
60878 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60879
60880 int trace_set_clr_event(const char *system, const char *event, int set);
60881
60882 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60883 index 02fa469..a15f279 100644
60884 --- a/include/linux/genhd.h
60885 +++ b/include/linux/genhd.h
60886 @@ -184,7 +184,7 @@ struct gendisk {
60887 struct kobject *slave_dir;
60888
60889 struct timer_rand_state *random;
60890 - atomic_t sync_io; /* RAID */
60891 + atomic_unchecked_t sync_io; /* RAID */
60892 struct disk_events *ev;
60893 #ifdef CONFIG_BLK_DEV_INTEGRITY
60894 struct blk_integrity *integrity;
60895 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60896 new file mode 100644
60897 index 0000000..0dc3943
60898 --- /dev/null
60899 +++ b/include/linux/gracl.h
60900 @@ -0,0 +1,317 @@
60901 +#ifndef GR_ACL_H
60902 +#define GR_ACL_H
60903 +
60904 +#include <linux/grdefs.h>
60905 +#include <linux/resource.h>
60906 +#include <linux/capability.h>
60907 +#include <linux/dcache.h>
60908 +#include <asm/resource.h>
60909 +
60910 +/* Major status information */
60911 +
60912 +#define GR_VERSION "grsecurity 2.2.2"
60913 +#define GRSECURITY_VERSION 0x2202
60914 +
60915 +enum {
60916 + GR_SHUTDOWN = 0,
60917 + GR_ENABLE = 1,
60918 + GR_SPROLE = 2,
60919 + GR_RELOAD = 3,
60920 + GR_SEGVMOD = 4,
60921 + GR_STATUS = 5,
60922 + GR_UNSPROLE = 6,
60923 + GR_PASSSET = 7,
60924 + GR_SPROLEPAM = 8,
60925 +};
60926 +
60927 +/* Password setup definitions
60928 + * kernel/grhash.c */
60929 +enum {
60930 + GR_PW_LEN = 128,
60931 + GR_SALT_LEN = 16,
60932 + GR_SHA_LEN = 32,
60933 +};
60934 +
60935 +enum {
60936 + GR_SPROLE_LEN = 64,
60937 +};
60938 +
60939 +enum {
60940 + GR_NO_GLOB = 0,
60941 + GR_REG_GLOB,
60942 + GR_CREATE_GLOB
60943 +};
60944 +
60945 +#define GR_NLIMITS 32
60946 +
60947 +/* Begin Data Structures */
60948 +
60949 +struct sprole_pw {
60950 + unsigned char *rolename;
60951 + unsigned char salt[GR_SALT_LEN];
60952 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60953 +};
60954 +
60955 +struct name_entry {
60956 + __u32 key;
60957 + ino_t inode;
60958 + dev_t device;
60959 + char *name;
60960 + __u16 len;
60961 + __u8 deleted;
60962 + struct name_entry *prev;
60963 + struct name_entry *next;
60964 +};
60965 +
60966 +struct inodev_entry {
60967 + struct name_entry *nentry;
60968 + struct inodev_entry *prev;
60969 + struct inodev_entry *next;
60970 +};
60971 +
60972 +struct acl_role_db {
60973 + struct acl_role_label **r_hash;
60974 + __u32 r_size;
60975 +};
60976 +
60977 +struct inodev_db {
60978 + struct inodev_entry **i_hash;
60979 + __u32 i_size;
60980 +};
60981 +
60982 +struct name_db {
60983 + struct name_entry **n_hash;
60984 + __u32 n_size;
60985 +};
60986 +
60987 +struct crash_uid {
60988 + uid_t uid;
60989 + unsigned long expires;
60990 +};
60991 +
60992 +struct gr_hash_struct {
60993 + void **table;
60994 + void **nametable;
60995 + void *first;
60996 + __u32 table_size;
60997 + __u32 used_size;
60998 + int type;
60999 +};
61000 +
61001 +/* Userspace Grsecurity ACL data structures */
61002 +
61003 +struct acl_subject_label {
61004 + char *filename;
61005 + ino_t inode;
61006 + dev_t device;
61007 + __u32 mode;
61008 + kernel_cap_t cap_mask;
61009 + kernel_cap_t cap_lower;
61010 + kernel_cap_t cap_invert_audit;
61011 +
61012 + struct rlimit res[GR_NLIMITS];
61013 + __u32 resmask;
61014 +
61015 + __u8 user_trans_type;
61016 + __u8 group_trans_type;
61017 + uid_t *user_transitions;
61018 + gid_t *group_transitions;
61019 + __u16 user_trans_num;
61020 + __u16 group_trans_num;
61021 +
61022 + __u32 sock_families[2];
61023 + __u32 ip_proto[8];
61024 + __u32 ip_type;
61025 + struct acl_ip_label **ips;
61026 + __u32 ip_num;
61027 + __u32 inaddr_any_override;
61028 +
61029 + __u32 crashes;
61030 + unsigned long expires;
61031 +
61032 + struct acl_subject_label *parent_subject;
61033 + struct gr_hash_struct *hash;
61034 + struct acl_subject_label *prev;
61035 + struct acl_subject_label *next;
61036 +
61037 + struct acl_object_label **obj_hash;
61038 + __u32 obj_hash_size;
61039 + __u16 pax_flags;
61040 +};
61041 +
61042 +struct role_allowed_ip {
61043 + __u32 addr;
61044 + __u32 netmask;
61045 +
61046 + struct role_allowed_ip *prev;
61047 + struct role_allowed_ip *next;
61048 +};
61049 +
61050 +struct role_transition {
61051 + char *rolename;
61052 +
61053 + struct role_transition *prev;
61054 + struct role_transition *next;
61055 +};
61056 +
61057 +struct acl_role_label {
61058 + char *rolename;
61059 + uid_t uidgid;
61060 + __u16 roletype;
61061 +
61062 + __u16 auth_attempts;
61063 + unsigned long expires;
61064 +
61065 + struct acl_subject_label *root_label;
61066 + struct gr_hash_struct *hash;
61067 +
61068 + struct acl_role_label *prev;
61069 + struct acl_role_label *next;
61070 +
61071 + struct role_transition *transitions;
61072 + struct role_allowed_ip *allowed_ips;
61073 + uid_t *domain_children;
61074 + __u16 domain_child_num;
61075 +
61076 + struct acl_subject_label **subj_hash;
61077 + __u32 subj_hash_size;
61078 +};
61079 +
61080 +struct user_acl_role_db {
61081 + struct acl_role_label **r_table;
61082 + __u32 num_pointers; /* Number of allocations to track */
61083 + __u32 num_roles; /* Number of roles */
61084 + __u32 num_domain_children; /* Number of domain children */
61085 + __u32 num_subjects; /* Number of subjects */
61086 + __u32 num_objects; /* Number of objects */
61087 +};
61088 +
61089 +struct acl_object_label {
61090 + char *filename;
61091 + ino_t inode;
61092 + dev_t device;
61093 + __u32 mode;
61094 +
61095 + struct acl_subject_label *nested;
61096 + struct acl_object_label *globbed;
61097 +
61098 + /* next two structures not used */
61099 +
61100 + struct acl_object_label *prev;
61101 + struct acl_object_label *next;
61102 +};
61103 +
61104 +struct acl_ip_label {
61105 + char *iface;
61106 + __u32 addr;
61107 + __u32 netmask;
61108 + __u16 low, high;
61109 + __u8 mode;
61110 + __u32 type;
61111 + __u32 proto[8];
61112 +
61113 + /* next two structures not used */
61114 +
61115 + struct acl_ip_label *prev;
61116 + struct acl_ip_label *next;
61117 +};
61118 +
61119 +struct gr_arg {
61120 + struct user_acl_role_db role_db;
61121 + unsigned char pw[GR_PW_LEN];
61122 + unsigned char salt[GR_SALT_LEN];
61123 + unsigned char sum[GR_SHA_LEN];
61124 + unsigned char sp_role[GR_SPROLE_LEN];
61125 + struct sprole_pw *sprole_pws;
61126 + dev_t segv_device;
61127 + ino_t segv_inode;
61128 + uid_t segv_uid;
61129 + __u16 num_sprole_pws;
61130 + __u16 mode;
61131 +};
61132 +
61133 +struct gr_arg_wrapper {
61134 + struct gr_arg *arg;
61135 + __u32 version;
61136 + __u32 size;
61137 +};
61138 +
61139 +struct subject_map {
61140 + struct acl_subject_label *user;
61141 + struct acl_subject_label *kernel;
61142 + struct subject_map *prev;
61143 + struct subject_map *next;
61144 +};
61145 +
61146 +struct acl_subj_map_db {
61147 + struct subject_map **s_hash;
61148 + __u32 s_size;
61149 +};
61150 +
61151 +/* End Data Structures Section */
61152 +
61153 +/* Hash functions generated by empirical testing by Brad Spengler
61154 + Makes good use of the low bits of the inode. Generally 0-1 times
61155 + in loop for successful match. 0-3 for unsuccessful match.
61156 + Shift/add algorithm with modulus of table size and an XOR*/
61157 +
61158 +static __inline__ unsigned int
61159 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61160 +{
61161 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
61162 +}
61163 +
61164 + static __inline__ unsigned int
61165 +shash(const struct acl_subject_label *userp, const unsigned int sz)
61166 +{
61167 + return ((const unsigned long)userp % sz);
61168 +}
61169 +
61170 +static __inline__ unsigned int
61171 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61172 +{
61173 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61174 +}
61175 +
61176 +static __inline__ unsigned int
61177 +nhash(const char *name, const __u16 len, const unsigned int sz)
61178 +{
61179 + return full_name_hash((const unsigned char *)name, len) % sz;
61180 +}
61181 +
61182 +#define FOR_EACH_ROLE_START(role) \
61183 + role = role_list; \
61184 + while (role) {
61185 +
61186 +#define FOR_EACH_ROLE_END(role) \
61187 + role = role->prev; \
61188 + }
61189 +
61190 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61191 + subj = NULL; \
61192 + iter = 0; \
61193 + while (iter < role->subj_hash_size) { \
61194 + if (subj == NULL) \
61195 + subj = role->subj_hash[iter]; \
61196 + if (subj == NULL) { \
61197 + iter++; \
61198 + continue; \
61199 + }
61200 +
61201 +#define FOR_EACH_SUBJECT_END(subj,iter) \
61202 + subj = subj->next; \
61203 + if (subj == NULL) \
61204 + iter++; \
61205 + }
61206 +
61207 +
61208 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61209 + subj = role->hash->first; \
61210 + while (subj != NULL) {
61211 +
61212 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61213 + subj = subj->next; \
61214 + }
61215 +
61216 +#endif
61217 +
61218 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61219 new file mode 100644
61220 index 0000000..323ecf2
61221 --- /dev/null
61222 +++ b/include/linux/gralloc.h
61223 @@ -0,0 +1,9 @@
61224 +#ifndef __GRALLOC_H
61225 +#define __GRALLOC_H
61226 +
61227 +void acl_free_all(void);
61228 +int acl_alloc_stack_init(unsigned long size);
61229 +void *acl_alloc(unsigned long len);
61230 +void *acl_alloc_num(unsigned long num, unsigned long len);
61231 +
61232 +#endif
61233 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61234 new file mode 100644
61235 index 0000000..b30e9bc
61236 --- /dev/null
61237 +++ b/include/linux/grdefs.h
61238 @@ -0,0 +1,140 @@
61239 +#ifndef GRDEFS_H
61240 +#define GRDEFS_H
61241 +
61242 +/* Begin grsecurity status declarations */
61243 +
61244 +enum {
61245 + GR_READY = 0x01,
61246 + GR_STATUS_INIT = 0x00 // disabled state
61247 +};
61248 +
61249 +/* Begin ACL declarations */
61250 +
61251 +/* Role flags */
61252 +
61253 +enum {
61254 + GR_ROLE_USER = 0x0001,
61255 + GR_ROLE_GROUP = 0x0002,
61256 + GR_ROLE_DEFAULT = 0x0004,
61257 + GR_ROLE_SPECIAL = 0x0008,
61258 + GR_ROLE_AUTH = 0x0010,
61259 + GR_ROLE_NOPW = 0x0020,
61260 + GR_ROLE_GOD = 0x0040,
61261 + GR_ROLE_LEARN = 0x0080,
61262 + GR_ROLE_TPE = 0x0100,
61263 + GR_ROLE_DOMAIN = 0x0200,
61264 + GR_ROLE_PAM = 0x0400,
61265 + GR_ROLE_PERSIST = 0x0800
61266 +};
61267 +
61268 +/* ACL Subject and Object mode flags */
61269 +enum {
61270 + GR_DELETED = 0x80000000
61271 +};
61272 +
61273 +/* ACL Object-only mode flags */
61274 +enum {
61275 + GR_READ = 0x00000001,
61276 + GR_APPEND = 0x00000002,
61277 + GR_WRITE = 0x00000004,
61278 + GR_EXEC = 0x00000008,
61279 + GR_FIND = 0x00000010,
61280 + GR_INHERIT = 0x00000020,
61281 + GR_SETID = 0x00000040,
61282 + GR_CREATE = 0x00000080,
61283 + GR_DELETE = 0x00000100,
61284 + GR_LINK = 0x00000200,
61285 + GR_AUDIT_READ = 0x00000400,
61286 + GR_AUDIT_APPEND = 0x00000800,
61287 + GR_AUDIT_WRITE = 0x00001000,
61288 + GR_AUDIT_EXEC = 0x00002000,
61289 + GR_AUDIT_FIND = 0x00004000,
61290 + GR_AUDIT_INHERIT= 0x00008000,
61291 + GR_AUDIT_SETID = 0x00010000,
61292 + GR_AUDIT_CREATE = 0x00020000,
61293 + GR_AUDIT_DELETE = 0x00040000,
61294 + GR_AUDIT_LINK = 0x00080000,
61295 + GR_PTRACERD = 0x00100000,
61296 + GR_NOPTRACE = 0x00200000,
61297 + GR_SUPPRESS = 0x00400000,
61298 + GR_NOLEARN = 0x00800000,
61299 + GR_INIT_TRANSFER= 0x01000000
61300 +};
61301 +
61302 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61303 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61304 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61305 +
61306 +/* ACL subject-only mode flags */
61307 +enum {
61308 + GR_KILL = 0x00000001,
61309 + GR_VIEW = 0x00000002,
61310 + GR_PROTECTED = 0x00000004,
61311 + GR_LEARN = 0x00000008,
61312 + GR_OVERRIDE = 0x00000010,
61313 + /* just a placeholder, this mode is only used in userspace */
61314 + GR_DUMMY = 0x00000020,
61315 + GR_PROTSHM = 0x00000040,
61316 + GR_KILLPROC = 0x00000080,
61317 + GR_KILLIPPROC = 0x00000100,
61318 + /* just a placeholder, this mode is only used in userspace */
61319 + GR_NOTROJAN = 0x00000200,
61320 + GR_PROTPROCFD = 0x00000400,
61321 + GR_PROCACCT = 0x00000800,
61322 + GR_RELAXPTRACE = 0x00001000,
61323 + GR_NESTED = 0x00002000,
61324 + GR_INHERITLEARN = 0x00004000,
61325 + GR_PROCFIND = 0x00008000,
61326 + GR_POVERRIDE = 0x00010000,
61327 + GR_KERNELAUTH = 0x00020000,
61328 + GR_ATSECURE = 0x00040000,
61329 + GR_SHMEXEC = 0x00080000
61330 +};
61331 +
61332 +enum {
61333 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61334 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61335 + GR_PAX_ENABLE_MPROTECT = 0x0004,
61336 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
61337 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61338 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61339 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61340 + GR_PAX_DISABLE_MPROTECT = 0x0400,
61341 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
61342 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61343 +};
61344 +
61345 +enum {
61346 + GR_ID_USER = 0x01,
61347 + GR_ID_GROUP = 0x02,
61348 +};
61349 +
61350 +enum {
61351 + GR_ID_ALLOW = 0x01,
61352 + GR_ID_DENY = 0x02,
61353 +};
61354 +
61355 +#define GR_CRASH_RES 31
61356 +#define GR_UIDTABLE_MAX 500
61357 +
61358 +/* begin resource learning section */
61359 +enum {
61360 + GR_RLIM_CPU_BUMP = 60,
61361 + GR_RLIM_FSIZE_BUMP = 50000,
61362 + GR_RLIM_DATA_BUMP = 10000,
61363 + GR_RLIM_STACK_BUMP = 1000,
61364 + GR_RLIM_CORE_BUMP = 10000,
61365 + GR_RLIM_RSS_BUMP = 500000,
61366 + GR_RLIM_NPROC_BUMP = 1,
61367 + GR_RLIM_NOFILE_BUMP = 5,
61368 + GR_RLIM_MEMLOCK_BUMP = 50000,
61369 + GR_RLIM_AS_BUMP = 500000,
61370 + GR_RLIM_LOCKS_BUMP = 2,
61371 + GR_RLIM_SIGPENDING_BUMP = 5,
61372 + GR_RLIM_MSGQUEUE_BUMP = 10000,
61373 + GR_RLIM_NICE_BUMP = 1,
61374 + GR_RLIM_RTPRIO_BUMP = 1,
61375 + GR_RLIM_RTTIME_BUMP = 1000000
61376 +};
61377 +
61378 +#endif
61379 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61380 new file mode 100644
61381 index 0000000..60cda84
61382 --- /dev/null
61383 +++ b/include/linux/grinternal.h
61384 @@ -0,0 +1,220 @@
61385 +#ifndef __GRINTERNAL_H
61386 +#define __GRINTERNAL_H
61387 +
61388 +#ifdef CONFIG_GRKERNSEC
61389 +
61390 +#include <linux/fs.h>
61391 +#include <linux/mnt_namespace.h>
61392 +#include <linux/nsproxy.h>
61393 +#include <linux/gracl.h>
61394 +#include <linux/grdefs.h>
61395 +#include <linux/grmsg.h>
61396 +
61397 +void gr_add_learn_entry(const char *fmt, ...)
61398 + __attribute__ ((format (printf, 1, 2)));
61399 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61400 + const struct vfsmount *mnt);
61401 +__u32 gr_check_create(const struct dentry *new_dentry,
61402 + const struct dentry *parent,
61403 + const struct vfsmount *mnt, const __u32 mode);
61404 +int gr_check_protected_task(const struct task_struct *task);
61405 +__u32 to_gr_audit(const __u32 reqmode);
61406 +int gr_set_acls(const int type);
61407 +int gr_apply_subject_to_task(struct task_struct *task);
61408 +int gr_acl_is_enabled(void);
61409 +char gr_roletype_to_char(void);
61410 +
61411 +void gr_handle_alertkill(struct task_struct *task);
61412 +char *gr_to_filename(const struct dentry *dentry,
61413 + const struct vfsmount *mnt);
61414 +char *gr_to_filename1(const struct dentry *dentry,
61415 + const struct vfsmount *mnt);
61416 +char *gr_to_filename2(const struct dentry *dentry,
61417 + const struct vfsmount *mnt);
61418 +char *gr_to_filename3(const struct dentry *dentry,
61419 + const struct vfsmount *mnt);
61420 +
61421 +extern int grsec_enable_harden_ptrace;
61422 +extern int grsec_enable_link;
61423 +extern int grsec_enable_fifo;
61424 +extern int grsec_enable_execve;
61425 +extern int grsec_enable_shm;
61426 +extern int grsec_enable_execlog;
61427 +extern int grsec_enable_signal;
61428 +extern int grsec_enable_audit_ptrace;
61429 +extern int grsec_enable_forkfail;
61430 +extern int grsec_enable_time;
61431 +extern int grsec_enable_rofs;
61432 +extern int grsec_enable_chroot_shmat;
61433 +extern int grsec_enable_chroot_mount;
61434 +extern int grsec_enable_chroot_double;
61435 +extern int grsec_enable_chroot_pivot;
61436 +extern int grsec_enable_chroot_chdir;
61437 +extern int grsec_enable_chroot_chmod;
61438 +extern int grsec_enable_chroot_mknod;
61439 +extern int grsec_enable_chroot_fchdir;
61440 +extern int grsec_enable_chroot_nice;
61441 +extern int grsec_enable_chroot_execlog;
61442 +extern int grsec_enable_chroot_caps;
61443 +extern int grsec_enable_chroot_sysctl;
61444 +extern int grsec_enable_chroot_unix;
61445 +extern int grsec_enable_tpe;
61446 +extern int grsec_tpe_gid;
61447 +extern int grsec_enable_tpe_all;
61448 +extern int grsec_enable_tpe_invert;
61449 +extern int grsec_enable_socket_all;
61450 +extern int grsec_socket_all_gid;
61451 +extern int grsec_enable_socket_client;
61452 +extern int grsec_socket_client_gid;
61453 +extern int grsec_enable_socket_server;
61454 +extern int grsec_socket_server_gid;
61455 +extern int grsec_audit_gid;
61456 +extern int grsec_enable_group;
61457 +extern int grsec_enable_audit_textrel;
61458 +extern int grsec_enable_log_rwxmaps;
61459 +extern int grsec_enable_mount;
61460 +extern int grsec_enable_chdir;
61461 +extern int grsec_resource_logging;
61462 +extern int grsec_enable_blackhole;
61463 +extern int grsec_lastack_retries;
61464 +extern int grsec_enable_brute;
61465 +extern int grsec_lock;
61466 +
61467 +extern spinlock_t grsec_alert_lock;
61468 +extern unsigned long grsec_alert_wtime;
61469 +extern unsigned long grsec_alert_fyet;
61470 +
61471 +extern spinlock_t grsec_audit_lock;
61472 +
61473 +extern rwlock_t grsec_exec_file_lock;
61474 +
61475 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61476 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61477 + (tsk)->exec_file->f_vfsmnt) : "/")
61478 +
61479 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61480 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61481 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61482 +
61483 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61484 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61485 + (tsk)->exec_file->f_vfsmnt) : "/")
61486 +
61487 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61488 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61489 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61490 +
61491 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61492 +
61493 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61494 +
61495 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61496 + (task)->pid, (cred)->uid, \
61497 + (cred)->euid, (cred)->gid, (cred)->egid, \
61498 + gr_parent_task_fullpath(task), \
61499 + (task)->real_parent->comm, (task)->real_parent->pid, \
61500 + (pcred)->uid, (pcred)->euid, \
61501 + (pcred)->gid, (pcred)->egid
61502 +
61503 +#define GR_CHROOT_CAPS {{ \
61504 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61505 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61506 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61507 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61508 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61509 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61510 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61511 +
61512 +#define security_learn(normal_msg,args...) \
61513 +({ \
61514 + read_lock(&grsec_exec_file_lock); \
61515 + gr_add_learn_entry(normal_msg "\n", ## args); \
61516 + read_unlock(&grsec_exec_file_lock); \
61517 +})
61518 +
61519 +enum {
61520 + GR_DO_AUDIT,
61521 + GR_DONT_AUDIT,
61522 + /* used for non-audit messages that we shouldn't kill the task on */
61523 + GR_DONT_AUDIT_GOOD
61524 +};
61525 +
61526 +enum {
61527 + GR_TTYSNIFF,
61528 + GR_RBAC,
61529 + GR_RBAC_STR,
61530 + GR_STR_RBAC,
61531 + GR_RBAC_MODE2,
61532 + GR_RBAC_MODE3,
61533 + GR_FILENAME,
61534 + GR_SYSCTL_HIDDEN,
61535 + GR_NOARGS,
61536 + GR_ONE_INT,
61537 + GR_ONE_INT_TWO_STR,
61538 + GR_ONE_STR,
61539 + GR_STR_INT,
61540 + GR_TWO_STR_INT,
61541 + GR_TWO_INT,
61542 + GR_TWO_U64,
61543 + GR_THREE_INT,
61544 + GR_FIVE_INT_TWO_STR,
61545 + GR_TWO_STR,
61546 + GR_THREE_STR,
61547 + GR_FOUR_STR,
61548 + GR_STR_FILENAME,
61549 + GR_FILENAME_STR,
61550 + GR_FILENAME_TWO_INT,
61551 + GR_FILENAME_TWO_INT_STR,
61552 + GR_TEXTREL,
61553 + GR_PTRACE,
61554 + GR_RESOURCE,
61555 + GR_CAP,
61556 + GR_SIG,
61557 + GR_SIG2,
61558 + GR_CRASH1,
61559 + GR_CRASH2,
61560 + GR_PSACCT,
61561 + GR_RWXMAP
61562 +};
61563 +
61564 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61565 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61566 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61567 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61568 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61569 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61570 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61571 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61572 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61573 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61574 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61575 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61576 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61577 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61578 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61579 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61580 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61581 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61582 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61583 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61584 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61585 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61586 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61587 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61588 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61589 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61590 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61591 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61592 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61593 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61594 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61595 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61596 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61597 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61598 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61599 +
61600 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61601 +
61602 +#endif
61603 +
61604 +#endif
61605 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61606 new file mode 100644
61607 index 0000000..9d5fd4a
61608 --- /dev/null
61609 +++ b/include/linux/grmsg.h
61610 @@ -0,0 +1,108 @@
61611 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61612 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61613 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61614 +#define GR_STOPMOD_MSG "denied modification of module state by "
61615 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61616 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61617 +#define GR_IOPERM_MSG "denied use of ioperm() by "
61618 +#define GR_IOPL_MSG "denied use of iopl() by "
61619 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61620 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61621 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61622 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61623 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61624 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61625 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61626 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61627 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61628 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61629 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61630 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61631 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61632 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61633 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61634 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61635 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61636 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61637 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61638 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61639 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61640 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61641 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61642 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61643 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61644 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61645 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
61646 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61647 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61648 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61649 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61650 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61651 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61652 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61653 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61654 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
61655 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61656 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61657 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61658 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61659 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61660 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61661 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61662 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61663 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
61664 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61665 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61666 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61667 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61668 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61669 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61670 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61671 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61672 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61673 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61674 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61675 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61676 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61677 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61678 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61679 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61680 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61681 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61682 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61683 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
61684 +#define GR_NICE_CHROOT_MSG "denied priority change by "
61685 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61686 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61687 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61688 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61689 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61690 +#define GR_TIME_MSG "time set by "
61691 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61692 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61693 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61694 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61695 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61696 +#define GR_BIND_MSG "denied bind() by "
61697 +#define GR_CONNECT_MSG "denied connect() by "
61698 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61699 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61700 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61701 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61702 +#define GR_CAP_ACL_MSG "use of %s denied for "
61703 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61704 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61705 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61706 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61707 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61708 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61709 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61710 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61711 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61712 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61713 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61714 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61715 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61716 +#define GR_VM86_MSG "denied use of vm86 by "
61717 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61718 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61719 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61720 new file mode 100644
61721 index 0000000..4620f36
61722 --- /dev/null
61723 +++ b/include/linux/grsecurity.h
61724 @@ -0,0 +1,231 @@
61725 +#ifndef GR_SECURITY_H
61726 +#define GR_SECURITY_H
61727 +#include <linux/fs.h>
61728 +#include <linux/fs_struct.h>
61729 +#include <linux/binfmts.h>
61730 +#include <linux/gracl.h>
61731 +
61732 +/* notify of brain-dead configs */
61733 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61734 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61735 +#endif
61736 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61737 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61738 +#endif
61739 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61740 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61741 +#endif
61742 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61743 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61744 +#endif
61745 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61746 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61747 +#endif
61748 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61749 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
61750 +#endif
61751 +
61752 +#include <linux/compat.h>
61753 +
61754 +struct user_arg_ptr {
61755 +#ifdef CONFIG_COMPAT
61756 + bool is_compat;
61757 +#endif
61758 + union {
61759 + const char __user *const __user *native;
61760 +#ifdef CONFIG_COMPAT
61761 + compat_uptr_t __user *compat;
61762 +#endif
61763 + } ptr;
61764 +};
61765 +
61766 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61767 +void gr_handle_brute_check(void);
61768 +void gr_handle_kernel_exploit(void);
61769 +int gr_process_user_ban(void);
61770 +
61771 +char gr_roletype_to_char(void);
61772 +
61773 +int gr_acl_enable_at_secure(void);
61774 +
61775 +int gr_check_user_change(int real, int effective, int fs);
61776 +int gr_check_group_change(int real, int effective, int fs);
61777 +
61778 +void gr_del_task_from_ip_table(struct task_struct *p);
61779 +
61780 +int gr_pid_is_chrooted(struct task_struct *p);
61781 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61782 +int gr_handle_chroot_nice(void);
61783 +int gr_handle_chroot_sysctl(const int op);
61784 +int gr_handle_chroot_setpriority(struct task_struct *p,
61785 + const int niceval);
61786 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61787 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61788 + const struct vfsmount *mnt);
61789 +void gr_handle_chroot_chdir(struct path *path);
61790 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61791 + const struct vfsmount *mnt, const int mode);
61792 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61793 + const struct vfsmount *mnt, const int mode);
61794 +int gr_handle_chroot_mount(const struct dentry *dentry,
61795 + const struct vfsmount *mnt,
61796 + const char *dev_name);
61797 +int gr_handle_chroot_pivot(void);
61798 +int gr_handle_chroot_unix(const pid_t pid);
61799 +
61800 +int gr_handle_rawio(const struct inode *inode);
61801 +
61802 +void gr_handle_ioperm(void);
61803 +void gr_handle_iopl(void);
61804 +
61805 +int gr_tpe_allow(const struct file *file);
61806 +
61807 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61808 +void gr_clear_chroot_entries(struct task_struct *task);
61809 +
61810 +void gr_log_forkfail(const int retval);
61811 +void gr_log_timechange(void);
61812 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61813 +void gr_log_chdir(const struct dentry *dentry,
61814 + const struct vfsmount *mnt);
61815 +void gr_log_chroot_exec(const struct dentry *dentry,
61816 + const struct vfsmount *mnt);
61817 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61818 +void gr_log_remount(const char *devname, const int retval);
61819 +void gr_log_unmount(const char *devname, const int retval);
61820 +void gr_log_mount(const char *from, const char *to, const int retval);
61821 +void gr_log_textrel(struct vm_area_struct *vma);
61822 +void gr_log_rwxmmap(struct file *file);
61823 +void gr_log_rwxmprotect(struct file *file);
61824 +
61825 +int gr_handle_follow_link(const struct inode *parent,
61826 + const struct inode *inode,
61827 + const struct dentry *dentry,
61828 + const struct vfsmount *mnt);
61829 +int gr_handle_fifo(const struct dentry *dentry,
61830 + const struct vfsmount *mnt,
61831 + const struct dentry *dir, const int flag,
61832 + const int acc_mode);
61833 +int gr_handle_hardlink(const struct dentry *dentry,
61834 + const struct vfsmount *mnt,
61835 + struct inode *inode,
61836 + const int mode, const char *to);
61837 +
61838 +int gr_is_capable(const int cap);
61839 +int gr_is_capable_nolog(const int cap);
61840 +void gr_learn_resource(const struct task_struct *task, const int limit,
61841 + const unsigned long wanted, const int gt);
61842 +void gr_copy_label(struct task_struct *tsk);
61843 +void gr_handle_crash(struct task_struct *task, const int sig);
61844 +int gr_handle_signal(const struct task_struct *p, const int sig);
61845 +int gr_check_crash_uid(const uid_t uid);
61846 +int gr_check_protected_task(const struct task_struct *task);
61847 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61848 +int gr_acl_handle_mmap(const struct file *file,
61849 + const unsigned long prot);
61850 +int gr_acl_handle_mprotect(const struct file *file,
61851 + const unsigned long prot);
61852 +int gr_check_hidden_task(const struct task_struct *tsk);
61853 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61854 + const struct vfsmount *mnt);
61855 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61856 + const struct vfsmount *mnt);
61857 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61858 + const struct vfsmount *mnt, const int fmode);
61859 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
61860 + const struct vfsmount *mnt, mode_t mode);
61861 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61862 + const struct vfsmount *mnt, mode_t mode);
61863 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61864 + const struct vfsmount *mnt);
61865 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61866 + const struct vfsmount *mnt);
61867 +int gr_handle_ptrace(struct task_struct *task, const long request);
61868 +int gr_handle_proc_ptrace(struct task_struct *task);
61869 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61870 + const struct vfsmount *mnt);
61871 +int gr_check_crash_exec(const struct file *filp);
61872 +int gr_acl_is_enabled(void);
61873 +void gr_set_kernel_label(struct task_struct *task);
61874 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61875 + const gid_t gid);
61876 +int gr_set_proc_label(const struct dentry *dentry,
61877 + const struct vfsmount *mnt,
61878 + const int unsafe_share);
61879 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61880 + const struct vfsmount *mnt);
61881 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61882 + const struct vfsmount *mnt, int acc_mode);
61883 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61884 + const struct dentry *p_dentry,
61885 + const struct vfsmount *p_mnt,
61886 + int open_flags, int acc_mode, const int imode);
61887 +void gr_handle_create(const struct dentry *dentry,
61888 + const struct vfsmount *mnt);
61889 +void gr_handle_proc_create(const struct dentry *dentry,
61890 + const struct inode *inode);
61891 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61892 + const struct dentry *parent_dentry,
61893 + const struct vfsmount *parent_mnt,
61894 + const int mode);
61895 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61896 + const struct dentry *parent_dentry,
61897 + const struct vfsmount *parent_mnt);
61898 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61899 + const struct vfsmount *mnt);
61900 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61901 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61902 + const struct vfsmount *mnt);
61903 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61904 + const struct dentry *parent_dentry,
61905 + const struct vfsmount *parent_mnt,
61906 + const char *from);
61907 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61908 + const struct dentry *parent_dentry,
61909 + const struct vfsmount *parent_mnt,
61910 + const struct dentry *old_dentry,
61911 + const struct vfsmount *old_mnt, const char *to);
61912 +int gr_acl_handle_rename(struct dentry *new_dentry,
61913 + struct dentry *parent_dentry,
61914 + const struct vfsmount *parent_mnt,
61915 + struct dentry *old_dentry,
61916 + struct inode *old_parent_inode,
61917 + struct vfsmount *old_mnt, const char *newname);
61918 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61919 + struct dentry *old_dentry,
61920 + struct dentry *new_dentry,
61921 + struct vfsmount *mnt, const __u8 replace);
61922 +__u32 gr_check_link(const struct dentry *new_dentry,
61923 + const struct dentry *parent_dentry,
61924 + const struct vfsmount *parent_mnt,
61925 + const struct dentry *old_dentry,
61926 + const struct vfsmount *old_mnt);
61927 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61928 + const unsigned int namelen, const ino_t ino);
61929 +
61930 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61931 + const struct vfsmount *mnt);
61932 +void gr_acl_handle_exit(void);
61933 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61934 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61935 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61936 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61937 +void gr_audit_ptrace(struct task_struct *task);
61938 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61939 +
61940 +#ifdef CONFIG_GRKERNSEC
61941 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61942 +void gr_handle_vm86(void);
61943 +void gr_handle_mem_readwrite(u64 from, u64 to);
61944 +
61945 +extern int grsec_enable_dmesg;
61946 +extern int grsec_disable_privio;
61947 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61948 +extern int grsec_enable_chroot_findtask;
61949 +#endif
61950 +#ifdef CONFIG_GRKERNSEC_SETXID
61951 +extern int grsec_enable_setxid;
61952 +#endif
61953 +#endif
61954 +
61955 +#endif
61956 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61957 new file mode 100644
61958 index 0000000..e7ffaaf
61959 --- /dev/null
61960 +++ b/include/linux/grsock.h
61961 @@ -0,0 +1,19 @@
61962 +#ifndef __GRSOCK_H
61963 +#define __GRSOCK_H
61964 +
61965 +extern void gr_attach_curr_ip(const struct sock *sk);
61966 +extern int gr_handle_sock_all(const int family, const int type,
61967 + const int protocol);
61968 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61969 +extern int gr_handle_sock_server_other(const struct sock *sck);
61970 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61971 +extern int gr_search_connect(struct socket * sock,
61972 + struct sockaddr_in * addr);
61973 +extern int gr_search_bind(struct socket * sock,
61974 + struct sockaddr_in * addr);
61975 +extern int gr_search_listen(struct socket * sock);
61976 +extern int gr_search_accept(struct socket * sock);
61977 +extern int gr_search_socket(const int domain, const int type,
61978 + const int protocol);
61979 +
61980 +#endif
61981 diff --git a/include/linux/hid.h b/include/linux/hid.h
61982 index 9cf8e7a..5ec94d0 100644
61983 --- a/include/linux/hid.h
61984 +++ b/include/linux/hid.h
61985 @@ -676,7 +676,7 @@ struct hid_ll_driver {
61986 unsigned int code, int value);
61987
61988 int (*parse)(struct hid_device *hdev);
61989 -};
61990 +} __no_const;
61991
61992 #define PM_HINT_FULLON 1<<5
61993 #define PM_HINT_NORMAL 1<<1
61994 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61995 index 3a93f73..b19d0b3 100644
61996 --- a/include/linux/highmem.h
61997 +++ b/include/linux/highmem.h
61998 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61999 kunmap_atomic(kaddr, KM_USER0);
62000 }
62001
62002 +static inline void sanitize_highpage(struct page *page)
62003 +{
62004 + void *kaddr;
62005 + unsigned long flags;
62006 +
62007 + local_irq_save(flags);
62008 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
62009 + clear_page(kaddr);
62010 + kunmap_atomic(kaddr, KM_CLEARPAGE);
62011 + local_irq_restore(flags);
62012 +}
62013 +
62014 static inline void zero_user_segments(struct page *page,
62015 unsigned start1, unsigned end1,
62016 unsigned start2, unsigned end2)
62017 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
62018 index a6c652e..1f5878f 100644
62019 --- a/include/linux/i2c.h
62020 +++ b/include/linux/i2c.h
62021 @@ -346,6 +346,7 @@ struct i2c_algorithm {
62022 /* To determine what the adapter supports */
62023 u32 (*functionality) (struct i2c_adapter *);
62024 };
62025 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62026
62027 /*
62028 * i2c_adapter is the structure used to identify a physical i2c bus along
62029 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62030 index a6deef4..c56a7f2 100644
62031 --- a/include/linux/i2o.h
62032 +++ b/include/linux/i2o.h
62033 @@ -564,7 +564,7 @@ struct i2o_controller {
62034 struct i2o_device *exec; /* Executive */
62035 #if BITS_PER_LONG == 64
62036 spinlock_t context_list_lock; /* lock for context_list */
62037 - atomic_t context_list_counter; /* needed for unique contexts */
62038 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62039 struct list_head context_list; /* list of context id's
62040 and pointers */
62041 #endif
62042 diff --git a/include/linux/init.h b/include/linux/init.h
62043 index 9146f39..885354d 100644
62044 --- a/include/linux/init.h
62045 +++ b/include/linux/init.h
62046 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
62047
62048 /* Each module must use one module_init(). */
62049 #define module_init(initfn) \
62050 - static inline initcall_t __inittest(void) \
62051 + static inline __used initcall_t __inittest(void) \
62052 { return initfn; } \
62053 int init_module(void) __attribute__((alias(#initfn)));
62054
62055 /* This is only required if you want to be unloadable. */
62056 #define module_exit(exitfn) \
62057 - static inline exitcall_t __exittest(void) \
62058 + static inline __used exitcall_t __exittest(void) \
62059 { return exitfn; } \
62060 void cleanup_module(void) __attribute__((alias(#exitfn)));
62061
62062 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
62063 index d14e058..4162929 100644
62064 --- a/include/linux/init_task.h
62065 +++ b/include/linux/init_task.h
62066 @@ -126,6 +126,12 @@ extern struct cred init_cred;
62067 # define INIT_PERF_EVENTS(tsk)
62068 #endif
62069
62070 +#ifdef CONFIG_X86
62071 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62072 +#else
62073 +#define INIT_TASK_THREAD_INFO
62074 +#endif
62075 +
62076 /*
62077 * INIT_TASK is used to set up the first task table, touch at
62078 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62079 @@ -164,6 +170,7 @@ extern struct cred init_cred;
62080 RCU_INIT_POINTER(.cred, &init_cred), \
62081 .comm = "swapper", \
62082 .thread = INIT_THREAD, \
62083 + INIT_TASK_THREAD_INFO \
62084 .fs = &init_fs, \
62085 .files = &init_files, \
62086 .signal = &init_signals, \
62087 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
62088 index 9310c69..6ebb244 100644
62089 --- a/include/linux/intel-iommu.h
62090 +++ b/include/linux/intel-iommu.h
62091 @@ -296,7 +296,7 @@ struct iommu_flush {
62092 u8 fm, u64 type);
62093 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62094 unsigned int size_order, u64 type);
62095 -};
62096 +} __no_const;
62097
62098 enum {
62099 SR_DMAR_FECTL_REG,
62100 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62101 index f51a81b..adfcb44 100644
62102 --- a/include/linux/interrupt.h
62103 +++ b/include/linux/interrupt.h
62104 @@ -425,7 +425,7 @@ enum
62105 /* map softirq index to softirq name. update 'softirq_to_name' in
62106 * kernel/softirq.c when adding a new softirq.
62107 */
62108 -extern char *softirq_to_name[NR_SOFTIRQS];
62109 +extern const char * const softirq_to_name[NR_SOFTIRQS];
62110
62111 /* softirq mask and active fields moved to irq_cpustat_t in
62112 * asm/hardirq.h to get better cache usage. KAO
62113 @@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62114
62115 struct softirq_action
62116 {
62117 - void (*action)(struct softirq_action *);
62118 + void (*action)(void);
62119 };
62120
62121 asmlinkage void do_softirq(void);
62122 asmlinkage void __do_softirq(void);
62123 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62124 +extern void open_softirq(int nr, void (*action)(void));
62125 extern void softirq_init(void);
62126 static inline void __raise_softirq_irqoff(unsigned int nr)
62127 {
62128 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62129 index 0df513b..fe901a2 100644
62130 --- a/include/linux/kallsyms.h
62131 +++ b/include/linux/kallsyms.h
62132 @@ -15,7 +15,8 @@
62133
62134 struct module;
62135
62136 -#ifdef CONFIG_KALLSYMS
62137 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62138 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62139 /* Lookup the address for a symbol. Returns 0 if not found. */
62140 unsigned long kallsyms_lookup_name(const char *name);
62141
62142 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62143 /* Stupid that this does nothing, but I didn't create this mess. */
62144 #define __print_symbol(fmt, addr)
62145 #endif /*CONFIG_KALLSYMS*/
62146 +#else /* when included by kallsyms.c, vsnprintf.c, or
62147 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62148 +extern void __print_symbol(const char *fmt, unsigned long address);
62149 +extern int sprint_backtrace(char *buffer, unsigned long address);
62150 +extern int sprint_symbol(char *buffer, unsigned long address);
62151 +const char *kallsyms_lookup(unsigned long addr,
62152 + unsigned long *symbolsize,
62153 + unsigned long *offset,
62154 + char **modname, char *namebuf);
62155 +#endif
62156
62157 /* This macro allows us to keep printk typechecking */
62158 static void __check_printsym_format(const char *fmt, ...)
62159 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62160 index fa39183..40160be 100644
62161 --- a/include/linux/kgdb.h
62162 +++ b/include/linux/kgdb.h
62163 @@ -53,7 +53,7 @@ extern int kgdb_connected;
62164 extern int kgdb_io_module_registered;
62165
62166 extern atomic_t kgdb_setting_breakpoint;
62167 -extern atomic_t kgdb_cpu_doing_single_step;
62168 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62169
62170 extern struct task_struct *kgdb_usethread;
62171 extern struct task_struct *kgdb_contthread;
62172 @@ -251,7 +251,7 @@ struct kgdb_arch {
62173 void (*disable_hw_break)(struct pt_regs *regs);
62174 void (*remove_all_hw_break)(void);
62175 void (*correct_hw_break)(void);
62176 -};
62177 +} __do_const;
62178
62179 /**
62180 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62181 @@ -276,7 +276,7 @@ struct kgdb_io {
62182 void (*pre_exception) (void);
62183 void (*post_exception) (void);
62184 int is_console;
62185 -};
62186 +} __do_const;
62187
62188 extern struct kgdb_arch arch_kgdb_ops;
62189
62190 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62191 index 0da38cf..d23f05f 100644
62192 --- a/include/linux/kmod.h
62193 +++ b/include/linux/kmod.h
62194 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62195 * usually useless though. */
62196 extern int __request_module(bool wait, const char *name, ...) \
62197 __attribute__((format(printf, 2, 3)));
62198 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
62199 + __attribute__((format(printf, 3, 4)));
62200 #define request_module(mod...) __request_module(true, mod)
62201 #define request_module_nowait(mod...) __request_module(false, mod)
62202 #define try_then_request_module(x, mod...) \
62203 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62204 index eabb21a..3f030f4 100644
62205 --- a/include/linux/kvm_host.h
62206 +++ b/include/linux/kvm_host.h
62207 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62208 void vcpu_load(struct kvm_vcpu *vcpu);
62209 void vcpu_put(struct kvm_vcpu *vcpu);
62210
62211 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62212 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62213 struct module *module);
62214 void kvm_exit(void);
62215
62216 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62217 struct kvm_guest_debug *dbg);
62218 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62219
62220 -int kvm_arch_init(void *opaque);
62221 +int kvm_arch_init(const void *opaque);
62222 void kvm_arch_exit(void);
62223
62224 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62225 diff --git a/include/linux/libata.h b/include/linux/libata.h
62226 index efd6f98..5f5fd37 100644
62227 --- a/include/linux/libata.h
62228 +++ b/include/linux/libata.h
62229 @@ -909,7 +909,7 @@ struct ata_port_operations {
62230 * fields must be pointers.
62231 */
62232 const struct ata_port_operations *inherits;
62233 -};
62234 +} __do_const;
62235
62236 struct ata_port_info {
62237 unsigned long flags;
62238 diff --git a/include/linux/mca.h b/include/linux/mca.h
62239 index 3797270..7765ede 100644
62240 --- a/include/linux/mca.h
62241 +++ b/include/linux/mca.h
62242 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62243 int region);
62244 void * (*mca_transform_memory)(struct mca_device *,
62245 void *memory);
62246 -};
62247 +} __no_const;
62248
62249 struct mca_bus {
62250 u64 default_dma_mask;
62251 diff --git a/include/linux/memory.h b/include/linux/memory.h
62252 index 935699b..11042cc 100644
62253 --- a/include/linux/memory.h
62254 +++ b/include/linux/memory.h
62255 @@ -144,7 +144,7 @@ struct memory_accessor {
62256 size_t count);
62257 ssize_t (*write)(struct memory_accessor *, const char *buf,
62258 off_t offset, size_t count);
62259 -};
62260 +} __no_const;
62261
62262 /*
62263 * Kernel text modification mutex, used for code patching. Users of this lock
62264 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62265 index 896b5e4..1159ad0 100644
62266 --- a/include/linux/mfd/abx500.h
62267 +++ b/include/linux/mfd/abx500.h
62268 @@ -234,6 +234,7 @@ struct abx500_ops {
62269 int (*event_registers_startup_state_get) (struct device *, u8 *);
62270 int (*startup_irq_enabled) (struct device *, unsigned int);
62271 };
62272 +typedef struct abx500_ops __no_const abx500_ops_no_const;
62273
62274 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62275 void abx500_remove_ops(struct device *dev);
62276 diff --git a/include/linux/mm.h b/include/linux/mm.h
62277 index fedc5f0..7cedb6d 100644
62278 --- a/include/linux/mm.h
62279 +++ b/include/linux/mm.h
62280 @@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void *objp);
62281
62282 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62283 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62284 +
62285 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62286 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62287 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62288 +#else
62289 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62290 +#endif
62291 +
62292 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62293 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62294
62295 @@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
62296 int set_page_dirty_lock(struct page *page);
62297 int clear_page_dirty_for_io(struct page *page);
62298
62299 -/* Is the vma a continuation of the stack vma above it? */
62300 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62301 -{
62302 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62303 -}
62304 -
62305 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
62306 - unsigned long addr)
62307 -{
62308 - return (vma->vm_flags & VM_GROWSDOWN) &&
62309 - (vma->vm_start == addr) &&
62310 - !vma_growsdown(vma->vm_prev, addr);
62311 -}
62312 -
62313 -/* Is the vma a continuation of the stack vma below it? */
62314 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62315 -{
62316 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62317 -}
62318 -
62319 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
62320 - unsigned long addr)
62321 -{
62322 - return (vma->vm_flags & VM_GROWSUP) &&
62323 - (vma->vm_end == addr) &&
62324 - !vma_growsup(vma->vm_next, addr);
62325 -}
62326 -
62327 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62328 unsigned long old_addr, struct vm_area_struct *new_vma,
62329 unsigned long new_addr, unsigned long len);
62330 @@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
62331 }
62332 #endif
62333
62334 +#ifdef CONFIG_MMU
62335 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62336 +#else
62337 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62338 +{
62339 + return __pgprot(0);
62340 +}
62341 +#endif
62342 +
62343 int vma_wants_writenotify(struct vm_area_struct *vma);
62344
62345 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62346 @@ -1417,6 +1405,7 @@ out:
62347 }
62348
62349 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62350 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62351
62352 extern unsigned long do_brk(unsigned long, unsigned long);
62353
62354 @@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62355 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62356 struct vm_area_struct **pprev);
62357
62358 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62359 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62360 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62361 +
62362 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62363 NULL if none. Assume start_addr < end_addr. */
62364 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62365 @@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
62366 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
62367 }
62368
62369 -#ifdef CONFIG_MMU
62370 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
62371 -#else
62372 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62373 -{
62374 - return __pgprot(0);
62375 -}
62376 -#endif
62377 -
62378 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62379 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62380 unsigned long pfn, unsigned long size, pgprot_t);
62381 @@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long pfn);
62382 extern int sysctl_memory_failure_early_kill;
62383 extern int sysctl_memory_failure_recovery;
62384 extern void shake_page(struct page *p, int access);
62385 -extern atomic_long_t mce_bad_pages;
62386 +extern atomic_long_unchecked_t mce_bad_pages;
62387 extern int soft_offline_page(struct page *page, int flags);
62388
62389 extern void dump_page(struct page *page);
62390 @@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
62391 unsigned int pages_per_huge_page);
62392 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
62393
62394 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62395 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62396 +#else
62397 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62398 +#endif
62399 +
62400 #endif /* __KERNEL__ */
62401 #endif /* _LINUX_MM_H */
62402 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62403 index 10a2f62..d655142 100644
62404 --- a/include/linux/mm_types.h
62405 +++ b/include/linux/mm_types.h
62406 @@ -230,6 +230,8 @@ struct vm_area_struct {
62407 #ifdef CONFIG_NUMA
62408 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62409 #endif
62410 +
62411 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62412 };
62413
62414 struct core_thread {
62415 @@ -362,6 +364,24 @@ struct mm_struct {
62416 #ifdef CONFIG_CPUMASK_OFFSTACK
62417 struct cpumask cpumask_allocation;
62418 #endif
62419 +
62420 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62421 + unsigned long pax_flags;
62422 +#endif
62423 +
62424 +#ifdef CONFIG_PAX_DLRESOLVE
62425 + unsigned long call_dl_resolve;
62426 +#endif
62427 +
62428 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62429 + unsigned long call_syscall;
62430 +#endif
62431 +
62432 +#ifdef CONFIG_PAX_ASLR
62433 + unsigned long delta_mmap; /* randomized offset */
62434 + unsigned long delta_stack; /* randomized offset */
62435 +#endif
62436 +
62437 };
62438
62439 static inline void mm_init_cpumask(struct mm_struct *mm)
62440 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62441 index 1d1b1e1..2a13c78 100644
62442 --- a/include/linux/mmu_notifier.h
62443 +++ b/include/linux/mmu_notifier.h
62444 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62445 */
62446 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62447 ({ \
62448 - pte_t __pte; \
62449 + pte_t ___pte; \
62450 struct vm_area_struct *___vma = __vma; \
62451 unsigned long ___address = __address; \
62452 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62453 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62454 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62455 - __pte; \
62456 + ___pte; \
62457 })
62458
62459 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62460 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62461 index be1ac8d..26868ce 100644
62462 --- a/include/linux/mmzone.h
62463 +++ b/include/linux/mmzone.h
62464 @@ -356,7 +356,7 @@ struct zone {
62465 unsigned long flags; /* zone flags, see below */
62466
62467 /* Zone statistics */
62468 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62469 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62470
62471 /*
62472 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62473 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62474 index ae28e93..1ac2233 100644
62475 --- a/include/linux/mod_devicetable.h
62476 +++ b/include/linux/mod_devicetable.h
62477 @@ -12,7 +12,7 @@
62478 typedef unsigned long kernel_ulong_t;
62479 #endif
62480
62481 -#define PCI_ANY_ID (~0)
62482 +#define PCI_ANY_ID ((__u16)~0)
62483
62484 struct pci_device_id {
62485 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62486 @@ -131,7 +131,7 @@ struct usb_device_id {
62487 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62488 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62489
62490 -#define HID_ANY_ID (~0)
62491 +#define HID_ANY_ID (~0U)
62492
62493 struct hid_device_id {
62494 __u16 bus;
62495 diff --git a/include/linux/module.h b/include/linux/module.h
62496 index 1c30087..fc2a442 100644
62497 --- a/include/linux/module.h
62498 +++ b/include/linux/module.h
62499 @@ -16,6 +16,7 @@
62500 #include <linux/kobject.h>
62501 #include <linux/moduleparam.h>
62502 #include <linux/tracepoint.h>
62503 +#include <linux/fs.h>
62504
62505 #include <linux/percpu.h>
62506 #include <asm/module.h>
62507 @@ -327,19 +328,16 @@ struct module
62508 int (*init)(void);
62509
62510 /* If this is non-NULL, vfree after init() returns */
62511 - void *module_init;
62512 + void *module_init_rx, *module_init_rw;
62513
62514 /* Here is the actual code + data, vfree'd on unload. */
62515 - void *module_core;
62516 + void *module_core_rx, *module_core_rw;
62517
62518 /* Here are the sizes of the init and core sections */
62519 - unsigned int init_size, core_size;
62520 + unsigned int init_size_rw, core_size_rw;
62521
62522 /* The size of the executable code in each section. */
62523 - unsigned int init_text_size, core_text_size;
62524 -
62525 - /* Size of RO sections of the module (text+rodata) */
62526 - unsigned int init_ro_size, core_ro_size;
62527 + unsigned int init_size_rx, core_size_rx;
62528
62529 /* Arch-specific module values */
62530 struct mod_arch_specific arch;
62531 @@ -395,6 +393,10 @@ struct module
62532 #ifdef CONFIG_EVENT_TRACING
62533 struct ftrace_event_call **trace_events;
62534 unsigned int num_trace_events;
62535 + struct file_operations trace_id;
62536 + struct file_operations trace_enable;
62537 + struct file_operations trace_format;
62538 + struct file_operations trace_filter;
62539 #endif
62540 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62541 unsigned int num_ftrace_callsites;
62542 @@ -445,16 +447,46 @@ bool is_module_address(unsigned long addr);
62543 bool is_module_percpu_address(unsigned long addr);
62544 bool is_module_text_address(unsigned long addr);
62545
62546 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62547 +{
62548 +
62549 +#ifdef CONFIG_PAX_KERNEXEC
62550 + if (ktla_ktva(addr) >= (unsigned long)start &&
62551 + ktla_ktva(addr) < (unsigned long)start + size)
62552 + return 1;
62553 +#endif
62554 +
62555 + return ((void *)addr >= start && (void *)addr < start + size);
62556 +}
62557 +
62558 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62559 +{
62560 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62561 +}
62562 +
62563 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62564 +{
62565 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62566 +}
62567 +
62568 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62569 +{
62570 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62571 +}
62572 +
62573 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62574 +{
62575 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62576 +}
62577 +
62578 static inline int within_module_core(unsigned long addr, struct module *mod)
62579 {
62580 - return (unsigned long)mod->module_core <= addr &&
62581 - addr < (unsigned long)mod->module_core + mod->core_size;
62582 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62583 }
62584
62585 static inline int within_module_init(unsigned long addr, struct module *mod)
62586 {
62587 - return (unsigned long)mod->module_init <= addr &&
62588 - addr < (unsigned long)mod->module_init + mod->init_size;
62589 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62590 }
62591
62592 /* Search for module by name: must hold module_mutex. */
62593 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62594 index b2be02e..6a9fdb1 100644
62595 --- a/include/linux/moduleloader.h
62596 +++ b/include/linux/moduleloader.h
62597 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62598 sections. Returns NULL on failure. */
62599 void *module_alloc(unsigned long size);
62600
62601 +#ifdef CONFIG_PAX_KERNEXEC
62602 +void *module_alloc_exec(unsigned long size);
62603 +#else
62604 +#define module_alloc_exec(x) module_alloc(x)
62605 +#endif
62606 +
62607 /* Free memory returned from module_alloc. */
62608 void module_free(struct module *mod, void *module_region);
62609
62610 +#ifdef CONFIG_PAX_KERNEXEC
62611 +void module_free_exec(struct module *mod, void *module_region);
62612 +#else
62613 +#define module_free_exec(x, y) module_free((x), (y))
62614 +#endif
62615 +
62616 /* Apply the given relocation to the (simplified) ELF. Return -error
62617 or 0. */
62618 int apply_relocate(Elf_Shdr *sechdrs,
62619 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62620 index ddaae98..3c70938 100644
62621 --- a/include/linux/moduleparam.h
62622 +++ b/include/linux/moduleparam.h
62623 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock(void)
62624 * @len is usually just sizeof(string).
62625 */
62626 #define module_param_string(name, string, len, perm) \
62627 - static const struct kparam_string __param_string_##name \
62628 + static const struct kparam_string __param_string_##name __used \
62629 = { len, string }; \
62630 __module_param_call(MODULE_PARAM_PREFIX, name, \
62631 &param_ops_string, \
62632 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
62633 * module_param_named() for why this might be necessary.
62634 */
62635 #define module_param_array_named(name, array, type, nump, perm) \
62636 - static const struct kparam_array __param_arr_##name \
62637 + static const struct kparam_array __param_arr_##name __used \
62638 = { .max = ARRAY_SIZE(array), .num = nump, \
62639 .ops = &param_ops_##type, \
62640 .elemsize = sizeof(array[0]), .elem = array }; \
62641 diff --git a/include/linux/namei.h b/include/linux/namei.h
62642 index ffc0213..2c1f2cb 100644
62643 --- a/include/linux/namei.h
62644 +++ b/include/linux/namei.h
62645 @@ -24,7 +24,7 @@ struct nameidata {
62646 unsigned seq;
62647 int last_type;
62648 unsigned depth;
62649 - char *saved_names[MAX_NESTED_LINKS + 1];
62650 + const char *saved_names[MAX_NESTED_LINKS + 1];
62651
62652 /* Intent data */
62653 union {
62654 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62655 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62656 extern void unlock_rename(struct dentry *, struct dentry *);
62657
62658 -static inline void nd_set_link(struct nameidata *nd, char *path)
62659 +static inline void nd_set_link(struct nameidata *nd, const char *path)
62660 {
62661 nd->saved_names[nd->depth] = path;
62662 }
62663
62664 -static inline char *nd_get_link(struct nameidata *nd)
62665 +static inline const char *nd_get_link(const struct nameidata *nd)
62666 {
62667 return nd->saved_names[nd->depth];
62668 }
62669 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62670 index ddee79b..67af106 100644
62671 --- a/include/linux/netdevice.h
62672 +++ b/include/linux/netdevice.h
62673 @@ -944,6 +944,7 @@ struct net_device_ops {
62674 int (*ndo_set_features)(struct net_device *dev,
62675 u32 features);
62676 };
62677 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62678
62679 /*
62680 * The DEVICE structure.
62681 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62682 new file mode 100644
62683 index 0000000..33f4af8
62684 --- /dev/null
62685 +++ b/include/linux/netfilter/xt_gradm.h
62686 @@ -0,0 +1,9 @@
62687 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62688 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62689 +
62690 +struct xt_gradm_mtinfo {
62691 + __u16 flags;
62692 + __u16 invflags;
62693 +};
62694 +
62695 +#endif
62696 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62697 index c65a18a..0c05f3a 100644
62698 --- a/include/linux/of_pdt.h
62699 +++ b/include/linux/of_pdt.h
62700 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62701
62702 /* return 0 on success; fill in 'len' with number of bytes in path */
62703 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62704 -};
62705 +} __no_const;
62706
62707 extern void *prom_early_alloc(unsigned long size);
62708
62709 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62710 index 49c8727..34d2ae1 100644
62711 --- a/include/linux/oprofile.h
62712 +++ b/include/linux/oprofile.h
62713 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62714 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62715 char const * name, ulong * val);
62716
62717 -/** Create a file for read-only access to an atomic_t. */
62718 +/** Create a file for read-only access to an atomic_unchecked_t. */
62719 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62720 - char const * name, atomic_t * val);
62721 + char const * name, atomic_unchecked_t * val);
62722
62723 /** create a directory */
62724 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62725 diff --git a/include/linux/padata.h b/include/linux/padata.h
62726 index 4633b2f..988bc08 100644
62727 --- a/include/linux/padata.h
62728 +++ b/include/linux/padata.h
62729 @@ -129,7 +129,7 @@ struct parallel_data {
62730 struct padata_instance *pinst;
62731 struct padata_parallel_queue __percpu *pqueue;
62732 struct padata_serial_queue __percpu *squeue;
62733 - atomic_t seq_nr;
62734 + atomic_unchecked_t seq_nr;
62735 atomic_t reorder_objects;
62736 atomic_t refcnt;
62737 unsigned int max_seq_nr;
62738 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62739 index c816075..cd28c4d 100644
62740 --- a/include/linux/perf_event.h
62741 +++ b/include/linux/perf_event.h
62742 @@ -745,8 +745,8 @@ struct perf_event {
62743
62744 enum perf_event_active_state state;
62745 unsigned int attach_state;
62746 - local64_t count;
62747 - atomic64_t child_count;
62748 + local64_t count; /* PaX: fix it one day */
62749 + atomic64_unchecked_t child_count;
62750
62751 /*
62752 * These are the total time in nanoseconds that the event
62753 @@ -797,8 +797,8 @@ struct perf_event {
62754 * These accumulate total time (in nanoseconds) that children
62755 * events have been enabled and running, respectively.
62756 */
62757 - atomic64_t child_total_time_enabled;
62758 - atomic64_t child_total_time_running;
62759 + atomic64_unchecked_t child_total_time_enabled;
62760 + atomic64_unchecked_t child_total_time_running;
62761
62762 /*
62763 * Protect attach/detach and child_list:
62764 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62765 index 77257c9..51d473a 100644
62766 --- a/include/linux/pipe_fs_i.h
62767 +++ b/include/linux/pipe_fs_i.h
62768 @@ -46,9 +46,9 @@ struct pipe_buffer {
62769 struct pipe_inode_info {
62770 wait_queue_head_t wait;
62771 unsigned int nrbufs, curbuf, buffers;
62772 - unsigned int readers;
62773 - unsigned int writers;
62774 - unsigned int waiting_writers;
62775 + atomic_t readers;
62776 + atomic_t writers;
62777 + atomic_t waiting_writers;
62778 unsigned int r_counter;
62779 unsigned int w_counter;
62780 struct page *tmp_page;
62781 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62782 index daac05d..c6802ce 100644
62783 --- a/include/linux/pm_runtime.h
62784 +++ b/include/linux/pm_runtime.h
62785 @@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62786
62787 static inline void pm_runtime_mark_last_busy(struct device *dev)
62788 {
62789 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62790 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62791 }
62792
62793 #else /* !CONFIG_PM_RUNTIME */
62794 diff --git a/include/linux/poison.h b/include/linux/poison.h
62795 index 79159de..f1233a9 100644
62796 --- a/include/linux/poison.h
62797 +++ b/include/linux/poison.h
62798 @@ -19,8 +19,8 @@
62799 * under normal circumstances, used to verify that nobody uses
62800 * non-initialized list entries.
62801 */
62802 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62803 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62804 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62805 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62806
62807 /********** include/linux/timer.h **********/
62808 /*
62809 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62810 index 58969b2..ead129b 100644
62811 --- a/include/linux/preempt.h
62812 +++ b/include/linux/preempt.h
62813 @@ -123,7 +123,7 @@ struct preempt_ops {
62814 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62815 void (*sched_out)(struct preempt_notifier *notifier,
62816 struct task_struct *next);
62817 -};
62818 +} __no_const;
62819
62820 /**
62821 * preempt_notifier - key for installing preemption notifiers
62822 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62823 index 643b96c..ef55a9c 100644
62824 --- a/include/linux/proc_fs.h
62825 +++ b/include/linux/proc_fs.h
62826 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
62827 return proc_create_data(name, mode, parent, proc_fops, NULL);
62828 }
62829
62830 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
62831 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62832 +{
62833 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62834 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62835 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62836 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62837 +#else
62838 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62839 +#endif
62840 +}
62841 +
62842 +
62843 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62844 mode_t mode, struct proc_dir_entry *base,
62845 read_proc_t *read_proc, void * data)
62846 @@ -258,7 +271,7 @@ union proc_op {
62847 int (*proc_show)(struct seq_file *m,
62848 struct pid_namespace *ns, struct pid *pid,
62849 struct task_struct *task);
62850 -};
62851 +} __no_const;
62852
62853 struct ctl_table_header;
62854 struct ctl_table;
62855 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62856 index 800f113..af90cc8 100644
62857 --- a/include/linux/ptrace.h
62858 +++ b/include/linux/ptrace.h
62859 @@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_struct *child);
62860 extern void exit_ptrace(struct task_struct *tracer);
62861 #define PTRACE_MODE_READ 1
62862 #define PTRACE_MODE_ATTACH 2
62863 -/* Returns 0 on success, -errno on denial. */
62864 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
62865 /* Returns true on success, false on denial. */
62866 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
62867 +/* Returns true on success, false on denial. */
62868 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
62869
62870 static inline int ptrace_reparented(struct task_struct *child)
62871 {
62872 diff --git a/include/linux/random.h b/include/linux/random.h
62873 index d13059f..2eaafaa 100644
62874 --- a/include/linux/random.h
62875 +++ b/include/linux/random.h
62876 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62877
62878 u32 prandom32(struct rnd_state *);
62879
62880 +static inline unsigned long pax_get_random_long(void)
62881 +{
62882 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62883 +}
62884 +
62885 /*
62886 * Handle minimum values for seeds
62887 */
62888 static inline u32 __seed(u32 x, u32 m)
62889 {
62890 - return (x < m) ? x + m : x;
62891 + return (x <= m) ? x + m + 1 : x;
62892 }
62893
62894 /**
62895 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62896 index e0879a7..a12f962 100644
62897 --- a/include/linux/reboot.h
62898 +++ b/include/linux/reboot.h
62899 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62900 * Architecture-specific implementations of sys_reboot commands.
62901 */
62902
62903 -extern void machine_restart(char *cmd);
62904 -extern void machine_halt(void);
62905 -extern void machine_power_off(void);
62906 +extern void machine_restart(char *cmd) __noreturn;
62907 +extern void machine_halt(void) __noreturn;
62908 +extern void machine_power_off(void) __noreturn;
62909
62910 extern void machine_shutdown(void);
62911 struct pt_regs;
62912 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62913 */
62914
62915 extern void kernel_restart_prepare(char *cmd);
62916 -extern void kernel_restart(char *cmd);
62917 -extern void kernel_halt(void);
62918 -extern void kernel_power_off(void);
62919 +extern void kernel_restart(char *cmd) __noreturn;
62920 +extern void kernel_halt(void) __noreturn;
62921 +extern void kernel_power_off(void) __noreturn;
62922
62923 extern int C_A_D; /* for sysctl */
62924 void ctrl_alt_del(void);
62925 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62926 * Emergency restart, callable from an interrupt handler.
62927 */
62928
62929 -extern void emergency_restart(void);
62930 +extern void emergency_restart(void) __noreturn;
62931 #include <asm/emergency-restart.h>
62932
62933 #endif
62934 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
62935 index 96d465f..b084e05 100644
62936 --- a/include/linux/reiserfs_fs.h
62937 +++ b/include/linux/reiserfs_fs.h
62938 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
62939 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
62940
62941 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62942 -#define get_generation(s) atomic_read (&fs_generation(s))
62943 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62944 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62945 #define __fs_changed(gen,s) (gen != get_generation (s))
62946 #define fs_changed(gen,s) \
62947 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62948 index 52c83b6..18ed7eb 100644
62949 --- a/include/linux/reiserfs_fs_sb.h
62950 +++ b/include/linux/reiserfs_fs_sb.h
62951 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
62952 /* Comment? -Hans */
62953 wait_queue_head_t s_wait;
62954 /* To be obsoleted soon by per buffer seals.. -Hans */
62955 - atomic_t s_generation_counter; // increased by one every time the
62956 + atomic_unchecked_t s_generation_counter; // increased by one every time the
62957 // tree gets re-balanced
62958 unsigned long s_properties; /* File system properties. Currently holds
62959 on-disk FS format */
62960 diff --git a/include/linux/relay.h b/include/linux/relay.h
62961 index 14a86bc..17d0700 100644
62962 --- a/include/linux/relay.h
62963 +++ b/include/linux/relay.h
62964 @@ -159,7 +159,7 @@ struct rchan_callbacks
62965 * The callback should return 0 if successful, negative if not.
62966 */
62967 int (*remove_buf_file)(struct dentry *dentry);
62968 -};
62969 +} __no_const;
62970
62971 /*
62972 * CONFIG_RELAY kernel API, kernel/relay.c
62973 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62974 index c6c6084..5bf1212 100644
62975 --- a/include/linux/rfkill.h
62976 +++ b/include/linux/rfkill.h
62977 @@ -147,6 +147,7 @@ struct rfkill_ops {
62978 void (*query)(struct rfkill *rfkill, void *data);
62979 int (*set_block)(void *data, bool blocked);
62980 };
62981 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62982
62983 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62984 /**
62985 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62986 index 2148b12..519b820 100644
62987 --- a/include/linux/rmap.h
62988 +++ b/include/linux/rmap.h
62989 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62990 void anon_vma_init(void); /* create anon_vma_cachep */
62991 int anon_vma_prepare(struct vm_area_struct *);
62992 void unlink_anon_vmas(struct vm_area_struct *);
62993 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62994 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62995 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62996 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62997 void __anon_vma_link(struct vm_area_struct *);
62998
62999 static inline void anon_vma_merge(struct vm_area_struct *vma,
63000 diff --git a/include/linux/sched.h b/include/linux/sched.h
63001 index 41d0237..5a64056 100644
63002 --- a/include/linux/sched.h
63003 +++ b/include/linux/sched.h
63004 @@ -100,6 +100,7 @@ struct bio_list;
63005 struct fs_struct;
63006 struct perf_event_context;
63007 struct blk_plug;
63008 +struct linux_binprm;
63009
63010 /*
63011 * List of flags we want to share for kernel threads,
63012 @@ -380,10 +381,13 @@ struct user_namespace;
63013 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63014
63015 extern int sysctl_max_map_count;
63016 +extern unsigned long sysctl_heap_stack_gap;
63017
63018 #include <linux/aio.h>
63019
63020 #ifdef CONFIG_MMU
63021 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63022 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63023 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63024 extern unsigned long
63025 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63026 @@ -629,6 +633,17 @@ struct signal_struct {
63027 #ifdef CONFIG_TASKSTATS
63028 struct taskstats *stats;
63029 #endif
63030 +
63031 +#ifdef CONFIG_GRKERNSEC
63032 + u32 curr_ip;
63033 + u32 saved_ip;
63034 + u32 gr_saddr;
63035 + u32 gr_daddr;
63036 + u16 gr_sport;
63037 + u16 gr_dport;
63038 + u8 used_accept:1;
63039 +#endif
63040 +
63041 #ifdef CONFIG_AUDIT
63042 unsigned audit_tty;
63043 struct tty_audit_buf *tty_audit_buf;
63044 @@ -710,6 +725,11 @@ struct user_struct {
63045 struct key *session_keyring; /* UID's default session keyring */
63046 #endif
63047
63048 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63049 + unsigned int banned;
63050 + unsigned long ban_expires;
63051 +#endif
63052 +
63053 /* Hash table maintenance information */
63054 struct hlist_node uidhash_node;
63055 uid_t uid;
63056 @@ -1340,8 +1360,8 @@ struct task_struct {
63057 struct list_head thread_group;
63058
63059 struct completion *vfork_done; /* for vfork() */
63060 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63061 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63062 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63063 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63064
63065 cputime_t utime, stime, utimescaled, stimescaled;
63066 cputime_t gtime;
63067 @@ -1357,13 +1377,6 @@ struct task_struct {
63068 struct task_cputime cputime_expires;
63069 struct list_head cpu_timers[3];
63070
63071 -/* process credentials */
63072 - const struct cred __rcu *real_cred; /* objective and real subjective task
63073 - * credentials (COW) */
63074 - const struct cred __rcu *cred; /* effective (overridable) subjective task
63075 - * credentials (COW) */
63076 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63077 -
63078 char comm[TASK_COMM_LEN]; /* executable name excluding path
63079 - access with [gs]et_task_comm (which lock
63080 it with task_lock())
63081 @@ -1380,8 +1393,16 @@ struct task_struct {
63082 #endif
63083 /* CPU-specific state of this task */
63084 struct thread_struct thread;
63085 +/* thread_info moved to task_struct */
63086 +#ifdef CONFIG_X86
63087 + struct thread_info tinfo;
63088 +#endif
63089 /* filesystem information */
63090 struct fs_struct *fs;
63091 +
63092 + const struct cred __rcu *cred; /* effective (overridable) subjective task
63093 + * credentials (COW) */
63094 +
63095 /* open file information */
63096 struct files_struct *files;
63097 /* namespaces */
63098 @@ -1428,6 +1449,11 @@ struct task_struct {
63099 struct rt_mutex_waiter *pi_blocked_on;
63100 #endif
63101
63102 +/* process credentials */
63103 + const struct cred __rcu *real_cred; /* objective and real subjective task
63104 + * credentials (COW) */
63105 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63106 +
63107 #ifdef CONFIG_DEBUG_MUTEXES
63108 /* mutex deadlock detection */
63109 struct mutex_waiter *blocked_on;
63110 @@ -1537,6 +1563,21 @@ struct task_struct {
63111 unsigned long default_timer_slack_ns;
63112
63113 struct list_head *scm_work_list;
63114 +
63115 +#ifdef CONFIG_GRKERNSEC
63116 + /* grsecurity */
63117 + struct dentry *gr_chroot_dentry;
63118 + struct acl_subject_label *acl;
63119 + struct acl_role_label *role;
63120 + struct file *exec_file;
63121 + u16 acl_role_id;
63122 + /* is this the task that authenticated to the special role */
63123 + u8 acl_sp_role;
63124 + u8 is_writable;
63125 + u8 brute;
63126 + u8 gr_is_chrooted;
63127 +#endif
63128 +
63129 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63130 /* Index of current stored address in ret_stack */
63131 int curr_ret_stack;
63132 @@ -1571,6 +1612,57 @@ struct task_struct {
63133 #endif
63134 };
63135
63136 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63137 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63138 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63139 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63140 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63141 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63142 +
63143 +#ifdef CONFIG_PAX_SOFTMODE
63144 +extern int pax_softmode;
63145 +#endif
63146 +
63147 +extern int pax_check_flags(unsigned long *);
63148 +
63149 +/* if tsk != current then task_lock must be held on it */
63150 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63151 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
63152 +{
63153 + if (likely(tsk->mm))
63154 + return tsk->mm->pax_flags;
63155 + else
63156 + return 0UL;
63157 +}
63158 +
63159 +/* if tsk != current then task_lock must be held on it */
63160 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63161 +{
63162 + if (likely(tsk->mm)) {
63163 + tsk->mm->pax_flags = flags;
63164 + return 0;
63165 + }
63166 + return -EINVAL;
63167 +}
63168 +#endif
63169 +
63170 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63171 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
63172 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63173 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63174 +#endif
63175 +
63176 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63177 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63178 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
63179 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
63180 +
63181 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
63182 +extern void pax_track_stack(void);
63183 +#else
63184 +static inline void pax_track_stack(void) {}
63185 +#endif
63186 +
63187 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63188 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63189
63190 @@ -2074,7 +2166,9 @@ void yield(void);
63191 extern struct exec_domain default_exec_domain;
63192
63193 union thread_union {
63194 +#ifndef CONFIG_X86
63195 struct thread_info thread_info;
63196 +#endif
63197 unsigned long stack[THREAD_SIZE/sizeof(long)];
63198 };
63199
63200 @@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
63201 */
63202
63203 extern struct task_struct *find_task_by_vpid(pid_t nr);
63204 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63205 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63206 struct pid_namespace *ns);
63207
63208 @@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63209 extern void exit_itimers(struct signal_struct *);
63210 extern void flush_itimer_signals(void);
63211
63212 -extern NORET_TYPE void do_group_exit(int);
63213 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
63214
63215 extern void daemonize(const char *, ...);
63216 extern int allow_signal(int);
63217 @@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63218
63219 #endif
63220
63221 -static inline int object_is_on_stack(void *obj)
63222 +static inline int object_starts_on_stack(void *obj)
63223 {
63224 - void *stack = task_stack_page(current);
63225 + const void *stack = task_stack_page(current);
63226
63227 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63228 }
63229
63230 +#ifdef CONFIG_PAX_USERCOPY
63231 +extern int object_is_on_stack(const void *obj, unsigned long len);
63232 +#endif
63233 +
63234 extern void thread_info_cache_init(void);
63235
63236 #ifdef CONFIG_DEBUG_STACK_USAGE
63237 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63238 index 899fbb4..1cb4138 100644
63239 --- a/include/linux/screen_info.h
63240 +++ b/include/linux/screen_info.h
63241 @@ -43,7 +43,8 @@ struct screen_info {
63242 __u16 pages; /* 0x32 */
63243 __u16 vesa_attributes; /* 0x34 */
63244 __u32 capabilities; /* 0x36 */
63245 - __u8 _reserved[6]; /* 0x3a */
63246 + __u16 vesapm_size; /* 0x3a */
63247 + __u8 _reserved[4]; /* 0x3c */
63248 } __attribute__((packed));
63249
63250 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63251 diff --git a/include/linux/security.h b/include/linux/security.h
63252 index ebd2a53..2d949ae 100644
63253 --- a/include/linux/security.h
63254 +++ b/include/linux/security.h
63255 @@ -36,6 +36,7 @@
63256 #include <linux/key.h>
63257 #include <linux/xfrm.h>
63258 #include <linux/slab.h>
63259 +#include <linux/grsecurity.h>
63260 #include <net/flow.h>
63261
63262 /* Maximum number of letters for an LSM name string */
63263 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63264 index be720cd..a0e1b94 100644
63265 --- a/include/linux/seq_file.h
63266 +++ b/include/linux/seq_file.h
63267 @@ -33,6 +33,7 @@ struct seq_operations {
63268 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63269 int (*show) (struct seq_file *m, void *v);
63270 };
63271 +typedef struct seq_operations __no_const seq_operations_no_const;
63272
63273 #define SEQ_SKIP 1
63274
63275 diff --git a/include/linux/shm.h b/include/linux/shm.h
63276 index 92808b8..c28cac4 100644
63277 --- a/include/linux/shm.h
63278 +++ b/include/linux/shm.h
63279 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63280
63281 /* The task created the shm object. NULL if the task is dead. */
63282 struct task_struct *shm_creator;
63283 +#ifdef CONFIG_GRKERNSEC
63284 + time_t shm_createtime;
63285 + pid_t shm_lapid;
63286 +#endif
63287 };
63288
63289 /* shm_mode upper byte flags */
63290 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63291 index 0f96646..cfb757a 100644
63292 --- a/include/linux/skbuff.h
63293 +++ b/include/linux/skbuff.h
63294 @@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63295 */
63296 static inline int skb_queue_empty(const struct sk_buff_head *list)
63297 {
63298 - return list->next == (struct sk_buff *)list;
63299 + return list->next == (const struct sk_buff *)list;
63300 }
63301
63302 /**
63303 @@ -623,7 +623,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63304 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63305 const struct sk_buff *skb)
63306 {
63307 - return skb->next == (struct sk_buff *)list;
63308 + return skb->next == (const struct sk_buff *)list;
63309 }
63310
63311 /**
63312 @@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63313 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63314 const struct sk_buff *skb)
63315 {
63316 - return skb->prev == (struct sk_buff *)list;
63317 + return skb->prev == (const struct sk_buff *)list;
63318 }
63319
63320 /**
63321 @@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63322 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63323 */
63324 #ifndef NET_SKB_PAD
63325 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63326 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63327 #endif
63328
63329 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63330 diff --git a/include/linux/slab.h b/include/linux/slab.h
63331 index 573c809..e84c132 100644
63332 --- a/include/linux/slab.h
63333 +++ b/include/linux/slab.h
63334 @@ -11,12 +11,20 @@
63335
63336 #include <linux/gfp.h>
63337 #include <linux/types.h>
63338 +#include <linux/err.h>
63339
63340 /*
63341 * Flags to pass to kmem_cache_create().
63342 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63343 */
63344 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63345 +
63346 +#ifdef CONFIG_PAX_USERCOPY
63347 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63348 +#else
63349 +#define SLAB_USERCOPY 0x00000000UL
63350 +#endif
63351 +
63352 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63353 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63354 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63355 @@ -87,10 +95,13 @@
63356 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63357 * Both make kfree a no-op.
63358 */
63359 -#define ZERO_SIZE_PTR ((void *)16)
63360 +#define ZERO_SIZE_PTR \
63361 +({ \
63362 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63363 + (void *)(-MAX_ERRNO-1L); \
63364 +})
63365
63366 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63367 - (unsigned long)ZERO_SIZE_PTR)
63368 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63369
63370 /*
63371 * struct kmem_cache related prototypes
63372 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
63373 void kfree(const void *);
63374 void kzfree(const void *);
63375 size_t ksize(const void *);
63376 +void check_object_size(const void *ptr, unsigned long n, bool to);
63377
63378 /*
63379 * Allocator specific definitions. These are mainly used to establish optimized
63380 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
63381
63382 void __init kmem_cache_init_late(void);
63383
63384 +#define kmalloc(x, y) \
63385 +({ \
63386 + void *___retval; \
63387 + intoverflow_t ___x = (intoverflow_t)x; \
63388 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
63389 + ___retval = NULL; \
63390 + else \
63391 + ___retval = kmalloc((size_t)___x, (y)); \
63392 + ___retval; \
63393 +})
63394 +
63395 +#define kmalloc_node(x, y, z) \
63396 +({ \
63397 + void *___retval; \
63398 + intoverflow_t ___x = (intoverflow_t)x; \
63399 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
63400 + ___retval = NULL; \
63401 + else \
63402 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
63403 + ___retval; \
63404 +})
63405 +
63406 +#define kzalloc(x, y) \
63407 +({ \
63408 + void *___retval; \
63409 + intoverflow_t ___x = (intoverflow_t)x; \
63410 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
63411 + ___retval = NULL; \
63412 + else \
63413 + ___retval = kzalloc((size_t)___x, (y)); \
63414 + ___retval; \
63415 +})
63416 +
63417 +#define __krealloc(x, y, z) \
63418 +({ \
63419 + void *___retval; \
63420 + intoverflow_t ___y = (intoverflow_t)y; \
63421 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
63422 + ___retval = NULL; \
63423 + else \
63424 + ___retval = __krealloc((x), (size_t)___y, (z)); \
63425 + ___retval; \
63426 +})
63427 +
63428 +#define krealloc(x, y, z) \
63429 +({ \
63430 + void *___retval; \
63431 + intoverflow_t ___y = (intoverflow_t)y; \
63432 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
63433 + ___retval = NULL; \
63434 + else \
63435 + ___retval = krealloc((x), (size_t)___y, (z)); \
63436 + ___retval; \
63437 +})
63438 +
63439 #endif /* _LINUX_SLAB_H */
63440 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63441 index d00e0ba..1b3bf7b 100644
63442 --- a/include/linux/slab_def.h
63443 +++ b/include/linux/slab_def.h
63444 @@ -68,10 +68,10 @@ struct kmem_cache {
63445 unsigned long node_allocs;
63446 unsigned long node_frees;
63447 unsigned long node_overflow;
63448 - atomic_t allochit;
63449 - atomic_t allocmiss;
63450 - atomic_t freehit;
63451 - atomic_t freemiss;
63452 + atomic_unchecked_t allochit;
63453 + atomic_unchecked_t allocmiss;
63454 + atomic_unchecked_t freehit;
63455 + atomic_unchecked_t freemiss;
63456
63457 /*
63458 * If debugging is enabled, then the allocator can add additional
63459 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63460 index f58d641..c56bf9c 100644
63461 --- a/include/linux/slub_def.h
63462 +++ b/include/linux/slub_def.h
63463 @@ -85,7 +85,7 @@ struct kmem_cache {
63464 struct kmem_cache_order_objects max;
63465 struct kmem_cache_order_objects min;
63466 gfp_t allocflags; /* gfp flags to use on each alloc */
63467 - int refcount; /* Refcount for slab cache destroy */
63468 + atomic_t refcount; /* Refcount for slab cache destroy */
63469 void (*ctor)(void *);
63470 int inuse; /* Offset to metadata */
63471 int align; /* Alignment */
63472 @@ -211,7 +211,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63473 }
63474
63475 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63476 -void *__kmalloc(size_t size, gfp_t flags);
63477 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
63478
63479 static __always_inline void *
63480 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63481 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63482 index de8832d..0147b46 100644
63483 --- a/include/linux/sonet.h
63484 +++ b/include/linux/sonet.h
63485 @@ -61,7 +61,7 @@ struct sonet_stats {
63486 #include <linux/atomic.h>
63487
63488 struct k_sonet_stats {
63489 -#define __HANDLE_ITEM(i) atomic_t i
63490 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63491 __SONET_ITEMS
63492 #undef __HANDLE_ITEM
63493 };
63494 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63495 index db7bcaf..1aca77e 100644
63496 --- a/include/linux/sunrpc/clnt.h
63497 +++ b/include/linux/sunrpc/clnt.h
63498 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63499 {
63500 switch (sap->sa_family) {
63501 case AF_INET:
63502 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
63503 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63504 case AF_INET6:
63505 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63506 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63507 }
63508 return 0;
63509 }
63510 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63511 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63512 const struct sockaddr *src)
63513 {
63514 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63515 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63516 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63517
63518 dsin->sin_family = ssin->sin_family;
63519 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63520 if (sa->sa_family != AF_INET6)
63521 return 0;
63522
63523 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63524 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63525 }
63526
63527 #endif /* __KERNEL__ */
63528 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63529 index e775689..9e206d9 100644
63530 --- a/include/linux/sunrpc/sched.h
63531 +++ b/include/linux/sunrpc/sched.h
63532 @@ -105,6 +105,7 @@ struct rpc_call_ops {
63533 void (*rpc_call_done)(struct rpc_task *, void *);
63534 void (*rpc_release)(void *);
63535 };
63536 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63537
63538 struct rpc_task_setup {
63539 struct rpc_task *task;
63540 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63541 index c14fe86..393245e 100644
63542 --- a/include/linux/sunrpc/svc_rdma.h
63543 +++ b/include/linux/sunrpc/svc_rdma.h
63544 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63545 extern unsigned int svcrdma_max_requests;
63546 extern unsigned int svcrdma_max_req_size;
63547
63548 -extern atomic_t rdma_stat_recv;
63549 -extern atomic_t rdma_stat_read;
63550 -extern atomic_t rdma_stat_write;
63551 -extern atomic_t rdma_stat_sq_starve;
63552 -extern atomic_t rdma_stat_rq_starve;
63553 -extern atomic_t rdma_stat_rq_poll;
63554 -extern atomic_t rdma_stat_rq_prod;
63555 -extern atomic_t rdma_stat_sq_poll;
63556 -extern atomic_t rdma_stat_sq_prod;
63557 +extern atomic_unchecked_t rdma_stat_recv;
63558 +extern atomic_unchecked_t rdma_stat_read;
63559 +extern atomic_unchecked_t rdma_stat_write;
63560 +extern atomic_unchecked_t rdma_stat_sq_starve;
63561 +extern atomic_unchecked_t rdma_stat_rq_starve;
63562 +extern atomic_unchecked_t rdma_stat_rq_poll;
63563 +extern atomic_unchecked_t rdma_stat_rq_prod;
63564 +extern atomic_unchecked_t rdma_stat_sq_poll;
63565 +extern atomic_unchecked_t rdma_stat_sq_prod;
63566
63567 #define RPCRDMA_VERSION 1
63568
63569 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63570 index 11684d9..0d245eb 100644
63571 --- a/include/linux/sysctl.h
63572 +++ b/include/linux/sysctl.h
63573 @@ -155,7 +155,11 @@ enum
63574 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63575 };
63576
63577 -
63578 +#ifdef CONFIG_PAX_SOFTMODE
63579 +enum {
63580 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63581 +};
63582 +#endif
63583
63584 /* CTL_VM names: */
63585 enum
63586 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63587
63588 extern int proc_dostring(struct ctl_table *, int,
63589 void __user *, size_t *, loff_t *);
63590 +extern int proc_dostring_modpriv(struct ctl_table *, int,
63591 + void __user *, size_t *, loff_t *);
63592 extern int proc_dointvec(struct ctl_table *, int,
63593 void __user *, size_t *, loff_t *);
63594 extern int proc_dointvec_minmax(struct ctl_table *, int,
63595 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63596 index ff7dc08..893e1bd 100644
63597 --- a/include/linux/tty_ldisc.h
63598 +++ b/include/linux/tty_ldisc.h
63599 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63600
63601 struct module *owner;
63602
63603 - int refcount;
63604 + atomic_t refcount;
63605 };
63606
63607 struct tty_ldisc {
63608 diff --git a/include/linux/types.h b/include/linux/types.h
63609 index 176da8c..e45e473 100644
63610 --- a/include/linux/types.h
63611 +++ b/include/linux/types.h
63612 @@ -213,10 +213,26 @@ typedef struct {
63613 int counter;
63614 } atomic_t;
63615
63616 +#ifdef CONFIG_PAX_REFCOUNT
63617 +typedef struct {
63618 + int counter;
63619 +} atomic_unchecked_t;
63620 +#else
63621 +typedef atomic_t atomic_unchecked_t;
63622 +#endif
63623 +
63624 #ifdef CONFIG_64BIT
63625 typedef struct {
63626 long counter;
63627 } atomic64_t;
63628 +
63629 +#ifdef CONFIG_PAX_REFCOUNT
63630 +typedef struct {
63631 + long counter;
63632 +} atomic64_unchecked_t;
63633 +#else
63634 +typedef atomic64_t atomic64_unchecked_t;
63635 +#endif
63636 #endif
63637
63638 struct list_head {
63639 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63640 index 5ca0951..ab496a5 100644
63641 --- a/include/linux/uaccess.h
63642 +++ b/include/linux/uaccess.h
63643 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63644 long ret; \
63645 mm_segment_t old_fs = get_fs(); \
63646 \
63647 - set_fs(KERNEL_DS); \
63648 pagefault_disable(); \
63649 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63650 - pagefault_enable(); \
63651 + set_fs(KERNEL_DS); \
63652 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63653 set_fs(old_fs); \
63654 + pagefault_enable(); \
63655 ret; \
63656 })
63657
63658 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63659 index 99c1b4d..bb94261 100644
63660 --- a/include/linux/unaligned/access_ok.h
63661 +++ b/include/linux/unaligned/access_ok.h
63662 @@ -6,32 +6,32 @@
63663
63664 static inline u16 get_unaligned_le16(const void *p)
63665 {
63666 - return le16_to_cpup((__le16 *)p);
63667 + return le16_to_cpup((const __le16 *)p);
63668 }
63669
63670 static inline u32 get_unaligned_le32(const void *p)
63671 {
63672 - return le32_to_cpup((__le32 *)p);
63673 + return le32_to_cpup((const __le32 *)p);
63674 }
63675
63676 static inline u64 get_unaligned_le64(const void *p)
63677 {
63678 - return le64_to_cpup((__le64 *)p);
63679 + return le64_to_cpup((const __le64 *)p);
63680 }
63681
63682 static inline u16 get_unaligned_be16(const void *p)
63683 {
63684 - return be16_to_cpup((__be16 *)p);
63685 + return be16_to_cpup((const __be16 *)p);
63686 }
63687
63688 static inline u32 get_unaligned_be32(const void *p)
63689 {
63690 - return be32_to_cpup((__be32 *)p);
63691 + return be32_to_cpup((const __be32 *)p);
63692 }
63693
63694 static inline u64 get_unaligned_be64(const void *p)
63695 {
63696 - return be64_to_cpup((__be64 *)p);
63697 + return be64_to_cpup((const __be64 *)p);
63698 }
63699
63700 static inline void put_unaligned_le16(u16 val, void *p)
63701 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63702 index cf97b5b..40ebc87 100644
63703 --- a/include/linux/vermagic.h
63704 +++ b/include/linux/vermagic.h
63705 @@ -26,9 +26,35 @@
63706 #define MODULE_ARCH_VERMAGIC ""
63707 #endif
63708
63709 +#ifdef CONFIG_PAX_REFCOUNT
63710 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63711 +#else
63712 +#define MODULE_PAX_REFCOUNT ""
63713 +#endif
63714 +
63715 +#ifdef CONSTIFY_PLUGIN
63716 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63717 +#else
63718 +#define MODULE_CONSTIFY_PLUGIN ""
63719 +#endif
63720 +
63721 +#ifdef STACKLEAK_PLUGIN
63722 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63723 +#else
63724 +#define MODULE_STACKLEAK_PLUGIN ""
63725 +#endif
63726 +
63727 +#ifdef CONFIG_GRKERNSEC
63728 +#define MODULE_GRSEC "GRSEC "
63729 +#else
63730 +#define MODULE_GRSEC ""
63731 +#endif
63732 +
63733 #define VERMAGIC_STRING \
63734 UTS_RELEASE " " \
63735 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63736 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63737 - MODULE_ARCH_VERMAGIC
63738 + MODULE_ARCH_VERMAGIC \
63739 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63740 + MODULE_GRSEC
63741
63742 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63743 index 687fb11..b342358 100644
63744 --- a/include/linux/vmalloc.h
63745 +++ b/include/linux/vmalloc.h
63746 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63747 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63748 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63749 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63750 +
63751 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63752 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63753 +#endif
63754 +
63755 /* bits [20..32] reserved for arch specific ioremap internals */
63756
63757 /*
63758 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
63759 # endif
63760 #endif
63761
63762 +#define vmalloc(x) \
63763 +({ \
63764 + void *___retval; \
63765 + intoverflow_t ___x = (intoverflow_t)x; \
63766 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
63767 + ___retval = NULL; \
63768 + else \
63769 + ___retval = vmalloc((unsigned long)___x); \
63770 + ___retval; \
63771 +})
63772 +
63773 +#define vzalloc(x) \
63774 +({ \
63775 + void *___retval; \
63776 + intoverflow_t ___x = (intoverflow_t)x; \
63777 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
63778 + ___retval = NULL; \
63779 + else \
63780 + ___retval = vzalloc((unsigned long)___x); \
63781 + ___retval; \
63782 +})
63783 +
63784 +#define __vmalloc(x, y, z) \
63785 +({ \
63786 + void *___retval; \
63787 + intoverflow_t ___x = (intoverflow_t)x; \
63788 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
63789 + ___retval = NULL; \
63790 + else \
63791 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
63792 + ___retval; \
63793 +})
63794 +
63795 +#define vmalloc_user(x) \
63796 +({ \
63797 + void *___retval; \
63798 + intoverflow_t ___x = (intoverflow_t)x; \
63799 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
63800 + ___retval = NULL; \
63801 + else \
63802 + ___retval = vmalloc_user((unsigned long)___x); \
63803 + ___retval; \
63804 +})
63805 +
63806 +#define vmalloc_exec(x) \
63807 +({ \
63808 + void *___retval; \
63809 + intoverflow_t ___x = (intoverflow_t)x; \
63810 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
63811 + ___retval = NULL; \
63812 + else \
63813 + ___retval = vmalloc_exec((unsigned long)___x); \
63814 + ___retval; \
63815 +})
63816 +
63817 +#define vmalloc_node(x, y) \
63818 +({ \
63819 + void *___retval; \
63820 + intoverflow_t ___x = (intoverflow_t)x; \
63821 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
63822 + ___retval = NULL; \
63823 + else \
63824 + ___retval = vmalloc_node((unsigned long)___x, (y));\
63825 + ___retval; \
63826 +})
63827 +
63828 +#define vzalloc_node(x, y) \
63829 +({ \
63830 + void *___retval; \
63831 + intoverflow_t ___x = (intoverflow_t)x; \
63832 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
63833 + ___retval = NULL; \
63834 + else \
63835 + ___retval = vzalloc_node((unsigned long)___x, (y));\
63836 + ___retval; \
63837 +})
63838 +
63839 +#define vmalloc_32(x) \
63840 +({ \
63841 + void *___retval; \
63842 + intoverflow_t ___x = (intoverflow_t)x; \
63843 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
63844 + ___retval = NULL; \
63845 + else \
63846 + ___retval = vmalloc_32((unsigned long)___x); \
63847 + ___retval; \
63848 +})
63849 +
63850 +#define vmalloc_32_user(x) \
63851 +({ \
63852 +void *___retval; \
63853 + intoverflow_t ___x = (intoverflow_t)x; \
63854 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
63855 + ___retval = NULL; \
63856 + else \
63857 + ___retval = vmalloc_32_user((unsigned long)___x);\
63858 + ___retval; \
63859 +})
63860 +
63861 #endif /* _LINUX_VMALLOC_H */
63862 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63863 index 65efb92..137adbb 100644
63864 --- a/include/linux/vmstat.h
63865 +++ b/include/linux/vmstat.h
63866 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63867 /*
63868 * Zone based page accounting with per cpu differentials.
63869 */
63870 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63871 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63872
63873 static inline void zone_page_state_add(long x, struct zone *zone,
63874 enum zone_stat_item item)
63875 {
63876 - atomic_long_add(x, &zone->vm_stat[item]);
63877 - atomic_long_add(x, &vm_stat[item]);
63878 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63879 + atomic_long_add_unchecked(x, &vm_stat[item]);
63880 }
63881
63882 static inline unsigned long global_page_state(enum zone_stat_item item)
63883 {
63884 - long x = atomic_long_read(&vm_stat[item]);
63885 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63886 #ifdef CONFIG_SMP
63887 if (x < 0)
63888 x = 0;
63889 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63890 static inline unsigned long zone_page_state(struct zone *zone,
63891 enum zone_stat_item item)
63892 {
63893 - long x = atomic_long_read(&zone->vm_stat[item]);
63894 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63895 #ifdef CONFIG_SMP
63896 if (x < 0)
63897 x = 0;
63898 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63899 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63900 enum zone_stat_item item)
63901 {
63902 - long x = atomic_long_read(&zone->vm_stat[item]);
63903 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63904
63905 #ifdef CONFIG_SMP
63906 int cpu;
63907 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63908
63909 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63910 {
63911 - atomic_long_inc(&zone->vm_stat[item]);
63912 - atomic_long_inc(&vm_stat[item]);
63913 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63914 + atomic_long_inc_unchecked(&vm_stat[item]);
63915 }
63916
63917 static inline void __inc_zone_page_state(struct page *page,
63918 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63919
63920 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63921 {
63922 - atomic_long_dec(&zone->vm_stat[item]);
63923 - atomic_long_dec(&vm_stat[item]);
63924 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63925 + atomic_long_dec_unchecked(&vm_stat[item]);
63926 }
63927
63928 static inline void __dec_zone_page_state(struct page *page,
63929 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63930 index aed54c5..3e07f7a 100644
63931 --- a/include/linux/xattr.h
63932 +++ b/include/linux/xattr.h
63933 @@ -49,6 +49,11 @@
63934 #define XATTR_CAPS_SUFFIX "capability"
63935 #define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
63936
63937 +/* User namespace */
63938 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63939 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63940 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63941 +
63942 #ifdef __KERNEL__
63943
63944 #include <linux/types.h>
63945 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63946 index 4aeff96..b378cdc 100644
63947 --- a/include/media/saa7146_vv.h
63948 +++ b/include/media/saa7146_vv.h
63949 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63950 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63951
63952 /* the extension can override this */
63953 - struct v4l2_ioctl_ops ops;
63954 + v4l2_ioctl_ops_no_const ops;
63955 /* pointer to the saa7146 core ops */
63956 const struct v4l2_ioctl_ops *core_ops;
63957
63958 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63959 index c7c40f1..4f01585 100644
63960 --- a/include/media/v4l2-dev.h
63961 +++ b/include/media/v4l2-dev.h
63962 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63963
63964
63965 struct v4l2_file_operations {
63966 - struct module *owner;
63967 + struct module * const owner;
63968 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63969 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63970 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63971 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
63972 int (*open) (struct file *);
63973 int (*release) (struct file *);
63974 };
63975 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63976
63977 /*
63978 * Newer version of video_device, handled by videodev2.c
63979 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63980 index dd9f1e7..8c4dd86 100644
63981 --- a/include/media/v4l2-ioctl.h
63982 +++ b/include/media/v4l2-ioctl.h
63983 @@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
63984 long (*vidioc_default) (struct file *file, void *fh,
63985 bool valid_prio, int cmd, void *arg);
63986 };
63987 -
63988 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63989
63990 /* v4l debugging and diagnostics */
63991
63992 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63993 index c5dedd8..a93b07b 100644
63994 --- a/include/net/caif/caif_hsi.h
63995 +++ b/include/net/caif/caif_hsi.h
63996 @@ -94,7 +94,7 @@ struct cfhsi_drv {
63997 void (*rx_done_cb) (struct cfhsi_drv *drv);
63998 void (*wake_up_cb) (struct cfhsi_drv *drv);
63999 void (*wake_down_cb) (struct cfhsi_drv *drv);
64000 -};
64001 +} __no_const;
64002
64003 /* Structure implemented by HSI device. */
64004 struct cfhsi_dev {
64005 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64006 index 9e5425b..8136ffc 100644
64007 --- a/include/net/caif/cfctrl.h
64008 +++ b/include/net/caif/cfctrl.h
64009 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
64010 void (*radioset_rsp)(void);
64011 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64012 struct cflayer *client_layer);
64013 -};
64014 +} __no_const;
64015
64016 /* Link Setup Parameters for CAIF-Links. */
64017 struct cfctrl_link_param {
64018 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
64019 struct cfctrl {
64020 struct cfsrvl serv;
64021 struct cfctrl_rsp res;
64022 - atomic_t req_seq_no;
64023 - atomic_t rsp_seq_no;
64024 + atomic_unchecked_t req_seq_no;
64025 + atomic_unchecked_t rsp_seq_no;
64026 struct list_head list;
64027 /* Protects from simultaneous access to first_req list */
64028 spinlock_t info_list_lock;
64029 diff --git a/include/net/flow.h b/include/net/flow.h
64030 index 57f15a7..0de26c6 100644
64031 --- a/include/net/flow.h
64032 +++ b/include/net/flow.h
64033 @@ -208,6 +208,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64034
64035 extern void flow_cache_flush(void);
64036 extern void flow_cache_flush_deferred(void);
64037 -extern atomic_t flow_cache_genid;
64038 +extern atomic_unchecked_t flow_cache_genid;
64039
64040 #endif
64041 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64042 index e9ff3fc..9d3e5c7 100644
64043 --- a/include/net/inetpeer.h
64044 +++ b/include/net/inetpeer.h
64045 @@ -48,8 +48,8 @@ struct inet_peer {
64046 */
64047 union {
64048 struct {
64049 - atomic_t rid; /* Frag reception counter */
64050 - atomic_t ip_id_count; /* IP ID for the next packet */
64051 + atomic_unchecked_t rid; /* Frag reception counter */
64052 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64053 __u32 tcp_ts;
64054 __u32 tcp_ts_stamp;
64055 };
64056 @@ -113,11 +113,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64057 more++;
64058 inet_peer_refcheck(p);
64059 do {
64060 - old = atomic_read(&p->ip_id_count);
64061 + old = atomic_read_unchecked(&p->ip_id_count);
64062 new = old + more;
64063 if (!new)
64064 new = 1;
64065 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64066 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64067 return new;
64068 }
64069
64070 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64071 index 10422ef..662570f 100644
64072 --- a/include/net/ip_fib.h
64073 +++ b/include/net/ip_fib.h
64074 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64075
64076 #define FIB_RES_SADDR(net, res) \
64077 ((FIB_RES_NH(res).nh_saddr_genid == \
64078 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64079 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64080 FIB_RES_NH(res).nh_saddr : \
64081 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64082 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64083 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64084 index 8fa4430..05dd772 100644
64085 --- a/include/net/ip_vs.h
64086 +++ b/include/net/ip_vs.h
64087 @@ -509,7 +509,7 @@ struct ip_vs_conn {
64088 struct ip_vs_conn *control; /* Master control connection */
64089 atomic_t n_control; /* Number of controlled ones */
64090 struct ip_vs_dest *dest; /* real server */
64091 - atomic_t in_pkts; /* incoming packet counter */
64092 + atomic_unchecked_t in_pkts; /* incoming packet counter */
64093
64094 /* packet transmitter for different forwarding methods. If it
64095 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64096 @@ -647,7 +647,7 @@ struct ip_vs_dest {
64097 __be16 port; /* port number of the server */
64098 union nf_inet_addr addr; /* IP address of the server */
64099 volatile unsigned flags; /* dest status flags */
64100 - atomic_t conn_flags; /* flags to copy to conn */
64101 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
64102 atomic_t weight; /* server weight */
64103
64104 atomic_t refcnt; /* reference counter */
64105 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64106 index 69b610a..fe3962c 100644
64107 --- a/include/net/irda/ircomm_core.h
64108 +++ b/include/net/irda/ircomm_core.h
64109 @@ -51,7 +51,7 @@ typedef struct {
64110 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64111 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64112 struct ircomm_info *);
64113 -} call_t;
64114 +} __no_const call_t;
64115
64116 struct ircomm_cb {
64117 irda_queue_t queue;
64118 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64119 index 59ba38bc..d515662 100644
64120 --- a/include/net/irda/ircomm_tty.h
64121 +++ b/include/net/irda/ircomm_tty.h
64122 @@ -35,6 +35,7 @@
64123 #include <linux/termios.h>
64124 #include <linux/timer.h>
64125 #include <linux/tty.h> /* struct tty_struct */
64126 +#include <asm/local.h>
64127
64128 #include <net/irda/irias_object.h>
64129 #include <net/irda/ircomm_core.h>
64130 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64131 unsigned short close_delay;
64132 unsigned short closing_wait; /* time to wait before closing */
64133
64134 - int open_count;
64135 - int blocked_open; /* # of blocked opens */
64136 + local_t open_count;
64137 + local_t blocked_open; /* # of blocked opens */
64138
64139 /* Protect concurent access to :
64140 * o self->open_count
64141 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64142 index f82a1e8..82d81e8 100644
64143 --- a/include/net/iucv/af_iucv.h
64144 +++ b/include/net/iucv/af_iucv.h
64145 @@ -87,7 +87,7 @@ struct iucv_sock {
64146 struct iucv_sock_list {
64147 struct hlist_head head;
64148 rwlock_t lock;
64149 - atomic_t autobind_name;
64150 + atomic_unchecked_t autobind_name;
64151 };
64152
64153 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64154 diff --git a/include/net/lapb.h b/include/net/lapb.h
64155 index 96cb5dd..25e8d4f 100644
64156 --- a/include/net/lapb.h
64157 +++ b/include/net/lapb.h
64158 @@ -95,7 +95,7 @@ struct lapb_cb {
64159 struct sk_buff_head write_queue;
64160 struct sk_buff_head ack_queue;
64161 unsigned char window;
64162 - struct lapb_register_struct callbacks;
64163 + struct lapb_register_struct *callbacks;
64164
64165 /* FRMR control information */
64166 struct lapb_frame frmr_data;
64167 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64168 index 2720884..3aa5c25 100644
64169 --- a/include/net/neighbour.h
64170 +++ b/include/net/neighbour.h
64171 @@ -122,7 +122,7 @@ struct neigh_ops {
64172 void (*error_report)(struct neighbour *, struct sk_buff *);
64173 int (*output)(struct neighbour *, struct sk_buff *);
64174 int (*connected_output)(struct neighbour *, struct sk_buff *);
64175 -};
64176 +} __do_const;
64177
64178 struct pneigh_entry {
64179 struct pneigh_entry *next;
64180 diff --git a/include/net/netlink.h b/include/net/netlink.h
64181 index 98c1854..d4add7b 100644
64182 --- a/include/net/netlink.h
64183 +++ b/include/net/netlink.h
64184 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64185 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64186 {
64187 if (mark)
64188 - skb_trim(skb, (unsigned char *) mark - skb->data);
64189 + skb_trim(skb, (const unsigned char *) mark - skb->data);
64190 }
64191
64192 /**
64193 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64194 index d786b4f..4c3dd41 100644
64195 --- a/include/net/netns/ipv4.h
64196 +++ b/include/net/netns/ipv4.h
64197 @@ -56,8 +56,8 @@ struct netns_ipv4 {
64198
64199 unsigned int sysctl_ping_group_range[2];
64200
64201 - atomic_t rt_genid;
64202 - atomic_t dev_addr_genid;
64203 + atomic_unchecked_t rt_genid;
64204 + atomic_unchecked_t dev_addr_genid;
64205
64206 #ifdef CONFIG_IP_MROUTE
64207 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64208 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64209 index 6a72a58..e6a127d 100644
64210 --- a/include/net/sctp/sctp.h
64211 +++ b/include/net/sctp/sctp.h
64212 @@ -318,9 +318,9 @@ do { \
64213
64214 #else /* SCTP_DEBUG */
64215
64216 -#define SCTP_DEBUG_PRINTK(whatever...)
64217 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64218 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64219 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64220 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64221 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64222 #define SCTP_ENABLE_DEBUG
64223 #define SCTP_DISABLE_DEBUG
64224 #define SCTP_ASSERT(expr, str, func)
64225 diff --git a/include/net/sock.h b/include/net/sock.h
64226 index 8e4062f..77b041e 100644
64227 --- a/include/net/sock.h
64228 +++ b/include/net/sock.h
64229 @@ -278,7 +278,7 @@ struct sock {
64230 #ifdef CONFIG_RPS
64231 __u32 sk_rxhash;
64232 #endif
64233 - atomic_t sk_drops;
64234 + atomic_unchecked_t sk_drops;
64235 int sk_rcvbuf;
64236
64237 struct sk_filter __rcu *sk_filter;
64238 @@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
64239 }
64240
64241 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64242 - char __user *from, char *to,
64243 + char __user *from, unsigned char *to,
64244 int copy, int offset)
64245 {
64246 if (skb->ip_summed == CHECKSUM_NONE) {
64247 diff --git a/include/net/tcp.h b/include/net/tcp.h
64248 index acc620a..f4d99c6 100644
64249 --- a/include/net/tcp.h
64250 +++ b/include/net/tcp.h
64251 @@ -1401,8 +1401,8 @@ enum tcp_seq_states {
64252 struct tcp_seq_afinfo {
64253 char *name;
64254 sa_family_t family;
64255 - struct file_operations seq_fops;
64256 - struct seq_operations seq_ops;
64257 + file_operations_no_const seq_fops;
64258 + seq_operations_no_const seq_ops;
64259 };
64260
64261 struct tcp_iter_state {
64262 diff --git a/include/net/udp.h b/include/net/udp.h
64263 index 67ea6fc..e42aee8 100644
64264 --- a/include/net/udp.h
64265 +++ b/include/net/udp.h
64266 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
64267 char *name;
64268 sa_family_t family;
64269 struct udp_table *udp_table;
64270 - struct file_operations seq_fops;
64271 - struct seq_operations seq_ops;
64272 + file_operations_no_const seq_fops;
64273 + seq_operations_no_const seq_ops;
64274 };
64275
64276 struct udp_iter_state {
64277 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64278 index b203e14..1df3991 100644
64279 --- a/include/net/xfrm.h
64280 +++ b/include/net/xfrm.h
64281 @@ -505,7 +505,7 @@ struct xfrm_policy {
64282 struct timer_list timer;
64283
64284 struct flow_cache_object flo;
64285 - atomic_t genid;
64286 + atomic_unchecked_t genid;
64287 u32 priority;
64288 u32 index;
64289 struct xfrm_mark mark;
64290 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64291 index 2d0191c..a55797d 100644
64292 --- a/include/rdma/iw_cm.h
64293 +++ b/include/rdma/iw_cm.h
64294 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
64295 int backlog);
64296
64297 int (*destroy_listen)(struct iw_cm_id *cm_id);
64298 -};
64299 +} __no_const;
64300
64301 /**
64302 * iw_create_cm_id - Create an IW CM identifier.
64303 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64304 index 7d96829..4ba78d3 100644
64305 --- a/include/scsi/libfc.h
64306 +++ b/include/scsi/libfc.h
64307 @@ -758,6 +758,7 @@ struct libfc_function_template {
64308 */
64309 void (*disc_stop_final) (struct fc_lport *);
64310 };
64311 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64312
64313 /**
64314 * struct fc_disc - Discovery context
64315 @@ -861,7 +862,7 @@ struct fc_lport {
64316 struct fc_vport *vport;
64317
64318 /* Operational Information */
64319 - struct libfc_function_template tt;
64320 + libfc_function_template_no_const tt;
64321 u8 link_up;
64322 u8 qfull;
64323 enum fc_lport_state state;
64324 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64325 index d371c3c..e228a8c 100644
64326 --- a/include/scsi/scsi_device.h
64327 +++ b/include/scsi/scsi_device.h
64328 @@ -161,9 +161,9 @@ struct scsi_device {
64329 unsigned int max_device_blocked; /* what device_blocked counts down from */
64330 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64331
64332 - atomic_t iorequest_cnt;
64333 - atomic_t iodone_cnt;
64334 - atomic_t ioerr_cnt;
64335 + atomic_unchecked_t iorequest_cnt;
64336 + atomic_unchecked_t iodone_cnt;
64337 + atomic_unchecked_t ioerr_cnt;
64338
64339 struct device sdev_gendev,
64340 sdev_dev;
64341 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64342 index 2a65167..91e01f8 100644
64343 --- a/include/scsi/scsi_transport_fc.h
64344 +++ b/include/scsi/scsi_transport_fc.h
64345 @@ -711,7 +711,7 @@ struct fc_function_template {
64346 unsigned long show_host_system_hostname:1;
64347
64348 unsigned long disable_target_scan:1;
64349 -};
64350 +} __do_const;
64351
64352
64353 /**
64354 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64355 index 030b87c..98a6954 100644
64356 --- a/include/sound/ak4xxx-adda.h
64357 +++ b/include/sound/ak4xxx-adda.h
64358 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64359 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64360 unsigned char val);
64361 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64362 -};
64363 +} __no_const;
64364
64365 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64366
64367 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64368 index 8c05e47..2b5df97 100644
64369 --- a/include/sound/hwdep.h
64370 +++ b/include/sound/hwdep.h
64371 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64372 struct snd_hwdep_dsp_status *status);
64373 int (*dsp_load)(struct snd_hwdep *hw,
64374 struct snd_hwdep_dsp_image *image);
64375 -};
64376 +} __no_const;
64377
64378 struct snd_hwdep {
64379 struct snd_card *card;
64380 diff --git a/include/sound/info.h b/include/sound/info.h
64381 index 4e94cf1..76748b1 100644
64382 --- a/include/sound/info.h
64383 +++ b/include/sound/info.h
64384 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
64385 struct snd_info_buffer *buffer);
64386 void (*write)(struct snd_info_entry *entry,
64387 struct snd_info_buffer *buffer);
64388 -};
64389 +} __no_const;
64390
64391 struct snd_info_entry_ops {
64392 int (*open)(struct snd_info_entry *entry,
64393 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64394 index 57e71fa..a2c7534 100644
64395 --- a/include/sound/pcm.h
64396 +++ b/include/sound/pcm.h
64397 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
64398 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64399 int (*ack)(struct snd_pcm_substream *substream);
64400 };
64401 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64402
64403 /*
64404 *
64405 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64406 index af1b49e..a5d55a5 100644
64407 --- a/include/sound/sb16_csp.h
64408 +++ b/include/sound/sb16_csp.h
64409 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64410 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64411 int (*csp_stop) (struct snd_sb_csp * p);
64412 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64413 -};
64414 +} __no_const;
64415
64416 /*
64417 * CSP private data
64418 diff --git a/include/sound/soc.h b/include/sound/soc.h
64419 index aa19f5a..a5b8208 100644
64420 --- a/include/sound/soc.h
64421 +++ b/include/sound/soc.h
64422 @@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
64423 /* platform IO - used for platform DAPM */
64424 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64425 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64426 -};
64427 +} __do_const;
64428
64429 struct snd_soc_platform {
64430 const char *name;
64431 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64432 index 444cd6b..3327cc5 100644
64433 --- a/include/sound/ymfpci.h
64434 +++ b/include/sound/ymfpci.h
64435 @@ -358,7 +358,7 @@ struct snd_ymfpci {
64436 spinlock_t reg_lock;
64437 spinlock_t voice_lock;
64438 wait_queue_head_t interrupt_sleep;
64439 - atomic_t interrupt_sleep_count;
64440 + atomic_unchecked_t interrupt_sleep_count;
64441 struct snd_info_entry *proc_entry;
64442 const struct firmware *dsp_microcode;
64443 const struct firmware *controller_microcode;
64444 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64445 index 2704065..e10f3ef 100644
64446 --- a/include/target/target_core_base.h
64447 +++ b/include/target/target_core_base.h
64448 @@ -356,7 +356,7 @@ struct t10_reservation_ops {
64449 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64450 int (*t10_pr_register)(struct se_cmd *);
64451 int (*t10_pr_clear)(struct se_cmd *);
64452 -};
64453 +} __no_const;
64454
64455 struct t10_reservation {
64456 /* Reservation effects all target ports */
64457 @@ -496,8 +496,8 @@ struct se_cmd {
64458 atomic_t t_task_cdbs_left;
64459 atomic_t t_task_cdbs_ex_left;
64460 atomic_t t_task_cdbs_timeout_left;
64461 - atomic_t t_task_cdbs_sent;
64462 - atomic_t t_transport_aborted;
64463 + atomic_unchecked_t t_task_cdbs_sent;
64464 + atomic_unchecked_t t_transport_aborted;
64465 atomic_t t_transport_active;
64466 atomic_t t_transport_complete;
64467 atomic_t t_transport_queue_active;
64468 @@ -744,7 +744,7 @@ struct se_device {
64469 atomic_t active_cmds;
64470 atomic_t simple_cmds;
64471 atomic_t depth_left;
64472 - atomic_t dev_ordered_id;
64473 + atomic_unchecked_t dev_ordered_id;
64474 atomic_t dev_tur_active;
64475 atomic_t execute_tasks;
64476 atomic_t dev_status_thr_count;
64477 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64478 index 1c09820..7f5ec79 100644
64479 --- a/include/trace/events/irq.h
64480 +++ b/include/trace/events/irq.h
64481 @@ -36,7 +36,7 @@ struct softirq_action;
64482 */
64483 TRACE_EVENT(irq_handler_entry,
64484
64485 - TP_PROTO(int irq, struct irqaction *action),
64486 + TP_PROTO(int irq, const struct irqaction *action),
64487
64488 TP_ARGS(irq, action),
64489
64490 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64491 */
64492 TRACE_EVENT(irq_handler_exit,
64493
64494 - TP_PROTO(int irq, struct irqaction *action, int ret),
64495 + TP_PROTO(int irq, const struct irqaction *action, int ret),
64496
64497 TP_ARGS(irq, action, ret),
64498
64499 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64500 index 69d485a..dd0bee7 100644
64501 --- a/include/video/udlfb.h
64502 +++ b/include/video/udlfb.h
64503 @@ -51,10 +51,10 @@ struct dlfb_data {
64504 int base8;
64505 u32 pseudo_palette[256];
64506 /* blit-only rendering path metrics, exposed through sysfs */
64507 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64508 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64509 - atomic_t bytes_sent; /* to usb, after compression including overhead */
64510 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64511 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64512 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64513 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64514 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64515 };
64516
64517 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64518 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64519 index 0993a22..32ba2fe 100644
64520 --- a/include/video/uvesafb.h
64521 +++ b/include/video/uvesafb.h
64522 @@ -177,6 +177,7 @@ struct uvesafb_par {
64523 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64524 u8 pmi_setpal; /* PMI for palette changes */
64525 u16 *pmi_base; /* protected mode interface location */
64526 + u8 *pmi_code; /* protected mode code location */
64527 void *pmi_start;
64528 void *pmi_pal;
64529 u8 *vbe_state_orig; /*
64530 diff --git a/init/Kconfig b/init/Kconfig
64531 index d627783..693a9f3 100644
64532 --- a/init/Kconfig
64533 +++ b/init/Kconfig
64534 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
64535
64536 config COMPAT_BRK
64537 bool "Disable heap randomization"
64538 - default y
64539 + default n
64540 help
64541 Randomizing heap placement makes heap exploits harder, but it
64542 also breaks ancient binaries (including anything libc5 based).
64543 diff --git a/init/do_mounts.c b/init/do_mounts.c
64544 index c0851a8..4f8977d 100644
64545 --- a/init/do_mounts.c
64546 +++ b/init/do_mounts.c
64547 @@ -287,11 +287,11 @@ static void __init get_fs_names(char *page)
64548
64549 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64550 {
64551 - int err = sys_mount(name, "/root", fs, flags, data);
64552 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64553 if (err)
64554 return err;
64555
64556 - sys_chdir((const char __user __force *)"/root");
64557 + sys_chdir((const char __force_user*)"/root");
64558 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
64559 printk(KERN_INFO
64560 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
64561 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...)
64562 va_start(args, fmt);
64563 vsprintf(buf, fmt, args);
64564 va_end(args);
64565 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64566 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64567 if (fd >= 0) {
64568 sys_ioctl(fd, FDEJECT, 0);
64569 sys_close(fd);
64570 }
64571 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64572 - fd = sys_open("/dev/console", O_RDWR, 0);
64573 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64574 if (fd >= 0) {
64575 sys_ioctl(fd, TCGETS, (long)&termios);
64576 termios.c_lflag &= ~ICANON;
64577 sys_ioctl(fd, TCSETSF, (long)&termios);
64578 - sys_read(fd, &c, 1);
64579 + sys_read(fd, (char __user *)&c, 1);
64580 termios.c_lflag |= ICANON;
64581 sys_ioctl(fd, TCSETSF, (long)&termios);
64582 sys_close(fd);
64583 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
64584 mount_root();
64585 out:
64586 devtmpfs_mount("dev");
64587 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64588 - sys_chroot((const char __user __force *)".");
64589 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64590 + sys_chroot((const char __force_user *)".");
64591 }
64592 diff --git a/init/do_mounts.h b/init/do_mounts.h
64593 index f5b978a..69dbfe8 100644
64594 --- a/init/do_mounts.h
64595 +++ b/init/do_mounts.h
64596 @@ -15,15 +15,15 @@ extern int root_mountflags;
64597
64598 static inline int create_dev(char *name, dev_t dev)
64599 {
64600 - sys_unlink(name);
64601 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64602 + sys_unlink((char __force_user *)name);
64603 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64604 }
64605
64606 #if BITS_PER_LONG == 32
64607 static inline u32 bstat(char *name)
64608 {
64609 struct stat64 stat;
64610 - if (sys_stat64(name, &stat) != 0)
64611 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64612 return 0;
64613 if (!S_ISBLK(stat.st_mode))
64614 return 0;
64615 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64616 static inline u32 bstat(char *name)
64617 {
64618 struct stat stat;
64619 - if (sys_newstat(name, &stat) != 0)
64620 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64621 return 0;
64622 if (!S_ISBLK(stat.st_mode))
64623 return 0;
64624 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64625 index 3098a38..253064e 100644
64626 --- a/init/do_mounts_initrd.c
64627 +++ b/init/do_mounts_initrd.c
64628 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
64629 create_dev("/dev/root.old", Root_RAM0);
64630 /* mount initrd on rootfs' /root */
64631 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64632 - sys_mkdir("/old", 0700);
64633 - root_fd = sys_open("/", 0, 0);
64634 - old_fd = sys_open("/old", 0, 0);
64635 + sys_mkdir((const char __force_user *)"/old", 0700);
64636 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64637 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64638 /* move initrd over / and chdir/chroot in initrd root */
64639 - sys_chdir("/root");
64640 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64641 - sys_chroot(".");
64642 + sys_chdir((const char __force_user *)"/root");
64643 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64644 + sys_chroot((const char __force_user *)".");
64645
64646 /*
64647 * In case that a resume from disk is carried out by linuxrc or one of
64648 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
64649
64650 /* move initrd to rootfs' /old */
64651 sys_fchdir(old_fd);
64652 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64653 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64654 /* switch root and cwd back to / of rootfs */
64655 sys_fchdir(root_fd);
64656 - sys_chroot(".");
64657 + sys_chroot((const char __force_user *)".");
64658 sys_close(old_fd);
64659 sys_close(root_fd);
64660
64661 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64662 - sys_chdir("/old");
64663 + sys_chdir((const char __force_user *)"/old");
64664 return;
64665 }
64666
64667 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
64668 mount_root();
64669
64670 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64671 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64672 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64673 if (!error)
64674 printk("okay\n");
64675 else {
64676 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64677 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64678 if (error == -ENOENT)
64679 printk("/initrd does not exist. Ignored.\n");
64680 else
64681 printk("failed\n");
64682 printk(KERN_NOTICE "Unmounting old root\n");
64683 - sys_umount("/old", MNT_DETACH);
64684 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64685 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64686 if (fd < 0) {
64687 error = fd;
64688 @@ -116,11 +116,11 @@ int __init initrd_load(void)
64689 * mounted in the normal path.
64690 */
64691 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64692 - sys_unlink("/initrd.image");
64693 + sys_unlink((const char __force_user *)"/initrd.image");
64694 handle_initrd();
64695 return 1;
64696 }
64697 }
64698 - sys_unlink("/initrd.image");
64699 + sys_unlink((const char __force_user *)"/initrd.image");
64700 return 0;
64701 }
64702 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64703 index 32c4799..c27ee74 100644
64704 --- a/init/do_mounts_md.c
64705 +++ b/init/do_mounts_md.c
64706 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64707 partitioned ? "_d" : "", minor,
64708 md_setup_args[ent].device_names);
64709
64710 - fd = sys_open(name, 0, 0);
64711 + fd = sys_open((char __force_user *)name, 0, 0);
64712 if (fd < 0) {
64713 printk(KERN_ERR "md: open failed - cannot start "
64714 "array %s\n", name);
64715 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64716 * array without it
64717 */
64718 sys_close(fd);
64719 - fd = sys_open(name, 0, 0);
64720 + fd = sys_open((char __force_user *)name, 0, 0);
64721 sys_ioctl(fd, BLKRRPART, 0);
64722 }
64723 sys_close(fd);
64724 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64725
64726 wait_for_device_probe();
64727
64728 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64729 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64730 if (fd >= 0) {
64731 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64732 sys_close(fd);
64733 diff --git a/init/initramfs.c b/init/initramfs.c
64734 index 2531811..040d4d4 100644
64735 --- a/init/initramfs.c
64736 +++ b/init/initramfs.c
64737 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64738 }
64739 }
64740
64741 -static long __init do_utime(char __user *filename, time_t mtime)
64742 +static long __init do_utime(__force char __user *filename, time_t mtime)
64743 {
64744 struct timespec t[2];
64745
64746 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64747 struct dir_entry *de, *tmp;
64748 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64749 list_del(&de->list);
64750 - do_utime(de->name, de->mtime);
64751 + do_utime((char __force_user *)de->name, de->mtime);
64752 kfree(de->name);
64753 kfree(de);
64754 }
64755 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64756 if (nlink >= 2) {
64757 char *old = find_link(major, minor, ino, mode, collected);
64758 if (old)
64759 - return (sys_link(old, collected) < 0) ? -1 : 1;
64760 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64761 }
64762 return 0;
64763 }
64764 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
64765 {
64766 struct stat st;
64767
64768 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64769 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64770 if (S_ISDIR(st.st_mode))
64771 - sys_rmdir(path);
64772 + sys_rmdir((char __force_user *)path);
64773 else
64774 - sys_unlink(path);
64775 + sys_unlink((char __force_user *)path);
64776 }
64777 }
64778
64779 @@ -305,7 +305,7 @@ static int __init do_name(void)
64780 int openflags = O_WRONLY|O_CREAT;
64781 if (ml != 1)
64782 openflags |= O_TRUNC;
64783 - wfd = sys_open(collected, openflags, mode);
64784 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64785
64786 if (wfd >= 0) {
64787 sys_fchown(wfd, uid, gid);
64788 @@ -317,17 +317,17 @@ static int __init do_name(void)
64789 }
64790 }
64791 } else if (S_ISDIR(mode)) {
64792 - sys_mkdir(collected, mode);
64793 - sys_chown(collected, uid, gid);
64794 - sys_chmod(collected, mode);
64795 + sys_mkdir((char __force_user *)collected, mode);
64796 + sys_chown((char __force_user *)collected, uid, gid);
64797 + sys_chmod((char __force_user *)collected, mode);
64798 dir_add(collected, mtime);
64799 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64800 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64801 if (maybe_link() == 0) {
64802 - sys_mknod(collected, mode, rdev);
64803 - sys_chown(collected, uid, gid);
64804 - sys_chmod(collected, mode);
64805 - do_utime(collected, mtime);
64806 + sys_mknod((char __force_user *)collected, mode, rdev);
64807 + sys_chown((char __force_user *)collected, uid, gid);
64808 + sys_chmod((char __force_user *)collected, mode);
64809 + do_utime((char __force_user *)collected, mtime);
64810 }
64811 }
64812 return 0;
64813 @@ -336,15 +336,15 @@ static int __init do_name(void)
64814 static int __init do_copy(void)
64815 {
64816 if (count >= body_len) {
64817 - sys_write(wfd, victim, body_len);
64818 + sys_write(wfd, (char __force_user *)victim, body_len);
64819 sys_close(wfd);
64820 - do_utime(vcollected, mtime);
64821 + do_utime((char __force_user *)vcollected, mtime);
64822 kfree(vcollected);
64823 eat(body_len);
64824 state = SkipIt;
64825 return 0;
64826 } else {
64827 - sys_write(wfd, victim, count);
64828 + sys_write(wfd, (char __force_user *)victim, count);
64829 body_len -= count;
64830 eat(count);
64831 return 1;
64832 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64833 {
64834 collected[N_ALIGN(name_len) + body_len] = '\0';
64835 clean_path(collected, 0);
64836 - sys_symlink(collected + N_ALIGN(name_len), collected);
64837 - sys_lchown(collected, uid, gid);
64838 - do_utime(collected, mtime);
64839 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64840 + sys_lchown((char __force_user *)collected, uid, gid);
64841 + do_utime((char __force_user *)collected, mtime);
64842 state = SkipIt;
64843 next_state = Reset;
64844 return 0;
64845 diff --git a/init/main.c b/init/main.c
64846 index 03b408d..5777f59 100644
64847 --- a/init/main.c
64848 +++ b/init/main.c
64849 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
64850 extern void tc_init(void);
64851 #endif
64852
64853 +extern void grsecurity_init(void);
64854 +
64855 /*
64856 * Debug helper: via this flag we know that we are in 'early bootup code'
64857 * where only the boot processor is running with IRQ disabled. This means
64858 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
64859
64860 __setup("reset_devices", set_reset_devices);
64861
64862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64863 +extern char pax_enter_kernel_user[];
64864 +extern char pax_exit_kernel_user[];
64865 +extern pgdval_t clone_pgd_mask;
64866 +#endif
64867 +
64868 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64869 +static int __init setup_pax_nouderef(char *str)
64870 +{
64871 +#ifdef CONFIG_X86_32
64872 + unsigned int cpu;
64873 + struct desc_struct *gdt;
64874 +
64875 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
64876 + gdt = get_cpu_gdt_table(cpu);
64877 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64878 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64879 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64880 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64881 + }
64882 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64883 +#else
64884 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64885 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64886 + clone_pgd_mask = ~(pgdval_t)0UL;
64887 +#endif
64888 +
64889 + return 0;
64890 +}
64891 +early_param("pax_nouderef", setup_pax_nouderef);
64892 +#endif
64893 +
64894 +#ifdef CONFIG_PAX_SOFTMODE
64895 +int pax_softmode;
64896 +
64897 +static int __init setup_pax_softmode(char *str)
64898 +{
64899 + get_option(&str, &pax_softmode);
64900 + return 1;
64901 +}
64902 +__setup("pax_softmode=", setup_pax_softmode);
64903 +#endif
64904 +
64905 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64906 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64907 static const char *panic_later, *panic_param;
64908 @@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64909 {
64910 int count = preempt_count();
64911 int ret;
64912 + const char *msg1 = "", *msg2 = "";
64913
64914 if (initcall_debug)
64915 ret = do_one_initcall_debug(fn);
64916 @@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64917 sprintf(msgbuf, "error code %d ", ret);
64918
64919 if (preempt_count() != count) {
64920 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64921 + msg1 = " preemption imbalance";
64922 preempt_count() = count;
64923 }
64924 if (irqs_disabled()) {
64925 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64926 + msg2 = " disabled interrupts";
64927 local_irq_enable();
64928 }
64929 - if (msgbuf[0]) {
64930 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64931 + if (msgbuf[0] || *msg1 || *msg2) {
64932 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64933 }
64934
64935 return ret;
64936 @@ -817,7 +863,7 @@ static int __init kernel_init(void * unused)
64937 do_basic_setup();
64938
64939 /* Open the /dev/console on the rootfs, this should never fail */
64940 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64941 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64942 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64943
64944 (void) sys_dup(0);
64945 @@ -830,11 +876,13 @@ static int __init kernel_init(void * unused)
64946 if (!ramdisk_execute_command)
64947 ramdisk_execute_command = "/init";
64948
64949 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64950 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64951 ramdisk_execute_command = NULL;
64952 prepare_namespace();
64953 }
64954
64955 + grsecurity_init();
64956 +
64957 /*
64958 * Ok, we have completed the initial bootup, and
64959 * we're essentially up and running. Get rid of the
64960 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64961 index ed049ea..6442f7f 100644
64962 --- a/ipc/mqueue.c
64963 +++ b/ipc/mqueue.c
64964 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64965 mq_bytes = (mq_msg_tblsz +
64966 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64967
64968 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64969 spin_lock(&mq_lock);
64970 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64971 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
64972 diff --git a/ipc/msg.c b/ipc/msg.c
64973 index 7385de2..a8180e0 100644
64974 --- a/ipc/msg.c
64975 +++ b/ipc/msg.c
64976 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64977 return security_msg_queue_associate(msq, msgflg);
64978 }
64979
64980 +static struct ipc_ops msg_ops = {
64981 + .getnew = newque,
64982 + .associate = msg_security,
64983 + .more_checks = NULL
64984 +};
64985 +
64986 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64987 {
64988 struct ipc_namespace *ns;
64989 - struct ipc_ops msg_ops;
64990 struct ipc_params msg_params;
64991
64992 ns = current->nsproxy->ipc_ns;
64993
64994 - msg_ops.getnew = newque;
64995 - msg_ops.associate = msg_security;
64996 - msg_ops.more_checks = NULL;
64997 -
64998 msg_params.key = key;
64999 msg_params.flg = msgflg;
65000
65001 diff --git a/ipc/sem.c b/ipc/sem.c
65002 index c8e00f8..1135c4e 100644
65003 --- a/ipc/sem.c
65004 +++ b/ipc/sem.c
65005 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
65006 return 0;
65007 }
65008
65009 +static struct ipc_ops sem_ops = {
65010 + .getnew = newary,
65011 + .associate = sem_security,
65012 + .more_checks = sem_more_checks
65013 +};
65014 +
65015 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65016 {
65017 struct ipc_namespace *ns;
65018 - struct ipc_ops sem_ops;
65019 struct ipc_params sem_params;
65020
65021 ns = current->nsproxy->ipc_ns;
65022 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65023 if (nsems < 0 || nsems > ns->sc_semmsl)
65024 return -EINVAL;
65025
65026 - sem_ops.getnew = newary;
65027 - sem_ops.associate = sem_security;
65028 - sem_ops.more_checks = sem_more_checks;
65029 -
65030 sem_params.key = key;
65031 sem_params.flg = semflg;
65032 sem_params.u.nsems = nsems;
65033 @@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
65034 int nsems;
65035 struct list_head tasks;
65036
65037 + pax_track_stack();
65038 +
65039 sma = sem_lock_check(ns, semid);
65040 if (IS_ERR(sma))
65041 return PTR_ERR(sma);
65042 @@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
65043 struct ipc_namespace *ns;
65044 struct list_head tasks;
65045
65046 + pax_track_stack();
65047 +
65048 ns = current->nsproxy->ipc_ns;
65049
65050 if (nsops < 1 || semid < 0)
65051 diff --git a/ipc/shm.c b/ipc/shm.c
65052 index 02ecf2c..c8f5627 100644
65053 --- a/ipc/shm.c
65054 +++ b/ipc/shm.c
65055 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65056 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65057 #endif
65058
65059 +#ifdef CONFIG_GRKERNSEC
65060 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65061 + const time_t shm_createtime, const uid_t cuid,
65062 + const int shmid);
65063 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65064 + const time_t shm_createtime);
65065 +#endif
65066 +
65067 void shm_init_ns(struct ipc_namespace *ns)
65068 {
65069 ns->shm_ctlmax = SHMMAX;
65070 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65071 shp->shm_lprid = 0;
65072 shp->shm_atim = shp->shm_dtim = 0;
65073 shp->shm_ctim = get_seconds();
65074 +#ifdef CONFIG_GRKERNSEC
65075 + {
65076 + struct timespec timeval;
65077 + do_posix_clock_monotonic_gettime(&timeval);
65078 +
65079 + shp->shm_createtime = timeval.tv_sec;
65080 + }
65081 +#endif
65082 shp->shm_segsz = size;
65083 shp->shm_nattch = 0;
65084 shp->shm_file = file;
65085 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65086 return 0;
65087 }
65088
65089 +static struct ipc_ops shm_ops = {
65090 + .getnew = newseg,
65091 + .associate = shm_security,
65092 + .more_checks = shm_more_checks
65093 +};
65094 +
65095 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65096 {
65097 struct ipc_namespace *ns;
65098 - struct ipc_ops shm_ops;
65099 struct ipc_params shm_params;
65100
65101 ns = current->nsproxy->ipc_ns;
65102
65103 - shm_ops.getnew = newseg;
65104 - shm_ops.associate = shm_security;
65105 - shm_ops.more_checks = shm_more_checks;
65106 -
65107 shm_params.key = key;
65108 shm_params.flg = shmflg;
65109 shm_params.u.size = size;
65110 @@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
65111 case SHM_LOCK:
65112 case SHM_UNLOCK:
65113 {
65114 - struct file *uninitialized_var(shm_file);
65115 -
65116 lru_add_drain_all(); /* drain pagevecs to lru lists */
65117
65118 shp = shm_lock_check(ns, shmid);
65119 @@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65120 if (err)
65121 goto out_unlock;
65122
65123 +#ifdef CONFIG_GRKERNSEC
65124 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65125 + shp->shm_perm.cuid, shmid) ||
65126 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65127 + err = -EACCES;
65128 + goto out_unlock;
65129 + }
65130 +#endif
65131 +
65132 path = shp->shm_file->f_path;
65133 path_get(&path);
65134 shp->shm_nattch++;
65135 +#ifdef CONFIG_GRKERNSEC
65136 + shp->shm_lapid = current->pid;
65137 +#endif
65138 size = i_size_read(path.dentry->d_inode);
65139 shm_unlock(shp);
65140
65141 diff --git a/kernel/acct.c b/kernel/acct.c
65142 index fa7eb3d..7faf116 100644
65143 --- a/kernel/acct.c
65144 +++ b/kernel/acct.c
65145 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65146 */
65147 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65148 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65149 - file->f_op->write(file, (char *)&ac,
65150 + file->f_op->write(file, (char __force_user *)&ac,
65151 sizeof(acct_t), &file->f_pos);
65152 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65153 set_fs(fs);
65154 diff --git a/kernel/audit.c b/kernel/audit.c
65155 index 0a1355c..dca420f 100644
65156 --- a/kernel/audit.c
65157 +++ b/kernel/audit.c
65158 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65159 3) suppressed due to audit_rate_limit
65160 4) suppressed due to audit_backlog_limit
65161 */
65162 -static atomic_t audit_lost = ATOMIC_INIT(0);
65163 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65164
65165 /* The netlink socket. */
65166 static struct sock *audit_sock;
65167 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65168 unsigned long now;
65169 int print;
65170
65171 - atomic_inc(&audit_lost);
65172 + atomic_inc_unchecked(&audit_lost);
65173
65174 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65175
65176 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65177 printk(KERN_WARNING
65178 "audit: audit_lost=%d audit_rate_limit=%d "
65179 "audit_backlog_limit=%d\n",
65180 - atomic_read(&audit_lost),
65181 + atomic_read_unchecked(&audit_lost),
65182 audit_rate_limit,
65183 audit_backlog_limit);
65184 audit_panic(message);
65185 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65186 status_set.pid = audit_pid;
65187 status_set.rate_limit = audit_rate_limit;
65188 status_set.backlog_limit = audit_backlog_limit;
65189 - status_set.lost = atomic_read(&audit_lost);
65190 + status_set.lost = atomic_read_unchecked(&audit_lost);
65191 status_set.backlog = skb_queue_len(&audit_skb_queue);
65192 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65193 &status_set, sizeof(status_set));
65194 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65195 index ce4b054..8139ed7 100644
65196 --- a/kernel/auditsc.c
65197 +++ b/kernel/auditsc.c
65198 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65199 }
65200
65201 /* global counter which is incremented every time something logs in */
65202 -static atomic_t session_id = ATOMIC_INIT(0);
65203 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65204
65205 /**
65206 * audit_set_loginuid - set a task's audit_context loginuid
65207 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
65208 */
65209 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
65210 {
65211 - unsigned int sessionid = atomic_inc_return(&session_id);
65212 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
65213 struct audit_context *context = task->audit_context;
65214
65215 if (context && context->in_syscall) {
65216 diff --git a/kernel/capability.c b/kernel/capability.c
65217 index 283c529..36ac81e 100644
65218 --- a/kernel/capability.c
65219 +++ b/kernel/capability.c
65220 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65221 * before modification is attempted and the application
65222 * fails.
65223 */
65224 + if (tocopy > ARRAY_SIZE(kdata))
65225 + return -EFAULT;
65226 +
65227 if (copy_to_user(dataptr, kdata, tocopy
65228 * sizeof(struct __user_cap_data_struct))) {
65229 return -EFAULT;
65230 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65231 BUG();
65232 }
65233
65234 - if (security_capable(ns, current_cred(), cap) == 0) {
65235 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
65236 current->flags |= PF_SUPERPRIV;
65237 return true;
65238 }
65239 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
65240 }
65241 EXPORT_SYMBOL(ns_capable);
65242
65243 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
65244 +{
65245 + if (unlikely(!cap_valid(cap))) {
65246 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65247 + BUG();
65248 + }
65249 +
65250 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
65251 + current->flags |= PF_SUPERPRIV;
65252 + return true;
65253 + }
65254 + return false;
65255 +}
65256 +EXPORT_SYMBOL(ns_capable_nolog);
65257 +
65258 +bool capable_nolog(int cap)
65259 +{
65260 + return ns_capable_nolog(&init_user_ns, cap);
65261 +}
65262 +EXPORT_SYMBOL(capable_nolog);
65263 +
65264 /**
65265 * task_ns_capable - Determine whether current task has a superior
65266 * capability targeted at a specific task's user namespace.
65267 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
65268 }
65269 EXPORT_SYMBOL(task_ns_capable);
65270
65271 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
65272 +{
65273 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
65274 +}
65275 +EXPORT_SYMBOL(task_ns_capable_nolog);
65276 +
65277 /**
65278 * nsown_capable - Check superior capability to one's own user_ns
65279 * @cap: The capability in question
65280 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
65281 index b7ab0b8..b3a88d2 100644
65282 --- a/kernel/cgroup.c
65283 +++ b/kernel/cgroup.c
65284 @@ -595,6 +595,8 @@ static struct css_set *find_css_set(
65285 struct hlist_head *hhead;
65286 struct cg_cgroup_link *link;
65287
65288 + pax_track_stack();
65289 +
65290 /* First see if we already have a cgroup group that matches
65291 * the desired set */
65292 read_lock(&css_set_lock);
65293 diff --git a/kernel/compat.c b/kernel/compat.c
65294 index e2435ee..8e82199 100644
65295 --- a/kernel/compat.c
65296 +++ b/kernel/compat.c
65297 @@ -13,6 +13,7 @@
65298
65299 #include <linux/linkage.h>
65300 #include <linux/compat.h>
65301 +#include <linux/module.h>
65302 #include <linux/errno.h>
65303 #include <linux/time.h>
65304 #include <linux/signal.h>
65305 @@ -167,7 +168,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65306 mm_segment_t oldfs;
65307 long ret;
65308
65309 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65310 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65311 oldfs = get_fs();
65312 set_fs(KERNEL_DS);
65313 ret = hrtimer_nanosleep_restart(restart);
65314 @@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65315 oldfs = get_fs();
65316 set_fs(KERNEL_DS);
65317 ret = hrtimer_nanosleep(&tu,
65318 - rmtp ? (struct timespec __user *)&rmt : NULL,
65319 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
65320 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65321 set_fs(oldfs);
65322
65323 @@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65324 mm_segment_t old_fs = get_fs();
65325
65326 set_fs(KERNEL_DS);
65327 - ret = sys_sigpending((old_sigset_t __user *) &s);
65328 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
65329 set_fs(old_fs);
65330 if (ret == 0)
65331 ret = put_user(s, set);
65332 @@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
65333 old_fs = get_fs();
65334 set_fs(KERNEL_DS);
65335 ret = sys_sigprocmask(how,
65336 - set ? (old_sigset_t __user *) &s : NULL,
65337 - oset ? (old_sigset_t __user *) &s : NULL);
65338 + set ? (old_sigset_t __force_user *) &s : NULL,
65339 + oset ? (old_sigset_t __force_user *) &s : NULL);
65340 set_fs(old_fs);
65341 if (ret == 0)
65342 if (oset)
65343 @@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65344 mm_segment_t old_fs = get_fs();
65345
65346 set_fs(KERNEL_DS);
65347 - ret = sys_old_getrlimit(resource, &r);
65348 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65349 set_fs(old_fs);
65350
65351 if (!ret) {
65352 @@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65353 mm_segment_t old_fs = get_fs();
65354
65355 set_fs(KERNEL_DS);
65356 - ret = sys_getrusage(who, (struct rusage __user *) &r);
65357 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65358 set_fs(old_fs);
65359
65360 if (ret)
65361 @@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65362 set_fs (KERNEL_DS);
65363 ret = sys_wait4(pid,
65364 (stat_addr ?
65365 - (unsigned int __user *) &status : NULL),
65366 - options, (struct rusage __user *) &r);
65367 + (unsigned int __force_user *) &status : NULL),
65368 + options, (struct rusage __force_user *) &r);
65369 set_fs (old_fs);
65370
65371 if (ret > 0) {
65372 @@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65373 memset(&info, 0, sizeof(info));
65374
65375 set_fs(KERNEL_DS);
65376 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65377 - uru ? (struct rusage __user *)&ru : NULL);
65378 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65379 + uru ? (struct rusage __force_user *)&ru : NULL);
65380 set_fs(old_fs);
65381
65382 if ((ret < 0) || (info.si_signo == 0))
65383 @@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65384 oldfs = get_fs();
65385 set_fs(KERNEL_DS);
65386 err = sys_timer_settime(timer_id, flags,
65387 - (struct itimerspec __user *) &newts,
65388 - (struct itimerspec __user *) &oldts);
65389 + (struct itimerspec __force_user *) &newts,
65390 + (struct itimerspec __force_user *) &oldts);
65391 set_fs(oldfs);
65392 if (!err && old && put_compat_itimerspec(old, &oldts))
65393 return -EFAULT;
65394 @@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65395 oldfs = get_fs();
65396 set_fs(KERNEL_DS);
65397 err = sys_timer_gettime(timer_id,
65398 - (struct itimerspec __user *) &ts);
65399 + (struct itimerspec __force_user *) &ts);
65400 set_fs(oldfs);
65401 if (!err && put_compat_itimerspec(setting, &ts))
65402 return -EFAULT;
65403 @@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65404 oldfs = get_fs();
65405 set_fs(KERNEL_DS);
65406 err = sys_clock_settime(which_clock,
65407 - (struct timespec __user *) &ts);
65408 + (struct timespec __force_user *) &ts);
65409 set_fs(oldfs);
65410 return err;
65411 }
65412 @@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65413 oldfs = get_fs();
65414 set_fs(KERNEL_DS);
65415 err = sys_clock_gettime(which_clock,
65416 - (struct timespec __user *) &ts);
65417 + (struct timespec __force_user *) &ts);
65418 set_fs(oldfs);
65419 if (!err && put_compat_timespec(&ts, tp))
65420 return -EFAULT;
65421 @@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65422
65423 oldfs = get_fs();
65424 set_fs(KERNEL_DS);
65425 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65426 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65427 set_fs(oldfs);
65428
65429 err = compat_put_timex(utp, &txc);
65430 @@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65431 oldfs = get_fs();
65432 set_fs(KERNEL_DS);
65433 err = sys_clock_getres(which_clock,
65434 - (struct timespec __user *) &ts);
65435 + (struct timespec __force_user *) &ts);
65436 set_fs(oldfs);
65437 if (!err && tp && put_compat_timespec(&ts, tp))
65438 return -EFAULT;
65439 @@ -729,9 +730,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65440 long err;
65441 mm_segment_t oldfs;
65442 struct timespec tu;
65443 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65444 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65445
65446 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65447 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65448 oldfs = get_fs();
65449 set_fs(KERNEL_DS);
65450 err = clock_nanosleep_restart(restart);
65451 @@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65452 oldfs = get_fs();
65453 set_fs(KERNEL_DS);
65454 err = sys_clock_nanosleep(which_clock, flags,
65455 - (struct timespec __user *) &in,
65456 - (struct timespec __user *) &out);
65457 + (struct timespec __force_user *) &in,
65458 + (struct timespec __force_user *) &out);
65459 set_fs(oldfs);
65460
65461 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65462 diff --git a/kernel/configs.c b/kernel/configs.c
65463 index 42e8fa0..9e7406b 100644
65464 --- a/kernel/configs.c
65465 +++ b/kernel/configs.c
65466 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65467 struct proc_dir_entry *entry;
65468
65469 /* create the current config file */
65470 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65471 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65472 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65473 + &ikconfig_file_ops);
65474 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65475 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65476 + &ikconfig_file_ops);
65477 +#endif
65478 +#else
65479 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65480 &ikconfig_file_ops);
65481 +#endif
65482 +
65483 if (!entry)
65484 return -ENOMEM;
65485
65486 diff --git a/kernel/cred.c b/kernel/cred.c
65487 index 8ef31f5..b5620e6 100644
65488 --- a/kernel/cred.c
65489 +++ b/kernel/cred.c
65490 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
65491 */
65492 void __put_cred(struct cred *cred)
65493 {
65494 + pax_track_stack();
65495 +
65496 kdebug("__put_cred(%p{%d,%d})", cred,
65497 atomic_read(&cred->usage),
65498 read_cred_subscribers(cred));
65499 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
65500 {
65501 struct cred *cred;
65502
65503 + pax_track_stack();
65504 +
65505 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
65506 atomic_read(&tsk->cred->usage),
65507 read_cred_subscribers(tsk->cred));
65508 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct task_struct *task)
65509 {
65510 const struct cred *cred;
65511
65512 + pax_track_stack();
65513 +
65514 rcu_read_lock();
65515
65516 do {
65517 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
65518 {
65519 struct cred *new;
65520
65521 + pax_track_stack();
65522 +
65523 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
65524 if (!new)
65525 return NULL;
65526 @@ -281,13 +289,15 @@ error:
65527 *
65528 * Call commit_creds() or abort_creds() to clean up.
65529 */
65530 -struct cred *prepare_creds(void)
65531 +
65532 +static struct cred *__prepare_creds(struct task_struct *task)
65533 {
65534 - struct task_struct *task = current;
65535 const struct cred *old;
65536 struct cred *new;
65537
65538 - validate_process_creds();
65539 + pax_track_stack();
65540 +
65541 + validate_task_creds(task);
65542
65543 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65544 if (!new)
65545 @@ -322,6 +332,11 @@ error:
65546 abort_creds(new);
65547 return NULL;
65548 }
65549 +
65550 +struct cred *prepare_creds(void)
65551 +{
65552 + return __prepare_creds(current);
65553 +}
65554 EXPORT_SYMBOL(prepare_creds);
65555
65556 /*
65557 @@ -333,6 +348,8 @@ struct cred *prepare_exec_creds(void)
65558 struct thread_group_cred *tgcred = NULL;
65559 struct cred *new;
65560
65561 + pax_track_stack();
65562 +
65563 #ifdef CONFIG_KEYS
65564 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
65565 if (!tgcred)
65566 @@ -385,6 +402,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
65567 struct cred *new;
65568 int ret;
65569
65570 + pax_track_stack();
65571 +
65572 if (
65573 #ifdef CONFIG_KEYS
65574 !p->cred->thread_keyring &&
65575 @@ -470,11 +489,12 @@ error_put:
65576 * Always returns 0 thus allowing this function to be tail-called at the end
65577 * of, say, sys_setgid().
65578 */
65579 -int commit_creds(struct cred *new)
65580 +static int __commit_creds(struct task_struct *task, struct cred *new)
65581 {
65582 - struct task_struct *task = current;
65583 const struct cred *old = task->real_cred;
65584
65585 + pax_track_stack();
65586 +
65587 kdebug("commit_creds(%p{%d,%d})", new,
65588 atomic_read(&new->usage),
65589 read_cred_subscribers(new));
65590 @@ -489,6 +509,8 @@ int commit_creds(struct cred *new)
65591
65592 get_cred(new); /* we will require a ref for the subj creds too */
65593
65594 + gr_set_role_label(task, new->uid, new->gid);
65595 +
65596 /* dumpability changes */
65597 if (old->euid != new->euid ||
65598 old->egid != new->egid ||
65599 @@ -538,6 +560,64 @@ int commit_creds(struct cred *new)
65600 put_cred(old);
65601 return 0;
65602 }
65603 +
65604 +int commit_creds(struct cred *new)
65605 +{
65606 +#ifdef CONFIG_GRKERNSEC_SETXID
65607 + struct task_struct *t;
65608 + struct cred *ncred;
65609 + const struct cred *old;
65610 +
65611 + if (grsec_enable_setxid && !current_is_single_threaded() &&
65612 + !current_uid() && new->uid) {
65613 + rcu_read_lock();
65614 + read_lock(&tasklist_lock);
65615 + for (t = next_thread(current); t != current;
65616 + t = next_thread(t)) {
65617 + old = __task_cred(t);
65618 + if (old->uid)
65619 + continue;
65620 + ncred = __prepare_creds(t);
65621 + if (!ncred)
65622 + goto die;
65623 + // uids
65624 + ncred->uid = new->uid;
65625 + ncred->euid = new->euid;
65626 + ncred->suid = new->suid;
65627 + ncred->fsuid = new->fsuid;
65628 + // gids
65629 + ncred->gid = new->gid;
65630 + ncred->egid = new->egid;
65631 + ncred->sgid = new->sgid;
65632 + ncred->fsgid = new->fsgid;
65633 + // groups
65634 + if (set_groups(ncred, new->group_info) < 0) {
65635 + abort_creds(ncred);
65636 + goto die;
65637 + }
65638 + // caps
65639 + ncred->securebits = new->securebits;
65640 + ncred->cap_inheritable = new->cap_inheritable;
65641 + ncred->cap_permitted = new->cap_permitted;
65642 + ncred->cap_effective = new->cap_effective;
65643 + ncred->cap_bset = new->cap_bset;
65644 +
65645 + __commit_creds(t, ncred);
65646 + }
65647 + read_unlock(&tasklist_lock);
65648 + rcu_read_unlock();
65649 + }
65650 +#endif
65651 + return __commit_creds(current, new);
65652 +#ifdef CONFIG_GRKERNSEC_SETXID
65653 +die:
65654 + read_unlock(&tasklist_lock);
65655 + rcu_read_unlock();
65656 + abort_creds(new);
65657 + do_group_exit(SIGKILL);
65658 +#endif
65659 +}
65660 +
65661 EXPORT_SYMBOL(commit_creds);
65662
65663 /**
65664 @@ -549,6 +629,8 @@ EXPORT_SYMBOL(commit_creds);
65665 */
65666 void abort_creds(struct cred *new)
65667 {
65668 + pax_track_stack();
65669 +
65670 kdebug("abort_creds(%p{%d,%d})", new,
65671 atomic_read(&new->usage),
65672 read_cred_subscribers(new));
65673 @@ -572,6 +654,8 @@ const struct cred *override_creds(const struct cred *new)
65674 {
65675 const struct cred *old = current->cred;
65676
65677 + pax_track_stack();
65678 +
65679 kdebug("override_creds(%p{%d,%d})", new,
65680 atomic_read(&new->usage),
65681 read_cred_subscribers(new));
65682 @@ -601,6 +685,8 @@ void revert_creds(const struct cred *old)
65683 {
65684 const struct cred *override = current->cred;
65685
65686 + pax_track_stack();
65687 +
65688 kdebug("revert_creds(%p{%d,%d})", old,
65689 atomic_read(&old->usage),
65690 read_cred_subscribers(old));
65691 @@ -647,6 +733,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
65692 const struct cred *old;
65693 struct cred *new;
65694
65695 + pax_track_stack();
65696 +
65697 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65698 if (!new)
65699 return NULL;
65700 @@ -701,6 +789,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
65701 */
65702 int set_security_override(struct cred *new, u32 secid)
65703 {
65704 + pax_track_stack();
65705 +
65706 return security_kernel_act_as(new, secid);
65707 }
65708 EXPORT_SYMBOL(set_security_override);
65709 @@ -720,6 +810,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
65710 u32 secid;
65711 int ret;
65712
65713 + pax_track_stack();
65714 +
65715 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
65716 if (ret < 0)
65717 return ret;
65718 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65719 index 0d7c087..01b8cef 100644
65720 --- a/kernel/debug/debug_core.c
65721 +++ b/kernel/debug/debug_core.c
65722 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65723 */
65724 static atomic_t masters_in_kgdb;
65725 static atomic_t slaves_in_kgdb;
65726 -static atomic_t kgdb_break_tasklet_var;
65727 +static atomic_unchecked_t kgdb_break_tasklet_var;
65728 atomic_t kgdb_setting_breakpoint;
65729
65730 struct task_struct *kgdb_usethread;
65731 @@ -129,7 +129,7 @@ int kgdb_single_step;
65732 static pid_t kgdb_sstep_pid;
65733
65734 /* to keep track of the CPU which is doing the single stepping*/
65735 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65736 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65737
65738 /*
65739 * If you are debugging a problem where roundup (the collection of
65740 @@ -542,7 +542,7 @@ return_normal:
65741 * kernel will only try for the value of sstep_tries before
65742 * giving up and continuing on.
65743 */
65744 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65745 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65746 (kgdb_info[cpu].task &&
65747 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65748 atomic_set(&kgdb_active, -1);
65749 @@ -636,8 +636,8 @@ cpu_master_loop:
65750 }
65751
65752 kgdb_restore:
65753 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65754 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65755 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65756 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65757 if (kgdb_info[sstep_cpu].task)
65758 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65759 else
65760 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
65761 static void kgdb_tasklet_bpt(unsigned long ing)
65762 {
65763 kgdb_breakpoint();
65764 - atomic_set(&kgdb_break_tasklet_var, 0);
65765 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65766 }
65767
65768 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65769
65770 void kgdb_schedule_breakpoint(void)
65771 {
65772 - if (atomic_read(&kgdb_break_tasklet_var) ||
65773 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65774 atomic_read(&kgdb_active) != -1 ||
65775 atomic_read(&kgdb_setting_breakpoint))
65776 return;
65777 - atomic_inc(&kgdb_break_tasklet_var);
65778 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65779 tasklet_schedule(&kgdb_tasklet_breakpoint);
65780 }
65781 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65782 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65783 index 63786e7..0780cac 100644
65784 --- a/kernel/debug/kdb/kdb_main.c
65785 +++ b/kernel/debug/kdb/kdb_main.c
65786 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
65787 list_for_each_entry(mod, kdb_modules, list) {
65788
65789 kdb_printf("%-20s%8u 0x%p ", mod->name,
65790 - mod->core_size, (void *)mod);
65791 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65792 #ifdef CONFIG_MODULE_UNLOAD
65793 kdb_printf("%4d ", module_refcount(mod));
65794 #endif
65795 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
65796 kdb_printf(" (Loading)");
65797 else
65798 kdb_printf(" (Live)");
65799 - kdb_printf(" 0x%p", mod->module_core);
65800 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65801
65802 #ifdef CONFIG_MODULE_UNLOAD
65803 {
65804 diff --git a/kernel/events/core.c b/kernel/events/core.c
65805 index 0f85778..0d43716 100644
65806 --- a/kernel/events/core.c
65807 +++ b/kernel/events/core.c
65808 @@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65809 return 0;
65810 }
65811
65812 -static atomic64_t perf_event_id;
65813 +static atomic64_unchecked_t perf_event_id;
65814
65815 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65816 enum event_type_t event_type);
65817 @@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info)
65818
65819 static inline u64 perf_event_count(struct perf_event *event)
65820 {
65821 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65822 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65823 }
65824
65825 static u64 perf_event_read(struct perf_event *event)
65826 @@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65827 mutex_lock(&event->child_mutex);
65828 total += perf_event_read(event);
65829 *enabled += event->total_time_enabled +
65830 - atomic64_read(&event->child_total_time_enabled);
65831 + atomic64_read_unchecked(&event->child_total_time_enabled);
65832 *running += event->total_time_running +
65833 - atomic64_read(&event->child_total_time_running);
65834 + atomic64_read_unchecked(&event->child_total_time_running);
65835
65836 list_for_each_entry(child, &event->child_list, child_list) {
65837 total += perf_event_read(child);
65838 @@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct perf_event *event)
65839 userpg->offset -= local64_read(&event->hw.prev_count);
65840
65841 userpg->time_enabled = enabled +
65842 - atomic64_read(&event->child_total_time_enabled);
65843 + atomic64_read_unchecked(&event->child_total_time_enabled);
65844
65845 userpg->time_running = running +
65846 - atomic64_read(&event->child_total_time_running);
65847 + atomic64_read_unchecked(&event->child_total_time_running);
65848
65849 barrier();
65850 ++userpg->lock;
65851 @@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65852 values[n++] = perf_event_count(event);
65853 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65854 values[n++] = enabled +
65855 - atomic64_read(&event->child_total_time_enabled);
65856 + atomic64_read_unchecked(&event->child_total_time_enabled);
65857 }
65858 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65859 values[n++] = running +
65860 - atomic64_read(&event->child_total_time_running);
65861 + atomic64_read_unchecked(&event->child_total_time_running);
65862 }
65863 if (read_format & PERF_FORMAT_ID)
65864 values[n++] = primary_event_id(event);
65865 @@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65866 * need to add enough zero bytes after the string to handle
65867 * the 64bit alignment we do later.
65868 */
65869 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65870 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65871 if (!buf) {
65872 name = strncpy(tmp, "//enomem", sizeof(tmp));
65873 goto got_name;
65874 }
65875 - name = d_path(&file->f_path, buf, PATH_MAX);
65876 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65877 if (IS_ERR(name)) {
65878 name = strncpy(tmp, "//toolong", sizeof(tmp));
65879 goto got_name;
65880 @@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65881 event->parent = parent_event;
65882
65883 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65884 - event->id = atomic64_inc_return(&perf_event_id);
65885 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65886
65887 event->state = PERF_EVENT_STATE_INACTIVE;
65888
65889 @@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf_event *child_event,
65890 /*
65891 * Add back the child's count to the parent's count:
65892 */
65893 - atomic64_add(child_val, &parent_event->child_count);
65894 - atomic64_add(child_event->total_time_enabled,
65895 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65896 + atomic64_add_unchecked(child_event->total_time_enabled,
65897 &parent_event->child_total_time_enabled);
65898 - atomic64_add(child_event->total_time_running,
65899 + atomic64_add_unchecked(child_event->total_time_running,
65900 &parent_event->child_total_time_running);
65901
65902 /*
65903 diff --git a/kernel/exit.c b/kernel/exit.c
65904 index 9e316ae..b3656d5 100644
65905 --- a/kernel/exit.c
65906 +++ b/kernel/exit.c
65907 @@ -57,6 +57,10 @@
65908 #include <asm/pgtable.h>
65909 #include <asm/mmu_context.h>
65910
65911 +#ifdef CONFIG_GRKERNSEC
65912 +extern rwlock_t grsec_exec_file_lock;
65913 +#endif
65914 +
65915 static void exit_mm(struct task_struct * tsk);
65916
65917 static void __unhash_process(struct task_struct *p, bool group_dead)
65918 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
65919 struct task_struct *leader;
65920 int zap_leader;
65921 repeat:
65922 +#ifdef CONFIG_NET
65923 + gr_del_task_from_ip_table(p);
65924 +#endif
65925 +
65926 /* don't need to get the RCU readlock here - the process is dead and
65927 * can't be modifying its own credentials. But shut RCU-lockdep up */
65928 rcu_read_lock();
65929 @@ -380,7 +388,7 @@ int allow_signal(int sig)
65930 * know it'll be handled, so that they don't get converted to
65931 * SIGKILL or just silently dropped.
65932 */
65933 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65934 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65935 recalc_sigpending();
65936 spin_unlock_irq(&current->sighand->siglock);
65937 return 0;
65938 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
65939 vsnprintf(current->comm, sizeof(current->comm), name, args);
65940 va_end(args);
65941
65942 +#ifdef CONFIG_GRKERNSEC
65943 + write_lock(&grsec_exec_file_lock);
65944 + if (current->exec_file) {
65945 + fput(current->exec_file);
65946 + current->exec_file = NULL;
65947 + }
65948 + write_unlock(&grsec_exec_file_lock);
65949 +#endif
65950 +
65951 + gr_set_kernel_label(current);
65952 +
65953 /*
65954 * If we were started as result of loading a module, close all of the
65955 * user space pages. We don't need them, and if we didn't close them
65956 @@ -895,6 +914,8 @@ NORET_TYPE void do_exit(long code)
65957 struct task_struct *tsk = current;
65958 int group_dead;
65959
65960 + set_fs(USER_DS);
65961 +
65962 profile_task_exit(tsk);
65963
65964 WARN_ON(blk_needs_flush_plug(tsk));
65965 @@ -911,7 +932,6 @@ NORET_TYPE void do_exit(long code)
65966 * mm_release()->clear_child_tid() from writing to a user-controlled
65967 * kernel address.
65968 */
65969 - set_fs(USER_DS);
65970
65971 ptrace_event(PTRACE_EVENT_EXIT, code);
65972
65973 @@ -973,6 +993,9 @@ NORET_TYPE void do_exit(long code)
65974 tsk->exit_code = code;
65975 taskstats_exit(tsk, group_dead);
65976
65977 + gr_acl_handle_psacct(tsk, code);
65978 + gr_acl_handle_exit();
65979 +
65980 exit_mm(tsk);
65981
65982 if (group_dead)
65983 diff --git a/kernel/fork.c b/kernel/fork.c
65984 index 8e6b6f4..9dccf00 100644
65985 --- a/kernel/fork.c
65986 +++ b/kernel/fork.c
65987 @@ -285,7 +285,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65988 *stackend = STACK_END_MAGIC; /* for overflow detection */
65989
65990 #ifdef CONFIG_CC_STACKPROTECTOR
65991 - tsk->stack_canary = get_random_int();
65992 + tsk->stack_canary = pax_get_random_long();
65993 #endif
65994
65995 /*
65996 @@ -309,13 +309,77 @@ out:
65997 }
65998
65999 #ifdef CONFIG_MMU
66000 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
66001 +{
66002 + struct vm_area_struct *tmp;
66003 + unsigned long charge;
66004 + struct mempolicy *pol;
66005 + struct file *file;
66006 +
66007 + charge = 0;
66008 + if (mpnt->vm_flags & VM_ACCOUNT) {
66009 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66010 + if (security_vm_enough_memory(len))
66011 + goto fail_nomem;
66012 + charge = len;
66013 + }
66014 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66015 + if (!tmp)
66016 + goto fail_nomem;
66017 + *tmp = *mpnt;
66018 + tmp->vm_mm = mm;
66019 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
66020 + pol = mpol_dup(vma_policy(mpnt));
66021 + if (IS_ERR(pol))
66022 + goto fail_nomem_policy;
66023 + vma_set_policy(tmp, pol);
66024 + if (anon_vma_fork(tmp, mpnt))
66025 + goto fail_nomem_anon_vma_fork;
66026 + tmp->vm_flags &= ~VM_LOCKED;
66027 + tmp->vm_next = tmp->vm_prev = NULL;
66028 + tmp->vm_mirror = NULL;
66029 + file = tmp->vm_file;
66030 + if (file) {
66031 + struct inode *inode = file->f_path.dentry->d_inode;
66032 + struct address_space *mapping = file->f_mapping;
66033 +
66034 + get_file(file);
66035 + if (tmp->vm_flags & VM_DENYWRITE)
66036 + atomic_dec(&inode->i_writecount);
66037 + mutex_lock(&mapping->i_mmap_mutex);
66038 + if (tmp->vm_flags & VM_SHARED)
66039 + mapping->i_mmap_writable++;
66040 + flush_dcache_mmap_lock(mapping);
66041 + /* insert tmp into the share list, just after mpnt */
66042 + vma_prio_tree_add(tmp, mpnt);
66043 + flush_dcache_mmap_unlock(mapping);
66044 + mutex_unlock(&mapping->i_mmap_mutex);
66045 + }
66046 +
66047 + /*
66048 + * Clear hugetlb-related page reserves for children. This only
66049 + * affects MAP_PRIVATE mappings. Faults generated by the child
66050 + * are not guaranteed to succeed, even if read-only
66051 + */
66052 + if (is_vm_hugetlb_page(tmp))
66053 + reset_vma_resv_huge_pages(tmp);
66054 +
66055 + return tmp;
66056 +
66057 +fail_nomem_anon_vma_fork:
66058 + mpol_put(pol);
66059 +fail_nomem_policy:
66060 + kmem_cache_free(vm_area_cachep, tmp);
66061 +fail_nomem:
66062 + vm_unacct_memory(charge);
66063 + return NULL;
66064 +}
66065 +
66066 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66067 {
66068 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66069 struct rb_node **rb_link, *rb_parent;
66070 int retval;
66071 - unsigned long charge;
66072 - struct mempolicy *pol;
66073
66074 down_write(&oldmm->mmap_sem);
66075 flush_cache_dup_mm(oldmm);
66076 @@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66077 mm->locked_vm = 0;
66078 mm->mmap = NULL;
66079 mm->mmap_cache = NULL;
66080 - mm->free_area_cache = oldmm->mmap_base;
66081 - mm->cached_hole_size = ~0UL;
66082 + mm->free_area_cache = oldmm->free_area_cache;
66083 + mm->cached_hole_size = oldmm->cached_hole_size;
66084 mm->map_count = 0;
66085 cpumask_clear(mm_cpumask(mm));
66086 mm->mm_rb = RB_ROOT;
66087 @@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66088
66089 prev = NULL;
66090 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66091 - struct file *file;
66092 -
66093 if (mpnt->vm_flags & VM_DONTCOPY) {
66094 long pages = vma_pages(mpnt);
66095 mm->total_vm -= pages;
66096 @@ -353,53 +415,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66097 -pages);
66098 continue;
66099 }
66100 - charge = 0;
66101 - if (mpnt->vm_flags & VM_ACCOUNT) {
66102 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66103 - if (security_vm_enough_memory(len))
66104 - goto fail_nomem;
66105 - charge = len;
66106 + tmp = dup_vma(mm, mpnt);
66107 + if (!tmp) {
66108 + retval = -ENOMEM;
66109 + goto out;
66110 }
66111 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66112 - if (!tmp)
66113 - goto fail_nomem;
66114 - *tmp = *mpnt;
66115 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
66116 - pol = mpol_dup(vma_policy(mpnt));
66117 - retval = PTR_ERR(pol);
66118 - if (IS_ERR(pol))
66119 - goto fail_nomem_policy;
66120 - vma_set_policy(tmp, pol);
66121 - tmp->vm_mm = mm;
66122 - if (anon_vma_fork(tmp, mpnt))
66123 - goto fail_nomem_anon_vma_fork;
66124 - tmp->vm_flags &= ~VM_LOCKED;
66125 - tmp->vm_next = tmp->vm_prev = NULL;
66126 - file = tmp->vm_file;
66127 - if (file) {
66128 - struct inode *inode = file->f_path.dentry->d_inode;
66129 - struct address_space *mapping = file->f_mapping;
66130 -
66131 - get_file(file);
66132 - if (tmp->vm_flags & VM_DENYWRITE)
66133 - atomic_dec(&inode->i_writecount);
66134 - mutex_lock(&mapping->i_mmap_mutex);
66135 - if (tmp->vm_flags & VM_SHARED)
66136 - mapping->i_mmap_writable++;
66137 - flush_dcache_mmap_lock(mapping);
66138 - /* insert tmp into the share list, just after mpnt */
66139 - vma_prio_tree_add(tmp, mpnt);
66140 - flush_dcache_mmap_unlock(mapping);
66141 - mutex_unlock(&mapping->i_mmap_mutex);
66142 - }
66143 -
66144 - /*
66145 - * Clear hugetlb-related page reserves for children. This only
66146 - * affects MAP_PRIVATE mappings. Faults generated by the child
66147 - * are not guaranteed to succeed, even if read-only
66148 - */
66149 - if (is_vm_hugetlb_page(tmp))
66150 - reset_vma_resv_huge_pages(tmp);
66151
66152 /*
66153 * Link in the new vma and copy the page table entries.
66154 @@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66155 if (retval)
66156 goto out;
66157 }
66158 +
66159 +#ifdef CONFIG_PAX_SEGMEXEC
66160 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66161 + struct vm_area_struct *mpnt_m;
66162 +
66163 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66164 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66165 +
66166 + if (!mpnt->vm_mirror)
66167 + continue;
66168 +
66169 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66170 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66171 + mpnt->vm_mirror = mpnt_m;
66172 + } else {
66173 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66174 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66175 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66176 + mpnt->vm_mirror->vm_mirror = mpnt;
66177 + }
66178 + }
66179 + BUG_ON(mpnt_m);
66180 + }
66181 +#endif
66182 +
66183 /* a new mm has just been created */
66184 arch_dup_mmap(oldmm, mm);
66185 retval = 0;
66186 @@ -430,14 +475,6 @@ out:
66187 flush_tlb_mm(oldmm);
66188 up_write(&oldmm->mmap_sem);
66189 return retval;
66190 -fail_nomem_anon_vma_fork:
66191 - mpol_put(pol);
66192 -fail_nomem_policy:
66193 - kmem_cache_free(vm_area_cachep, tmp);
66194 -fail_nomem:
66195 - retval = -ENOMEM;
66196 - vm_unacct_memory(charge);
66197 - goto out;
66198 }
66199
66200 static inline int mm_alloc_pgd(struct mm_struct *mm)
66201 @@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66202 spin_unlock(&fs->lock);
66203 return -EAGAIN;
66204 }
66205 - fs->users++;
66206 + atomic_inc(&fs->users);
66207 spin_unlock(&fs->lock);
66208 return 0;
66209 }
66210 tsk->fs = copy_fs_struct(fs);
66211 if (!tsk->fs)
66212 return -ENOMEM;
66213 + gr_set_chroot_entries(tsk, &tsk->fs->root);
66214 return 0;
66215 }
66216
66217 @@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66218 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66219 #endif
66220 retval = -EAGAIN;
66221 +
66222 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66223 +
66224 if (atomic_read(&p->real_cred->user->processes) >=
66225 task_rlimit(p, RLIMIT_NPROC)) {
66226 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66227 @@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66228 if (clone_flags & CLONE_THREAD)
66229 p->tgid = current->tgid;
66230
66231 + gr_copy_label(p);
66232 +
66233 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
66234 /*
66235 * Clear TID on mm_release()?
66236 @@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
66237 bad_fork_free:
66238 free_task(p);
66239 fork_out:
66240 + gr_log_forkfail(retval);
66241 +
66242 return ERR_PTR(retval);
66243 }
66244
66245 @@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
66246 if (clone_flags & CLONE_PARENT_SETTID)
66247 put_user(nr, parent_tidptr);
66248
66249 + gr_handle_brute_check();
66250 +
66251 if (clone_flags & CLONE_VFORK) {
66252 p->vfork_done = &vfork;
66253 init_completion(&vfork);
66254 @@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66255 return 0;
66256
66257 /* don't need lock here; in the worst case we'll do useless copy */
66258 - if (fs->users == 1)
66259 + if (atomic_read(&fs->users) == 1)
66260 return 0;
66261
66262 *new_fsp = copy_fs_struct(fs);
66263 @@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66264 fs = current->fs;
66265 spin_lock(&fs->lock);
66266 current->fs = new_fs;
66267 - if (--fs->users)
66268 + gr_set_chroot_entries(current, &current->fs->root);
66269 + if (atomic_dec_return(&fs->users))
66270 new_fs = NULL;
66271 else
66272 new_fs = fs;
66273 diff --git a/kernel/futex.c b/kernel/futex.c
66274 index e6160fa..edf9565 100644
66275 --- a/kernel/futex.c
66276 +++ b/kernel/futex.c
66277 @@ -54,6 +54,7 @@
66278 #include <linux/mount.h>
66279 #include <linux/pagemap.h>
66280 #include <linux/syscalls.h>
66281 +#include <linux/ptrace.h>
66282 #include <linux/signal.h>
66283 #include <linux/module.h>
66284 #include <linux/magic.h>
66285 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66286 struct page *page, *page_head;
66287 int err, ro = 0;
66288
66289 +#ifdef CONFIG_PAX_SEGMEXEC
66290 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66291 + return -EFAULT;
66292 +#endif
66293 +
66294 /*
66295 * The futex address must be "naturally" aligned.
66296 */
66297 @@ -1875,6 +1881,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
66298 struct futex_q q = futex_q_init;
66299 int ret;
66300
66301 + pax_track_stack();
66302 +
66303 if (!bitset)
66304 return -EINVAL;
66305 q.bitset = bitset;
66306 @@ -2271,6 +2279,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
66307 struct futex_q q = futex_q_init;
66308 int res, ret;
66309
66310 + pax_track_stack();
66311 +
66312 if (!bitset)
66313 return -EINVAL;
66314
66315 @@ -2459,6 +2469,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
66316 if (!p)
66317 goto err_unlock;
66318 ret = -EPERM;
66319 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66320 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
66321 + goto err_unlock;
66322 +#endif
66323 pcred = __task_cred(p);
66324 /* If victim is in different user_ns, then uids are not
66325 comparable, so we must have CAP_SYS_PTRACE */
66326 @@ -2724,6 +2738,7 @@ static int __init futex_init(void)
66327 {
66328 u32 curval;
66329 int i;
66330 + mm_segment_t oldfs;
66331
66332 /*
66333 * This will fail and we want it. Some arch implementations do
66334 @@ -2735,8 +2750,11 @@ static int __init futex_init(void)
66335 * implementation, the non-functional ones will return
66336 * -ENOSYS.
66337 */
66338 + oldfs = get_fs();
66339 + set_fs(USER_DS);
66340 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66341 futex_cmpxchg_enabled = 1;
66342 + set_fs(oldfs);
66343
66344 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66345 plist_head_init(&futex_queues[i].chain);
66346 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
66347 index 5f9e689..582d46d 100644
66348 --- a/kernel/futex_compat.c
66349 +++ b/kernel/futex_compat.c
66350 @@ -10,6 +10,7 @@
66351 #include <linux/compat.h>
66352 #include <linux/nsproxy.h>
66353 #include <linux/futex.h>
66354 +#include <linux/ptrace.h>
66355
66356 #include <asm/uaccess.h>
66357
66358 @@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66359 {
66360 struct compat_robust_list_head __user *head;
66361 unsigned long ret;
66362 - const struct cred *cred = current_cred(), *pcred;
66363 + const struct cred *cred = current_cred();
66364 + const struct cred *pcred;
66365
66366 if (!futex_cmpxchg_enabled)
66367 return -ENOSYS;
66368 @@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66369 if (!p)
66370 goto err_unlock;
66371 ret = -EPERM;
66372 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66373 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
66374 + goto err_unlock;
66375 +#endif
66376 pcred = __task_cred(p);
66377 /* If victim is in different user_ns, then uids are not
66378 comparable, so we must have CAP_SYS_PTRACE */
66379 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66380 index 9b22d03..6295b62 100644
66381 --- a/kernel/gcov/base.c
66382 +++ b/kernel/gcov/base.c
66383 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
66384 }
66385
66386 #ifdef CONFIG_MODULES
66387 -static inline int within(void *addr, void *start, unsigned long size)
66388 -{
66389 - return ((addr >= start) && (addr < start + size));
66390 -}
66391 -
66392 /* Update list and generate events when modules are unloaded. */
66393 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66394 void *data)
66395 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66396 prev = NULL;
66397 /* Remove entries located in module from linked list. */
66398 for (info = gcov_info_head; info; info = info->next) {
66399 - if (within(info, mod->module_core, mod->core_size)) {
66400 + if (within_module_core_rw((unsigned long)info, mod)) {
66401 if (prev)
66402 prev->next = info->next;
66403 else
66404 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66405 index 2043c08..ec81a69 100644
66406 --- a/kernel/hrtimer.c
66407 +++ b/kernel/hrtimer.c
66408 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
66409 local_irq_restore(flags);
66410 }
66411
66412 -static void run_hrtimer_softirq(struct softirq_action *h)
66413 +static void run_hrtimer_softirq(void)
66414 {
66415 hrtimer_peek_ahead_timers();
66416 }
66417 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66418 index e6f1f24..6c19597 100644
66419 --- a/kernel/jump_label.c
66420 +++ b/kernel/jump_label.c
66421 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66422
66423 size = (((unsigned long)stop - (unsigned long)start)
66424 / sizeof(struct jump_entry));
66425 + pax_open_kernel();
66426 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66427 + pax_close_kernel();
66428 }
66429
66430 static void jump_label_update(struct jump_label_key *key, int enable);
66431 @@ -298,10 +300,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66432 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66433 struct jump_entry *iter;
66434
66435 + pax_open_kernel();
66436 for (iter = iter_start; iter < iter_stop; iter++) {
66437 if (within_module_init(iter->code, mod))
66438 iter->code = 0;
66439 }
66440 + pax_close_kernel();
66441 }
66442
66443 static int
66444 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66445 index 079f1d3..a407562 100644
66446 --- a/kernel/kallsyms.c
66447 +++ b/kernel/kallsyms.c
66448 @@ -11,6 +11,9 @@
66449 * Changed the compression method from stem compression to "table lookup"
66450 * compression (see scripts/kallsyms.c for a more complete description)
66451 */
66452 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66453 +#define __INCLUDED_BY_HIDESYM 1
66454 +#endif
66455 #include <linux/kallsyms.h>
66456 #include <linux/module.h>
66457 #include <linux/init.h>
66458 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66459
66460 static inline int is_kernel_inittext(unsigned long addr)
66461 {
66462 + if (system_state != SYSTEM_BOOTING)
66463 + return 0;
66464 +
66465 if (addr >= (unsigned long)_sinittext
66466 && addr <= (unsigned long)_einittext)
66467 return 1;
66468 return 0;
66469 }
66470
66471 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66472 +#ifdef CONFIG_MODULES
66473 +static inline int is_module_text(unsigned long addr)
66474 +{
66475 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66476 + return 1;
66477 +
66478 + addr = ktla_ktva(addr);
66479 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66480 +}
66481 +#else
66482 +static inline int is_module_text(unsigned long addr)
66483 +{
66484 + return 0;
66485 +}
66486 +#endif
66487 +#endif
66488 +
66489 static inline int is_kernel_text(unsigned long addr)
66490 {
66491 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66492 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66493
66494 static inline int is_kernel(unsigned long addr)
66495 {
66496 +
66497 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66498 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
66499 + return 1;
66500 +
66501 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66502 +#else
66503 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66504 +#endif
66505 +
66506 return 1;
66507 return in_gate_area_no_mm(addr);
66508 }
66509
66510 static int is_ksym_addr(unsigned long addr)
66511 {
66512 +
66513 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66514 + if (is_module_text(addr))
66515 + return 0;
66516 +#endif
66517 +
66518 if (all_var)
66519 return is_kernel(addr);
66520
66521 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66522
66523 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66524 {
66525 - iter->name[0] = '\0';
66526 iter->nameoff = get_symbol_offset(new_pos);
66527 iter->pos = new_pos;
66528 }
66529 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66530 {
66531 struct kallsym_iter *iter = m->private;
66532
66533 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66534 + if (current_uid())
66535 + return 0;
66536 +#endif
66537 +
66538 /* Some debugging symbols have no name. Ignore them. */
66539 if (!iter->name[0])
66540 return 0;
66541 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66542 struct kallsym_iter *iter;
66543 int ret;
66544
66545 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66546 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66547 if (!iter)
66548 return -ENOMEM;
66549 reset_iter(iter, 0);
66550 diff --git a/kernel/kexec.c b/kernel/kexec.c
66551 index 296fbc8..84cb857 100644
66552 --- a/kernel/kexec.c
66553 +++ b/kernel/kexec.c
66554 @@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66555 unsigned long flags)
66556 {
66557 struct compat_kexec_segment in;
66558 - struct kexec_segment out, __user *ksegments;
66559 + struct kexec_segment out;
66560 + struct kexec_segment __user *ksegments;
66561 unsigned long i, result;
66562
66563 /* Don't allow clients that don't understand the native
66564 diff --git a/kernel/kmod.c b/kernel/kmod.c
66565 index a4bea97..7a1ae9a 100644
66566 --- a/kernel/kmod.c
66567 +++ b/kernel/kmod.c
66568 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
66569 * If module auto-loading support is disabled then this function
66570 * becomes a no-operation.
66571 */
66572 -int __request_module(bool wait, const char *fmt, ...)
66573 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66574 {
66575 - va_list args;
66576 char module_name[MODULE_NAME_LEN];
66577 unsigned int max_modprobes;
66578 int ret;
66579 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
66580 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
66581 static char *envp[] = { "HOME=/",
66582 "TERM=linux",
66583 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
66584 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
66585 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66586 static int kmod_loop_msg;
66587
66588 - va_start(args, fmt);
66589 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66590 - va_end(args);
66591 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66592 if (ret >= MODULE_NAME_LEN)
66593 return -ENAMETOOLONG;
66594
66595 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
66596 if (ret)
66597 return ret;
66598
66599 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66600 + if (!current_uid()) {
66601 + /* hack to workaround consolekit/udisks stupidity */
66602 + read_lock(&tasklist_lock);
66603 + if (!strcmp(current->comm, "mount") &&
66604 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66605 + read_unlock(&tasklist_lock);
66606 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66607 + return -EPERM;
66608 + }
66609 + read_unlock(&tasklist_lock);
66610 + }
66611 +#endif
66612 +
66613 /* If modprobe needs a service that is in a module, we get a recursive
66614 * loop. Limit the number of running kmod threads to max_threads/2 or
66615 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66616 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
66617 atomic_dec(&kmod_concurrent);
66618 return ret;
66619 }
66620 +
66621 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66622 +{
66623 + va_list args;
66624 + int ret;
66625 +
66626 + va_start(args, fmt);
66627 + ret = ____request_module(wait, module_param, fmt, args);
66628 + va_end(args);
66629 +
66630 + return ret;
66631 +}
66632 +
66633 +int __request_module(bool wait, const char *fmt, ...)
66634 +{
66635 + va_list args;
66636 + int ret;
66637 +
66638 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66639 + if (current_uid()) {
66640 + char module_param[MODULE_NAME_LEN];
66641 +
66642 + memset(module_param, 0, sizeof(module_param));
66643 +
66644 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66645 +
66646 + va_start(args, fmt);
66647 + ret = ____request_module(wait, module_param, fmt, args);
66648 + va_end(args);
66649 +
66650 + return ret;
66651 + }
66652 +#endif
66653 +
66654 + va_start(args, fmt);
66655 + ret = ____request_module(wait, NULL, fmt, args);
66656 + va_end(args);
66657 +
66658 + return ret;
66659 +}
66660 +
66661 EXPORT_SYMBOL(__request_module);
66662 #endif /* CONFIG_MODULES */
66663
66664 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
66665 *
66666 * Thus the __user pointer cast is valid here.
66667 */
66668 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
66669 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66670
66671 /*
66672 * If ret is 0, either ____call_usermodehelper failed and the
66673 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66674 index b30fd54..11821ec 100644
66675 --- a/kernel/kprobes.c
66676 +++ b/kernel/kprobes.c
66677 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66678 * kernel image and loaded module images reside. This is required
66679 * so x86_64 can correctly handle the %rip-relative fixups.
66680 */
66681 - kip->insns = module_alloc(PAGE_SIZE);
66682 + kip->insns = module_alloc_exec(PAGE_SIZE);
66683 if (!kip->insns) {
66684 kfree(kip);
66685 return NULL;
66686 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66687 */
66688 if (!list_is_singular(&kip->list)) {
66689 list_del(&kip->list);
66690 - module_free(NULL, kip->insns);
66691 + module_free_exec(NULL, kip->insns);
66692 kfree(kip);
66693 }
66694 return 1;
66695 @@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
66696 {
66697 int i, err = 0;
66698 unsigned long offset = 0, size = 0;
66699 - char *modname, namebuf[128];
66700 + char *modname, namebuf[KSYM_NAME_LEN];
66701 const char *symbol_name;
66702 void *addr;
66703 struct kprobe_blackpoint *kb;
66704 @@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66705 const char *sym = NULL;
66706 unsigned int i = *(loff_t *) v;
66707 unsigned long offset = 0;
66708 - char *modname, namebuf[128];
66709 + char *modname, namebuf[KSYM_NAME_LEN];
66710
66711 head = &kprobe_table[i];
66712 preempt_disable();
66713 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66714 index 4479606..4036bea 100644
66715 --- a/kernel/lockdep.c
66716 +++ b/kernel/lockdep.c
66717 @@ -584,6 +584,10 @@ static int static_obj(void *obj)
66718 end = (unsigned long) &_end,
66719 addr = (unsigned long) obj;
66720
66721 +#ifdef CONFIG_PAX_KERNEXEC
66722 + start = ktla_ktva(start);
66723 +#endif
66724 +
66725 /*
66726 * static variable?
66727 */
66728 @@ -719,6 +723,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66729 if (!static_obj(lock->key)) {
66730 debug_locks_off();
66731 printk("INFO: trying to register non-static key.\n");
66732 + printk("lock:%pS key:%pS.\n", lock, lock->key);
66733 printk("the code is fine but needs lockdep annotation.\n");
66734 printk("turning off the locking correctness validator.\n");
66735 dump_stack();
66736 @@ -2954,7 +2959,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66737 if (!class)
66738 return 0;
66739 }
66740 - atomic_inc((atomic_t *)&class->ops);
66741 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66742 if (very_verbose(class)) {
66743 printk("\nacquire class [%p] %s", class->key, class->name);
66744 if (class->name_version > 1)
66745 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66746 index 71edd2f..e0542a5 100644
66747 --- a/kernel/lockdep_proc.c
66748 +++ b/kernel/lockdep_proc.c
66749 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66750
66751 static void print_name(struct seq_file *m, struct lock_class *class)
66752 {
66753 - char str[128];
66754 + char str[KSYM_NAME_LEN];
66755 const char *name = class->name;
66756
66757 if (!name) {
66758 diff --git a/kernel/module.c b/kernel/module.c
66759 index 04379f92..fba2faf 100644
66760 --- a/kernel/module.c
66761 +++ b/kernel/module.c
66762 @@ -58,6 +58,7 @@
66763 #include <linux/jump_label.h>
66764 #include <linux/pfn.h>
66765 #include <linux/bsearch.h>
66766 +#include <linux/grsecurity.h>
66767
66768 #define CREATE_TRACE_POINTS
66769 #include <trace/events/module.h>
66770 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66771
66772 /* Bounds of module allocation, for speeding __module_address.
66773 * Protected by module_mutex. */
66774 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66775 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66776 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66777
66778 int register_module_notifier(struct notifier_block * nb)
66779 {
66780 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66781 return true;
66782
66783 list_for_each_entry_rcu(mod, &modules, list) {
66784 - struct symsearch arr[] = {
66785 + struct symsearch modarr[] = {
66786 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66787 NOT_GPL_ONLY, false },
66788 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66789 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66790 #endif
66791 };
66792
66793 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66794 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66795 return true;
66796 }
66797 return false;
66798 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66799 static int percpu_modalloc(struct module *mod,
66800 unsigned long size, unsigned long align)
66801 {
66802 - if (align > PAGE_SIZE) {
66803 + if (align-1 >= PAGE_SIZE) {
66804 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66805 mod->name, align, PAGE_SIZE);
66806 align = PAGE_SIZE;
66807 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
66808 */
66809 #ifdef CONFIG_SYSFS
66810
66811 -#ifdef CONFIG_KALLSYMS
66812 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66813 static inline bool sect_empty(const Elf_Shdr *sect)
66814 {
66815 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66816 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
66817
66818 static void unset_module_core_ro_nx(struct module *mod)
66819 {
66820 - set_page_attributes(mod->module_core + mod->core_text_size,
66821 - mod->module_core + mod->core_size,
66822 + set_page_attributes(mod->module_core_rw,
66823 + mod->module_core_rw + mod->core_size_rw,
66824 set_memory_x);
66825 - set_page_attributes(mod->module_core,
66826 - mod->module_core + mod->core_ro_size,
66827 + set_page_attributes(mod->module_core_rx,
66828 + mod->module_core_rx + mod->core_size_rx,
66829 set_memory_rw);
66830 }
66831
66832 static void unset_module_init_ro_nx(struct module *mod)
66833 {
66834 - set_page_attributes(mod->module_init + mod->init_text_size,
66835 - mod->module_init + mod->init_size,
66836 + set_page_attributes(mod->module_init_rw,
66837 + mod->module_init_rw + mod->init_size_rw,
66838 set_memory_x);
66839 - set_page_attributes(mod->module_init,
66840 - mod->module_init + mod->init_ro_size,
66841 + set_page_attributes(mod->module_init_rx,
66842 + mod->module_init_rx + mod->init_size_rx,
66843 set_memory_rw);
66844 }
66845
66846 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
66847
66848 mutex_lock(&module_mutex);
66849 list_for_each_entry_rcu(mod, &modules, list) {
66850 - if ((mod->module_core) && (mod->core_text_size)) {
66851 - set_page_attributes(mod->module_core,
66852 - mod->module_core + mod->core_text_size,
66853 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66854 + set_page_attributes(mod->module_core_rx,
66855 + mod->module_core_rx + mod->core_size_rx,
66856 set_memory_rw);
66857 }
66858 - if ((mod->module_init) && (mod->init_text_size)) {
66859 - set_page_attributes(mod->module_init,
66860 - mod->module_init + mod->init_text_size,
66861 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66862 + set_page_attributes(mod->module_init_rx,
66863 + mod->module_init_rx + mod->init_size_rx,
66864 set_memory_rw);
66865 }
66866 }
66867 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
66868
66869 mutex_lock(&module_mutex);
66870 list_for_each_entry_rcu(mod, &modules, list) {
66871 - if ((mod->module_core) && (mod->core_text_size)) {
66872 - set_page_attributes(mod->module_core,
66873 - mod->module_core + mod->core_text_size,
66874 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66875 + set_page_attributes(mod->module_core_rx,
66876 + mod->module_core_rx + mod->core_size_rx,
66877 set_memory_ro);
66878 }
66879 - if ((mod->module_init) && (mod->init_text_size)) {
66880 - set_page_attributes(mod->module_init,
66881 - mod->module_init + mod->init_text_size,
66882 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66883 + set_page_attributes(mod->module_init_rx,
66884 + mod->module_init_rx + mod->init_size_rx,
66885 set_memory_ro);
66886 }
66887 }
66888 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
66889
66890 /* This may be NULL, but that's OK */
66891 unset_module_init_ro_nx(mod);
66892 - module_free(mod, mod->module_init);
66893 + module_free(mod, mod->module_init_rw);
66894 + module_free_exec(mod, mod->module_init_rx);
66895 kfree(mod->args);
66896 percpu_modfree(mod);
66897
66898 /* Free lock-classes: */
66899 - lockdep_free_key_range(mod->module_core, mod->core_size);
66900 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66901 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66902
66903 /* Finally, free the core (containing the module structure) */
66904 unset_module_core_ro_nx(mod);
66905 - module_free(mod, mod->module_core);
66906 + module_free_exec(mod, mod->module_core_rx);
66907 + module_free(mod, mod->module_core_rw);
66908
66909 #ifdef CONFIG_MPU
66910 update_protections(current->mm);
66911 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66912 unsigned int i;
66913 int ret = 0;
66914 const struct kernel_symbol *ksym;
66915 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66916 + int is_fs_load = 0;
66917 + int register_filesystem_found = 0;
66918 + char *p;
66919 +
66920 + p = strstr(mod->args, "grsec_modharden_fs");
66921 + if (p) {
66922 + char *endptr = p + strlen("grsec_modharden_fs");
66923 + /* copy \0 as well */
66924 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66925 + is_fs_load = 1;
66926 + }
66927 +#endif
66928
66929 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66930 const char *name = info->strtab + sym[i].st_name;
66931
66932 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66933 + /* it's a real shame this will never get ripped and copied
66934 + upstream! ;(
66935 + */
66936 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66937 + register_filesystem_found = 1;
66938 +#endif
66939 +
66940 switch (sym[i].st_shndx) {
66941 case SHN_COMMON:
66942 /* We compiled with -fno-common. These are not
66943 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66944 ksym = resolve_symbol_wait(mod, info, name);
66945 /* Ok if resolved. */
66946 if (ksym && !IS_ERR(ksym)) {
66947 + pax_open_kernel();
66948 sym[i].st_value = ksym->value;
66949 + pax_close_kernel();
66950 break;
66951 }
66952
66953 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66954 secbase = (unsigned long)mod_percpu(mod);
66955 else
66956 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66957 + pax_open_kernel();
66958 sym[i].st_value += secbase;
66959 + pax_close_kernel();
66960 break;
66961 }
66962 }
66963
66964 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66965 + if (is_fs_load && !register_filesystem_found) {
66966 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66967 + ret = -EPERM;
66968 + }
66969 +#endif
66970 +
66971 return ret;
66972 }
66973
66974 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66975 || s->sh_entsize != ~0UL
66976 || strstarts(sname, ".init"))
66977 continue;
66978 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66979 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66980 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66981 + else
66982 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66983 DEBUGP("\t%s\n", name);
66984 }
66985 - switch (m) {
66986 - case 0: /* executable */
66987 - mod->core_size = debug_align(mod->core_size);
66988 - mod->core_text_size = mod->core_size;
66989 - break;
66990 - case 1: /* RO: text and ro-data */
66991 - mod->core_size = debug_align(mod->core_size);
66992 - mod->core_ro_size = mod->core_size;
66993 - break;
66994 - case 3: /* whole core */
66995 - mod->core_size = debug_align(mod->core_size);
66996 - break;
66997 - }
66998 }
66999
67000 DEBUGP("Init section allocation order:\n");
67001 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67002 || s->sh_entsize != ~0UL
67003 || !strstarts(sname, ".init"))
67004 continue;
67005 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67006 - | INIT_OFFSET_MASK);
67007 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67008 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67009 + else
67010 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67011 + s->sh_entsize |= INIT_OFFSET_MASK;
67012 DEBUGP("\t%s\n", sname);
67013 }
67014 - switch (m) {
67015 - case 0: /* executable */
67016 - mod->init_size = debug_align(mod->init_size);
67017 - mod->init_text_size = mod->init_size;
67018 - break;
67019 - case 1: /* RO: text and ro-data */
67020 - mod->init_size = debug_align(mod->init_size);
67021 - mod->init_ro_size = mod->init_size;
67022 - break;
67023 - case 3: /* whole init */
67024 - mod->init_size = debug_align(mod->init_size);
67025 - break;
67026 - }
67027 }
67028 }
67029
67030 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67031
67032 /* Put symbol section at end of init part of module. */
67033 symsect->sh_flags |= SHF_ALLOC;
67034 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67035 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67036 info->index.sym) | INIT_OFFSET_MASK;
67037 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
67038
67039 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67040 }
67041
67042 /* Append room for core symbols at end of core part. */
67043 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67044 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67045 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67046 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67047
67048 /* Put string table section at end of init part of module. */
67049 strsect->sh_flags |= SHF_ALLOC;
67050 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67051 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67052 info->index.str) | INIT_OFFSET_MASK;
67053 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
67054
67055 /* Append room for core symbols' strings at end of core part. */
67056 - info->stroffs = mod->core_size;
67057 + info->stroffs = mod->core_size_rx;
67058 __set_bit(0, info->strmap);
67059 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
67060 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
67061 }
67062
67063 static void add_kallsyms(struct module *mod, const struct load_info *info)
67064 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67065 /* Make sure we get permanent strtab: don't use info->strtab. */
67066 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67067
67068 + pax_open_kernel();
67069 +
67070 /* Set types up while we still have access to sections. */
67071 for (i = 0; i < mod->num_symtab; i++)
67072 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67073
67074 - mod->core_symtab = dst = mod->module_core + info->symoffs;
67075 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67076 src = mod->symtab;
67077 *dst = *src;
67078 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
67079 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67080 }
67081 mod->core_num_syms = ndst;
67082
67083 - mod->core_strtab = s = mod->module_core + info->stroffs;
67084 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67085 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
67086 if (test_bit(i, info->strmap))
67087 *++s = mod->strtab[i];
67088 +
67089 + pax_close_kernel();
67090 }
67091 #else
67092 static inline void layout_symtab(struct module *mod, struct load_info *info)
67093 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
67094 return size == 0 ? NULL : vmalloc_exec(size);
67095 }
67096
67097 -static void *module_alloc_update_bounds(unsigned long size)
67098 +static void *module_alloc_update_bounds_rw(unsigned long size)
67099 {
67100 void *ret = module_alloc(size);
67101
67102 if (ret) {
67103 mutex_lock(&module_mutex);
67104 /* Update module bounds. */
67105 - if ((unsigned long)ret < module_addr_min)
67106 - module_addr_min = (unsigned long)ret;
67107 - if ((unsigned long)ret + size > module_addr_max)
67108 - module_addr_max = (unsigned long)ret + size;
67109 + if ((unsigned long)ret < module_addr_min_rw)
67110 + module_addr_min_rw = (unsigned long)ret;
67111 + if ((unsigned long)ret + size > module_addr_max_rw)
67112 + module_addr_max_rw = (unsigned long)ret + size;
67113 + mutex_unlock(&module_mutex);
67114 + }
67115 + return ret;
67116 +}
67117 +
67118 +static void *module_alloc_update_bounds_rx(unsigned long size)
67119 +{
67120 + void *ret = module_alloc_exec(size);
67121 +
67122 + if (ret) {
67123 + mutex_lock(&module_mutex);
67124 + /* Update module bounds. */
67125 + if ((unsigned long)ret < module_addr_min_rx)
67126 + module_addr_min_rx = (unsigned long)ret;
67127 + if ((unsigned long)ret + size > module_addr_max_rx)
67128 + module_addr_max_rx = (unsigned long)ret + size;
67129 mutex_unlock(&module_mutex);
67130 }
67131 return ret;
67132 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
67133 static int check_modinfo(struct module *mod, struct load_info *info)
67134 {
67135 const char *modmagic = get_modinfo(info, "vermagic");
67136 + const char *license = get_modinfo(info, "license");
67137 int err;
67138
67139 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67140 + if (!license || !license_is_gpl_compatible(license))
67141 + return -ENOEXEC;
67142 +#endif
67143 +
67144 /* This is allowed: modprobe --force will invalidate it. */
67145 if (!modmagic) {
67146 err = try_to_force_load(mod, "bad vermagic");
67147 @@ -2495,7 +2538,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67148 }
67149
67150 /* Set up license info based on the info section */
67151 - set_license(mod, get_modinfo(info, "license"));
67152 + set_license(mod, license);
67153
67154 return 0;
67155 }
67156 @@ -2589,7 +2632,7 @@ static int move_module(struct module *mod, struct load_info *info)
67157 void *ptr;
67158
67159 /* Do the allocs. */
67160 - ptr = module_alloc_update_bounds(mod->core_size);
67161 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67162 /*
67163 * The pointer to this block is stored in the module structure
67164 * which is inside the block. Just mark it as not being a
67165 @@ -2599,23 +2642,50 @@ static int move_module(struct module *mod, struct load_info *info)
67166 if (!ptr)
67167 return -ENOMEM;
67168
67169 - memset(ptr, 0, mod->core_size);
67170 - mod->module_core = ptr;
67171 + memset(ptr, 0, mod->core_size_rw);
67172 + mod->module_core_rw = ptr;
67173
67174 - ptr = module_alloc_update_bounds(mod->init_size);
67175 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67176 /*
67177 * The pointer to this block is stored in the module structure
67178 * which is inside the block. This block doesn't need to be
67179 * scanned as it contains data and code that will be freed
67180 * after the module is initialized.
67181 */
67182 - kmemleak_ignore(ptr);
67183 - if (!ptr && mod->init_size) {
67184 - module_free(mod, mod->module_core);
67185 + kmemleak_not_leak(ptr);
67186 + if (!ptr && mod->init_size_rw) {
67187 + module_free(mod, mod->module_core_rw);
67188 return -ENOMEM;
67189 }
67190 - memset(ptr, 0, mod->init_size);
67191 - mod->module_init = ptr;
67192 + memset(ptr, 0, mod->init_size_rw);
67193 + mod->module_init_rw = ptr;
67194 +
67195 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67196 + kmemleak_not_leak(ptr);
67197 + if (!ptr) {
67198 + module_free(mod, mod->module_init_rw);
67199 + module_free(mod, mod->module_core_rw);
67200 + return -ENOMEM;
67201 + }
67202 +
67203 + pax_open_kernel();
67204 + memset(ptr, 0, mod->core_size_rx);
67205 + pax_close_kernel();
67206 + mod->module_core_rx = ptr;
67207 +
67208 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67209 + kmemleak_not_leak(ptr);
67210 + if (!ptr && mod->init_size_rx) {
67211 + module_free_exec(mod, mod->module_core_rx);
67212 + module_free(mod, mod->module_init_rw);
67213 + module_free(mod, mod->module_core_rw);
67214 + return -ENOMEM;
67215 + }
67216 +
67217 + pax_open_kernel();
67218 + memset(ptr, 0, mod->init_size_rx);
67219 + pax_close_kernel();
67220 + mod->module_init_rx = ptr;
67221
67222 /* Transfer each section which specifies SHF_ALLOC */
67223 DEBUGP("final section addresses:\n");
67224 @@ -2626,16 +2696,45 @@ static int move_module(struct module *mod, struct load_info *info)
67225 if (!(shdr->sh_flags & SHF_ALLOC))
67226 continue;
67227
67228 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
67229 - dest = mod->module_init
67230 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67231 - else
67232 - dest = mod->module_core + shdr->sh_entsize;
67233 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67234 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67235 + dest = mod->module_init_rw
67236 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67237 + else
67238 + dest = mod->module_init_rx
67239 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67240 + } else {
67241 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67242 + dest = mod->module_core_rw + shdr->sh_entsize;
67243 + else
67244 + dest = mod->module_core_rx + shdr->sh_entsize;
67245 + }
67246 +
67247 + if (shdr->sh_type != SHT_NOBITS) {
67248 +
67249 +#ifdef CONFIG_PAX_KERNEXEC
67250 +#ifdef CONFIG_X86_64
67251 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67252 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67253 +#endif
67254 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67255 + pax_open_kernel();
67256 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67257 + pax_close_kernel();
67258 + } else
67259 +#endif
67260
67261 - if (shdr->sh_type != SHT_NOBITS)
67262 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67263 + }
67264 /* Update sh_addr to point to copy in image. */
67265 - shdr->sh_addr = (unsigned long)dest;
67266 +
67267 +#ifdef CONFIG_PAX_KERNEXEC
67268 + if (shdr->sh_flags & SHF_EXECINSTR)
67269 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
67270 + else
67271 +#endif
67272 +
67273 + shdr->sh_addr = (unsigned long)dest;
67274 DEBUGP("\t0x%lx %s\n",
67275 shdr->sh_addr, info->secstrings + shdr->sh_name);
67276 }
67277 @@ -2686,12 +2785,12 @@ static void flush_module_icache(const struct module *mod)
67278 * Do it before processing of module parameters, so the module
67279 * can provide parameter accessor functions of its own.
67280 */
67281 - if (mod->module_init)
67282 - flush_icache_range((unsigned long)mod->module_init,
67283 - (unsigned long)mod->module_init
67284 - + mod->init_size);
67285 - flush_icache_range((unsigned long)mod->module_core,
67286 - (unsigned long)mod->module_core + mod->core_size);
67287 + if (mod->module_init_rx)
67288 + flush_icache_range((unsigned long)mod->module_init_rx,
67289 + (unsigned long)mod->module_init_rx
67290 + + mod->init_size_rx);
67291 + flush_icache_range((unsigned long)mod->module_core_rx,
67292 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
67293
67294 set_fs(old_fs);
67295 }
67296 @@ -2771,8 +2870,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
67297 {
67298 kfree(info->strmap);
67299 percpu_modfree(mod);
67300 - module_free(mod, mod->module_init);
67301 - module_free(mod, mod->module_core);
67302 + module_free_exec(mod, mod->module_init_rx);
67303 + module_free_exec(mod, mod->module_core_rx);
67304 + module_free(mod, mod->module_init_rw);
67305 + module_free(mod, mod->module_core_rw);
67306 }
67307
67308 int __weak module_finalize(const Elf_Ehdr *hdr,
67309 @@ -2836,9 +2937,38 @@ static struct module *load_module(void __user *umod,
67310 if (err)
67311 goto free_unload;
67312
67313 + /* Now copy in args */
67314 + mod->args = strndup_user(uargs, ~0UL >> 1);
67315 + if (IS_ERR(mod->args)) {
67316 + err = PTR_ERR(mod->args);
67317 + goto free_unload;
67318 + }
67319 +
67320 /* Set up MODINFO_ATTR fields */
67321 setup_modinfo(mod, &info);
67322
67323 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67324 + {
67325 + char *p, *p2;
67326 +
67327 + if (strstr(mod->args, "grsec_modharden_netdev")) {
67328 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67329 + err = -EPERM;
67330 + goto free_modinfo;
67331 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67332 + p += strlen("grsec_modharden_normal");
67333 + p2 = strstr(p, "_");
67334 + if (p2) {
67335 + *p2 = '\0';
67336 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67337 + *p2 = '_';
67338 + }
67339 + err = -EPERM;
67340 + goto free_modinfo;
67341 + }
67342 + }
67343 +#endif
67344 +
67345 /* Fix up syms, so that st_value is a pointer to location. */
67346 err = simplify_symbols(mod, &info);
67347 if (err < 0)
67348 @@ -2854,13 +2984,6 @@ static struct module *load_module(void __user *umod,
67349
67350 flush_module_icache(mod);
67351
67352 - /* Now copy in args */
67353 - mod->args = strndup_user(uargs, ~0UL >> 1);
67354 - if (IS_ERR(mod->args)) {
67355 - err = PTR_ERR(mod->args);
67356 - goto free_arch_cleanup;
67357 - }
67358 -
67359 /* Mark state as coming so strong_try_module_get() ignores us. */
67360 mod->state = MODULE_STATE_COMING;
67361
67362 @@ -2920,11 +3043,10 @@ static struct module *load_module(void __user *umod,
67363 unlock:
67364 mutex_unlock(&module_mutex);
67365 synchronize_sched();
67366 - kfree(mod->args);
67367 - free_arch_cleanup:
67368 module_arch_cleanup(mod);
67369 free_modinfo:
67370 free_modinfo(mod);
67371 + kfree(mod->args);
67372 free_unload:
67373 module_unload_free(mod);
67374 free_module:
67375 @@ -2965,16 +3087,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67376 MODULE_STATE_COMING, mod);
67377
67378 /* Set RO and NX regions for core */
67379 - set_section_ro_nx(mod->module_core,
67380 - mod->core_text_size,
67381 - mod->core_ro_size,
67382 - mod->core_size);
67383 + set_section_ro_nx(mod->module_core_rx,
67384 + mod->core_size_rx,
67385 + mod->core_size_rx,
67386 + mod->core_size_rx);
67387
67388 /* Set RO and NX regions for init */
67389 - set_section_ro_nx(mod->module_init,
67390 - mod->init_text_size,
67391 - mod->init_ro_size,
67392 - mod->init_size);
67393 + set_section_ro_nx(mod->module_init_rx,
67394 + mod->init_size_rx,
67395 + mod->init_size_rx,
67396 + mod->init_size_rx);
67397
67398 do_mod_ctors(mod);
67399 /* Start the module */
67400 @@ -3020,11 +3142,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67401 mod->strtab = mod->core_strtab;
67402 #endif
67403 unset_module_init_ro_nx(mod);
67404 - module_free(mod, mod->module_init);
67405 - mod->module_init = NULL;
67406 - mod->init_size = 0;
67407 - mod->init_ro_size = 0;
67408 - mod->init_text_size = 0;
67409 + module_free(mod, mod->module_init_rw);
67410 + module_free_exec(mod, mod->module_init_rx);
67411 + mod->module_init_rw = NULL;
67412 + mod->module_init_rx = NULL;
67413 + mod->init_size_rw = 0;
67414 + mod->init_size_rx = 0;
67415 mutex_unlock(&module_mutex);
67416
67417 return 0;
67418 @@ -3055,10 +3178,16 @@ static const char *get_ksymbol(struct module *mod,
67419 unsigned long nextval;
67420
67421 /* At worse, next value is at end of module */
67422 - if (within_module_init(addr, mod))
67423 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
67424 + if (within_module_init_rx(addr, mod))
67425 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67426 + else if (within_module_init_rw(addr, mod))
67427 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67428 + else if (within_module_core_rx(addr, mod))
67429 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67430 + else if (within_module_core_rw(addr, mod))
67431 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67432 else
67433 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
67434 + return NULL;
67435
67436 /* Scan for closest preceding symbol, and next symbol. (ELF
67437 starts real symbols at 1). */
67438 @@ -3304,7 +3433,7 @@ static int m_show(struct seq_file *m, void *p)
67439 char buf[8];
67440
67441 seq_printf(m, "%s %u",
67442 - mod->name, mod->init_size + mod->core_size);
67443 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67444 print_unload_info(m, mod);
67445
67446 /* Informative for users. */
67447 @@ -3313,7 +3442,7 @@ static int m_show(struct seq_file *m, void *p)
67448 mod->state == MODULE_STATE_COMING ? "Loading":
67449 "Live");
67450 /* Used by oprofile and other similar tools. */
67451 - seq_printf(m, " 0x%pK", mod->module_core);
67452 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67453
67454 /* Taints info */
67455 if (mod->taints)
67456 @@ -3349,7 +3478,17 @@ static const struct file_operations proc_modules_operations = {
67457
67458 static int __init proc_modules_init(void)
67459 {
67460 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67461 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67462 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67463 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67464 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67465 +#else
67466 proc_create("modules", 0, NULL, &proc_modules_operations);
67467 +#endif
67468 +#else
67469 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67470 +#endif
67471 return 0;
67472 }
67473 module_init(proc_modules_init);
67474 @@ -3408,12 +3547,12 @@ struct module *__module_address(unsigned long addr)
67475 {
67476 struct module *mod;
67477
67478 - if (addr < module_addr_min || addr > module_addr_max)
67479 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67480 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
67481 return NULL;
67482
67483 list_for_each_entry_rcu(mod, &modules, list)
67484 - if (within_module_core(addr, mod)
67485 - || within_module_init(addr, mod))
67486 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
67487 return mod;
67488 return NULL;
67489 }
67490 @@ -3447,11 +3586,20 @@ bool is_module_text_address(unsigned long addr)
67491 */
67492 struct module *__module_text_address(unsigned long addr)
67493 {
67494 - struct module *mod = __module_address(addr);
67495 + struct module *mod;
67496 +
67497 +#ifdef CONFIG_X86_32
67498 + addr = ktla_ktva(addr);
67499 +#endif
67500 +
67501 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67502 + return NULL;
67503 +
67504 + mod = __module_address(addr);
67505 +
67506 if (mod) {
67507 /* Make sure it's within the text section. */
67508 - if (!within(addr, mod->module_init, mod->init_text_size)
67509 - && !within(addr, mod->module_core, mod->core_text_size))
67510 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67511 mod = NULL;
67512 }
67513 return mod;
67514 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67515 index 73da83a..fe46e99 100644
67516 --- a/kernel/mutex-debug.c
67517 +++ b/kernel/mutex-debug.c
67518 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67519 }
67520
67521 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67522 - struct thread_info *ti)
67523 + struct task_struct *task)
67524 {
67525 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67526
67527 /* Mark the current thread as blocked on the lock: */
67528 - ti->task->blocked_on = waiter;
67529 + task->blocked_on = waiter;
67530 }
67531
67532 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67533 - struct thread_info *ti)
67534 + struct task_struct *task)
67535 {
67536 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67537 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67538 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67539 - ti->task->blocked_on = NULL;
67540 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
67541 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67542 + task->blocked_on = NULL;
67543
67544 list_del_init(&waiter->list);
67545 waiter->task = NULL;
67546 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67547 index 0799fd3..d06ae3b 100644
67548 --- a/kernel/mutex-debug.h
67549 +++ b/kernel/mutex-debug.h
67550 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67551 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67552 extern void debug_mutex_add_waiter(struct mutex *lock,
67553 struct mutex_waiter *waiter,
67554 - struct thread_info *ti);
67555 + struct task_struct *task);
67556 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67557 - struct thread_info *ti);
67558 + struct task_struct *task);
67559 extern void debug_mutex_unlock(struct mutex *lock);
67560 extern void debug_mutex_init(struct mutex *lock, const char *name,
67561 struct lock_class_key *key);
67562 diff --git a/kernel/mutex.c b/kernel/mutex.c
67563 index d607ed5..58d0a52 100644
67564 --- a/kernel/mutex.c
67565 +++ b/kernel/mutex.c
67566 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67567 spin_lock_mutex(&lock->wait_lock, flags);
67568
67569 debug_mutex_lock_common(lock, &waiter);
67570 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67571 + debug_mutex_add_waiter(lock, &waiter, task);
67572
67573 /* add waiting tasks to the end of the waitqueue (FIFO): */
67574 list_add_tail(&waiter.list, &lock->wait_list);
67575 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67576 * TASK_UNINTERRUPTIBLE case.)
67577 */
67578 if (unlikely(signal_pending_state(state, task))) {
67579 - mutex_remove_waiter(lock, &waiter,
67580 - task_thread_info(task));
67581 + mutex_remove_waiter(lock, &waiter, task);
67582 mutex_release(&lock->dep_map, 1, ip);
67583 spin_unlock_mutex(&lock->wait_lock, flags);
67584
67585 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67586 done:
67587 lock_acquired(&lock->dep_map, ip);
67588 /* got the lock - rejoice! */
67589 - mutex_remove_waiter(lock, &waiter, current_thread_info());
67590 + mutex_remove_waiter(lock, &waiter, task);
67591 mutex_set_owner(lock);
67592
67593 /* set it to 0 if there are no waiters left: */
67594 diff --git a/kernel/padata.c b/kernel/padata.c
67595 index b91941d..0871d60 100644
67596 --- a/kernel/padata.c
67597 +++ b/kernel/padata.c
67598 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
67599 padata->pd = pd;
67600 padata->cb_cpu = cb_cpu;
67601
67602 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
67603 - atomic_set(&pd->seq_nr, -1);
67604 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
67605 + atomic_set_unchecked(&pd->seq_nr, -1);
67606
67607 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
67608 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
67609
67610 target_cpu = padata_cpu_hash(padata);
67611 queue = per_cpu_ptr(pd->pqueue, target_cpu);
67612 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
67613 padata_init_pqueues(pd);
67614 padata_init_squeues(pd);
67615 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
67616 - atomic_set(&pd->seq_nr, -1);
67617 + atomic_set_unchecked(&pd->seq_nr, -1);
67618 atomic_set(&pd->reorder_objects, 0);
67619 atomic_set(&pd->refcnt, 0);
67620 pd->pinst = pinst;
67621 diff --git a/kernel/panic.c b/kernel/panic.c
67622 index d7bb697..9ef9f19 100644
67623 --- a/kernel/panic.c
67624 +++ b/kernel/panic.c
67625 @@ -371,7 +371,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67626 const char *board;
67627
67628 printk(KERN_WARNING "------------[ cut here ]------------\n");
67629 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67630 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67631 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67632 if (board)
67633 printk(KERN_WARNING "Hardware name: %s\n", board);
67634 @@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67635 */
67636 void __stack_chk_fail(void)
67637 {
67638 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
67639 + dump_stack();
67640 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67641 __builtin_return_address(0));
67642 }
67643 EXPORT_SYMBOL(__stack_chk_fail);
67644 diff --git a/kernel/pid.c b/kernel/pid.c
67645 index e432057..a2b2ac5 100644
67646 --- a/kernel/pid.c
67647 +++ b/kernel/pid.c
67648 @@ -33,6 +33,7 @@
67649 #include <linux/rculist.h>
67650 #include <linux/bootmem.h>
67651 #include <linux/hash.h>
67652 +#include <linux/security.h>
67653 #include <linux/pid_namespace.h>
67654 #include <linux/init_task.h>
67655 #include <linux/syscalls.h>
67656 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67657
67658 int pid_max = PID_MAX_DEFAULT;
67659
67660 -#define RESERVED_PIDS 300
67661 +#define RESERVED_PIDS 500
67662
67663 int pid_max_min = RESERVED_PIDS + 1;
67664 int pid_max_max = PID_MAX_LIMIT;
67665 @@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
67666 */
67667 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67668 {
67669 + struct task_struct *task;
67670 +
67671 rcu_lockdep_assert(rcu_read_lock_held());
67672 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67673 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67674 +
67675 + if (gr_pid_is_chrooted(task))
67676 + return NULL;
67677 +
67678 + return task;
67679 }
67680
67681 struct task_struct *find_task_by_vpid(pid_t vnr)
67682 @@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67683 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67684 }
67685
67686 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67687 +{
67688 + rcu_lockdep_assert(rcu_read_lock_held());
67689 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67690 +}
67691 +
67692 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67693 {
67694 struct pid *pid;
67695 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67696 index 640ded8..3dafb85 100644
67697 --- a/kernel/posix-cpu-timers.c
67698 +++ b/kernel/posix-cpu-timers.c
67699 @@ -6,6 +6,7 @@
67700 #include <linux/posix-timers.h>
67701 #include <linux/errno.h>
67702 #include <linux/math64.h>
67703 +#include <linux/security.h>
67704 #include <asm/uaccess.h>
67705 #include <linux/kernel_stat.h>
67706 #include <trace/events/timer.h>
67707 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
67708
67709 static __init int init_posix_cpu_timers(void)
67710 {
67711 - struct k_clock process = {
67712 + static struct k_clock process = {
67713 .clock_getres = process_cpu_clock_getres,
67714 .clock_get = process_cpu_clock_get,
67715 .timer_create = process_cpu_timer_create,
67716 .nsleep = process_cpu_nsleep,
67717 .nsleep_restart = process_cpu_nsleep_restart,
67718 };
67719 - struct k_clock thread = {
67720 + static struct k_clock thread = {
67721 .clock_getres = thread_cpu_clock_getres,
67722 .clock_get = thread_cpu_clock_get,
67723 .timer_create = thread_cpu_timer_create,
67724 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67725 index 4556182..9335419 100644
67726 --- a/kernel/posix-timers.c
67727 +++ b/kernel/posix-timers.c
67728 @@ -43,6 +43,7 @@
67729 #include <linux/idr.h>
67730 #include <linux/posix-clock.h>
67731 #include <linux/posix-timers.h>
67732 +#include <linux/grsecurity.h>
67733 #include <linux/syscalls.h>
67734 #include <linux/wait.h>
67735 #include <linux/workqueue.h>
67736 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67737 * which we beg off on and pass to do_sys_settimeofday().
67738 */
67739
67740 -static struct k_clock posix_clocks[MAX_CLOCKS];
67741 +static struct k_clock *posix_clocks[MAX_CLOCKS];
67742
67743 /*
67744 * These ones are defined below.
67745 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67746 */
67747 static __init int init_posix_timers(void)
67748 {
67749 - struct k_clock clock_realtime = {
67750 + static struct k_clock clock_realtime = {
67751 .clock_getres = hrtimer_get_res,
67752 .clock_get = posix_clock_realtime_get,
67753 .clock_set = posix_clock_realtime_set,
67754 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67755 .timer_get = common_timer_get,
67756 .timer_del = common_timer_del,
67757 };
67758 - struct k_clock clock_monotonic = {
67759 + static struct k_clock clock_monotonic = {
67760 .clock_getres = hrtimer_get_res,
67761 .clock_get = posix_ktime_get_ts,
67762 .nsleep = common_nsleep,
67763 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67764 .timer_get = common_timer_get,
67765 .timer_del = common_timer_del,
67766 };
67767 - struct k_clock clock_monotonic_raw = {
67768 + static struct k_clock clock_monotonic_raw = {
67769 .clock_getres = hrtimer_get_res,
67770 .clock_get = posix_get_monotonic_raw,
67771 };
67772 - struct k_clock clock_realtime_coarse = {
67773 + static struct k_clock clock_realtime_coarse = {
67774 .clock_getres = posix_get_coarse_res,
67775 .clock_get = posix_get_realtime_coarse,
67776 };
67777 - struct k_clock clock_monotonic_coarse = {
67778 + static struct k_clock clock_monotonic_coarse = {
67779 .clock_getres = posix_get_coarse_res,
67780 .clock_get = posix_get_monotonic_coarse,
67781 };
67782 - struct k_clock clock_boottime = {
67783 + static struct k_clock clock_boottime = {
67784 .clock_getres = hrtimer_get_res,
67785 .clock_get = posix_get_boottime,
67786 .nsleep = common_nsleep,
67787 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void)
67788 .timer_del = common_timer_del,
67789 };
67790
67791 + pax_track_stack();
67792 +
67793 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
67794 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
67795 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
67796 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67797 return;
67798 }
67799
67800 - posix_clocks[clock_id] = *new_clock;
67801 + posix_clocks[clock_id] = new_clock;
67802 }
67803 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67804
67805 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67806 return (id & CLOCKFD_MASK) == CLOCKFD ?
67807 &clock_posix_dynamic : &clock_posix_cpu;
67808
67809 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67810 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67811 return NULL;
67812 - return &posix_clocks[id];
67813 + return posix_clocks[id];
67814 }
67815
67816 static int common_timer_create(struct k_itimer *new_timer)
67817 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67818 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67819 return -EFAULT;
67820
67821 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67822 + have their clock_set fptr set to a nosettime dummy function
67823 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67824 + call common_clock_set, which calls do_sys_settimeofday, which
67825 + we hook
67826 + */
67827 +
67828 return kc->clock_set(which_clock, &new_tp);
67829 }
67830
67831 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67832 index d523593..68197a4 100644
67833 --- a/kernel/power/poweroff.c
67834 +++ b/kernel/power/poweroff.c
67835 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67836 .enable_mask = SYSRQ_ENABLE_BOOT,
67837 };
67838
67839 -static int pm_sysrq_init(void)
67840 +static int __init pm_sysrq_init(void)
67841 {
67842 register_sysrq_key('o', &sysrq_poweroff_op);
67843 return 0;
67844 diff --git a/kernel/power/process.c b/kernel/power/process.c
67845 index 0cf3a27..5481be4 100644
67846 --- a/kernel/power/process.c
67847 +++ b/kernel/power/process.c
67848 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
67849 u64 elapsed_csecs64;
67850 unsigned int elapsed_csecs;
67851 bool wakeup = false;
67852 + bool timedout = false;
67853
67854 do_gettimeofday(&start);
67855
67856 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
67857
67858 while (true) {
67859 todo = 0;
67860 + if (time_after(jiffies, end_time))
67861 + timedout = true;
67862 read_lock(&tasklist_lock);
67863 do_each_thread(g, p) {
67864 if (frozen(p) || !freezable(p))
67865 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
67866 * try_to_stop() after schedule() in ptrace/signal
67867 * stop sees TIF_FREEZE.
67868 */
67869 - if (!task_is_stopped_or_traced(p) &&
67870 - !freezer_should_skip(p))
67871 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67872 todo++;
67873 + if (timedout) {
67874 + printk(KERN_ERR "Task refusing to freeze:\n");
67875 + sched_show_task(p);
67876 + }
67877 + }
67878 } while_each_thread(g, p);
67879 read_unlock(&tasklist_lock);
67880
67881 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
67882 todo += wq_busy;
67883 }
67884
67885 - if (!todo || time_after(jiffies, end_time))
67886 + if (!todo || timedout)
67887 break;
67888
67889 if (pm_wakeup_pending()) {
67890 diff --git a/kernel/printk.c b/kernel/printk.c
67891 index 28a40d8..2411bec 100644
67892 --- a/kernel/printk.c
67893 +++ b/kernel/printk.c
67894 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
67895 if (from_file && type != SYSLOG_ACTION_OPEN)
67896 return 0;
67897
67898 +#ifdef CONFIG_GRKERNSEC_DMESG
67899 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67900 + return -EPERM;
67901 +#endif
67902 +
67903 if (syslog_action_restricted(type)) {
67904 if (capable(CAP_SYSLOG))
67905 return 0;
67906 diff --git a/kernel/profile.c b/kernel/profile.c
67907 index 961b389..c451353 100644
67908 --- a/kernel/profile.c
67909 +++ b/kernel/profile.c
67910 @@ -39,7 +39,7 @@ struct profile_hit {
67911 /* Oprofile timer tick hook */
67912 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67913
67914 -static atomic_t *prof_buffer;
67915 +static atomic_unchecked_t *prof_buffer;
67916 static unsigned long prof_len, prof_shift;
67917
67918 int prof_on __read_mostly;
67919 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67920 hits[i].pc = 0;
67921 continue;
67922 }
67923 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67924 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67925 hits[i].hits = hits[i].pc = 0;
67926 }
67927 }
67928 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67929 * Add the current hit(s) and flush the write-queue out
67930 * to the global buffer:
67931 */
67932 - atomic_add(nr_hits, &prof_buffer[pc]);
67933 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67934 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67935 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67936 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67937 hits[i].pc = hits[i].hits = 0;
67938 }
67939 out:
67940 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67941 {
67942 unsigned long pc;
67943 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67944 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67945 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67946 }
67947 #endif /* !CONFIG_SMP */
67948
67949 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67950 return -EFAULT;
67951 buf++; p++; count--; read++;
67952 }
67953 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67954 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67955 if (copy_to_user(buf, (void *)pnt, count))
67956 return -EFAULT;
67957 read += count;
67958 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67959 }
67960 #endif
67961 profile_discard_flip_buffers();
67962 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67963 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67964 return count;
67965 }
67966
67967 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67968 index 67d1fdd..1af21e2 100644
67969 --- a/kernel/ptrace.c
67970 +++ b/kernel/ptrace.c
67971 @@ -172,7 +172,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
67972 return ret;
67973 }
67974
67975 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67976 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
67977 + unsigned int log)
67978 {
67979 const struct cred *cred = current_cred(), *tcred;
67980
67981 @@ -198,7 +199,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67982 cred->gid == tcred->sgid &&
67983 cred->gid == tcred->gid))
67984 goto ok;
67985 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
67986 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
67987 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
67988 goto ok;
67989 rcu_read_unlock();
67990 return -EPERM;
67991 @@ -207,7 +209,9 @@ ok:
67992 smp_rmb();
67993 if (task->mm)
67994 dumpable = get_dumpable(task->mm);
67995 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
67996 + if (!dumpable &&
67997 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
67998 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
67999 return -EPERM;
68000
68001 return security_ptrace_access_check(task, mode);
68002 @@ -217,7 +221,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
68003 {
68004 int err;
68005 task_lock(task);
68006 - err = __ptrace_may_access(task, mode);
68007 + err = __ptrace_may_access(task, mode, 0);
68008 + task_unlock(task);
68009 + return !err;
68010 +}
68011 +
68012 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
68013 +{
68014 + int err;
68015 + task_lock(task);
68016 + err = __ptrace_may_access(task, mode, 1);
68017 task_unlock(task);
68018 return !err;
68019 }
68020 @@ -262,7 +275,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68021 goto out;
68022
68023 task_lock(task);
68024 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
68025 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
68026 task_unlock(task);
68027 if (retval)
68028 goto unlock_creds;
68029 @@ -277,7 +290,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68030 task->ptrace = PT_PTRACED;
68031 if (seize)
68032 task->ptrace |= PT_SEIZED;
68033 - if (task_ns_capable(task, CAP_SYS_PTRACE))
68034 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
68035 task->ptrace |= PT_PTRACE_CAP;
68036
68037 __ptrace_link(task, current);
68038 @@ -472,6 +485,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68039 {
68040 int copied = 0;
68041
68042 + pax_track_stack();
68043 +
68044 while (len > 0) {
68045 char buf[128];
68046 int this_len, retval;
68047 @@ -483,7 +498,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68048 break;
68049 return -EIO;
68050 }
68051 - if (copy_to_user(dst, buf, retval))
68052 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68053 return -EFAULT;
68054 copied += retval;
68055 src += retval;
68056 @@ -497,6 +512,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
68057 {
68058 int copied = 0;
68059
68060 + pax_track_stack();
68061 +
68062 while (len > 0) {
68063 char buf[128];
68064 int this_len, retval;
68065 @@ -680,10 +697,12 @@ int ptrace_request(struct task_struct *child, long request,
68066 bool seized = child->ptrace & PT_SEIZED;
68067 int ret = -EIO;
68068 siginfo_t siginfo, *si;
68069 - void __user *datavp = (void __user *) data;
68070 + void __user *datavp = (__force void __user *) data;
68071 unsigned long __user *datalp = datavp;
68072 unsigned long flags;
68073
68074 + pax_track_stack();
68075 +
68076 switch (request) {
68077 case PTRACE_PEEKTEXT:
68078 case PTRACE_PEEKDATA:
68079 @@ -882,14 +901,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
68080 goto out;
68081 }
68082
68083 + if (gr_handle_ptrace(child, request)) {
68084 + ret = -EPERM;
68085 + goto out_put_task_struct;
68086 + }
68087 +
68088 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68089 ret = ptrace_attach(child, request, data);
68090 /*
68091 * Some architectures need to do book-keeping after
68092 * a ptrace attach.
68093 */
68094 - if (!ret)
68095 + if (!ret) {
68096 arch_ptrace_attach(child);
68097 + gr_audit_ptrace(child);
68098 + }
68099 goto out_put_task_struct;
68100 }
68101
68102 @@ -915,7 +941,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68103 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68104 if (copied != sizeof(tmp))
68105 return -EIO;
68106 - return put_user(tmp, (unsigned long __user *)data);
68107 + return put_user(tmp, (__force unsigned long __user *)data);
68108 }
68109
68110 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68111 @@ -938,6 +964,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
68112 siginfo_t siginfo;
68113 int ret;
68114
68115 + pax_track_stack();
68116 +
68117 switch (request) {
68118 case PTRACE_PEEKTEXT:
68119 case PTRACE_PEEKDATA:
68120 @@ -1025,14 +1053,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68121 goto out;
68122 }
68123
68124 + if (gr_handle_ptrace(child, request)) {
68125 + ret = -EPERM;
68126 + goto out_put_task_struct;
68127 + }
68128 +
68129 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68130 ret = ptrace_attach(child, request, data);
68131 /*
68132 * Some architectures need to do book-keeping after
68133 * a ptrace attach.
68134 */
68135 - if (!ret)
68136 + if (!ret) {
68137 arch_ptrace_attach(child);
68138 + gr_audit_ptrace(child);
68139 + }
68140 goto out_put_task_struct;
68141 }
68142
68143 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
68144 index 98f51b1..30b950c 100644
68145 --- a/kernel/rcutorture.c
68146 +++ b/kernel/rcutorture.c
68147 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
68148 { 0 };
68149 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68150 { 0 };
68151 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68152 -static atomic_t n_rcu_torture_alloc;
68153 -static atomic_t n_rcu_torture_alloc_fail;
68154 -static atomic_t n_rcu_torture_free;
68155 -static atomic_t n_rcu_torture_mberror;
68156 -static atomic_t n_rcu_torture_error;
68157 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68158 +static atomic_unchecked_t n_rcu_torture_alloc;
68159 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
68160 +static atomic_unchecked_t n_rcu_torture_free;
68161 +static atomic_unchecked_t n_rcu_torture_mberror;
68162 +static atomic_unchecked_t n_rcu_torture_error;
68163 static long n_rcu_torture_boost_ktrerror;
68164 static long n_rcu_torture_boost_rterror;
68165 static long n_rcu_torture_boost_failure;
68166 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
68167
68168 spin_lock_bh(&rcu_torture_lock);
68169 if (list_empty(&rcu_torture_freelist)) {
68170 - atomic_inc(&n_rcu_torture_alloc_fail);
68171 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68172 spin_unlock_bh(&rcu_torture_lock);
68173 return NULL;
68174 }
68175 - atomic_inc(&n_rcu_torture_alloc);
68176 + atomic_inc_unchecked(&n_rcu_torture_alloc);
68177 p = rcu_torture_freelist.next;
68178 list_del_init(p);
68179 spin_unlock_bh(&rcu_torture_lock);
68180 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
68181 static void
68182 rcu_torture_free(struct rcu_torture *p)
68183 {
68184 - atomic_inc(&n_rcu_torture_free);
68185 + atomic_inc_unchecked(&n_rcu_torture_free);
68186 spin_lock_bh(&rcu_torture_lock);
68187 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68188 spin_unlock_bh(&rcu_torture_lock);
68189 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
68190 i = rp->rtort_pipe_count;
68191 if (i > RCU_TORTURE_PIPE_LEN)
68192 i = RCU_TORTURE_PIPE_LEN;
68193 - atomic_inc(&rcu_torture_wcount[i]);
68194 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68195 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68196 rp->rtort_mbtest = 0;
68197 rcu_torture_free(rp);
68198 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68199 i = rp->rtort_pipe_count;
68200 if (i > RCU_TORTURE_PIPE_LEN)
68201 i = RCU_TORTURE_PIPE_LEN;
68202 - atomic_inc(&rcu_torture_wcount[i]);
68203 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68204 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68205 rp->rtort_mbtest = 0;
68206 list_del(&rp->rtort_free);
68207 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
68208 i = old_rp->rtort_pipe_count;
68209 if (i > RCU_TORTURE_PIPE_LEN)
68210 i = RCU_TORTURE_PIPE_LEN;
68211 - atomic_inc(&rcu_torture_wcount[i]);
68212 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68213 old_rp->rtort_pipe_count++;
68214 cur_ops->deferred_free(old_rp);
68215 }
68216 @@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned long unused)
68217 return;
68218 }
68219 if (p->rtort_mbtest == 0)
68220 - atomic_inc(&n_rcu_torture_mberror);
68221 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68222 spin_lock(&rand_lock);
68223 cur_ops->read_delay(&rand);
68224 n_rcu_torture_timers++;
68225 @@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
68226 continue;
68227 }
68228 if (p->rtort_mbtest == 0)
68229 - atomic_inc(&n_rcu_torture_mberror);
68230 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68231 cur_ops->read_delay(&rand);
68232 preempt_disable();
68233 pipe_count = p->rtort_pipe_count;
68234 @@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
68235 rcu_torture_current,
68236 rcu_torture_current_version,
68237 list_empty(&rcu_torture_freelist),
68238 - atomic_read(&n_rcu_torture_alloc),
68239 - atomic_read(&n_rcu_torture_alloc_fail),
68240 - atomic_read(&n_rcu_torture_free),
68241 - atomic_read(&n_rcu_torture_mberror),
68242 + atomic_read_unchecked(&n_rcu_torture_alloc),
68243 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68244 + atomic_read_unchecked(&n_rcu_torture_free),
68245 + atomic_read_unchecked(&n_rcu_torture_mberror),
68246 n_rcu_torture_boost_ktrerror,
68247 n_rcu_torture_boost_rterror,
68248 n_rcu_torture_boost_failure,
68249 n_rcu_torture_boosts,
68250 n_rcu_torture_timers);
68251 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68252 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68253 n_rcu_torture_boost_ktrerror != 0 ||
68254 n_rcu_torture_boost_rterror != 0 ||
68255 n_rcu_torture_boost_failure != 0)
68256 @@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
68257 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68258 if (i > 1) {
68259 cnt += sprintf(&page[cnt], "!!! ");
68260 - atomic_inc(&n_rcu_torture_error);
68261 + atomic_inc_unchecked(&n_rcu_torture_error);
68262 WARN_ON_ONCE(1);
68263 }
68264 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68265 @@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
68266 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68267 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68268 cnt += sprintf(&page[cnt], " %d",
68269 - atomic_read(&rcu_torture_wcount[i]));
68270 + atomic_read_unchecked(&rcu_torture_wcount[i]));
68271 }
68272 cnt += sprintf(&page[cnt], "\n");
68273 if (cur_ops->stats)
68274 @@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
68275
68276 if (cur_ops->cleanup)
68277 cur_ops->cleanup();
68278 - if (atomic_read(&n_rcu_torture_error))
68279 + if (atomic_read_unchecked(&n_rcu_torture_error))
68280 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68281 else
68282 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
68283 @@ -1474,17 +1474,17 @@ rcu_torture_init(void)
68284
68285 rcu_torture_current = NULL;
68286 rcu_torture_current_version = 0;
68287 - atomic_set(&n_rcu_torture_alloc, 0);
68288 - atomic_set(&n_rcu_torture_alloc_fail, 0);
68289 - atomic_set(&n_rcu_torture_free, 0);
68290 - atomic_set(&n_rcu_torture_mberror, 0);
68291 - atomic_set(&n_rcu_torture_error, 0);
68292 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68293 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68294 + atomic_set_unchecked(&n_rcu_torture_free, 0);
68295 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68296 + atomic_set_unchecked(&n_rcu_torture_error, 0);
68297 n_rcu_torture_boost_ktrerror = 0;
68298 n_rcu_torture_boost_rterror = 0;
68299 n_rcu_torture_boost_failure = 0;
68300 n_rcu_torture_boosts = 0;
68301 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68302 - atomic_set(&rcu_torture_wcount[i], 0);
68303 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68304 for_each_possible_cpu(cpu) {
68305 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68306 per_cpu(rcu_torture_count, cpu)[i] = 0;
68307 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68308 index ba06207..85d8ba8 100644
68309 --- a/kernel/rcutree.c
68310 +++ b/kernel/rcutree.c
68311 @@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
68312 }
68313 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68314 smp_mb__before_atomic_inc(); /* See above. */
68315 - atomic_inc(&rdtp->dynticks);
68316 + atomic_inc_unchecked(&rdtp->dynticks);
68317 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68318 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68319 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68320 local_irq_restore(flags);
68321
68322 /* If the interrupt queued a callback, get out of dyntick mode. */
68323 @@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
68324 return;
68325 }
68326 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68327 - atomic_inc(&rdtp->dynticks);
68328 + atomic_inc_unchecked(&rdtp->dynticks);
68329 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68330 smp_mb__after_atomic_inc(); /* See above. */
68331 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68332 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68333 local_irq_restore(flags);
68334 }
68335
68336 @@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
68337 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68338
68339 if (rdtp->dynticks_nmi_nesting == 0 &&
68340 - (atomic_read(&rdtp->dynticks) & 0x1))
68341 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68342 return;
68343 rdtp->dynticks_nmi_nesting++;
68344 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68345 - atomic_inc(&rdtp->dynticks);
68346 + atomic_inc_unchecked(&rdtp->dynticks);
68347 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68348 smp_mb__after_atomic_inc(); /* See above. */
68349 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68350 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68351 }
68352
68353 /**
68354 @@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
68355 return;
68356 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68357 smp_mb__before_atomic_inc(); /* See above. */
68358 - atomic_inc(&rdtp->dynticks);
68359 + atomic_inc_unchecked(&rdtp->dynticks);
68360 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68361 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68362 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68363 }
68364
68365 /**
68366 @@ -469,7 +469,7 @@ void rcu_irq_exit(void)
68367 */
68368 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68369 {
68370 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68371 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68372 return 0;
68373 }
68374
68375 @@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68376 unsigned long curr;
68377 unsigned long snap;
68378
68379 - curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
68380 + curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68381 snap = (unsigned long)rdp->dynticks_snap;
68382
68383 /*
68384 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68385 /*
68386 * Do softirq processing for the current CPU.
68387 */
68388 -static void rcu_process_callbacks(struct softirq_action *unused)
68389 +static void rcu_process_callbacks(void)
68390 {
68391 __rcu_process_callbacks(&rcu_sched_state,
68392 &__get_cpu_var(rcu_sched_data));
68393 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68394 index 01b2ccd..4f5d80a 100644
68395 --- a/kernel/rcutree.h
68396 +++ b/kernel/rcutree.h
68397 @@ -86,7 +86,7 @@
68398 struct rcu_dynticks {
68399 int dynticks_nesting; /* Track irq/process nesting level. */
68400 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68401 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
68402 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
68403 };
68404
68405 /* RCU's kthread states for tracing. */
68406 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68407 index 8aafbb8..2fca109 100644
68408 --- a/kernel/rcutree_plugin.h
68409 +++ b/kernel/rcutree_plugin.h
68410 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
68411
68412 /* Clean up and exit. */
68413 smp_mb(); /* ensure expedited GP seen before counter increment. */
68414 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68415 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68416 unlock_mb_ret:
68417 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68418 mb_ret:
68419 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
68420
68421 #else /* #ifndef CONFIG_SMP */
68422
68423 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68424 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68425 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68426 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68427
68428 static int synchronize_sched_expedited_cpu_stop(void *data)
68429 {
68430 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
68431 int firstsnap, s, snap, trycount = 0;
68432
68433 /* Note that atomic_inc_return() implies full memory barrier. */
68434 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68435 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68436 get_online_cpus();
68437
68438 /*
68439 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
68440 }
68441
68442 /* Check to see if someone else did our work for us. */
68443 - s = atomic_read(&sync_sched_expedited_done);
68444 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68445 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68446 smp_mb(); /* ensure test happens before caller kfree */
68447 return;
68448 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
68449 * grace period works for us.
68450 */
68451 get_online_cpus();
68452 - snap = atomic_read(&sync_sched_expedited_started) - 1;
68453 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
68454 smp_mb(); /* ensure read is before try_stop_cpus(). */
68455 }
68456
68457 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
68458 * than we did beat us to the punch.
68459 */
68460 do {
68461 - s = atomic_read(&sync_sched_expedited_done);
68462 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68463 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68464 smp_mb(); /* ensure test happens before caller kfree */
68465 break;
68466 }
68467 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68468 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68469
68470 put_online_cpus();
68471 }
68472 @@ -1953,7 +1953,7 @@ int rcu_needs_cpu(int cpu)
68473 for_each_online_cpu(thatcpu) {
68474 if (thatcpu == cpu)
68475 continue;
68476 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
68477 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
68478 thatcpu).dynticks);
68479 smp_mb(); /* Order sampling of snap with end of grace period. */
68480 if ((snap & 0x1) != 0) {
68481 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68482 index 3b0c098..43ba2d8 100644
68483 --- a/kernel/rcutree_trace.c
68484 +++ b/kernel/rcutree_trace.c
68485 @@ -74,7 +74,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68486 rdp->qs_pending);
68487 #ifdef CONFIG_NO_HZ
68488 seq_printf(m, " dt=%d/%d/%d df=%lu",
68489 - atomic_read(&rdp->dynticks->dynticks),
68490 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68491 rdp->dynticks->dynticks_nesting,
68492 rdp->dynticks->dynticks_nmi_nesting,
68493 rdp->dynticks_fqs);
68494 @@ -148,7 +148,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68495 rdp->qs_pending);
68496 #ifdef CONFIG_NO_HZ
68497 seq_printf(m, ",%d,%d,%d,%lu",
68498 - atomic_read(&rdp->dynticks->dynticks),
68499 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68500 rdp->dynticks->dynticks_nesting,
68501 rdp->dynticks->dynticks_nmi_nesting,
68502 rdp->dynticks_fqs);
68503 diff --git a/kernel/relay.c b/kernel/relay.c
68504 index 859ea5a..096e2fe 100644
68505 --- a/kernel/relay.c
68506 +++ b/kernel/relay.c
68507 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
68508 };
68509 ssize_t ret;
68510
68511 + pax_track_stack();
68512 +
68513 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
68514 return 0;
68515 if (splice_grow_spd(pipe, &spd))
68516 diff --git a/kernel/resource.c b/kernel/resource.c
68517 index c8dc249..f1e2359 100644
68518 --- a/kernel/resource.c
68519 +++ b/kernel/resource.c
68520 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68521
68522 static int __init ioresources_init(void)
68523 {
68524 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68525 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68526 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68527 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68528 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68529 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68530 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68531 +#endif
68532 +#else
68533 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68534 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68535 +#endif
68536 return 0;
68537 }
68538 __initcall(ioresources_init);
68539 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68540 index 5c9ccd3..a35e22b 100644
68541 --- a/kernel/rtmutex-tester.c
68542 +++ b/kernel/rtmutex-tester.c
68543 @@ -20,7 +20,7 @@
68544 #define MAX_RT_TEST_MUTEXES 8
68545
68546 static spinlock_t rttest_lock;
68547 -static atomic_t rttest_event;
68548 +static atomic_unchecked_t rttest_event;
68549
68550 struct test_thread_data {
68551 int opcode;
68552 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68553
68554 case RTTEST_LOCKCONT:
68555 td->mutexes[td->opdata] = 1;
68556 - td->event = atomic_add_return(1, &rttest_event);
68557 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68558 return 0;
68559
68560 case RTTEST_RESET:
68561 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68562 return 0;
68563
68564 case RTTEST_RESETEVENT:
68565 - atomic_set(&rttest_event, 0);
68566 + atomic_set_unchecked(&rttest_event, 0);
68567 return 0;
68568
68569 default:
68570 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68571 return ret;
68572
68573 td->mutexes[id] = 1;
68574 - td->event = atomic_add_return(1, &rttest_event);
68575 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68576 rt_mutex_lock(&mutexes[id]);
68577 - td->event = atomic_add_return(1, &rttest_event);
68578 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68579 td->mutexes[id] = 4;
68580 return 0;
68581
68582 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68583 return ret;
68584
68585 td->mutexes[id] = 1;
68586 - td->event = atomic_add_return(1, &rttest_event);
68587 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68588 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68589 - td->event = atomic_add_return(1, &rttest_event);
68590 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68591 td->mutexes[id] = ret ? 0 : 4;
68592 return ret ? -EINTR : 0;
68593
68594 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68595 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68596 return ret;
68597
68598 - td->event = atomic_add_return(1, &rttest_event);
68599 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68600 rt_mutex_unlock(&mutexes[id]);
68601 - td->event = atomic_add_return(1, &rttest_event);
68602 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68603 td->mutexes[id] = 0;
68604 return 0;
68605
68606 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68607 break;
68608
68609 td->mutexes[dat] = 2;
68610 - td->event = atomic_add_return(1, &rttest_event);
68611 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68612 break;
68613
68614 default:
68615 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68616 return;
68617
68618 td->mutexes[dat] = 3;
68619 - td->event = atomic_add_return(1, &rttest_event);
68620 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68621 break;
68622
68623 case RTTEST_LOCKNOWAIT:
68624 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68625 return;
68626
68627 td->mutexes[dat] = 1;
68628 - td->event = atomic_add_return(1, &rttest_event);
68629 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68630 return;
68631
68632 default:
68633 diff --git a/kernel/sched.c b/kernel/sched.c
68634 index b50b0f0..1c6c591 100644
68635 --- a/kernel/sched.c
68636 +++ b/kernel/sched.c
68637 @@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
68638 struct rq *rq;
68639 int cpu;
68640
68641 + pax_track_stack();
68642 +
68643 need_resched:
68644 preempt_disable();
68645 cpu = smp_processor_id();
68646 @@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p, const int nice)
68647 /* convert nice value [19,-20] to rlimit style value [1,40] */
68648 int nice_rlim = 20 - nice;
68649
68650 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68651 +
68652 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68653 capable(CAP_SYS_NICE));
68654 }
68655 @@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68656 if (nice > 19)
68657 nice = 19;
68658
68659 - if (increment < 0 && !can_nice(current, nice))
68660 + if (increment < 0 && (!can_nice(current, nice) ||
68661 + gr_handle_chroot_nice()))
68662 return -EPERM;
68663
68664 retval = security_task_setnice(current, nice);
68665 @@ -5127,6 +5132,7 @@ recheck:
68666 unsigned long rlim_rtprio =
68667 task_rlimit(p, RLIMIT_RTPRIO);
68668
68669 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68670 /* can't set/change the rt policy */
68671 if (policy != p->policy && !rlim_rtprio)
68672 return -EPERM;
68673 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
68674 index 429242f..d7cca82 100644
68675 --- a/kernel/sched_autogroup.c
68676 +++ b/kernel/sched_autogroup.c
68677 @@ -7,7 +7,7 @@
68678
68679 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68680 static struct autogroup autogroup_default;
68681 -static atomic_t autogroup_seq_nr;
68682 +static atomic_unchecked_t autogroup_seq_nr;
68683
68684 static void __init autogroup_init(struct task_struct *init_task)
68685 {
68686 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68687
68688 kref_init(&ag->kref);
68689 init_rwsem(&ag->lock);
68690 - ag->id = atomic_inc_return(&autogroup_seq_nr);
68691 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68692 ag->tg = tg;
68693 #ifdef CONFIG_RT_GROUP_SCHED
68694 /*
68695 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
68696 index bc8ee99..b6f6492 100644
68697 --- a/kernel/sched_fair.c
68698 +++ b/kernel/sched_fair.c
68699 @@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68700 * run_rebalance_domains is triggered when needed from the scheduler tick.
68701 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68702 */
68703 -static void run_rebalance_domains(struct softirq_action *h)
68704 +static void run_rebalance_domains(void)
68705 {
68706 int this_cpu = smp_processor_id();
68707 struct rq *this_rq = cpu_rq(this_cpu);
68708 diff --git a/kernel/signal.c b/kernel/signal.c
68709 index 195331c..e89634ce 100644
68710 --- a/kernel/signal.c
68711 +++ b/kernel/signal.c
68712 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
68713
68714 int print_fatal_signals __read_mostly;
68715
68716 -static void __user *sig_handler(struct task_struct *t, int sig)
68717 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
68718 {
68719 return t->sighand->action[sig - 1].sa.sa_handler;
68720 }
68721
68722 -static int sig_handler_ignored(void __user *handler, int sig)
68723 +static int sig_handler_ignored(__sighandler_t handler, int sig)
68724 {
68725 /* Is it explicitly or implicitly ignored? */
68726 return handler == SIG_IGN ||
68727 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68728 static int sig_task_ignored(struct task_struct *t, int sig,
68729 int from_ancestor_ns)
68730 {
68731 - void __user *handler;
68732 + __sighandler_t handler;
68733
68734 handler = sig_handler(t, sig);
68735
68736 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68737 atomic_inc(&user->sigpending);
68738 rcu_read_unlock();
68739
68740 + if (!override_rlimit)
68741 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68742 +
68743 if (override_rlimit ||
68744 atomic_read(&user->sigpending) <=
68745 task_rlimit(t, RLIMIT_SIGPENDING)) {
68746 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68747
68748 int unhandled_signal(struct task_struct *tsk, int sig)
68749 {
68750 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68751 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68752 if (is_global_init(tsk))
68753 return 1;
68754 if (handler != SIG_IGN && handler != SIG_DFL)
68755 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68756 }
68757 }
68758
68759 + /* allow glibc communication via tgkill to other threads in our
68760 + thread group */
68761 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68762 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68763 + && gr_handle_signal(t, sig))
68764 + return -EPERM;
68765 +
68766 return security_task_kill(t, info, sig, 0);
68767 }
68768
68769 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68770 return send_signal(sig, info, p, 1);
68771 }
68772
68773 -static int
68774 +int
68775 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68776 {
68777 return send_signal(sig, info, t, 0);
68778 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68779 unsigned long int flags;
68780 int ret, blocked, ignored;
68781 struct k_sigaction *action;
68782 + int is_unhandled = 0;
68783
68784 spin_lock_irqsave(&t->sighand->siglock, flags);
68785 action = &t->sighand->action[sig-1];
68786 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68787 }
68788 if (action->sa.sa_handler == SIG_DFL)
68789 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68790 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68791 + is_unhandled = 1;
68792 ret = specific_send_sig_info(sig, info, t);
68793 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68794
68795 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
68796 + normal operation */
68797 + if (is_unhandled) {
68798 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68799 + gr_handle_crash(t, sig);
68800 + }
68801 +
68802 return ret;
68803 }
68804
68805 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68806 ret = check_kill_permission(sig, info, p);
68807 rcu_read_unlock();
68808
68809 - if (!ret && sig)
68810 + if (!ret && sig) {
68811 ret = do_send_sig_info(sig, info, p, true);
68812 + if (!ret)
68813 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68814 + }
68815
68816 return ret;
68817 }
68818 @@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
68819 {
68820 siginfo_t info;
68821
68822 + pax_track_stack();
68823 +
68824 memset(&info, 0, sizeof info);
68825 info.si_signo = signr;
68826 info.si_code = exit_code;
68827 @@ -2746,7 +2771,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68828 int error = -ESRCH;
68829
68830 rcu_read_lock();
68831 - p = find_task_by_vpid(pid);
68832 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68833 + /* allow glibc communication via tgkill to other threads in our
68834 + thread group */
68835 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68836 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68837 + p = find_task_by_vpid_unrestricted(pid);
68838 + else
68839 +#endif
68840 + p = find_task_by_vpid(pid);
68841 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68842 error = check_kill_permission(sig, info, p);
68843 /*
68844 diff --git a/kernel/smp.c b/kernel/smp.c
68845 index fb67dfa..f819e2e 100644
68846 --- a/kernel/smp.c
68847 +++ b/kernel/smp.c
68848 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68849 }
68850 EXPORT_SYMBOL(smp_call_function);
68851
68852 -void ipi_call_lock(void)
68853 +void ipi_call_lock(void) __acquires(call_function.lock)
68854 {
68855 raw_spin_lock(&call_function.lock);
68856 }
68857
68858 -void ipi_call_unlock(void)
68859 +void ipi_call_unlock(void) __releases(call_function.lock)
68860 {
68861 raw_spin_unlock(&call_function.lock);
68862 }
68863
68864 -void ipi_call_lock_irq(void)
68865 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68866 {
68867 raw_spin_lock_irq(&call_function.lock);
68868 }
68869
68870 -void ipi_call_unlock_irq(void)
68871 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68872 {
68873 raw_spin_unlock_irq(&call_function.lock);
68874 }
68875 diff --git a/kernel/softirq.c b/kernel/softirq.c
68876 index fca82c3..1db9690 100644
68877 --- a/kernel/softirq.c
68878 +++ b/kernel/softirq.c
68879 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68880
68881 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68882
68883 -char *softirq_to_name[NR_SOFTIRQS] = {
68884 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68885 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68886 "TASKLET", "SCHED", "HRTIMER", "RCU"
68887 };
68888 @@ -235,7 +235,7 @@ restart:
68889 kstat_incr_softirqs_this_cpu(vec_nr);
68890
68891 trace_softirq_entry(vec_nr);
68892 - h->action(h);
68893 + h->action();
68894 trace_softirq_exit(vec_nr);
68895 if (unlikely(prev_count != preempt_count())) {
68896 printk(KERN_ERR "huh, entered softirq %u %s %p"
68897 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
68898 local_irq_restore(flags);
68899 }
68900
68901 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68902 +void open_softirq(int nr, void (*action)(void))
68903 {
68904 - softirq_vec[nr].action = action;
68905 + pax_open_kernel();
68906 + *(void **)&softirq_vec[nr].action = action;
68907 + pax_close_kernel();
68908 }
68909
68910 /*
68911 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68912
68913 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68914
68915 -static void tasklet_action(struct softirq_action *a)
68916 +static void tasklet_action(void)
68917 {
68918 struct tasklet_struct *list;
68919
68920 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
68921 }
68922 }
68923
68924 -static void tasklet_hi_action(struct softirq_action *a)
68925 +static void tasklet_hi_action(void)
68926 {
68927 struct tasklet_struct *list;
68928
68929 diff --git a/kernel/sys.c b/kernel/sys.c
68930 index 1dbbe69..e96e1dd 100644
68931 --- a/kernel/sys.c
68932 +++ b/kernel/sys.c
68933 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68934 error = -EACCES;
68935 goto out;
68936 }
68937 +
68938 + if (gr_handle_chroot_setpriority(p, niceval)) {
68939 + error = -EACCES;
68940 + goto out;
68941 + }
68942 +
68943 no_nice = security_task_setnice(p, niceval);
68944 if (no_nice) {
68945 error = no_nice;
68946 @@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68947 goto error;
68948 }
68949
68950 + if (gr_check_group_change(new->gid, new->egid, -1))
68951 + goto error;
68952 +
68953 if (rgid != (gid_t) -1 ||
68954 (egid != (gid_t) -1 && egid != old->gid))
68955 new->sgid = new->egid;
68956 @@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68957 old = current_cred();
68958
68959 retval = -EPERM;
68960 +
68961 + if (gr_check_group_change(gid, gid, gid))
68962 + goto error;
68963 +
68964 if (nsown_capable(CAP_SETGID))
68965 new->gid = new->egid = new->sgid = new->fsgid = gid;
68966 else if (gid == old->gid || gid == old->sgid)
68967 @@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68968 goto error;
68969 }
68970
68971 + if (gr_check_user_change(new->uid, new->euid, -1))
68972 + goto error;
68973 +
68974 if (new->uid != old->uid) {
68975 retval = set_user(new);
68976 if (retval < 0)
68977 @@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68978 old = current_cred();
68979
68980 retval = -EPERM;
68981 +
68982 + if (gr_check_crash_uid(uid))
68983 + goto error;
68984 + if (gr_check_user_change(uid, uid, uid))
68985 + goto error;
68986 +
68987 if (nsown_capable(CAP_SETUID)) {
68988 new->suid = new->uid = uid;
68989 if (uid != old->uid) {
68990 @@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68991 goto error;
68992 }
68993
68994 + if (gr_check_user_change(ruid, euid, -1))
68995 + goto error;
68996 +
68997 if (ruid != (uid_t) -1) {
68998 new->uid = ruid;
68999 if (ruid != old->uid) {
69000 @@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
69001 goto error;
69002 }
69003
69004 + if (gr_check_group_change(rgid, egid, -1))
69005 + goto error;
69006 +
69007 if (rgid != (gid_t) -1)
69008 new->gid = rgid;
69009 if (egid != (gid_t) -1)
69010 @@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69011 old = current_cred();
69012 old_fsuid = old->fsuid;
69013
69014 + if (gr_check_user_change(-1, -1, uid))
69015 + goto error;
69016 +
69017 if (uid == old->uid || uid == old->euid ||
69018 uid == old->suid || uid == old->fsuid ||
69019 nsown_capable(CAP_SETUID)) {
69020 @@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69021 }
69022 }
69023
69024 +error:
69025 abort_creds(new);
69026 return old_fsuid;
69027
69028 @@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
69029 if (gid == old->gid || gid == old->egid ||
69030 gid == old->sgid || gid == old->fsgid ||
69031 nsown_capable(CAP_SETGID)) {
69032 + if (gr_check_group_change(-1, -1, gid))
69033 + goto error;
69034 +
69035 if (gid != old_fsgid) {
69036 new->fsgid = gid;
69037 goto change_okay;
69038 }
69039 }
69040
69041 +error:
69042 abort_creds(new);
69043 return old_fsgid;
69044
69045 @@ -1188,7 +1224,10 @@ static int override_release(char __user *release, int len)
69046 }
69047 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69048 snprintf(buf, len, "2.6.%u%s", v, rest);
69049 - ret = copy_to_user(release, buf, len);
69050 + if (len > sizeof(buf))
69051 + ret = -EFAULT;
69052 + else
69053 + ret = copy_to_user(release, buf, len);
69054 }
69055 return ret;
69056 }
69057 @@ -1242,19 +1281,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69058 return -EFAULT;
69059
69060 down_read(&uts_sem);
69061 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
69062 + error = __copy_to_user(name->sysname, &utsname()->sysname,
69063 __OLD_UTS_LEN);
69064 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69065 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69066 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
69067 __OLD_UTS_LEN);
69068 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69069 - error |= __copy_to_user(&name->release, &utsname()->release,
69070 + error |= __copy_to_user(name->release, &utsname()->release,
69071 __OLD_UTS_LEN);
69072 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69073 - error |= __copy_to_user(&name->version, &utsname()->version,
69074 + error |= __copy_to_user(name->version, &utsname()->version,
69075 __OLD_UTS_LEN);
69076 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69077 - error |= __copy_to_user(&name->machine, &utsname()->machine,
69078 + error |= __copy_to_user(name->machine, &utsname()->machine,
69079 __OLD_UTS_LEN);
69080 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69081 up_read(&uts_sem);
69082 @@ -1717,7 +1756,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69083 error = get_dumpable(me->mm);
69084 break;
69085 case PR_SET_DUMPABLE:
69086 - if (arg2 < 0 || arg2 > 1) {
69087 + if (arg2 > 1) {
69088 error = -EINVAL;
69089 break;
69090 }
69091 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69092 index 11d65b5..6957b37 100644
69093 --- a/kernel/sysctl.c
69094 +++ b/kernel/sysctl.c
69095 @@ -85,6 +85,13 @@
69096
69097
69098 #if defined(CONFIG_SYSCTL)
69099 +#include <linux/grsecurity.h>
69100 +#include <linux/grinternal.h>
69101 +
69102 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
69103 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
69104 + const int op);
69105 +extern int gr_handle_chroot_sysctl(const int op);
69106
69107 /* External variables not in a header file. */
69108 extern int sysctl_overcommit_memory;
69109 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69110 }
69111
69112 #endif
69113 +extern struct ctl_table grsecurity_table[];
69114
69115 static struct ctl_table root_table[];
69116 static struct ctl_table_root sysctl_table_root;
69117 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
69118 int sysctl_legacy_va_layout;
69119 #endif
69120
69121 +#ifdef CONFIG_PAX_SOFTMODE
69122 +static ctl_table pax_table[] = {
69123 + {
69124 + .procname = "softmode",
69125 + .data = &pax_softmode,
69126 + .maxlen = sizeof(unsigned int),
69127 + .mode = 0600,
69128 + .proc_handler = &proc_dointvec,
69129 + },
69130 +
69131 + { }
69132 +};
69133 +#endif
69134 +
69135 /* The default sysctl tables: */
69136
69137 static struct ctl_table root_table[] = {
69138 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
69139 #endif
69140
69141 static struct ctl_table kern_table[] = {
69142 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69143 + {
69144 + .procname = "grsecurity",
69145 + .mode = 0500,
69146 + .child = grsecurity_table,
69147 + },
69148 +#endif
69149 +
69150 +#ifdef CONFIG_PAX_SOFTMODE
69151 + {
69152 + .procname = "pax",
69153 + .mode = 0500,
69154 + .child = pax_table,
69155 + },
69156 +#endif
69157 +
69158 {
69159 .procname = "sched_child_runs_first",
69160 .data = &sysctl_sched_child_runs_first,
69161 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
69162 .data = &modprobe_path,
69163 .maxlen = KMOD_PATH_LEN,
69164 .mode = 0644,
69165 - .proc_handler = proc_dostring,
69166 + .proc_handler = proc_dostring_modpriv,
69167 },
69168 {
69169 .procname = "modules_disabled",
69170 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
69171 .extra1 = &zero,
69172 .extra2 = &one,
69173 },
69174 +#endif
69175 {
69176 .procname = "kptr_restrict",
69177 .data = &kptr_restrict,
69178 .maxlen = sizeof(int),
69179 .mode = 0644,
69180 .proc_handler = proc_dmesg_restrict,
69181 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69182 + .extra1 = &two,
69183 +#else
69184 .extra1 = &zero,
69185 +#endif
69186 .extra2 = &two,
69187 },
69188 -#endif
69189 {
69190 .procname = "ngroups_max",
69191 .data = &ngroups_max,
69192 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
69193 .proc_handler = proc_dointvec_minmax,
69194 .extra1 = &zero,
69195 },
69196 + {
69197 + .procname = "heap_stack_gap",
69198 + .data = &sysctl_heap_stack_gap,
69199 + .maxlen = sizeof(sysctl_heap_stack_gap),
69200 + .mode = 0644,
69201 + .proc_handler = proc_doulongvec_minmax,
69202 + },
69203 #else
69204 {
69205 .procname = "nr_trim_pages",
69206 @@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
69207 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
69208 {
69209 int mode;
69210 + int error;
69211 +
69212 + if (table->parent != NULL && table->parent->procname != NULL &&
69213 + table->procname != NULL &&
69214 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
69215 + return -EACCES;
69216 + if (gr_handle_chroot_sysctl(op))
69217 + return -EACCES;
69218 + error = gr_handle_sysctl(table, op);
69219 + if (error)
69220 + return error;
69221
69222 if (root->permissions)
69223 mode = root->permissions(root, current->nsproxy, table);
69224 @@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *table, int write,
69225 buffer, lenp, ppos);
69226 }
69227
69228 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69229 + void __user *buffer, size_t *lenp, loff_t *ppos)
69230 +{
69231 + if (write && !capable(CAP_SYS_MODULE))
69232 + return -EPERM;
69233 +
69234 + return _proc_do_string(table->data, table->maxlen, write,
69235 + buffer, lenp, ppos);
69236 +}
69237 +
69238 static size_t proc_skip_spaces(char **buf)
69239 {
69240 size_t ret;
69241 @@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69242 len = strlen(tmp);
69243 if (len > *size)
69244 len = *size;
69245 + if (len > sizeof(tmp))
69246 + len = sizeof(tmp);
69247 if (copy_to_user(*buf, tmp, len))
69248 return -EFAULT;
69249 *size -= len;
69250 @@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69251 *i = val;
69252 } else {
69253 val = convdiv * (*i) / convmul;
69254 - if (!first)
69255 + if (!first) {
69256 err = proc_put_char(&buffer, &left, '\t');
69257 + if (err)
69258 + break;
69259 + }
69260 err = proc_put_long(&buffer, &left, val, false);
69261 if (err)
69262 break;
69263 @@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *table, int write,
69264 return -ENOSYS;
69265 }
69266
69267 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69268 + void __user *buffer, size_t *lenp, loff_t *ppos)
69269 +{
69270 + return -ENOSYS;
69271 +}
69272 +
69273 int proc_dointvec(struct ctl_table *table, int write,
69274 void __user *buffer, size_t *lenp, loff_t *ppos)
69275 {
69276 @@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69277 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69278 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69279 EXPORT_SYMBOL(proc_dostring);
69280 +EXPORT_SYMBOL(proc_dostring_modpriv);
69281 EXPORT_SYMBOL(proc_doulongvec_minmax);
69282 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69283 EXPORT_SYMBOL(register_sysctl_table);
69284 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69285 index 2ce1b30..82bf0a4 100644
69286 --- a/kernel/sysctl_binary.c
69287 +++ b/kernel/sysctl_binary.c
69288 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69289 int i;
69290
69291 set_fs(KERNEL_DS);
69292 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69293 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69294 set_fs(old_fs);
69295 if (result < 0)
69296 goto out_kfree;
69297 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69298 }
69299
69300 set_fs(KERNEL_DS);
69301 - result = vfs_write(file, buffer, str - buffer, &pos);
69302 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69303 set_fs(old_fs);
69304 if (result < 0)
69305 goto out_kfree;
69306 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69307 int i;
69308
69309 set_fs(KERNEL_DS);
69310 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69311 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69312 set_fs(old_fs);
69313 if (result < 0)
69314 goto out_kfree;
69315 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69316 }
69317
69318 set_fs(KERNEL_DS);
69319 - result = vfs_write(file, buffer, str - buffer, &pos);
69320 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69321 set_fs(old_fs);
69322 if (result < 0)
69323 goto out_kfree;
69324 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69325 int i;
69326
69327 set_fs(KERNEL_DS);
69328 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69329 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69330 set_fs(old_fs);
69331 if (result < 0)
69332 goto out;
69333 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69334 __le16 dnaddr;
69335
69336 set_fs(KERNEL_DS);
69337 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69338 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69339 set_fs(old_fs);
69340 if (result < 0)
69341 goto out;
69342 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69343 le16_to_cpu(dnaddr) & 0x3ff);
69344
69345 set_fs(KERNEL_DS);
69346 - result = vfs_write(file, buf, len, &pos);
69347 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69348 set_fs(old_fs);
69349 if (result < 0)
69350 goto out;
69351 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
69352 index 362da65..ab8ef8c 100644
69353 --- a/kernel/sysctl_check.c
69354 +++ b/kernel/sysctl_check.c
69355 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
69356 set_fail(&fail, table, "Directory with extra2");
69357 } else {
69358 if ((table->proc_handler == proc_dostring) ||
69359 + (table->proc_handler == proc_dostring_modpriv) ||
69360 (table->proc_handler == proc_dointvec) ||
69361 (table->proc_handler == proc_dointvec_minmax) ||
69362 (table->proc_handler == proc_dointvec_jiffies) ||
69363 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69364 index e660464..c8b9e67 100644
69365 --- a/kernel/taskstats.c
69366 +++ b/kernel/taskstats.c
69367 @@ -27,9 +27,12 @@
69368 #include <linux/cgroup.h>
69369 #include <linux/fs.h>
69370 #include <linux/file.h>
69371 +#include <linux/grsecurity.h>
69372 #include <net/genetlink.h>
69373 #include <linux/atomic.h>
69374
69375 +extern int gr_is_taskstats_denied(int pid);
69376 +
69377 /*
69378 * Maximum length of a cpumask that can be specified in
69379 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69380 @@ -556,6 +559,9 @@ err:
69381
69382 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69383 {
69384 + if (gr_is_taskstats_denied(current->pid))
69385 + return -EACCES;
69386 +
69387 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69388 return cmd_attr_register_cpumask(info);
69389 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69390 diff --git a/kernel/time.c b/kernel/time.c
69391 index d776062..fa8d186 100644
69392 --- a/kernel/time.c
69393 +++ b/kernel/time.c
69394 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69395 return error;
69396
69397 if (tz) {
69398 + /* we log in do_settimeofday called below, so don't log twice
69399 + */
69400 + if (!tv)
69401 + gr_log_timechange();
69402 +
69403 /* SMP safe, global irq locking makes it work. */
69404 sys_tz = *tz;
69405 update_vsyscall_tz();
69406 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69407 index 8b70c76..923e9f5 100644
69408 --- a/kernel/time/alarmtimer.c
69409 +++ b/kernel/time/alarmtimer.c
69410 @@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
69411 {
69412 int error = 0;
69413 int i;
69414 - struct k_clock alarm_clock = {
69415 + static struct k_clock alarm_clock = {
69416 .clock_getres = alarm_clock_getres,
69417 .clock_get = alarm_clock_get,
69418 .timer_create = alarm_timer_create,
69419 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69420 index 7a90d02..6d8585a 100644
69421 --- a/kernel/time/tick-broadcast.c
69422 +++ b/kernel/time/tick-broadcast.c
69423 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69424 * then clear the broadcast bit.
69425 */
69426 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69427 - int cpu = smp_processor_id();
69428 + cpu = smp_processor_id();
69429
69430 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69431 tick_broadcast_clear_oneshot(cpu);
69432 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69433 index 6f9798b..f8c4087 100644
69434 --- a/kernel/time/timekeeping.c
69435 +++ b/kernel/time/timekeeping.c
69436 @@ -14,6 +14,7 @@
69437 #include <linux/init.h>
69438 #include <linux/mm.h>
69439 #include <linux/sched.h>
69440 +#include <linux/grsecurity.h>
69441 #include <linux/syscore_ops.h>
69442 #include <linux/clocksource.h>
69443 #include <linux/jiffies.h>
69444 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
69445 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69446 return -EINVAL;
69447
69448 + gr_log_timechange();
69449 +
69450 write_seqlock_irqsave(&xtime_lock, flags);
69451
69452 timekeeping_forward_now();
69453 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69454 index 3258455..f35227d 100644
69455 --- a/kernel/time/timer_list.c
69456 +++ b/kernel/time/timer_list.c
69457 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69458
69459 static void print_name_offset(struct seq_file *m, void *sym)
69460 {
69461 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69462 + SEQ_printf(m, "<%p>", NULL);
69463 +#else
69464 char symname[KSYM_NAME_LEN];
69465
69466 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69467 SEQ_printf(m, "<%pK>", sym);
69468 else
69469 SEQ_printf(m, "%s", symname);
69470 +#endif
69471 }
69472
69473 static void
69474 @@ -112,7 +116,11 @@ next_one:
69475 static void
69476 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69477 {
69478 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69479 + SEQ_printf(m, " .base: %p\n", NULL);
69480 +#else
69481 SEQ_printf(m, " .base: %pK\n", base);
69482 +#endif
69483 SEQ_printf(m, " .index: %d\n",
69484 base->index);
69485 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69486 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69487 {
69488 struct proc_dir_entry *pe;
69489
69490 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69491 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69492 +#else
69493 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69494 +#endif
69495 if (!pe)
69496 return -ENOMEM;
69497 return 0;
69498 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69499 index a5d0a3a..60c7948 100644
69500 --- a/kernel/time/timer_stats.c
69501 +++ b/kernel/time/timer_stats.c
69502 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69503 static unsigned long nr_entries;
69504 static struct entry entries[MAX_ENTRIES];
69505
69506 -static atomic_t overflow_count;
69507 +static atomic_unchecked_t overflow_count;
69508
69509 /*
69510 * The entries are in a hash-table, for fast lookup:
69511 @@ -140,7 +140,7 @@ static void reset_entries(void)
69512 nr_entries = 0;
69513 memset(entries, 0, sizeof(entries));
69514 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69515 - atomic_set(&overflow_count, 0);
69516 + atomic_set_unchecked(&overflow_count, 0);
69517 }
69518
69519 static struct entry *alloc_entry(void)
69520 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69521 if (likely(entry))
69522 entry->count++;
69523 else
69524 - atomic_inc(&overflow_count);
69525 + atomic_inc_unchecked(&overflow_count);
69526
69527 out_unlock:
69528 raw_spin_unlock_irqrestore(lock, flags);
69529 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69530
69531 static void print_name_offset(struct seq_file *m, unsigned long addr)
69532 {
69533 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69534 + seq_printf(m, "<%p>", NULL);
69535 +#else
69536 char symname[KSYM_NAME_LEN];
69537
69538 if (lookup_symbol_name(addr, symname) < 0)
69539 seq_printf(m, "<%p>", (void *)addr);
69540 else
69541 seq_printf(m, "%s", symname);
69542 +#endif
69543 }
69544
69545 static int tstats_show(struct seq_file *m, void *v)
69546 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69547
69548 seq_puts(m, "Timer Stats Version: v0.2\n");
69549 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69550 - if (atomic_read(&overflow_count))
69551 + if (atomic_read_unchecked(&overflow_count))
69552 seq_printf(m, "Overflow: %d entries\n",
69553 - atomic_read(&overflow_count));
69554 + atomic_read_unchecked(&overflow_count));
69555
69556 for (i = 0; i < nr_entries; i++) {
69557 entry = entries + i;
69558 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69559 {
69560 struct proc_dir_entry *pe;
69561
69562 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69563 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69564 +#else
69565 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69566 +#endif
69567 if (!pe)
69568 return -ENOMEM;
69569 return 0;
69570 diff --git a/kernel/timer.c b/kernel/timer.c
69571 index 8cff361..0fb5cd8 100644
69572 --- a/kernel/timer.c
69573 +++ b/kernel/timer.c
69574 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
69575 /*
69576 * This function runs timers and the timer-tq in bottom half context.
69577 */
69578 -static void run_timer_softirq(struct softirq_action *h)
69579 +static void run_timer_softirq(void)
69580 {
69581 struct tvec_base *base = __this_cpu_read(tvec_bases);
69582
69583 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69584 index 7c910a5..8b72104 100644
69585 --- a/kernel/trace/blktrace.c
69586 +++ b/kernel/trace/blktrace.c
69587 @@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69588 struct blk_trace *bt = filp->private_data;
69589 char buf[16];
69590
69591 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69592 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69593
69594 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69595 }
69596 @@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69597 return 1;
69598
69599 bt = buf->chan->private_data;
69600 - atomic_inc(&bt->dropped);
69601 + atomic_inc_unchecked(&bt->dropped);
69602 return 0;
69603 }
69604
69605 @@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69606
69607 bt->dir = dir;
69608 bt->dev = dev;
69609 - atomic_set(&bt->dropped, 0);
69610 + atomic_set_unchecked(&bt->dropped, 0);
69611
69612 ret = -EIO;
69613 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69614 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69615 index 48d3762..3b61fce 100644
69616 --- a/kernel/trace/ftrace.c
69617 +++ b/kernel/trace/ftrace.c
69618 @@ -1584,12 +1584,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69619 if (unlikely(ftrace_disabled))
69620 return 0;
69621
69622 + ret = ftrace_arch_code_modify_prepare();
69623 + FTRACE_WARN_ON(ret);
69624 + if (ret)
69625 + return 0;
69626 +
69627 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69628 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69629 if (ret) {
69630 ftrace_bug(ret, ip);
69631 - return 0;
69632 }
69633 - return 1;
69634 + return ret ? 0 : 1;
69635 }
69636
69637 /*
69638 @@ -2606,7 +2611,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69639
69640 int
69641 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69642 - void *data)
69643 + void *data)
69644 {
69645 struct ftrace_func_probe *entry;
69646 struct ftrace_page *pg;
69647 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69648 index 17a2d44..85907e2 100644
69649 --- a/kernel/trace/trace.c
69650 +++ b/kernel/trace/trace.c
69651 @@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
69652 size_t rem;
69653 unsigned int i;
69654
69655 + pax_track_stack();
69656 +
69657 if (splice_grow_spd(pipe, &spd))
69658 return -ENOMEM;
69659
69660 @@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
69661 int entries, size, i;
69662 size_t ret;
69663
69664 + pax_track_stack();
69665 +
69666 if (splice_grow_spd(pipe, &spd))
69667 return -ENOMEM;
69668
69669 @@ -4093,10 +4097,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69670 };
69671 #endif
69672
69673 -static struct dentry *d_tracer;
69674 -
69675 struct dentry *tracing_init_dentry(void)
69676 {
69677 + static struct dentry *d_tracer;
69678 static int once;
69679
69680 if (d_tracer)
69681 @@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
69682 return d_tracer;
69683 }
69684
69685 -static struct dentry *d_percpu;
69686 -
69687 struct dentry *tracing_dentry_percpu(void)
69688 {
69689 + static struct dentry *d_percpu;
69690 static int once;
69691 struct dentry *d_tracer;
69692
69693 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69694 index c212a7f..7b02394 100644
69695 --- a/kernel/trace/trace_events.c
69696 +++ b/kernel/trace/trace_events.c
69697 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
69698 struct ftrace_module_file_ops {
69699 struct list_head list;
69700 struct module *mod;
69701 - struct file_operations id;
69702 - struct file_operations enable;
69703 - struct file_operations format;
69704 - struct file_operations filter;
69705 };
69706
69707 static struct ftrace_module_file_ops *
69708 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
69709
69710 file_ops->mod = mod;
69711
69712 - file_ops->id = ftrace_event_id_fops;
69713 - file_ops->id.owner = mod;
69714 -
69715 - file_ops->enable = ftrace_enable_fops;
69716 - file_ops->enable.owner = mod;
69717 -
69718 - file_ops->filter = ftrace_event_filter_fops;
69719 - file_ops->filter.owner = mod;
69720 -
69721 - file_ops->format = ftrace_event_format_fops;
69722 - file_ops->format.owner = mod;
69723 + pax_open_kernel();
69724 + *(void **)&mod->trace_id.owner = mod;
69725 + *(void **)&mod->trace_enable.owner = mod;
69726 + *(void **)&mod->trace_filter.owner = mod;
69727 + *(void **)&mod->trace_format.owner = mod;
69728 + pax_close_kernel();
69729
69730 list_add(&file_ops->list, &ftrace_module_file_list);
69731
69732 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
69733
69734 for_each_event(call, start, end) {
69735 __trace_add_event_call(*call, mod,
69736 - &file_ops->id, &file_ops->enable,
69737 - &file_ops->filter, &file_ops->format);
69738 + &mod->trace_id, &mod->trace_enable,
69739 + &mod->trace_filter, &mod->trace_format);
69740 }
69741 }
69742
69743 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69744 index 00d527c..7c5b1a3 100644
69745 --- a/kernel/trace/trace_kprobe.c
69746 +++ b/kernel/trace/trace_kprobe.c
69747 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69748 long ret;
69749 int maxlen = get_rloc_len(*(u32 *)dest);
69750 u8 *dst = get_rloc_data(dest);
69751 - u8 *src = addr;
69752 + const u8 __user *src = (const u8 __force_user *)addr;
69753 mm_segment_t old_fs = get_fs();
69754 if (!maxlen)
69755 return;
69756 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69757 pagefault_disable();
69758 do
69759 ret = __copy_from_user_inatomic(dst++, src++, 1);
69760 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69761 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69762 dst[-1] = '\0';
69763 pagefault_enable();
69764 set_fs(old_fs);
69765 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69766 ((u8 *)get_rloc_data(dest))[0] = '\0';
69767 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69768 } else
69769 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69770 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69771 get_rloc_offs(*(u32 *)dest));
69772 }
69773 /* Return the length of string -- including null terminal byte */
69774 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69775 set_fs(KERNEL_DS);
69776 pagefault_disable();
69777 do {
69778 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69779 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69780 len++;
69781 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69782 pagefault_enable();
69783 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69784 index fd3c8aa..5f324a6 100644
69785 --- a/kernel/trace/trace_mmiotrace.c
69786 +++ b/kernel/trace/trace_mmiotrace.c
69787 @@ -24,7 +24,7 @@ struct header_iter {
69788 static struct trace_array *mmio_trace_array;
69789 static bool overrun_detected;
69790 static unsigned long prev_overruns;
69791 -static atomic_t dropped_count;
69792 +static atomic_unchecked_t dropped_count;
69793
69794 static void mmio_reset_data(struct trace_array *tr)
69795 {
69796 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69797
69798 static unsigned long count_overruns(struct trace_iterator *iter)
69799 {
69800 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
69801 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69802 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69803
69804 if (over > prev_overruns)
69805 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69806 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69807 sizeof(*entry), 0, pc);
69808 if (!event) {
69809 - atomic_inc(&dropped_count);
69810 + atomic_inc_unchecked(&dropped_count);
69811 return;
69812 }
69813 entry = ring_buffer_event_data(event);
69814 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69815 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69816 sizeof(*entry), 0, pc);
69817 if (!event) {
69818 - atomic_inc(&dropped_count);
69819 + atomic_inc_unchecked(&dropped_count);
69820 return;
69821 }
69822 entry = ring_buffer_event_data(event);
69823 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69824 index 5199930..26c73a0 100644
69825 --- a/kernel/trace/trace_output.c
69826 +++ b/kernel/trace/trace_output.c
69827 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
69828
69829 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69830 if (!IS_ERR(p)) {
69831 - p = mangle_path(s->buffer + s->len, p, "\n");
69832 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69833 if (p) {
69834 s->len = p - s->buffer;
69835 return 1;
69836 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69837 index 77575b3..6e623d1 100644
69838 --- a/kernel/trace/trace_stack.c
69839 +++ b/kernel/trace/trace_stack.c
69840 @@ -50,7 +50,7 @@ static inline void check_stack(void)
69841 return;
69842
69843 /* we do not handle interrupt stacks yet */
69844 - if (!object_is_on_stack(&this_size))
69845 + if (!object_starts_on_stack(&this_size))
69846 return;
69847
69848 local_irq_save(flags);
69849 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69850 index 209b379..7f76423 100644
69851 --- a/kernel/trace/trace_workqueue.c
69852 +++ b/kernel/trace/trace_workqueue.c
69853 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69854 int cpu;
69855 pid_t pid;
69856 /* Can be inserted from interrupt or user context, need to be atomic */
69857 - atomic_t inserted;
69858 + atomic_unchecked_t inserted;
69859 /*
69860 * Don't need to be atomic, works are serialized in a single workqueue thread
69861 * on a single CPU.
69862 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69863 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69864 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69865 if (node->pid == wq_thread->pid) {
69866 - atomic_inc(&node->inserted);
69867 + atomic_inc_unchecked(&node->inserted);
69868 goto found;
69869 }
69870 }
69871 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69872 tsk = get_pid_task(pid, PIDTYPE_PID);
69873 if (tsk) {
69874 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69875 - atomic_read(&cws->inserted), cws->executed,
69876 + atomic_read_unchecked(&cws->inserted), cws->executed,
69877 tsk->comm);
69878 put_task_struct(tsk);
69879 }
69880 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69881 index c0cb9c4..f33aa89 100644
69882 --- a/lib/Kconfig.debug
69883 +++ b/lib/Kconfig.debug
69884 @@ -1091,6 +1091,7 @@ config LATENCYTOP
69885 depends on DEBUG_KERNEL
69886 depends on STACKTRACE_SUPPORT
69887 depends on PROC_FS
69888 + depends on !GRKERNSEC_HIDESYM
69889 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
69890 select KALLSYMS
69891 select KALLSYMS_ALL
69892 diff --git a/lib/bitmap.c b/lib/bitmap.c
69893 index 2f4412e..a557e27 100644
69894 --- a/lib/bitmap.c
69895 +++ b/lib/bitmap.c
69896 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69897 {
69898 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69899 u32 chunk;
69900 - const char __user *ubuf = buf;
69901 + const char __user *ubuf = (const char __force_user *)buf;
69902
69903 bitmap_zero(maskp, nmaskbits);
69904
69905 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
69906 {
69907 if (!access_ok(VERIFY_READ, ubuf, ulen))
69908 return -EFAULT;
69909 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
69910 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
69911 }
69912 EXPORT_SYMBOL(bitmap_parse_user);
69913
69914 @@ -594,7 +594,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69915 {
69916 unsigned a, b;
69917 int c, old_c, totaldigits;
69918 - const char __user *ubuf = buf;
69919 + const char __user *ubuf = (const char __force_user *)buf;
69920 int exp_digit, in_range;
69921
69922 totaldigits = c = 0;
69923 @@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69924 {
69925 if (!access_ok(VERIFY_READ, ubuf, ulen))
69926 return -EFAULT;
69927 - return __bitmap_parselist((const char *)ubuf,
69928 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69929 ulen, 1, maskp, nmaskbits);
69930 }
69931 EXPORT_SYMBOL(bitmap_parselist_user);
69932 diff --git a/lib/bug.c b/lib/bug.c
69933 index 1955209..cbbb2ad 100644
69934 --- a/lib/bug.c
69935 +++ b/lib/bug.c
69936 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69937 return BUG_TRAP_TYPE_NONE;
69938
69939 bug = find_bug(bugaddr);
69940 + if (!bug)
69941 + return BUG_TRAP_TYPE_NONE;
69942
69943 file = NULL;
69944 line = 0;
69945 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69946 index a78b7c6..2c73084 100644
69947 --- a/lib/debugobjects.c
69948 +++ b/lib/debugobjects.c
69949 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69950 if (limit > 4)
69951 return;
69952
69953 - is_on_stack = object_is_on_stack(addr);
69954 + is_on_stack = object_starts_on_stack(addr);
69955 if (is_on_stack == onstack)
69956 return;
69957
69958 diff --git a/lib/devres.c b/lib/devres.c
69959 index 7c0e953..f642b5c 100644
69960 --- a/lib/devres.c
69961 +++ b/lib/devres.c
69962 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69963 void devm_iounmap(struct device *dev, void __iomem *addr)
69964 {
69965 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69966 - (void *)addr));
69967 + (void __force *)addr));
69968 iounmap(addr);
69969 }
69970 EXPORT_SYMBOL(devm_iounmap);
69971 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69972 {
69973 ioport_unmap(addr);
69974 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69975 - devm_ioport_map_match, (void *)addr));
69976 + devm_ioport_map_match, (void __force *)addr));
69977 }
69978 EXPORT_SYMBOL(devm_ioport_unmap);
69979
69980 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69981 index db07bfd..719b5ab 100644
69982 --- a/lib/dma-debug.c
69983 +++ b/lib/dma-debug.c
69984 @@ -870,7 +870,7 @@ out:
69985
69986 static void check_for_stack(struct device *dev, void *addr)
69987 {
69988 - if (object_is_on_stack(addr))
69989 + if (object_starts_on_stack(addr))
69990 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69991 "stack [addr=%p]\n", addr);
69992 }
69993 diff --git a/lib/extable.c b/lib/extable.c
69994 index 4cac81e..63e9b8f 100644
69995 --- a/lib/extable.c
69996 +++ b/lib/extable.c
69997 @@ -13,6 +13,7 @@
69998 #include <linux/init.h>
69999 #include <linux/sort.h>
70000 #include <asm/uaccess.h>
70001 +#include <asm/pgtable.h>
70002
70003 #ifndef ARCH_HAS_SORT_EXTABLE
70004 /*
70005 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
70006 void sort_extable(struct exception_table_entry *start,
70007 struct exception_table_entry *finish)
70008 {
70009 + pax_open_kernel();
70010 sort(start, finish - start, sizeof(struct exception_table_entry),
70011 cmp_ex, NULL);
70012 + pax_close_kernel();
70013 }
70014
70015 #ifdef CONFIG_MODULES
70016 diff --git a/lib/inflate.c b/lib/inflate.c
70017 index 013a761..c28f3fc 100644
70018 --- a/lib/inflate.c
70019 +++ b/lib/inflate.c
70020 @@ -269,7 +269,7 @@ static void free(void *where)
70021 malloc_ptr = free_mem_ptr;
70022 }
70023 #else
70024 -#define malloc(a) kmalloc(a, GFP_KERNEL)
70025 +#define malloc(a) kmalloc((a), GFP_KERNEL)
70026 #define free(a) kfree(a)
70027 #endif
70028
70029 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70030 index bd2bea9..6b3c95e 100644
70031 --- a/lib/is_single_threaded.c
70032 +++ b/lib/is_single_threaded.c
70033 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70034 struct task_struct *p, *t;
70035 bool ret;
70036
70037 + if (!mm)
70038 + return true;
70039 +
70040 if (atomic_read(&task->signal->live) != 1)
70041 return false;
70042
70043 diff --git a/lib/kref.c b/lib/kref.c
70044 index 3efb882..8492f4c 100644
70045 --- a/lib/kref.c
70046 +++ b/lib/kref.c
70047 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
70048 */
70049 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
70050 {
70051 - WARN_ON(release == NULL);
70052 + BUG_ON(release == NULL);
70053 WARN_ON(release == (void (*)(struct kref *))kfree);
70054
70055 if (atomic_dec_and_test(&kref->refcount)) {
70056 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
70057 index a2f9da5..3bcadb6 100644
70058 --- a/lib/radix-tree.c
70059 +++ b/lib/radix-tree.c
70060 @@ -80,7 +80,7 @@ struct radix_tree_preload {
70061 int nr;
70062 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70063 };
70064 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70065 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70066
70067 static inline void *ptr_to_indirect(void *ptr)
70068 {
70069 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
70070 index d7222a9..2172edc 100644
70071 --- a/lib/vsprintf.c
70072 +++ b/lib/vsprintf.c
70073 @@ -16,6 +16,9 @@
70074 * - scnprintf and vscnprintf
70075 */
70076
70077 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70078 +#define __INCLUDED_BY_HIDESYM 1
70079 +#endif
70080 #include <stdarg.h>
70081 #include <linux/module.h>
70082 #include <linux/types.h>
70083 @@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70084 char sym[KSYM_SYMBOL_LEN];
70085 if (ext == 'B')
70086 sprint_backtrace(sym, value);
70087 - else if (ext != 'f' && ext != 's')
70088 + else if (ext != 'f' && ext != 's' && ext != 'a')
70089 sprint_symbol(sym, value);
70090 else
70091 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70092 @@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
70093 return string(buf, end, uuid, spec);
70094 }
70095
70096 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70097 +int kptr_restrict __read_mostly = 2;
70098 +#else
70099 int kptr_restrict __read_mostly;
70100 +#endif
70101
70102 /*
70103 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70104 @@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
70105 * - 'S' For symbolic direct pointers with offset
70106 * - 's' For symbolic direct pointers without offset
70107 * - 'B' For backtraced symbolic direct pointers with offset
70108 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70109 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70110 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70111 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70112 * - 'M' For a 6-byte MAC address, it prints the address in the
70113 @@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70114 {
70115 if (!ptr && *fmt != 'K') {
70116 /*
70117 - * Print (null) with the same width as a pointer so it makes
70118 + * Print (nil) with the same width as a pointer so it makes
70119 * tabular output look nice.
70120 */
70121 if (spec.field_width == -1)
70122 spec.field_width = 2 * sizeof(void *);
70123 - return string(buf, end, "(null)", spec);
70124 + return string(buf, end, "(nil)", spec);
70125 }
70126
70127 switch (*fmt) {
70128 @@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70129 /* Fallthrough */
70130 case 'S':
70131 case 's':
70132 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70133 + break;
70134 +#else
70135 + return symbol_string(buf, end, ptr, spec, *fmt);
70136 +#endif
70137 + case 'A':
70138 + case 'a':
70139 case 'B':
70140 return symbol_string(buf, end, ptr, spec, *fmt);
70141 case 'R':
70142 @@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70143 typeof(type) value; \
70144 if (sizeof(type) == 8) { \
70145 args = PTR_ALIGN(args, sizeof(u32)); \
70146 - *(u32 *)&value = *(u32 *)args; \
70147 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70148 + *(u32 *)&value = *(const u32 *)args; \
70149 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70150 } else { \
70151 args = PTR_ALIGN(args, sizeof(type)); \
70152 - value = *(typeof(type) *)args; \
70153 + value = *(const typeof(type) *)args; \
70154 } \
70155 args += sizeof(type); \
70156 value; \
70157 @@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70158 case FORMAT_TYPE_STR: {
70159 const char *str_arg = args;
70160 args += strlen(str_arg) + 1;
70161 - str = string(str, end, (char *)str_arg, spec);
70162 + str = string(str, end, str_arg, spec);
70163 break;
70164 }
70165
70166 diff --git a/localversion-grsec b/localversion-grsec
70167 new file mode 100644
70168 index 0000000..7cd6065
70169 --- /dev/null
70170 +++ b/localversion-grsec
70171 @@ -0,0 +1 @@
70172 +-grsec
70173 diff --git a/mm/Kconfig b/mm/Kconfig
70174 index f2f1ca1..0645f06 100644
70175 --- a/mm/Kconfig
70176 +++ b/mm/Kconfig
70177 @@ -238,10 +238,10 @@ config KSM
70178 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
70179
70180 config DEFAULT_MMAP_MIN_ADDR
70181 - int "Low address space to protect from user allocation"
70182 + int "Low address space to protect from user allocation"
70183 depends on MMU
70184 - default 4096
70185 - help
70186 + default 65536
70187 + help
70188 This is the portion of low virtual memory which should be protected
70189 from userspace allocation. Keeping a user from writing to low pages
70190 can help reduce the impact of kernel NULL pointer bugs.
70191 diff --git a/mm/filemap.c b/mm/filemap.c
70192 index b91f3aa..d0ac1d4 100644
70193 --- a/mm/filemap.c
70194 +++ b/mm/filemap.c
70195 @@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
70196 struct address_space *mapping = file->f_mapping;
70197
70198 if (!mapping->a_ops->readpage)
70199 - return -ENOEXEC;
70200 + return -ENODEV;
70201 file_accessed(file);
70202 vma->vm_ops = &generic_file_vm_ops;
70203 vma->vm_flags |= VM_CAN_NONLINEAR;
70204 @@ -2187,6 +2187,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
70205 *pos = i_size_read(inode);
70206
70207 if (limit != RLIM_INFINITY) {
70208 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70209 if (*pos >= limit) {
70210 send_sig(SIGXFSZ, current, 0);
70211 return -EFBIG;
70212 diff --git a/mm/fremap.c b/mm/fremap.c
70213 index b8e0e2d..076e171 100644
70214 --- a/mm/fremap.c
70215 +++ b/mm/fremap.c
70216 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70217 retry:
70218 vma = find_vma(mm, start);
70219
70220 +#ifdef CONFIG_PAX_SEGMEXEC
70221 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70222 + goto out;
70223 +#endif
70224 +
70225 /*
70226 * Make sure the vma is shared, that it supports prefaulting,
70227 * and that the remapped range is valid and fully within
70228 diff --git a/mm/highmem.c b/mm/highmem.c
70229 index 5ef672c..d7660f4 100644
70230 --- a/mm/highmem.c
70231 +++ b/mm/highmem.c
70232 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70233 * So no dangers, even with speculative execution.
70234 */
70235 page = pte_page(pkmap_page_table[i]);
70236 + pax_open_kernel();
70237 pte_clear(&init_mm, (unsigned long)page_address(page),
70238 &pkmap_page_table[i]);
70239 -
70240 + pax_close_kernel();
70241 set_page_address(page, NULL);
70242 need_flush = 1;
70243 }
70244 @@ -186,9 +187,11 @@ start:
70245 }
70246 }
70247 vaddr = PKMAP_ADDR(last_pkmap_nr);
70248 +
70249 + pax_open_kernel();
70250 set_pte_at(&init_mm, vaddr,
70251 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70252 -
70253 + pax_close_kernel();
70254 pkmap_count[last_pkmap_nr] = 1;
70255 set_page_address(page, (void *)vaddr);
70256
70257 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70258 index d819d93..468e18f 100644
70259 --- a/mm/huge_memory.c
70260 +++ b/mm/huge_memory.c
70261 @@ -702,7 +702,7 @@ out:
70262 * run pte_offset_map on the pmd, if an huge pmd could
70263 * materialize from under us from a different thread.
70264 */
70265 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70266 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70267 return VM_FAULT_OOM;
70268 /* if an huge pmd materialized from under us just retry later */
70269 if (unlikely(pmd_trans_huge(*pmd)))
70270 @@ -829,7 +829,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
70271
70272 for (i = 0; i < HPAGE_PMD_NR; i++) {
70273 copy_user_highpage(pages[i], page + i,
70274 - haddr + PAGE_SHIFT*i, vma);
70275 + haddr + PAGE_SIZE*i, vma);
70276 __SetPageUptodate(pages[i]);
70277 cond_resched();
70278 }
70279 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70280 index 2316840..b418671 100644
70281 --- a/mm/hugetlb.c
70282 +++ b/mm/hugetlb.c
70283 @@ -2347,6 +2347,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70284 return 1;
70285 }
70286
70287 +#ifdef CONFIG_PAX_SEGMEXEC
70288 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70289 +{
70290 + struct mm_struct *mm = vma->vm_mm;
70291 + struct vm_area_struct *vma_m;
70292 + unsigned long address_m;
70293 + pte_t *ptep_m;
70294 +
70295 + vma_m = pax_find_mirror_vma(vma);
70296 + if (!vma_m)
70297 + return;
70298 +
70299 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70300 + address_m = address + SEGMEXEC_TASK_SIZE;
70301 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70302 + get_page(page_m);
70303 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
70304 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70305 +}
70306 +#endif
70307 +
70308 /*
70309 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70310 */
70311 @@ -2450,6 +2471,11 @@ retry_avoidcopy:
70312 make_huge_pte(vma, new_page, 1));
70313 page_remove_rmap(old_page);
70314 hugepage_add_new_anon_rmap(new_page, vma, address);
70315 +
70316 +#ifdef CONFIG_PAX_SEGMEXEC
70317 + pax_mirror_huge_pte(vma, address, new_page);
70318 +#endif
70319 +
70320 /* Make the old page be freed below */
70321 new_page = old_page;
70322 mmu_notifier_invalidate_range_end(mm,
70323 @@ -2601,6 +2627,10 @@ retry:
70324 && (vma->vm_flags & VM_SHARED)));
70325 set_huge_pte_at(mm, address, ptep, new_pte);
70326
70327 +#ifdef CONFIG_PAX_SEGMEXEC
70328 + pax_mirror_huge_pte(vma, address, page);
70329 +#endif
70330 +
70331 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70332 /* Optimization, do the COW without a second fault */
70333 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70334 @@ -2630,6 +2660,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70335 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70336 struct hstate *h = hstate_vma(vma);
70337
70338 +#ifdef CONFIG_PAX_SEGMEXEC
70339 + struct vm_area_struct *vma_m;
70340 +#endif
70341 +
70342 ptep = huge_pte_offset(mm, address);
70343 if (ptep) {
70344 entry = huge_ptep_get(ptep);
70345 @@ -2641,6 +2675,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70346 VM_FAULT_SET_HINDEX(h - hstates);
70347 }
70348
70349 +#ifdef CONFIG_PAX_SEGMEXEC
70350 + vma_m = pax_find_mirror_vma(vma);
70351 + if (vma_m) {
70352 + unsigned long address_m;
70353 +
70354 + if (vma->vm_start > vma_m->vm_start) {
70355 + address_m = address;
70356 + address -= SEGMEXEC_TASK_SIZE;
70357 + vma = vma_m;
70358 + h = hstate_vma(vma);
70359 + } else
70360 + address_m = address + SEGMEXEC_TASK_SIZE;
70361 +
70362 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70363 + return VM_FAULT_OOM;
70364 + address_m &= HPAGE_MASK;
70365 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70366 + }
70367 +#endif
70368 +
70369 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70370 if (!ptep)
70371 return VM_FAULT_OOM;
70372 diff --git a/mm/internal.h b/mm/internal.h
70373 index 2189af4..f2ca332 100644
70374 --- a/mm/internal.h
70375 +++ b/mm/internal.h
70376 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70377 * in mm/page_alloc.c
70378 */
70379 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70380 +extern void free_compound_page(struct page *page);
70381 extern void prep_compound_page(struct page *page, unsigned long order);
70382 #ifdef CONFIG_MEMORY_FAILURE
70383 extern bool is_free_buddy_page(struct page *page);
70384 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70385 index d6880f5..ed77913 100644
70386 --- a/mm/kmemleak.c
70387 +++ b/mm/kmemleak.c
70388 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
70389
70390 for (i = 0; i < object->trace_len; i++) {
70391 void *ptr = (void *)object->trace[i];
70392 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70393 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70394 }
70395 }
70396
70397 diff --git a/mm/maccess.c b/mm/maccess.c
70398 index 4cee182..e00511d 100644
70399 --- a/mm/maccess.c
70400 +++ b/mm/maccess.c
70401 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70402 set_fs(KERNEL_DS);
70403 pagefault_disable();
70404 ret = __copy_from_user_inatomic(dst,
70405 - (__force const void __user *)src, size);
70406 + (const void __force_user *)src, size);
70407 pagefault_enable();
70408 set_fs(old_fs);
70409
70410 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70411
70412 set_fs(KERNEL_DS);
70413 pagefault_disable();
70414 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70415 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70416 pagefault_enable();
70417 set_fs(old_fs);
70418
70419 diff --git a/mm/madvise.c b/mm/madvise.c
70420 index 74bf193..feb6fd3 100644
70421 --- a/mm/madvise.c
70422 +++ b/mm/madvise.c
70423 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70424 pgoff_t pgoff;
70425 unsigned long new_flags = vma->vm_flags;
70426
70427 +#ifdef CONFIG_PAX_SEGMEXEC
70428 + struct vm_area_struct *vma_m;
70429 +#endif
70430 +
70431 switch (behavior) {
70432 case MADV_NORMAL:
70433 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70434 @@ -110,6 +114,13 @@ success:
70435 /*
70436 * vm_flags is protected by the mmap_sem held in write mode.
70437 */
70438 +
70439 +#ifdef CONFIG_PAX_SEGMEXEC
70440 + vma_m = pax_find_mirror_vma(vma);
70441 + if (vma_m)
70442 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70443 +#endif
70444 +
70445 vma->vm_flags = new_flags;
70446
70447 out:
70448 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70449 struct vm_area_struct ** prev,
70450 unsigned long start, unsigned long end)
70451 {
70452 +
70453 +#ifdef CONFIG_PAX_SEGMEXEC
70454 + struct vm_area_struct *vma_m;
70455 +#endif
70456 +
70457 *prev = vma;
70458 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70459 return -EINVAL;
70460 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70461 zap_page_range(vma, start, end - start, &details);
70462 } else
70463 zap_page_range(vma, start, end - start, NULL);
70464 +
70465 +#ifdef CONFIG_PAX_SEGMEXEC
70466 + vma_m = pax_find_mirror_vma(vma);
70467 + if (vma_m) {
70468 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70469 + struct zap_details details = {
70470 + .nonlinear_vma = vma_m,
70471 + .last_index = ULONG_MAX,
70472 + };
70473 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70474 + } else
70475 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70476 + }
70477 +#endif
70478 +
70479 return 0;
70480 }
70481
70482 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70483 if (end < start)
70484 goto out;
70485
70486 +#ifdef CONFIG_PAX_SEGMEXEC
70487 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70488 + if (end > SEGMEXEC_TASK_SIZE)
70489 + goto out;
70490 + } else
70491 +#endif
70492 +
70493 + if (end > TASK_SIZE)
70494 + goto out;
70495 +
70496 error = 0;
70497 if (end == start)
70498 goto out;
70499 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70500 index 2b43ba0..fc09657 100644
70501 --- a/mm/memory-failure.c
70502 +++ b/mm/memory-failure.c
70503 @@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70504
70505 int sysctl_memory_failure_recovery __read_mostly = 1;
70506
70507 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70508 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70509
70510 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70511
70512 @@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70513 si.si_signo = SIGBUS;
70514 si.si_errno = 0;
70515 si.si_code = BUS_MCEERR_AO;
70516 - si.si_addr = (void *)addr;
70517 + si.si_addr = (void __user *)addr;
70518 #ifdef __ARCH_SI_TRAPNO
70519 si.si_trapno = trapno;
70520 #endif
70521 @@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70522 }
70523
70524 nr_pages = 1 << compound_trans_order(hpage);
70525 - atomic_long_add(nr_pages, &mce_bad_pages);
70526 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70527
70528 /*
70529 * We need/can do nothing about count=0 pages.
70530 @@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70531 if (!PageHWPoison(hpage)
70532 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70533 || (p != hpage && TestSetPageHWPoison(hpage))) {
70534 - atomic_long_sub(nr_pages, &mce_bad_pages);
70535 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70536 return 0;
70537 }
70538 set_page_hwpoison_huge_page(hpage);
70539 @@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70540 }
70541 if (hwpoison_filter(p)) {
70542 if (TestClearPageHWPoison(p))
70543 - atomic_long_sub(nr_pages, &mce_bad_pages);
70544 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70545 unlock_page(hpage);
70546 put_page(hpage);
70547 return 0;
70548 @@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
70549 return 0;
70550 }
70551 if (TestClearPageHWPoison(p))
70552 - atomic_long_sub(nr_pages, &mce_bad_pages);
70553 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70554 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70555 return 0;
70556 }
70557 @@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
70558 */
70559 if (TestClearPageHWPoison(page)) {
70560 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70561 - atomic_long_sub(nr_pages, &mce_bad_pages);
70562 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70563 freeit = 1;
70564 if (PageHuge(page))
70565 clear_page_hwpoison_huge_page(page);
70566 @@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70567 }
70568 done:
70569 if (!PageHWPoison(hpage))
70570 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70571 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70572 set_page_hwpoison_huge_page(hpage);
70573 dequeue_hwpoisoned_huge_page(hpage);
70574 /* keep elevated page count for bad page */
70575 @@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page, int flags)
70576 return ret;
70577
70578 done:
70579 - atomic_long_add(1, &mce_bad_pages);
70580 + atomic_long_add_unchecked(1, &mce_bad_pages);
70581 SetPageHWPoison(page);
70582 /* keep elevated page count for bad page */
70583 return ret;
70584 diff --git a/mm/memory.c b/mm/memory.c
70585 index b2b8731..6080174 100644
70586 --- a/mm/memory.c
70587 +++ b/mm/memory.c
70588 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70589 return;
70590
70591 pmd = pmd_offset(pud, start);
70592 +
70593 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70594 pud_clear(pud);
70595 pmd_free_tlb(tlb, pmd, start);
70596 +#endif
70597 +
70598 }
70599
70600 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70601 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70602 if (end - 1 > ceiling - 1)
70603 return;
70604
70605 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70606 pud = pud_offset(pgd, start);
70607 pgd_clear(pgd);
70608 pud_free_tlb(tlb, pud, start);
70609 +#endif
70610 +
70611 }
70612
70613 /*
70614 @@ -1566,12 +1573,6 @@ no_page_table:
70615 return page;
70616 }
70617
70618 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70619 -{
70620 - return stack_guard_page_start(vma, addr) ||
70621 - stack_guard_page_end(vma, addr+PAGE_SIZE);
70622 -}
70623 -
70624 /**
70625 * __get_user_pages() - pin user pages in memory
70626 * @tsk: task_struct of target task
70627 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70628 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70629 i = 0;
70630
70631 - do {
70632 + while (nr_pages) {
70633 struct vm_area_struct *vma;
70634
70635 - vma = find_extend_vma(mm, start);
70636 + vma = find_vma(mm, start);
70637 if (!vma && in_gate_area(mm, start)) {
70638 unsigned long pg = start & PAGE_MASK;
70639 pgd_t *pgd;
70640 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70641 goto next_page;
70642 }
70643
70644 - if (!vma ||
70645 + if (!vma || start < vma->vm_start ||
70646 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70647 !(vm_flags & vma->vm_flags))
70648 return i ? : -EFAULT;
70649 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70650 int ret;
70651 unsigned int fault_flags = 0;
70652
70653 - /* For mlock, just skip the stack guard page. */
70654 - if (foll_flags & FOLL_MLOCK) {
70655 - if (stack_guard_page(vma, start))
70656 - goto next_page;
70657 - }
70658 if (foll_flags & FOLL_WRITE)
70659 fault_flags |= FAULT_FLAG_WRITE;
70660 if (nonblocking)
70661 @@ -1800,7 +1796,7 @@ next_page:
70662 start += PAGE_SIZE;
70663 nr_pages--;
70664 } while (nr_pages && start < vma->vm_end);
70665 - } while (nr_pages);
70666 + }
70667 return i;
70668 }
70669 EXPORT_SYMBOL(__get_user_pages);
70670 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70671 page_add_file_rmap(page);
70672 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70673
70674 +#ifdef CONFIG_PAX_SEGMEXEC
70675 + pax_mirror_file_pte(vma, addr, page, ptl);
70676 +#endif
70677 +
70678 retval = 0;
70679 pte_unmap_unlock(pte, ptl);
70680 return retval;
70681 @@ -2041,10 +2041,22 @@ out:
70682 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70683 struct page *page)
70684 {
70685 +
70686 +#ifdef CONFIG_PAX_SEGMEXEC
70687 + struct vm_area_struct *vma_m;
70688 +#endif
70689 +
70690 if (addr < vma->vm_start || addr >= vma->vm_end)
70691 return -EFAULT;
70692 if (!page_count(page))
70693 return -EINVAL;
70694 +
70695 +#ifdef CONFIG_PAX_SEGMEXEC
70696 + vma_m = pax_find_mirror_vma(vma);
70697 + if (vma_m)
70698 + vma_m->vm_flags |= VM_INSERTPAGE;
70699 +#endif
70700 +
70701 vma->vm_flags |= VM_INSERTPAGE;
70702 return insert_page(vma, addr, page, vma->vm_page_prot);
70703 }
70704 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70705 unsigned long pfn)
70706 {
70707 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70708 + BUG_ON(vma->vm_mirror);
70709
70710 if (addr < vma->vm_start || addr >= vma->vm_end)
70711 return -EFAULT;
70712 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70713 copy_user_highpage(dst, src, va, vma);
70714 }
70715
70716 +#ifdef CONFIG_PAX_SEGMEXEC
70717 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70718 +{
70719 + struct mm_struct *mm = vma->vm_mm;
70720 + spinlock_t *ptl;
70721 + pte_t *pte, entry;
70722 +
70723 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70724 + entry = *pte;
70725 + if (!pte_present(entry)) {
70726 + if (!pte_none(entry)) {
70727 + BUG_ON(pte_file(entry));
70728 + free_swap_and_cache(pte_to_swp_entry(entry));
70729 + pte_clear_not_present_full(mm, address, pte, 0);
70730 + }
70731 + } else {
70732 + struct page *page;
70733 +
70734 + flush_cache_page(vma, address, pte_pfn(entry));
70735 + entry = ptep_clear_flush(vma, address, pte);
70736 + BUG_ON(pte_dirty(entry));
70737 + page = vm_normal_page(vma, address, entry);
70738 + if (page) {
70739 + update_hiwater_rss(mm);
70740 + if (PageAnon(page))
70741 + dec_mm_counter_fast(mm, MM_ANONPAGES);
70742 + else
70743 + dec_mm_counter_fast(mm, MM_FILEPAGES);
70744 + page_remove_rmap(page);
70745 + page_cache_release(page);
70746 + }
70747 + }
70748 + pte_unmap_unlock(pte, ptl);
70749 +}
70750 +
70751 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
70752 + *
70753 + * the ptl of the lower mapped page is held on entry and is not released on exit
70754 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70755 + */
70756 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70757 +{
70758 + struct mm_struct *mm = vma->vm_mm;
70759 + unsigned long address_m;
70760 + spinlock_t *ptl_m;
70761 + struct vm_area_struct *vma_m;
70762 + pmd_t *pmd_m;
70763 + pte_t *pte_m, entry_m;
70764 +
70765 + BUG_ON(!page_m || !PageAnon(page_m));
70766 +
70767 + vma_m = pax_find_mirror_vma(vma);
70768 + if (!vma_m)
70769 + return;
70770 +
70771 + BUG_ON(!PageLocked(page_m));
70772 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70773 + address_m = address + SEGMEXEC_TASK_SIZE;
70774 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70775 + pte_m = pte_offset_map(pmd_m, address_m);
70776 + ptl_m = pte_lockptr(mm, pmd_m);
70777 + if (ptl != ptl_m) {
70778 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70779 + if (!pte_none(*pte_m))
70780 + goto out;
70781 + }
70782 +
70783 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70784 + page_cache_get(page_m);
70785 + page_add_anon_rmap(page_m, vma_m, address_m);
70786 + inc_mm_counter_fast(mm, MM_ANONPAGES);
70787 + set_pte_at(mm, address_m, pte_m, entry_m);
70788 + update_mmu_cache(vma_m, address_m, entry_m);
70789 +out:
70790 + if (ptl != ptl_m)
70791 + spin_unlock(ptl_m);
70792 + pte_unmap(pte_m);
70793 + unlock_page(page_m);
70794 +}
70795 +
70796 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70797 +{
70798 + struct mm_struct *mm = vma->vm_mm;
70799 + unsigned long address_m;
70800 + spinlock_t *ptl_m;
70801 + struct vm_area_struct *vma_m;
70802 + pmd_t *pmd_m;
70803 + pte_t *pte_m, entry_m;
70804 +
70805 + BUG_ON(!page_m || PageAnon(page_m));
70806 +
70807 + vma_m = pax_find_mirror_vma(vma);
70808 + if (!vma_m)
70809 + return;
70810 +
70811 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70812 + address_m = address + SEGMEXEC_TASK_SIZE;
70813 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70814 + pte_m = pte_offset_map(pmd_m, address_m);
70815 + ptl_m = pte_lockptr(mm, pmd_m);
70816 + if (ptl != ptl_m) {
70817 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70818 + if (!pte_none(*pte_m))
70819 + goto out;
70820 + }
70821 +
70822 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70823 + page_cache_get(page_m);
70824 + page_add_file_rmap(page_m);
70825 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70826 + set_pte_at(mm, address_m, pte_m, entry_m);
70827 + update_mmu_cache(vma_m, address_m, entry_m);
70828 +out:
70829 + if (ptl != ptl_m)
70830 + spin_unlock(ptl_m);
70831 + pte_unmap(pte_m);
70832 +}
70833 +
70834 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70835 +{
70836 + struct mm_struct *mm = vma->vm_mm;
70837 + unsigned long address_m;
70838 + spinlock_t *ptl_m;
70839 + struct vm_area_struct *vma_m;
70840 + pmd_t *pmd_m;
70841 + pte_t *pte_m, entry_m;
70842 +
70843 + vma_m = pax_find_mirror_vma(vma);
70844 + if (!vma_m)
70845 + return;
70846 +
70847 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70848 + address_m = address + SEGMEXEC_TASK_SIZE;
70849 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70850 + pte_m = pte_offset_map(pmd_m, address_m);
70851 + ptl_m = pte_lockptr(mm, pmd_m);
70852 + if (ptl != ptl_m) {
70853 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70854 + if (!pte_none(*pte_m))
70855 + goto out;
70856 + }
70857 +
70858 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70859 + set_pte_at(mm, address_m, pte_m, entry_m);
70860 +out:
70861 + if (ptl != ptl_m)
70862 + spin_unlock(ptl_m);
70863 + pte_unmap(pte_m);
70864 +}
70865 +
70866 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70867 +{
70868 + struct page *page_m;
70869 + pte_t entry;
70870 +
70871 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70872 + goto out;
70873 +
70874 + entry = *pte;
70875 + page_m = vm_normal_page(vma, address, entry);
70876 + if (!page_m)
70877 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70878 + else if (PageAnon(page_m)) {
70879 + if (pax_find_mirror_vma(vma)) {
70880 + pte_unmap_unlock(pte, ptl);
70881 + lock_page(page_m);
70882 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70883 + if (pte_same(entry, *pte))
70884 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70885 + else
70886 + unlock_page(page_m);
70887 + }
70888 + } else
70889 + pax_mirror_file_pte(vma, address, page_m, ptl);
70890 +
70891 +out:
70892 + pte_unmap_unlock(pte, ptl);
70893 +}
70894 +#endif
70895 +
70896 /*
70897 * This routine handles present pages, when users try to write
70898 * to a shared page. It is done by copying the page to a new address
70899 @@ -2656,6 +2849,12 @@ gotten:
70900 */
70901 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70902 if (likely(pte_same(*page_table, orig_pte))) {
70903 +
70904 +#ifdef CONFIG_PAX_SEGMEXEC
70905 + if (pax_find_mirror_vma(vma))
70906 + BUG_ON(!trylock_page(new_page));
70907 +#endif
70908 +
70909 if (old_page) {
70910 if (!PageAnon(old_page)) {
70911 dec_mm_counter_fast(mm, MM_FILEPAGES);
70912 @@ -2707,6 +2906,10 @@ gotten:
70913 page_remove_rmap(old_page);
70914 }
70915
70916 +#ifdef CONFIG_PAX_SEGMEXEC
70917 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70918 +#endif
70919 +
70920 /* Free the old page.. */
70921 new_page = old_page;
70922 ret |= VM_FAULT_WRITE;
70923 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70924 swap_free(entry);
70925 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70926 try_to_free_swap(page);
70927 +
70928 +#ifdef CONFIG_PAX_SEGMEXEC
70929 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70930 +#endif
70931 +
70932 unlock_page(page);
70933 if (swapcache) {
70934 /*
70935 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70936
70937 /* No need to invalidate - it was non-present before */
70938 update_mmu_cache(vma, address, page_table);
70939 +
70940 +#ifdef CONFIG_PAX_SEGMEXEC
70941 + pax_mirror_anon_pte(vma, address, page, ptl);
70942 +#endif
70943 +
70944 unlock:
70945 pte_unmap_unlock(page_table, ptl);
70946 out:
70947 @@ -3028,40 +3241,6 @@ out_release:
70948 }
70949
70950 /*
70951 - * This is like a special single-page "expand_{down|up}wards()",
70952 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70953 - * doesn't hit another vma.
70954 - */
70955 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70956 -{
70957 - address &= PAGE_MASK;
70958 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70959 - struct vm_area_struct *prev = vma->vm_prev;
70960 -
70961 - /*
70962 - * Is there a mapping abutting this one below?
70963 - *
70964 - * That's only ok if it's the same stack mapping
70965 - * that has gotten split..
70966 - */
70967 - if (prev && prev->vm_end == address)
70968 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70969 -
70970 - expand_downwards(vma, address - PAGE_SIZE);
70971 - }
70972 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70973 - struct vm_area_struct *next = vma->vm_next;
70974 -
70975 - /* As VM_GROWSDOWN but s/below/above/ */
70976 - if (next && next->vm_start == address + PAGE_SIZE)
70977 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70978 -
70979 - expand_upwards(vma, address + PAGE_SIZE);
70980 - }
70981 - return 0;
70982 -}
70983 -
70984 -/*
70985 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70986 * but allow concurrent faults), and pte mapped but not yet locked.
70987 * We return with mmap_sem still held, but pte unmapped and unlocked.
70988 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70989 unsigned long address, pte_t *page_table, pmd_t *pmd,
70990 unsigned int flags)
70991 {
70992 - struct page *page;
70993 + struct page *page = NULL;
70994 spinlock_t *ptl;
70995 pte_t entry;
70996
70997 - pte_unmap(page_table);
70998 -
70999 - /* Check if we need to add a guard page to the stack */
71000 - if (check_stack_guard_page(vma, address) < 0)
71001 - return VM_FAULT_SIGBUS;
71002 -
71003 - /* Use the zero-page for reads */
71004 if (!(flags & FAULT_FLAG_WRITE)) {
71005 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71006 vma->vm_page_prot));
71007 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71008 + ptl = pte_lockptr(mm, pmd);
71009 + spin_lock(ptl);
71010 if (!pte_none(*page_table))
71011 goto unlock;
71012 goto setpte;
71013 }
71014
71015 /* Allocate our own private page. */
71016 + pte_unmap(page_table);
71017 +
71018 if (unlikely(anon_vma_prepare(vma)))
71019 goto oom;
71020 page = alloc_zeroed_user_highpage_movable(vma, address);
71021 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71022 if (!pte_none(*page_table))
71023 goto release;
71024
71025 +#ifdef CONFIG_PAX_SEGMEXEC
71026 + if (pax_find_mirror_vma(vma))
71027 + BUG_ON(!trylock_page(page));
71028 +#endif
71029 +
71030 inc_mm_counter_fast(mm, MM_ANONPAGES);
71031 page_add_new_anon_rmap(page, vma, address);
71032 setpte:
71033 @@ -3116,6 +3296,12 @@ setpte:
71034
71035 /* No need to invalidate - it was non-present before */
71036 update_mmu_cache(vma, address, page_table);
71037 +
71038 +#ifdef CONFIG_PAX_SEGMEXEC
71039 + if (page)
71040 + pax_mirror_anon_pte(vma, address, page, ptl);
71041 +#endif
71042 +
71043 unlock:
71044 pte_unmap_unlock(page_table, ptl);
71045 return 0;
71046 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71047 */
71048 /* Only go through if we didn't race with anybody else... */
71049 if (likely(pte_same(*page_table, orig_pte))) {
71050 +
71051 +#ifdef CONFIG_PAX_SEGMEXEC
71052 + if (anon && pax_find_mirror_vma(vma))
71053 + BUG_ON(!trylock_page(page));
71054 +#endif
71055 +
71056 flush_icache_page(vma, page);
71057 entry = mk_pte(page, vma->vm_page_prot);
71058 if (flags & FAULT_FLAG_WRITE)
71059 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71060
71061 /* no need to invalidate: a not-present page won't be cached */
71062 update_mmu_cache(vma, address, page_table);
71063 +
71064 +#ifdef CONFIG_PAX_SEGMEXEC
71065 + if (anon)
71066 + pax_mirror_anon_pte(vma, address, page, ptl);
71067 + else
71068 + pax_mirror_file_pte(vma, address, page, ptl);
71069 +#endif
71070 +
71071 } else {
71072 if (cow_page)
71073 mem_cgroup_uncharge_page(cow_page);
71074 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
71075 if (flags & FAULT_FLAG_WRITE)
71076 flush_tlb_fix_spurious_fault(vma, address);
71077 }
71078 +
71079 +#ifdef CONFIG_PAX_SEGMEXEC
71080 + pax_mirror_pte(vma, address, pte, pmd, ptl);
71081 + return 0;
71082 +#endif
71083 +
71084 unlock:
71085 pte_unmap_unlock(pte, ptl);
71086 return 0;
71087 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71088 pmd_t *pmd;
71089 pte_t *pte;
71090
71091 +#ifdef CONFIG_PAX_SEGMEXEC
71092 + struct vm_area_struct *vma_m;
71093 +#endif
71094 +
71095 __set_current_state(TASK_RUNNING);
71096
71097 count_vm_event(PGFAULT);
71098 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71099 if (unlikely(is_vm_hugetlb_page(vma)))
71100 return hugetlb_fault(mm, vma, address, flags);
71101
71102 +#ifdef CONFIG_PAX_SEGMEXEC
71103 + vma_m = pax_find_mirror_vma(vma);
71104 + if (vma_m) {
71105 + unsigned long address_m;
71106 + pgd_t *pgd_m;
71107 + pud_t *pud_m;
71108 + pmd_t *pmd_m;
71109 +
71110 + if (vma->vm_start > vma_m->vm_start) {
71111 + address_m = address;
71112 + address -= SEGMEXEC_TASK_SIZE;
71113 + vma = vma_m;
71114 + } else
71115 + address_m = address + SEGMEXEC_TASK_SIZE;
71116 +
71117 + pgd_m = pgd_offset(mm, address_m);
71118 + pud_m = pud_alloc(mm, pgd_m, address_m);
71119 + if (!pud_m)
71120 + return VM_FAULT_OOM;
71121 + pmd_m = pmd_alloc(mm, pud_m, address_m);
71122 + if (!pmd_m)
71123 + return VM_FAULT_OOM;
71124 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71125 + return VM_FAULT_OOM;
71126 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71127 + }
71128 +#endif
71129 +
71130 pgd = pgd_offset(mm, address);
71131 pud = pud_alloc(mm, pgd, address);
71132 if (!pud)
71133 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71134 * run pte_offset_map on the pmd, if an huge pmd could
71135 * materialize from under us from a different thread.
71136 */
71137 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71138 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71139 return VM_FAULT_OOM;
71140 /* if an huge pmd materialized from under us just retry later */
71141 if (unlikely(pmd_trans_huge(*pmd)))
71142 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
71143 gate_vma.vm_start = FIXADDR_USER_START;
71144 gate_vma.vm_end = FIXADDR_USER_END;
71145 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71146 - gate_vma.vm_page_prot = __P101;
71147 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71148 /*
71149 * Make sure the vDSO gets into every core dump.
71150 * Dumping its contents makes post-mortem fully interpretable later
71151 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71152 index 2775fd0..f2b1c49 100644
71153 --- a/mm/mempolicy.c
71154 +++ b/mm/mempolicy.c
71155 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71156 unsigned long vmstart;
71157 unsigned long vmend;
71158
71159 +#ifdef CONFIG_PAX_SEGMEXEC
71160 + struct vm_area_struct *vma_m;
71161 +#endif
71162 +
71163 vma = find_vma_prev(mm, start, &prev);
71164 if (!vma || vma->vm_start > start)
71165 return -EFAULT;
71166 @@ -678,6 +682,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71167 err = policy_vma(vma, new_pol);
71168 if (err)
71169 goto out;
71170 +
71171 +#ifdef CONFIG_PAX_SEGMEXEC
71172 + vma_m = pax_find_mirror_vma(vma);
71173 + if (vma_m) {
71174 + err = policy_vma(vma_m, new_pol);
71175 + if (err)
71176 + goto out;
71177 + }
71178 +#endif
71179 +
71180 }
71181
71182 out:
71183 @@ -1111,6 +1125,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71184
71185 if (end < start)
71186 return -EINVAL;
71187 +
71188 +#ifdef CONFIG_PAX_SEGMEXEC
71189 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71190 + if (end > SEGMEXEC_TASK_SIZE)
71191 + return -EINVAL;
71192 + } else
71193 +#endif
71194 +
71195 + if (end > TASK_SIZE)
71196 + return -EINVAL;
71197 +
71198 if (end == start)
71199 return 0;
71200
71201 @@ -1329,6 +1354,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71202 if (!mm)
71203 goto out;
71204
71205 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71206 + if (mm != current->mm &&
71207 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71208 + err = -EPERM;
71209 + goto out;
71210 + }
71211 +#endif
71212 +
71213 /*
71214 * Check if this process has the right to modify the specified
71215 * process. The right exists if the process has administrative
71216 @@ -1338,8 +1371,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71217 rcu_read_lock();
71218 tcred = __task_cred(task);
71219 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71220 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71221 - !capable(CAP_SYS_NICE)) {
71222 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71223 rcu_read_unlock();
71224 err = -EPERM;
71225 goto out;
71226 diff --git a/mm/migrate.c b/mm/migrate.c
71227 index 14d0a6a..0360908 100644
71228 --- a/mm/migrate.c
71229 +++ b/mm/migrate.c
71230 @@ -866,9 +866,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
71231
71232 if (anon_vma)
71233 put_anon_vma(anon_vma);
71234 -out:
71235 unlock_page(hpage);
71236
71237 +out:
71238 if (rc != -EAGAIN) {
71239 list_del(&hpage->lru);
71240 put_page(hpage);
71241 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
71242 unsigned long chunk_start;
71243 int err;
71244
71245 + pax_track_stack();
71246 +
71247 task_nodes = cpuset_mems_allowed(task);
71248
71249 err = -ENOMEM;
71250 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71251 if (!mm)
71252 return -EINVAL;
71253
71254 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71255 + if (mm != current->mm &&
71256 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71257 + err = -EPERM;
71258 + goto out;
71259 + }
71260 +#endif
71261 +
71262 /*
71263 * Check if this process has the right to modify the specified
71264 * process. The right exists if the process has administrative
71265 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71266 rcu_read_lock();
71267 tcred = __task_cred(task);
71268 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71269 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71270 - !capable(CAP_SYS_NICE)) {
71271 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71272 rcu_read_unlock();
71273 err = -EPERM;
71274 goto out;
71275 diff --git a/mm/mlock.c b/mm/mlock.c
71276 index 048260c..57f4a4e 100644
71277 --- a/mm/mlock.c
71278 +++ b/mm/mlock.c
71279 @@ -13,6 +13,7 @@
71280 #include <linux/pagemap.h>
71281 #include <linux/mempolicy.h>
71282 #include <linux/syscalls.h>
71283 +#include <linux/security.h>
71284 #include <linux/sched.h>
71285 #include <linux/module.h>
71286 #include <linux/rmap.h>
71287 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71288 return -EINVAL;
71289 if (end == start)
71290 return 0;
71291 + if (end > TASK_SIZE)
71292 + return -EINVAL;
71293 +
71294 vma = find_vma_prev(current->mm, start, &prev);
71295 if (!vma || vma->vm_start > start)
71296 return -ENOMEM;
71297 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71298 for (nstart = start ; ; ) {
71299 vm_flags_t newflags;
71300
71301 +#ifdef CONFIG_PAX_SEGMEXEC
71302 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71303 + break;
71304 +#endif
71305 +
71306 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71307
71308 newflags = vma->vm_flags | VM_LOCKED;
71309 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71310 lock_limit >>= PAGE_SHIFT;
71311
71312 /* check against resource limits */
71313 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71314 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71315 error = do_mlock(start, len, 1);
71316 up_write(&current->mm->mmap_sem);
71317 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71318 static int do_mlockall(int flags)
71319 {
71320 struct vm_area_struct * vma, * prev = NULL;
71321 - unsigned int def_flags = 0;
71322
71323 if (flags & MCL_FUTURE)
71324 - def_flags = VM_LOCKED;
71325 - current->mm->def_flags = def_flags;
71326 + current->mm->def_flags |= VM_LOCKED;
71327 + else
71328 + current->mm->def_flags &= ~VM_LOCKED;
71329 if (flags == MCL_FUTURE)
71330 goto out;
71331
71332 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71333 vm_flags_t newflags;
71334
71335 +#ifdef CONFIG_PAX_SEGMEXEC
71336 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71337 + break;
71338 +#endif
71339 +
71340 + BUG_ON(vma->vm_end > TASK_SIZE);
71341 newflags = vma->vm_flags | VM_LOCKED;
71342 if (!(flags & MCL_CURRENT))
71343 newflags &= ~VM_LOCKED;
71344 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71345 lock_limit >>= PAGE_SHIFT;
71346
71347 ret = -ENOMEM;
71348 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71349 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71350 capable(CAP_IPC_LOCK))
71351 ret = do_mlockall(flags);
71352 diff --git a/mm/mmap.c b/mm/mmap.c
71353 index a65efd4..17d61ff 100644
71354 --- a/mm/mmap.c
71355 +++ b/mm/mmap.c
71356 @@ -46,6 +46,16 @@
71357 #define arch_rebalance_pgtables(addr, len) (addr)
71358 #endif
71359
71360 +static inline void verify_mm_writelocked(struct mm_struct *mm)
71361 +{
71362 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71363 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71364 + up_read(&mm->mmap_sem);
71365 + BUG();
71366 + }
71367 +#endif
71368 +}
71369 +
71370 static void unmap_region(struct mm_struct *mm,
71371 struct vm_area_struct *vma, struct vm_area_struct *prev,
71372 unsigned long start, unsigned long end);
71373 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71374 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71375 *
71376 */
71377 -pgprot_t protection_map[16] = {
71378 +pgprot_t protection_map[16] __read_only = {
71379 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71380 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71381 };
71382
71383 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
71384 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71385 {
71386 - return __pgprot(pgprot_val(protection_map[vm_flags &
71387 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71388 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71389 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71390 +
71391 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71392 + if (!(__supported_pte_mask & _PAGE_NX) &&
71393 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71394 + (vm_flags & (VM_READ | VM_WRITE)))
71395 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71396 +#endif
71397 +
71398 + return prot;
71399 }
71400 EXPORT_SYMBOL(vm_get_page_prot);
71401
71402 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71403 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71404 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71405 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71406 /*
71407 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71408 * other variables. It can be updated by several CPUs frequently.
71409 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71410 struct vm_area_struct *next = vma->vm_next;
71411
71412 might_sleep();
71413 + BUG_ON(vma->vm_mirror);
71414 if (vma->vm_ops && vma->vm_ops->close)
71415 vma->vm_ops->close(vma);
71416 if (vma->vm_file) {
71417 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71418 * not page aligned -Ram Gupta
71419 */
71420 rlim = rlimit(RLIMIT_DATA);
71421 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71422 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71423 (mm->end_data - mm->start_data) > rlim)
71424 goto out;
71425 @@ -689,6 +711,12 @@ static int
71426 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71427 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71428 {
71429 +
71430 +#ifdef CONFIG_PAX_SEGMEXEC
71431 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71432 + return 0;
71433 +#endif
71434 +
71435 if (is_mergeable_vma(vma, file, vm_flags) &&
71436 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71437 if (vma->vm_pgoff == vm_pgoff)
71438 @@ -708,6 +736,12 @@ static int
71439 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71440 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71441 {
71442 +
71443 +#ifdef CONFIG_PAX_SEGMEXEC
71444 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71445 + return 0;
71446 +#endif
71447 +
71448 if (is_mergeable_vma(vma, file, vm_flags) &&
71449 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71450 pgoff_t vm_pglen;
71451 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71452 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71453 struct vm_area_struct *prev, unsigned long addr,
71454 unsigned long end, unsigned long vm_flags,
71455 - struct anon_vma *anon_vma, struct file *file,
71456 + struct anon_vma *anon_vma, struct file *file,
71457 pgoff_t pgoff, struct mempolicy *policy)
71458 {
71459 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71460 struct vm_area_struct *area, *next;
71461 int err;
71462
71463 +#ifdef CONFIG_PAX_SEGMEXEC
71464 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71465 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71466 +
71467 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71468 +#endif
71469 +
71470 /*
71471 * We later require that vma->vm_flags == vm_flags,
71472 * so this tests vma->vm_flags & VM_SPECIAL, too.
71473 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71474 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71475 next = next->vm_next;
71476
71477 +#ifdef CONFIG_PAX_SEGMEXEC
71478 + if (prev)
71479 + prev_m = pax_find_mirror_vma(prev);
71480 + if (area)
71481 + area_m = pax_find_mirror_vma(area);
71482 + if (next)
71483 + next_m = pax_find_mirror_vma(next);
71484 +#endif
71485 +
71486 /*
71487 * Can it merge with the predecessor?
71488 */
71489 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71490 /* cases 1, 6 */
71491 err = vma_adjust(prev, prev->vm_start,
71492 next->vm_end, prev->vm_pgoff, NULL);
71493 - } else /* cases 2, 5, 7 */
71494 +
71495 +#ifdef CONFIG_PAX_SEGMEXEC
71496 + if (!err && prev_m)
71497 + err = vma_adjust(prev_m, prev_m->vm_start,
71498 + next_m->vm_end, prev_m->vm_pgoff, NULL);
71499 +#endif
71500 +
71501 + } else { /* cases 2, 5, 7 */
71502 err = vma_adjust(prev, prev->vm_start,
71503 end, prev->vm_pgoff, NULL);
71504 +
71505 +#ifdef CONFIG_PAX_SEGMEXEC
71506 + if (!err && prev_m)
71507 + err = vma_adjust(prev_m, prev_m->vm_start,
71508 + end_m, prev_m->vm_pgoff, NULL);
71509 +#endif
71510 +
71511 + }
71512 if (err)
71513 return NULL;
71514 khugepaged_enter_vma_merge(prev);
71515 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71516 mpol_equal(policy, vma_policy(next)) &&
71517 can_vma_merge_before(next, vm_flags,
71518 anon_vma, file, pgoff+pglen)) {
71519 - if (prev && addr < prev->vm_end) /* case 4 */
71520 + if (prev && addr < prev->vm_end) { /* case 4 */
71521 err = vma_adjust(prev, prev->vm_start,
71522 addr, prev->vm_pgoff, NULL);
71523 - else /* cases 3, 8 */
71524 +
71525 +#ifdef CONFIG_PAX_SEGMEXEC
71526 + if (!err && prev_m)
71527 + err = vma_adjust(prev_m, prev_m->vm_start,
71528 + addr_m, prev_m->vm_pgoff, NULL);
71529 +#endif
71530 +
71531 + } else { /* cases 3, 8 */
71532 err = vma_adjust(area, addr, next->vm_end,
71533 next->vm_pgoff - pglen, NULL);
71534 +
71535 +#ifdef CONFIG_PAX_SEGMEXEC
71536 + if (!err && area_m)
71537 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
71538 + next_m->vm_pgoff - pglen, NULL);
71539 +#endif
71540 +
71541 + }
71542 if (err)
71543 return NULL;
71544 khugepaged_enter_vma_merge(area);
71545 @@ -921,14 +1001,11 @@ none:
71546 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71547 struct file *file, long pages)
71548 {
71549 - const unsigned long stack_flags
71550 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71551 -
71552 if (file) {
71553 mm->shared_vm += pages;
71554 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71555 mm->exec_vm += pages;
71556 - } else if (flags & stack_flags)
71557 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71558 mm->stack_vm += pages;
71559 if (flags & (VM_RESERVED|VM_IO))
71560 mm->reserved_vm += pages;
71561 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71562 * (the exception is when the underlying filesystem is noexec
71563 * mounted, in which case we dont add PROT_EXEC.)
71564 */
71565 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71566 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71567 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71568 prot |= PROT_EXEC;
71569
71570 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71571 /* Obtain the address to map to. we verify (or select) it and ensure
71572 * that it represents a valid section of the address space.
71573 */
71574 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
71575 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71576 if (addr & ~PAGE_MASK)
71577 return addr;
71578
71579 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71580 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71581 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71582
71583 +#ifdef CONFIG_PAX_MPROTECT
71584 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71585 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71586 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71587 + gr_log_rwxmmap(file);
71588 +
71589 +#ifdef CONFIG_PAX_EMUPLT
71590 + vm_flags &= ~VM_EXEC;
71591 +#else
71592 + return -EPERM;
71593 +#endif
71594 +
71595 + }
71596 +
71597 + if (!(vm_flags & VM_EXEC))
71598 + vm_flags &= ~VM_MAYEXEC;
71599 +#else
71600 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71601 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71602 +#endif
71603 + else
71604 + vm_flags &= ~VM_MAYWRITE;
71605 + }
71606 +#endif
71607 +
71608 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71609 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71610 + vm_flags &= ~VM_PAGEEXEC;
71611 +#endif
71612 +
71613 if (flags & MAP_LOCKED)
71614 if (!can_do_mlock())
71615 return -EPERM;
71616 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71617 locked += mm->locked_vm;
71618 lock_limit = rlimit(RLIMIT_MEMLOCK);
71619 lock_limit >>= PAGE_SHIFT;
71620 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71621 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71622 return -EAGAIN;
71623 }
71624 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71625 if (error)
71626 return error;
71627
71628 + if (!gr_acl_handle_mmap(file, prot))
71629 + return -EACCES;
71630 +
71631 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71632 }
71633 EXPORT_SYMBOL(do_mmap_pgoff);
71634 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71635 vm_flags_t vm_flags = vma->vm_flags;
71636
71637 /* If it was private or non-writable, the write bit is already clear */
71638 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71639 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71640 return 0;
71641
71642 /* The backer wishes to know when pages are first written to? */
71643 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71644 unsigned long charged = 0;
71645 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71646
71647 +#ifdef CONFIG_PAX_SEGMEXEC
71648 + struct vm_area_struct *vma_m = NULL;
71649 +#endif
71650 +
71651 + /*
71652 + * mm->mmap_sem is required to protect against another thread
71653 + * changing the mappings in case we sleep.
71654 + */
71655 + verify_mm_writelocked(mm);
71656 +
71657 /* Clear old maps */
71658 error = -ENOMEM;
71659 -munmap_back:
71660 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71661 if (vma && vma->vm_start < addr + len) {
71662 if (do_munmap(mm, addr, len))
71663 return -ENOMEM;
71664 - goto munmap_back;
71665 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71666 + BUG_ON(vma && vma->vm_start < addr + len);
71667 }
71668
71669 /* Check against address space limit. */
71670 @@ -1258,6 +1379,16 @@ munmap_back:
71671 goto unacct_error;
71672 }
71673
71674 +#ifdef CONFIG_PAX_SEGMEXEC
71675 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71676 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71677 + if (!vma_m) {
71678 + error = -ENOMEM;
71679 + goto free_vma;
71680 + }
71681 + }
71682 +#endif
71683 +
71684 vma->vm_mm = mm;
71685 vma->vm_start = addr;
71686 vma->vm_end = addr + len;
71687 @@ -1281,6 +1412,19 @@ munmap_back:
71688 error = file->f_op->mmap(file, vma);
71689 if (error)
71690 goto unmap_and_free_vma;
71691 +
71692 +#ifdef CONFIG_PAX_SEGMEXEC
71693 + if (vma_m && (vm_flags & VM_EXECUTABLE))
71694 + added_exe_file_vma(mm);
71695 +#endif
71696 +
71697 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71698 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71699 + vma->vm_flags |= VM_PAGEEXEC;
71700 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71701 + }
71702 +#endif
71703 +
71704 if (vm_flags & VM_EXECUTABLE)
71705 added_exe_file_vma(mm);
71706
71707 @@ -1316,6 +1460,11 @@ munmap_back:
71708 vma_link(mm, vma, prev, rb_link, rb_parent);
71709 file = vma->vm_file;
71710
71711 +#ifdef CONFIG_PAX_SEGMEXEC
71712 + if (vma_m)
71713 + BUG_ON(pax_mirror_vma(vma_m, vma));
71714 +#endif
71715 +
71716 /* Once vma denies write, undo our temporary denial count */
71717 if (correct_wcount)
71718 atomic_inc(&inode->i_writecount);
71719 @@ -1324,6 +1473,7 @@ out:
71720
71721 mm->total_vm += len >> PAGE_SHIFT;
71722 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71723 + track_exec_limit(mm, addr, addr + len, vm_flags);
71724 if (vm_flags & VM_LOCKED) {
71725 if (!mlock_vma_pages_range(vma, addr, addr + len))
71726 mm->locked_vm += (len >> PAGE_SHIFT);
71727 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
71728 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71729 charged = 0;
71730 free_vma:
71731 +
71732 +#ifdef CONFIG_PAX_SEGMEXEC
71733 + if (vma_m)
71734 + kmem_cache_free(vm_area_cachep, vma_m);
71735 +#endif
71736 +
71737 kmem_cache_free(vm_area_cachep, vma);
71738 unacct_error:
71739 if (charged)
71740 @@ -1348,6 +1504,44 @@ unacct_error:
71741 return error;
71742 }
71743
71744 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71745 +{
71746 + if (!vma) {
71747 +#ifdef CONFIG_STACK_GROWSUP
71748 + if (addr > sysctl_heap_stack_gap)
71749 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71750 + else
71751 + vma = find_vma(current->mm, 0);
71752 + if (vma && (vma->vm_flags & VM_GROWSUP))
71753 + return false;
71754 +#endif
71755 + return true;
71756 + }
71757 +
71758 + if (addr + len > vma->vm_start)
71759 + return false;
71760 +
71761 + if (vma->vm_flags & VM_GROWSDOWN)
71762 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71763 +#ifdef CONFIG_STACK_GROWSUP
71764 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71765 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71766 +#endif
71767 +
71768 + return true;
71769 +}
71770 +
71771 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71772 +{
71773 + if (vma->vm_start < len)
71774 + return -ENOMEM;
71775 + if (!(vma->vm_flags & VM_GROWSDOWN))
71776 + return vma->vm_start - len;
71777 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
71778 + return vma->vm_start - len - sysctl_heap_stack_gap;
71779 + return -ENOMEM;
71780 +}
71781 +
71782 /* Get an address range which is currently unmapped.
71783 * For shmat() with addr=0.
71784 *
71785 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71786 if (flags & MAP_FIXED)
71787 return addr;
71788
71789 +#ifdef CONFIG_PAX_RANDMMAP
71790 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71791 +#endif
71792 +
71793 if (addr) {
71794 addr = PAGE_ALIGN(addr);
71795 - vma = find_vma(mm, addr);
71796 - if (TASK_SIZE - len >= addr &&
71797 - (!vma || addr + len <= vma->vm_start))
71798 - return addr;
71799 + if (TASK_SIZE - len >= addr) {
71800 + vma = find_vma(mm, addr);
71801 + if (check_heap_stack_gap(vma, addr, len))
71802 + return addr;
71803 + }
71804 }
71805 if (len > mm->cached_hole_size) {
71806 - start_addr = addr = mm->free_area_cache;
71807 + start_addr = addr = mm->free_area_cache;
71808 } else {
71809 - start_addr = addr = TASK_UNMAPPED_BASE;
71810 - mm->cached_hole_size = 0;
71811 + start_addr = addr = mm->mmap_base;
71812 + mm->cached_hole_size = 0;
71813 }
71814
71815 full_search:
71816 @@ -1396,34 +1595,40 @@ full_search:
71817 * Start a new search - just in case we missed
71818 * some holes.
71819 */
71820 - if (start_addr != TASK_UNMAPPED_BASE) {
71821 - addr = TASK_UNMAPPED_BASE;
71822 - start_addr = addr;
71823 + if (start_addr != mm->mmap_base) {
71824 + start_addr = addr = mm->mmap_base;
71825 mm->cached_hole_size = 0;
71826 goto full_search;
71827 }
71828 return -ENOMEM;
71829 }
71830 - if (!vma || addr + len <= vma->vm_start) {
71831 - /*
71832 - * Remember the place where we stopped the search:
71833 - */
71834 - mm->free_area_cache = addr + len;
71835 - return addr;
71836 - }
71837 + if (check_heap_stack_gap(vma, addr, len))
71838 + break;
71839 if (addr + mm->cached_hole_size < vma->vm_start)
71840 mm->cached_hole_size = vma->vm_start - addr;
71841 addr = vma->vm_end;
71842 }
71843 +
71844 + /*
71845 + * Remember the place where we stopped the search:
71846 + */
71847 + mm->free_area_cache = addr + len;
71848 + return addr;
71849 }
71850 #endif
71851
71852 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71853 {
71854 +
71855 +#ifdef CONFIG_PAX_SEGMEXEC
71856 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71857 + return;
71858 +#endif
71859 +
71860 /*
71861 * Is this a new hole at the lowest possible address?
71862 */
71863 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71864 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71865 mm->free_area_cache = addr;
71866 mm->cached_hole_size = ~0UL;
71867 }
71868 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71869 {
71870 struct vm_area_struct *vma;
71871 struct mm_struct *mm = current->mm;
71872 - unsigned long addr = addr0;
71873 + unsigned long base = mm->mmap_base, addr = addr0;
71874
71875 /* requested length too big for entire address space */
71876 if (len > TASK_SIZE)
71877 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71878 if (flags & MAP_FIXED)
71879 return addr;
71880
71881 +#ifdef CONFIG_PAX_RANDMMAP
71882 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71883 +#endif
71884 +
71885 /* requesting a specific address */
71886 if (addr) {
71887 addr = PAGE_ALIGN(addr);
71888 - vma = find_vma(mm, addr);
71889 - if (TASK_SIZE - len >= addr &&
71890 - (!vma || addr + len <= vma->vm_start))
71891 - return addr;
71892 + if (TASK_SIZE - len >= addr) {
71893 + vma = find_vma(mm, addr);
71894 + if (check_heap_stack_gap(vma, addr, len))
71895 + return addr;
71896 + }
71897 }
71898
71899 /* check if free_area_cache is useful for us */
71900 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71901 /* make sure it can fit in the remaining address space */
71902 if (addr > len) {
71903 vma = find_vma(mm, addr-len);
71904 - if (!vma || addr <= vma->vm_start)
71905 + if (check_heap_stack_gap(vma, addr - len, len))
71906 /* remember the address as a hint for next time */
71907 return (mm->free_area_cache = addr-len);
71908 }
71909 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71910 * return with success:
71911 */
71912 vma = find_vma(mm, addr);
71913 - if (!vma || addr+len <= vma->vm_start)
71914 + if (check_heap_stack_gap(vma, addr, len))
71915 /* remember the address as a hint for next time */
71916 return (mm->free_area_cache = addr);
71917
71918 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71919 mm->cached_hole_size = vma->vm_start - addr;
71920
71921 /* try just below the current vma->vm_start */
71922 - addr = vma->vm_start-len;
71923 - } while (len < vma->vm_start);
71924 + addr = skip_heap_stack_gap(vma, len);
71925 + } while (!IS_ERR_VALUE(addr));
71926
71927 bottomup:
71928 /*
71929 @@ -1507,13 +1717,21 @@ bottomup:
71930 * can happen with large stack limits and large mmap()
71931 * allocations.
71932 */
71933 + mm->mmap_base = TASK_UNMAPPED_BASE;
71934 +
71935 +#ifdef CONFIG_PAX_RANDMMAP
71936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71937 + mm->mmap_base += mm->delta_mmap;
71938 +#endif
71939 +
71940 + mm->free_area_cache = mm->mmap_base;
71941 mm->cached_hole_size = ~0UL;
71942 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71943 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71944 /*
71945 * Restore the topdown base:
71946 */
71947 - mm->free_area_cache = mm->mmap_base;
71948 + mm->mmap_base = base;
71949 + mm->free_area_cache = base;
71950 mm->cached_hole_size = ~0UL;
71951
71952 return addr;
71953 @@ -1522,6 +1740,12 @@ bottomup:
71954
71955 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71956 {
71957 +
71958 +#ifdef CONFIG_PAX_SEGMEXEC
71959 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71960 + return;
71961 +#endif
71962 +
71963 /*
71964 * Is this a new hole at the highest possible address?
71965 */
71966 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71967 mm->free_area_cache = addr;
71968
71969 /* dont allow allocations above current base */
71970 - if (mm->free_area_cache > mm->mmap_base)
71971 + if (mm->free_area_cache > mm->mmap_base) {
71972 mm->free_area_cache = mm->mmap_base;
71973 + mm->cached_hole_size = ~0UL;
71974 + }
71975 }
71976
71977 unsigned long
71978 @@ -1638,6 +1864,28 @@ out:
71979 return prev ? prev->vm_next : vma;
71980 }
71981
71982 +#ifdef CONFIG_PAX_SEGMEXEC
71983 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71984 +{
71985 + struct vm_area_struct *vma_m;
71986 +
71987 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71988 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71989 + BUG_ON(vma->vm_mirror);
71990 + return NULL;
71991 + }
71992 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71993 + vma_m = vma->vm_mirror;
71994 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71995 + BUG_ON(vma->vm_file != vma_m->vm_file);
71996 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71997 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71998 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71999 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
72000 + return vma_m;
72001 +}
72002 +#endif
72003 +
72004 /*
72005 * Verify that the stack growth is acceptable and
72006 * update accounting. This is shared with both the
72007 @@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72008 return -ENOMEM;
72009
72010 /* Stack limit test */
72011 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
72012 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72013 return -ENOMEM;
72014
72015 @@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72016 locked = mm->locked_vm + grow;
72017 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72018 limit >>= PAGE_SHIFT;
72019 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72020 if (locked > limit && !capable(CAP_IPC_LOCK))
72021 return -ENOMEM;
72022 }
72023 @@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72024 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72025 * vma is the last one with address > vma->vm_end. Have to extend vma.
72026 */
72027 +#ifndef CONFIG_IA64
72028 +static
72029 +#endif
72030 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72031 {
72032 int error;
72033 + bool locknext;
72034
72035 if (!(vma->vm_flags & VM_GROWSUP))
72036 return -EFAULT;
72037
72038 + /* Also guard against wrapping around to address 0. */
72039 + if (address < PAGE_ALIGN(address+1))
72040 + address = PAGE_ALIGN(address+1);
72041 + else
72042 + return -ENOMEM;
72043 +
72044 /*
72045 * We must make sure the anon_vma is allocated
72046 * so that the anon_vma locking is not a noop.
72047 */
72048 if (unlikely(anon_vma_prepare(vma)))
72049 return -ENOMEM;
72050 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
72051 + if (locknext && anon_vma_prepare(vma->vm_next))
72052 + return -ENOMEM;
72053 vma_lock_anon_vma(vma);
72054 + if (locknext)
72055 + vma_lock_anon_vma(vma->vm_next);
72056
72057 /*
72058 * vma->vm_start/vm_end cannot change under us because the caller
72059 * is required to hold the mmap_sem in read mode. We need the
72060 - * anon_vma lock to serialize against concurrent expand_stacks.
72061 - * Also guard against wrapping around to address 0.
72062 + * anon_vma locks to serialize against concurrent expand_stacks
72063 + * and expand_upwards.
72064 */
72065 - if (address < PAGE_ALIGN(address+4))
72066 - address = PAGE_ALIGN(address+4);
72067 - else {
72068 - vma_unlock_anon_vma(vma);
72069 - return -ENOMEM;
72070 - }
72071 error = 0;
72072
72073 /* Somebody else might have raced and expanded it already */
72074 - if (address > vma->vm_end) {
72075 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72076 + error = -ENOMEM;
72077 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72078 unsigned long size, grow;
72079
72080 size = address - vma->vm_start;
72081 @@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72082 }
72083 }
72084 }
72085 + if (locknext)
72086 + vma_unlock_anon_vma(vma->vm_next);
72087 vma_unlock_anon_vma(vma);
72088 khugepaged_enter_vma_merge(vma);
72089 return error;
72090 @@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
72091 unsigned long address)
72092 {
72093 int error;
72094 + bool lockprev = false;
72095 + struct vm_area_struct *prev;
72096
72097 /*
72098 * We must make sure the anon_vma is allocated
72099 @@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
72100 if (error)
72101 return error;
72102
72103 + prev = vma->vm_prev;
72104 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72105 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72106 +#endif
72107 + if (lockprev && anon_vma_prepare(prev))
72108 + return -ENOMEM;
72109 + if (lockprev)
72110 + vma_lock_anon_vma(prev);
72111 +
72112 vma_lock_anon_vma(vma);
72113
72114 /*
72115 @@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
72116 */
72117
72118 /* Somebody else might have raced and expanded it already */
72119 - if (address < vma->vm_start) {
72120 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72121 + error = -ENOMEM;
72122 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72123 unsigned long size, grow;
72124
72125 +#ifdef CONFIG_PAX_SEGMEXEC
72126 + struct vm_area_struct *vma_m;
72127 +
72128 + vma_m = pax_find_mirror_vma(vma);
72129 +#endif
72130 +
72131 size = vma->vm_end - address;
72132 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72133
72134 @@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
72135 if (!error) {
72136 vma->vm_start = address;
72137 vma->vm_pgoff -= grow;
72138 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72139 +
72140 +#ifdef CONFIG_PAX_SEGMEXEC
72141 + if (vma_m) {
72142 + vma_m->vm_start -= grow << PAGE_SHIFT;
72143 + vma_m->vm_pgoff -= grow;
72144 + }
72145 +#endif
72146 +
72147 perf_event_mmap(vma);
72148 }
72149 }
72150 }
72151 vma_unlock_anon_vma(vma);
72152 + if (lockprev)
72153 + vma_unlock_anon_vma(prev);
72154 khugepaged_enter_vma_merge(vma);
72155 return error;
72156 }
72157 @@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72158 do {
72159 long nrpages = vma_pages(vma);
72160
72161 +#ifdef CONFIG_PAX_SEGMEXEC
72162 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72163 + vma = remove_vma(vma);
72164 + continue;
72165 + }
72166 +#endif
72167 +
72168 mm->total_vm -= nrpages;
72169 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72170 vma = remove_vma(vma);
72171 @@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72172 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72173 vma->vm_prev = NULL;
72174 do {
72175 +
72176 +#ifdef CONFIG_PAX_SEGMEXEC
72177 + if (vma->vm_mirror) {
72178 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72179 + vma->vm_mirror->vm_mirror = NULL;
72180 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
72181 + vma->vm_mirror = NULL;
72182 + }
72183 +#endif
72184 +
72185 rb_erase(&vma->vm_rb, &mm->mm_rb);
72186 mm->map_count--;
72187 tail_vma = vma;
72188 @@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72189 struct vm_area_struct *new;
72190 int err = -ENOMEM;
72191
72192 +#ifdef CONFIG_PAX_SEGMEXEC
72193 + struct vm_area_struct *vma_m, *new_m = NULL;
72194 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72195 +#endif
72196 +
72197 if (is_vm_hugetlb_page(vma) && (addr &
72198 ~(huge_page_mask(hstate_vma(vma)))))
72199 return -EINVAL;
72200
72201 +#ifdef CONFIG_PAX_SEGMEXEC
72202 + vma_m = pax_find_mirror_vma(vma);
72203 +#endif
72204 +
72205 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72206 if (!new)
72207 goto out_err;
72208
72209 +#ifdef CONFIG_PAX_SEGMEXEC
72210 + if (vma_m) {
72211 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72212 + if (!new_m) {
72213 + kmem_cache_free(vm_area_cachep, new);
72214 + goto out_err;
72215 + }
72216 + }
72217 +#endif
72218 +
72219 /* most fields are the same, copy all, and then fixup */
72220 *new = *vma;
72221
72222 @@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72223 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72224 }
72225
72226 +#ifdef CONFIG_PAX_SEGMEXEC
72227 + if (vma_m) {
72228 + *new_m = *vma_m;
72229 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
72230 + new_m->vm_mirror = new;
72231 + new->vm_mirror = new_m;
72232 +
72233 + if (new_below)
72234 + new_m->vm_end = addr_m;
72235 + else {
72236 + new_m->vm_start = addr_m;
72237 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72238 + }
72239 + }
72240 +#endif
72241 +
72242 pol = mpol_dup(vma_policy(vma));
72243 if (IS_ERR(pol)) {
72244 err = PTR_ERR(pol);
72245 @@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72246 else
72247 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72248
72249 +#ifdef CONFIG_PAX_SEGMEXEC
72250 + if (!err && vma_m) {
72251 + if (anon_vma_clone(new_m, vma_m))
72252 + goto out_free_mpol;
72253 +
72254 + mpol_get(pol);
72255 + vma_set_policy(new_m, pol);
72256 +
72257 + if (new_m->vm_file) {
72258 + get_file(new_m->vm_file);
72259 + if (vma_m->vm_flags & VM_EXECUTABLE)
72260 + added_exe_file_vma(mm);
72261 + }
72262 +
72263 + if (new_m->vm_ops && new_m->vm_ops->open)
72264 + new_m->vm_ops->open(new_m);
72265 +
72266 + if (new_below)
72267 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72268 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72269 + else
72270 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72271 +
72272 + if (err) {
72273 + if (new_m->vm_ops && new_m->vm_ops->close)
72274 + new_m->vm_ops->close(new_m);
72275 + if (new_m->vm_file) {
72276 + if (vma_m->vm_flags & VM_EXECUTABLE)
72277 + removed_exe_file_vma(mm);
72278 + fput(new_m->vm_file);
72279 + }
72280 + mpol_put(pol);
72281 + }
72282 + }
72283 +#endif
72284 +
72285 /* Success. */
72286 if (!err)
72287 return 0;
72288 @@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72289 removed_exe_file_vma(mm);
72290 fput(new->vm_file);
72291 }
72292 - unlink_anon_vmas(new);
72293 out_free_mpol:
72294 mpol_put(pol);
72295 out_free_vma:
72296 +
72297 +#ifdef CONFIG_PAX_SEGMEXEC
72298 + if (new_m) {
72299 + unlink_anon_vmas(new_m);
72300 + kmem_cache_free(vm_area_cachep, new_m);
72301 + }
72302 +#endif
72303 +
72304 + unlink_anon_vmas(new);
72305 kmem_cache_free(vm_area_cachep, new);
72306 out_err:
72307 return err;
72308 @@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72309 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72310 unsigned long addr, int new_below)
72311 {
72312 +
72313 +#ifdef CONFIG_PAX_SEGMEXEC
72314 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72315 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72316 + if (mm->map_count >= sysctl_max_map_count-1)
72317 + return -ENOMEM;
72318 + } else
72319 +#endif
72320 +
72321 if (mm->map_count >= sysctl_max_map_count)
72322 return -ENOMEM;
72323
72324 @@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72325 * work. This now handles partial unmappings.
72326 * Jeremy Fitzhardinge <jeremy@goop.org>
72327 */
72328 +#ifdef CONFIG_PAX_SEGMEXEC
72329 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72330 {
72331 + int ret = __do_munmap(mm, start, len);
72332 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72333 + return ret;
72334 +
72335 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72336 +}
72337 +
72338 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72339 +#else
72340 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72341 +#endif
72342 +{
72343 unsigned long end;
72344 struct vm_area_struct *vma, *prev, *last;
72345
72346 + /*
72347 + * mm->mmap_sem is required to protect against another thread
72348 + * changing the mappings in case we sleep.
72349 + */
72350 + verify_mm_writelocked(mm);
72351 +
72352 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72353 return -EINVAL;
72354
72355 @@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72356 /* Fix up all other VM information */
72357 remove_vma_list(mm, vma);
72358
72359 + track_exec_limit(mm, start, end, 0UL);
72360 +
72361 return 0;
72362 }
72363
72364 @@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72365
72366 profile_munmap(addr);
72367
72368 +#ifdef CONFIG_PAX_SEGMEXEC
72369 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72370 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
72371 + return -EINVAL;
72372 +#endif
72373 +
72374 down_write(&mm->mmap_sem);
72375 ret = do_munmap(mm, addr, len);
72376 up_write(&mm->mmap_sem);
72377 return ret;
72378 }
72379
72380 -static inline void verify_mm_writelocked(struct mm_struct *mm)
72381 -{
72382 -#ifdef CONFIG_DEBUG_VM
72383 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72384 - WARN_ON(1);
72385 - up_read(&mm->mmap_sem);
72386 - }
72387 -#endif
72388 -}
72389 -
72390 /*
72391 * this is really a simplified "do_mmap". it only handles
72392 * anonymous maps. eventually we may be able to do some
72393 @@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72394 struct rb_node ** rb_link, * rb_parent;
72395 pgoff_t pgoff = addr >> PAGE_SHIFT;
72396 int error;
72397 + unsigned long charged;
72398
72399 len = PAGE_ALIGN(len);
72400 if (!len)
72401 @@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72402
72403 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72404
72405 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72406 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72407 + flags &= ~VM_EXEC;
72408 +
72409 +#ifdef CONFIG_PAX_MPROTECT
72410 + if (mm->pax_flags & MF_PAX_MPROTECT)
72411 + flags &= ~VM_MAYEXEC;
72412 +#endif
72413 +
72414 + }
72415 +#endif
72416 +
72417 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72418 if (error & ~PAGE_MASK)
72419 return error;
72420
72421 + charged = len >> PAGE_SHIFT;
72422 +
72423 /*
72424 * mlock MCL_FUTURE?
72425 */
72426 if (mm->def_flags & VM_LOCKED) {
72427 unsigned long locked, lock_limit;
72428 - locked = len >> PAGE_SHIFT;
72429 + locked = charged;
72430 locked += mm->locked_vm;
72431 lock_limit = rlimit(RLIMIT_MEMLOCK);
72432 lock_limit >>= PAGE_SHIFT;
72433 @@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72434 /*
72435 * Clear old maps. this also does some error checking for us
72436 */
72437 - munmap_back:
72438 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72439 if (vma && vma->vm_start < addr + len) {
72440 if (do_munmap(mm, addr, len))
72441 return -ENOMEM;
72442 - goto munmap_back;
72443 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72444 + BUG_ON(vma && vma->vm_start < addr + len);
72445 }
72446
72447 /* Check against address space limits *after* clearing old maps... */
72448 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72449 + if (!may_expand_vm(mm, charged))
72450 return -ENOMEM;
72451
72452 if (mm->map_count > sysctl_max_map_count)
72453 return -ENOMEM;
72454
72455 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
72456 + if (security_vm_enough_memory(charged))
72457 return -ENOMEM;
72458
72459 /* Can we just expand an old private anonymous mapping? */
72460 @@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72461 */
72462 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72463 if (!vma) {
72464 - vm_unacct_memory(len >> PAGE_SHIFT);
72465 + vm_unacct_memory(charged);
72466 return -ENOMEM;
72467 }
72468
72469 @@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72470 vma_link(mm, vma, prev, rb_link, rb_parent);
72471 out:
72472 perf_event_mmap(vma);
72473 - mm->total_vm += len >> PAGE_SHIFT;
72474 + mm->total_vm += charged;
72475 if (flags & VM_LOCKED) {
72476 if (!mlock_vma_pages_range(vma, addr, addr + len))
72477 - mm->locked_vm += (len >> PAGE_SHIFT);
72478 + mm->locked_vm += charged;
72479 }
72480 + track_exec_limit(mm, addr, addr + len, flags);
72481 return addr;
72482 }
72483
72484 @@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
72485 * Walk the list again, actually closing and freeing it,
72486 * with preemption enabled, without holding any MM locks.
72487 */
72488 - while (vma)
72489 + while (vma) {
72490 + vma->vm_mirror = NULL;
72491 vma = remove_vma(vma);
72492 + }
72493
72494 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72495 }
72496 @@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72497 struct vm_area_struct * __vma, * prev;
72498 struct rb_node ** rb_link, * rb_parent;
72499
72500 +#ifdef CONFIG_PAX_SEGMEXEC
72501 + struct vm_area_struct *vma_m = NULL;
72502 +#endif
72503 +
72504 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72505 + return -EPERM;
72506 +
72507 /*
72508 * The vm_pgoff of a purely anonymous vma should be irrelevant
72509 * until its first write fault, when page's anon_vma and index
72510 @@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72511 if ((vma->vm_flags & VM_ACCOUNT) &&
72512 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72513 return -ENOMEM;
72514 +
72515 +#ifdef CONFIG_PAX_SEGMEXEC
72516 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72517 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72518 + if (!vma_m)
72519 + return -ENOMEM;
72520 + }
72521 +#endif
72522 +
72523 vma_link(mm, vma, prev, rb_link, rb_parent);
72524 +
72525 +#ifdef CONFIG_PAX_SEGMEXEC
72526 + if (vma_m)
72527 + BUG_ON(pax_mirror_vma(vma_m, vma));
72528 +#endif
72529 +
72530 return 0;
72531 }
72532
72533 @@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72534 struct rb_node **rb_link, *rb_parent;
72535 struct mempolicy *pol;
72536
72537 + BUG_ON(vma->vm_mirror);
72538 +
72539 /*
72540 * If anonymous vma has not yet been faulted, update new pgoff
72541 * to match new location, to increase its chance of merging.
72542 @@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72543 return NULL;
72544 }
72545
72546 +#ifdef CONFIG_PAX_SEGMEXEC
72547 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72548 +{
72549 + struct vm_area_struct *prev_m;
72550 + struct rb_node **rb_link_m, *rb_parent_m;
72551 + struct mempolicy *pol_m;
72552 +
72553 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72554 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72555 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72556 + *vma_m = *vma;
72557 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72558 + if (anon_vma_clone(vma_m, vma))
72559 + return -ENOMEM;
72560 + pol_m = vma_policy(vma_m);
72561 + mpol_get(pol_m);
72562 + vma_set_policy(vma_m, pol_m);
72563 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72564 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72565 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72566 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72567 + if (vma_m->vm_file)
72568 + get_file(vma_m->vm_file);
72569 + if (vma_m->vm_ops && vma_m->vm_ops->open)
72570 + vma_m->vm_ops->open(vma_m);
72571 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72572 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72573 + vma_m->vm_mirror = vma;
72574 + vma->vm_mirror = vma_m;
72575 + return 0;
72576 +}
72577 +#endif
72578 +
72579 /*
72580 * Return true if the calling process may expand its vm space by the passed
72581 * number of pages
72582 @@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72583 unsigned long lim;
72584
72585 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72586 -
72587 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72588 if (cur + npages > lim)
72589 return 0;
72590 return 1;
72591 @@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
72592 vma->vm_start = addr;
72593 vma->vm_end = addr + len;
72594
72595 +#ifdef CONFIG_PAX_MPROTECT
72596 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72597 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72598 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72599 + return -EPERM;
72600 + if (!(vm_flags & VM_EXEC))
72601 + vm_flags &= ~VM_MAYEXEC;
72602 +#else
72603 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72604 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72605 +#endif
72606 + else
72607 + vm_flags &= ~VM_MAYWRITE;
72608 + }
72609 +#endif
72610 +
72611 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72612 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72613
72614 diff --git a/mm/mprotect.c b/mm/mprotect.c
72615 index 5a688a2..27e031c 100644
72616 --- a/mm/mprotect.c
72617 +++ b/mm/mprotect.c
72618 @@ -23,10 +23,16 @@
72619 #include <linux/mmu_notifier.h>
72620 #include <linux/migrate.h>
72621 #include <linux/perf_event.h>
72622 +
72623 +#ifdef CONFIG_PAX_MPROTECT
72624 +#include <linux/elf.h>
72625 +#endif
72626 +
72627 #include <asm/uaccess.h>
72628 #include <asm/pgtable.h>
72629 #include <asm/cacheflush.h>
72630 #include <asm/tlbflush.h>
72631 +#include <asm/mmu_context.h>
72632
72633 #ifndef pgprot_modify
72634 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72635 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
72636 flush_tlb_range(vma, start, end);
72637 }
72638
72639 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72640 +/* called while holding the mmap semaphor for writing except stack expansion */
72641 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72642 +{
72643 + unsigned long oldlimit, newlimit = 0UL;
72644 +
72645 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72646 + return;
72647 +
72648 + spin_lock(&mm->page_table_lock);
72649 + oldlimit = mm->context.user_cs_limit;
72650 + if ((prot & VM_EXEC) && oldlimit < end)
72651 + /* USER_CS limit moved up */
72652 + newlimit = end;
72653 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72654 + /* USER_CS limit moved down */
72655 + newlimit = start;
72656 +
72657 + if (newlimit) {
72658 + mm->context.user_cs_limit = newlimit;
72659 +
72660 +#ifdef CONFIG_SMP
72661 + wmb();
72662 + cpus_clear(mm->context.cpu_user_cs_mask);
72663 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72664 +#endif
72665 +
72666 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72667 + }
72668 + spin_unlock(&mm->page_table_lock);
72669 + if (newlimit == end) {
72670 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
72671 +
72672 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
72673 + if (is_vm_hugetlb_page(vma))
72674 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72675 + else
72676 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72677 + }
72678 +}
72679 +#endif
72680 +
72681 int
72682 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72683 unsigned long start, unsigned long end, unsigned long newflags)
72684 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72685 int error;
72686 int dirty_accountable = 0;
72687
72688 +#ifdef CONFIG_PAX_SEGMEXEC
72689 + struct vm_area_struct *vma_m = NULL;
72690 + unsigned long start_m, end_m;
72691 +
72692 + start_m = start + SEGMEXEC_TASK_SIZE;
72693 + end_m = end + SEGMEXEC_TASK_SIZE;
72694 +#endif
72695 +
72696 if (newflags == oldflags) {
72697 *pprev = vma;
72698 return 0;
72699 }
72700
72701 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72702 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72703 +
72704 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72705 + return -ENOMEM;
72706 +
72707 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72708 + return -ENOMEM;
72709 + }
72710 +
72711 /*
72712 * If we make a private mapping writable we increase our commit;
72713 * but (without finer accounting) cannot reduce our commit if we
72714 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72715 }
72716 }
72717
72718 +#ifdef CONFIG_PAX_SEGMEXEC
72719 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72720 + if (start != vma->vm_start) {
72721 + error = split_vma(mm, vma, start, 1);
72722 + if (error)
72723 + goto fail;
72724 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72725 + *pprev = (*pprev)->vm_next;
72726 + }
72727 +
72728 + if (end != vma->vm_end) {
72729 + error = split_vma(mm, vma, end, 0);
72730 + if (error)
72731 + goto fail;
72732 + }
72733 +
72734 + if (pax_find_mirror_vma(vma)) {
72735 + error = __do_munmap(mm, start_m, end_m - start_m);
72736 + if (error)
72737 + goto fail;
72738 + } else {
72739 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72740 + if (!vma_m) {
72741 + error = -ENOMEM;
72742 + goto fail;
72743 + }
72744 + vma->vm_flags = newflags;
72745 + error = pax_mirror_vma(vma_m, vma);
72746 + if (error) {
72747 + vma->vm_flags = oldflags;
72748 + goto fail;
72749 + }
72750 + }
72751 + }
72752 +#endif
72753 +
72754 /*
72755 * First try to merge with previous and/or next vma.
72756 */
72757 @@ -204,9 +306,21 @@ success:
72758 * vm_flags and vm_page_prot are protected by the mmap_sem
72759 * held in write mode.
72760 */
72761 +
72762 +#ifdef CONFIG_PAX_SEGMEXEC
72763 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72764 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72765 +#endif
72766 +
72767 vma->vm_flags = newflags;
72768 +
72769 +#ifdef CONFIG_PAX_MPROTECT
72770 + if (mm->binfmt && mm->binfmt->handle_mprotect)
72771 + mm->binfmt->handle_mprotect(vma, newflags);
72772 +#endif
72773 +
72774 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72775 - vm_get_page_prot(newflags));
72776 + vm_get_page_prot(vma->vm_flags));
72777
72778 if (vma_wants_writenotify(vma)) {
72779 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72780 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72781 end = start + len;
72782 if (end <= start)
72783 return -ENOMEM;
72784 +
72785 +#ifdef CONFIG_PAX_SEGMEXEC
72786 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72787 + if (end > SEGMEXEC_TASK_SIZE)
72788 + return -EINVAL;
72789 + } else
72790 +#endif
72791 +
72792 + if (end > TASK_SIZE)
72793 + return -EINVAL;
72794 +
72795 if (!arch_validate_prot(prot))
72796 return -EINVAL;
72797
72798 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72799 /*
72800 * Does the application expect PROT_READ to imply PROT_EXEC:
72801 */
72802 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72803 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72804 prot |= PROT_EXEC;
72805
72806 vm_flags = calc_vm_prot_bits(prot);
72807 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72808 if (start > vma->vm_start)
72809 prev = vma;
72810
72811 +#ifdef CONFIG_PAX_MPROTECT
72812 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72813 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72814 +#endif
72815 +
72816 for (nstart = start ; ; ) {
72817 unsigned long newflags;
72818
72819 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72820
72821 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72822 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72823 + if (prot & (PROT_WRITE | PROT_EXEC))
72824 + gr_log_rwxmprotect(vma->vm_file);
72825 +
72826 + error = -EACCES;
72827 + goto out;
72828 + }
72829 +
72830 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72831 error = -EACCES;
72832 goto out;
72833 }
72834 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72835 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72836 if (error)
72837 goto out;
72838 +
72839 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72840 +
72841 nstart = tmp;
72842
72843 if (nstart < prev->vm_end)
72844 diff --git a/mm/mremap.c b/mm/mremap.c
72845 index 506fa44..ccc0ba9 100644
72846 --- a/mm/mremap.c
72847 +++ b/mm/mremap.c
72848 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72849 continue;
72850 pte = ptep_clear_flush(vma, old_addr, old_pte);
72851 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72852 +
72853 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72854 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72855 + pte = pte_exprotect(pte);
72856 +#endif
72857 +
72858 set_pte_at(mm, new_addr, new_pte, pte);
72859 }
72860
72861 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72862 if (is_vm_hugetlb_page(vma))
72863 goto Einval;
72864
72865 +#ifdef CONFIG_PAX_SEGMEXEC
72866 + if (pax_find_mirror_vma(vma))
72867 + goto Einval;
72868 +#endif
72869 +
72870 /* We can't remap across vm area boundaries */
72871 if (old_len > vma->vm_end - addr)
72872 goto Efault;
72873 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned long addr,
72874 unsigned long ret = -EINVAL;
72875 unsigned long charged = 0;
72876 unsigned long map_flags;
72877 + unsigned long pax_task_size = TASK_SIZE;
72878
72879 if (new_addr & ~PAGE_MASK)
72880 goto out;
72881
72882 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72883 +#ifdef CONFIG_PAX_SEGMEXEC
72884 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72885 + pax_task_size = SEGMEXEC_TASK_SIZE;
72886 +#endif
72887 +
72888 + pax_task_size -= PAGE_SIZE;
72889 +
72890 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72891 goto out;
72892
72893 /* Check if the location we're moving into overlaps the
72894 * old location at all, and fail if it does.
72895 */
72896 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72897 - goto out;
72898 -
72899 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72900 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72901 goto out;
72902
72903 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72904 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long addr,
72905 struct vm_area_struct *vma;
72906 unsigned long ret = -EINVAL;
72907 unsigned long charged = 0;
72908 + unsigned long pax_task_size = TASK_SIZE;
72909
72910 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72911 goto out;
72912 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long addr,
72913 if (!new_len)
72914 goto out;
72915
72916 +#ifdef CONFIG_PAX_SEGMEXEC
72917 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72918 + pax_task_size = SEGMEXEC_TASK_SIZE;
72919 +#endif
72920 +
72921 + pax_task_size -= PAGE_SIZE;
72922 +
72923 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72924 + old_len > pax_task_size || addr > pax_task_size-old_len)
72925 + goto out;
72926 +
72927 if (flags & MREMAP_FIXED) {
72928 if (flags & MREMAP_MAYMOVE)
72929 ret = mremap_to(addr, old_len, new_addr, new_len);
72930 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long addr,
72931 addr + new_len);
72932 }
72933 ret = addr;
72934 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72935 goto out;
72936 }
72937 }
72938 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long addr,
72939 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72940 if (ret)
72941 goto out;
72942 +
72943 + map_flags = vma->vm_flags;
72944 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72945 + if (!(ret & ~PAGE_MASK)) {
72946 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72947 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72948 + }
72949 }
72950 out:
72951 if (ret & ~PAGE_MASK)
72952 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
72953 index 6e93dc7..c98df0c 100644
72954 --- a/mm/nobootmem.c
72955 +++ b/mm/nobootmem.c
72956 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
72957 unsigned long __init free_all_memory_core_early(int nodeid)
72958 {
72959 int i;
72960 - u64 start, end;
72961 + u64 start, end, startrange, endrange;
72962 unsigned long count = 0;
72963 - struct range *range = NULL;
72964 + struct range *range = NULL, rangerange = { 0, 0 };
72965 int nr_range;
72966
72967 nr_range = get_free_all_memory_range(&range, nodeid);
72968 + startrange = __pa(range) >> PAGE_SHIFT;
72969 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
72970
72971 for (i = 0; i < nr_range; i++) {
72972 start = range[i].start;
72973 end = range[i].end;
72974 + if (start <= endrange && startrange < end) {
72975 + BUG_ON(rangerange.start | rangerange.end);
72976 + rangerange = range[i];
72977 + continue;
72978 + }
72979 count += end - start;
72980 __free_pages_memory(start, end);
72981 }
72982 + start = rangerange.start;
72983 + end = rangerange.end;
72984 + count += end - start;
72985 + __free_pages_memory(start, end);
72986
72987 return count;
72988 }
72989 diff --git a/mm/nommu.c b/mm/nommu.c
72990 index 4358032..e79b99f 100644
72991 --- a/mm/nommu.c
72992 +++ b/mm/nommu.c
72993 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72994 int sysctl_overcommit_ratio = 50; /* default is 50% */
72995 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72996 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72997 -int heap_stack_gap = 0;
72998
72999 atomic_long_t mmap_pages_allocated;
73000
73001 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
73002 EXPORT_SYMBOL(find_vma);
73003
73004 /*
73005 - * find a VMA
73006 - * - we don't extend stack VMAs under NOMMU conditions
73007 - */
73008 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
73009 -{
73010 - return find_vma(mm, addr);
73011 -}
73012 -
73013 -/*
73014 * expand a stack to a given address
73015 * - not supported under NOMMU conditions
73016 */
73017 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73018
73019 /* most fields are the same, copy all, and then fixup */
73020 *new = *vma;
73021 + INIT_LIST_HEAD(&new->anon_vma_chain);
73022 *region = *vma->vm_region;
73023 new->vm_region = region;
73024
73025 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
73026 index e8fae15..18c0442 100644
73027 --- a/mm/page_alloc.c
73028 +++ b/mm/page_alloc.c
73029 @@ -340,7 +340,7 @@ out:
73030 * This usage means that zero-order pages may not be compound.
73031 */
73032
73033 -static void free_compound_page(struct page *page)
73034 +void free_compound_page(struct page *page)
73035 {
73036 __free_pages_ok(page, compound_order(page));
73037 }
73038 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73039 int i;
73040 int bad = 0;
73041
73042 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73043 + unsigned long index = 1UL << order;
73044 +#endif
73045 +
73046 trace_mm_page_free_direct(page, order);
73047 kmemcheck_free_shadow(page, order);
73048
73049 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73050 debug_check_no_obj_freed(page_address(page),
73051 PAGE_SIZE << order);
73052 }
73053 +
73054 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73055 + for (; index; --index)
73056 + sanitize_highpage(page + index - 1);
73057 +#endif
73058 +
73059 arch_free_page(page, order);
73060 kernel_map_pages(page, 1 << order, 0);
73061
73062 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
73063 arch_alloc_page(page, order);
73064 kernel_map_pages(page, 1 << order, 1);
73065
73066 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
73067 if (gfp_flags & __GFP_ZERO)
73068 prep_zero_page(page, order, gfp_flags);
73069 +#endif
73070
73071 if (order && (gfp_flags & __GFP_COMP))
73072 prep_compound_page(page, order);
73073 @@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter)
73074 int cpu;
73075 struct zone *zone;
73076
73077 + pax_track_stack();
73078 +
73079 for_each_populated_zone(zone) {
73080 if (skip_free_areas_node(filter, zone_to_nid(zone)))
73081 continue;
73082 @@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
73083 unsigned long pfn;
73084
73085 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73086 +#ifdef CONFIG_X86_32
73087 + /* boot failures in VMware 8 on 32bit vanilla since
73088 + this change */
73089 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73090 +#else
73091 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73092 +#endif
73093 return 1;
73094 }
73095 return 0;
73096 diff --git a/mm/percpu.c b/mm/percpu.c
73097 index 0ae7a09..613118e 100644
73098 --- a/mm/percpu.c
73099 +++ b/mm/percpu.c
73100 @@ -121,7 +121,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73101 static unsigned int pcpu_high_unit_cpu __read_mostly;
73102
73103 /* the address of the first chunk which starts with the kernel static area */
73104 -void *pcpu_base_addr __read_mostly;
73105 +void *pcpu_base_addr __read_only;
73106 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73107
73108 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73109 diff --git a/mm/rmap.c b/mm/rmap.c
73110 index 8005080..198c2cd 100644
73111 --- a/mm/rmap.c
73112 +++ b/mm/rmap.c
73113 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73114 struct anon_vma *anon_vma = vma->anon_vma;
73115 struct anon_vma_chain *avc;
73116
73117 +#ifdef CONFIG_PAX_SEGMEXEC
73118 + struct anon_vma_chain *avc_m = NULL;
73119 +#endif
73120 +
73121 might_sleep();
73122 if (unlikely(!anon_vma)) {
73123 struct mm_struct *mm = vma->vm_mm;
73124 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73125 if (!avc)
73126 goto out_enomem;
73127
73128 +#ifdef CONFIG_PAX_SEGMEXEC
73129 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73130 + if (!avc_m)
73131 + goto out_enomem_free_avc;
73132 +#endif
73133 +
73134 anon_vma = find_mergeable_anon_vma(vma);
73135 allocated = NULL;
73136 if (!anon_vma) {
73137 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73138 /* page_table_lock to protect against threads */
73139 spin_lock(&mm->page_table_lock);
73140 if (likely(!vma->anon_vma)) {
73141 +
73142 +#ifdef CONFIG_PAX_SEGMEXEC
73143 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73144 +
73145 + if (vma_m) {
73146 + BUG_ON(vma_m->anon_vma);
73147 + vma_m->anon_vma = anon_vma;
73148 + avc_m->anon_vma = anon_vma;
73149 + avc_m->vma = vma;
73150 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
73151 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
73152 + avc_m = NULL;
73153 + }
73154 +#endif
73155 +
73156 vma->anon_vma = anon_vma;
73157 avc->anon_vma = anon_vma;
73158 avc->vma = vma;
73159 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73160
73161 if (unlikely(allocated))
73162 put_anon_vma(allocated);
73163 +
73164 +#ifdef CONFIG_PAX_SEGMEXEC
73165 + if (unlikely(avc_m))
73166 + anon_vma_chain_free(avc_m);
73167 +#endif
73168 +
73169 if (unlikely(avc))
73170 anon_vma_chain_free(avc);
73171 }
73172 return 0;
73173
73174 out_enomem_free_avc:
73175 +
73176 +#ifdef CONFIG_PAX_SEGMEXEC
73177 + if (avc_m)
73178 + anon_vma_chain_free(avc_m);
73179 +#endif
73180 +
73181 anon_vma_chain_free(avc);
73182 out_enomem:
73183 return -ENOMEM;
73184 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
73185 * Attach the anon_vmas from src to dst.
73186 * Returns 0 on success, -ENOMEM on failure.
73187 */
73188 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73189 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73190 {
73191 struct anon_vma_chain *avc, *pavc;
73192 struct anon_vma *root = NULL;
73193 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73194 * the corresponding VMA in the parent process is attached to.
73195 * Returns 0 on success, non-zero on failure.
73196 */
73197 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73198 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73199 {
73200 struct anon_vma_chain *avc;
73201 struct anon_vma *anon_vma;
73202 diff --git a/mm/shmem.c b/mm/shmem.c
73203 index 32f6763..431c405 100644
73204 --- a/mm/shmem.c
73205 +++ b/mm/shmem.c
73206 @@ -31,7 +31,7 @@
73207 #include <linux/module.h>
73208 #include <linux/swap.h>
73209
73210 -static struct vfsmount *shm_mnt;
73211 +struct vfsmount *shm_mnt;
73212
73213 #ifdef CONFIG_SHMEM
73214 /*
73215 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73216 #define BOGO_DIRENT_SIZE 20
73217
73218 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73219 -#define SHORT_SYMLINK_LEN 128
73220 +#define SHORT_SYMLINK_LEN 64
73221
73222 struct shmem_xattr {
73223 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73224 @@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
73225 struct mempolicy mpol, *spol;
73226 struct vm_area_struct pvma;
73227
73228 + pax_track_stack();
73229 +
73230 spol = mpol_cond_copy(&mpol,
73231 mpol_shared_policy_lookup(&info->policy, index));
73232
73233 @@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73234 int err = -ENOMEM;
73235
73236 /* Round up to L1_CACHE_BYTES to resist false sharing */
73237 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73238 - L1_CACHE_BYTES), GFP_KERNEL);
73239 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73240 if (!sbinfo)
73241 return -ENOMEM;
73242
73243 diff --git a/mm/slab.c b/mm/slab.c
73244 index 893c76d..a742de2 100644
73245 --- a/mm/slab.c
73246 +++ b/mm/slab.c
73247 @@ -151,7 +151,7 @@
73248
73249 /* Legal flag mask for kmem_cache_create(). */
73250 #if DEBUG
73251 -# define CREATE_MASK (SLAB_RED_ZONE | \
73252 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73253 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73254 SLAB_CACHE_DMA | \
73255 SLAB_STORE_USER | \
73256 @@ -159,7 +159,7 @@
73257 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73258 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73259 #else
73260 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73261 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73262 SLAB_CACHE_DMA | \
73263 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73264 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73265 @@ -288,7 +288,7 @@ struct kmem_list3 {
73266 * Need this for bootstrapping a per node allocator.
73267 */
73268 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73269 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73270 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73271 #define CACHE_CACHE 0
73272 #define SIZE_AC MAX_NUMNODES
73273 #define SIZE_L3 (2 * MAX_NUMNODES)
73274 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73275 if ((x)->max_freeable < i) \
73276 (x)->max_freeable = i; \
73277 } while (0)
73278 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73279 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73280 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73281 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73282 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73283 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73284 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73285 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73286 #else
73287 #define STATS_INC_ACTIVE(x) do { } while (0)
73288 #define STATS_DEC_ACTIVE(x) do { } while (0)
73289 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73290 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73291 */
73292 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73293 - const struct slab *slab, void *obj)
73294 + const struct slab *slab, const void *obj)
73295 {
73296 u32 offset = (obj - slab->s_mem);
73297 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73298 @@ -564,7 +564,7 @@ struct cache_names {
73299 static struct cache_names __initdata cache_names[] = {
73300 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73301 #include <linux/kmalloc_sizes.h>
73302 - {NULL,}
73303 + {NULL}
73304 #undef CACHE
73305 };
73306
73307 @@ -1572,7 +1572,7 @@ void __init kmem_cache_init(void)
73308 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73309 sizes[INDEX_AC].cs_size,
73310 ARCH_KMALLOC_MINALIGN,
73311 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73312 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73313 NULL);
73314
73315 if (INDEX_AC != INDEX_L3) {
73316 @@ -1580,7 +1580,7 @@ void __init kmem_cache_init(void)
73317 kmem_cache_create(names[INDEX_L3].name,
73318 sizes[INDEX_L3].cs_size,
73319 ARCH_KMALLOC_MINALIGN,
73320 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73321 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73322 NULL);
73323 }
73324
73325 @@ -1598,7 +1598,7 @@ void __init kmem_cache_init(void)
73326 sizes->cs_cachep = kmem_cache_create(names->name,
73327 sizes->cs_size,
73328 ARCH_KMALLOC_MINALIGN,
73329 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73330 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73331 NULL);
73332 }
73333 #ifdef CONFIG_ZONE_DMA
73334 @@ -4327,10 +4327,10 @@ static int s_show(struct seq_file *m, void *p)
73335 }
73336 /* cpu stats */
73337 {
73338 - unsigned long allochit = atomic_read(&cachep->allochit);
73339 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73340 - unsigned long freehit = atomic_read(&cachep->freehit);
73341 - unsigned long freemiss = atomic_read(&cachep->freemiss);
73342 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73343 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73344 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73345 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73346
73347 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73348 allochit, allocmiss, freehit, freemiss);
73349 @@ -4587,15 +4587,70 @@ static const struct file_operations proc_slabstats_operations = {
73350
73351 static int __init slab_proc_init(void)
73352 {
73353 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
73354 + mode_t gr_mode = S_IRUGO;
73355 +
73356 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73357 + gr_mode = S_IRUSR;
73358 +#endif
73359 +
73360 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
73361 #ifdef CONFIG_DEBUG_SLAB_LEAK
73362 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73363 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
73364 #endif
73365 return 0;
73366 }
73367 module_init(slab_proc_init);
73368 #endif
73369
73370 +void check_object_size(const void *ptr, unsigned long n, bool to)
73371 +{
73372 +
73373 +#ifdef CONFIG_PAX_USERCOPY
73374 + struct page *page;
73375 + struct kmem_cache *cachep = NULL;
73376 + struct slab *slabp;
73377 + unsigned int objnr;
73378 + unsigned long offset;
73379 + const char *type;
73380 +
73381 + if (!n)
73382 + return;
73383 +
73384 + type = "<null>";
73385 + if (ZERO_OR_NULL_PTR(ptr))
73386 + goto report;
73387 +
73388 + if (!virt_addr_valid(ptr))
73389 + return;
73390 +
73391 + page = virt_to_head_page(ptr);
73392 +
73393 + type = "<process stack>";
73394 + if (!PageSlab(page)) {
73395 + if (object_is_on_stack(ptr, n) == -1)
73396 + goto report;
73397 + return;
73398 + }
73399 +
73400 + cachep = page_get_cache(page);
73401 + type = cachep->name;
73402 + if (!(cachep->flags & SLAB_USERCOPY))
73403 + goto report;
73404 +
73405 + slabp = page_get_slab(page);
73406 + objnr = obj_to_index(cachep, slabp, ptr);
73407 + BUG_ON(objnr >= cachep->num);
73408 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73409 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73410 + return;
73411 +
73412 +report:
73413 + pax_report_usercopy(ptr, n, to, type);
73414 +#endif
73415 +
73416 +}
73417 +EXPORT_SYMBOL(check_object_size);
73418 +
73419 /**
73420 * ksize - get the actual amount of memory allocated for a given object
73421 * @objp: Pointer to the object
73422 diff --git a/mm/slob.c b/mm/slob.c
73423 index bf39181..727f7a3 100644
73424 --- a/mm/slob.c
73425 +++ b/mm/slob.c
73426 @@ -29,7 +29,7 @@
73427 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73428 * alloc_pages() directly, allocating compound pages so the page order
73429 * does not have to be separately tracked, and also stores the exact
73430 - * allocation size in page->private so that it can be used to accurately
73431 + * allocation size in slob_page->size so that it can be used to accurately
73432 * provide ksize(). These objects are detected in kfree() because slob_page()
73433 * is false for them.
73434 *
73435 @@ -58,6 +58,7 @@
73436 */
73437
73438 #include <linux/kernel.h>
73439 +#include <linux/sched.h>
73440 #include <linux/slab.h>
73441 #include <linux/mm.h>
73442 #include <linux/swap.h> /* struct reclaim_state */
73443 @@ -102,7 +103,8 @@ struct slob_page {
73444 unsigned long flags; /* mandatory */
73445 atomic_t _count; /* mandatory */
73446 slobidx_t units; /* free units left in page */
73447 - unsigned long pad[2];
73448 + unsigned long pad[1];
73449 + unsigned long size; /* size when >=PAGE_SIZE */
73450 slob_t *free; /* first free slob_t in page */
73451 struct list_head list; /* linked list of free pages */
73452 };
73453 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73454 */
73455 static inline int is_slob_page(struct slob_page *sp)
73456 {
73457 - return PageSlab((struct page *)sp);
73458 + return PageSlab((struct page *)sp) && !sp->size;
73459 }
73460
73461 static inline void set_slob_page(struct slob_page *sp)
73462 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73463
73464 static inline struct slob_page *slob_page(const void *addr)
73465 {
73466 - return (struct slob_page *)virt_to_page(addr);
73467 + return (struct slob_page *)virt_to_head_page(addr);
73468 }
73469
73470 /*
73471 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73472 /*
73473 * Return the size of a slob block.
73474 */
73475 -static slobidx_t slob_units(slob_t *s)
73476 +static slobidx_t slob_units(const slob_t *s)
73477 {
73478 if (s->units > 0)
73479 return s->units;
73480 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73481 /*
73482 * Return the next free slob block pointer after this one.
73483 */
73484 -static slob_t *slob_next(slob_t *s)
73485 +static slob_t *slob_next(const slob_t *s)
73486 {
73487 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73488 slobidx_t next;
73489 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73490 /*
73491 * Returns true if s is the last free block in its page.
73492 */
73493 -static int slob_last(slob_t *s)
73494 +static int slob_last(const slob_t *s)
73495 {
73496 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73497 }
73498 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73499 if (!page)
73500 return NULL;
73501
73502 + set_slob_page(page);
73503 return page_address(page);
73504 }
73505
73506 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73507 if (!b)
73508 return NULL;
73509 sp = slob_page(b);
73510 - set_slob_page(sp);
73511
73512 spin_lock_irqsave(&slob_lock, flags);
73513 sp->units = SLOB_UNITS(PAGE_SIZE);
73514 sp->free = b;
73515 + sp->size = 0;
73516 INIT_LIST_HEAD(&sp->list);
73517 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73518 set_slob_page_free(sp, slob_list);
73519 @@ -476,10 +479,9 @@ out:
73520 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73521 */
73522
73523 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73524 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73525 {
73526 - unsigned int *m;
73527 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73528 + slob_t *m;
73529 void *ret;
73530
73531 gfp &= gfp_allowed_mask;
73532 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73533
73534 if (!m)
73535 return NULL;
73536 - *m = size;
73537 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73538 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73539 + m[0].units = size;
73540 + m[1].units = align;
73541 ret = (void *)m + align;
73542
73543 trace_kmalloc_node(_RET_IP_, ret,
73544 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73545 gfp |= __GFP_COMP;
73546 ret = slob_new_pages(gfp, order, node);
73547 if (ret) {
73548 - struct page *page;
73549 - page = virt_to_page(ret);
73550 - page->private = size;
73551 + struct slob_page *sp;
73552 + sp = slob_page(ret);
73553 + sp->size = size;
73554 }
73555
73556 trace_kmalloc_node(_RET_IP_, ret,
73557 size, PAGE_SIZE << order, gfp, node);
73558 }
73559
73560 - kmemleak_alloc(ret, size, 1, gfp);
73561 + return ret;
73562 +}
73563 +
73564 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73565 +{
73566 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73567 + void *ret = __kmalloc_node_align(size, gfp, node, align);
73568 +
73569 + if (!ZERO_OR_NULL_PTR(ret))
73570 + kmemleak_alloc(ret, size, 1, gfp);
73571 return ret;
73572 }
73573 EXPORT_SYMBOL(__kmalloc_node);
73574 @@ -533,13 +547,92 @@ void kfree(const void *block)
73575 sp = slob_page(block);
73576 if (is_slob_page(sp)) {
73577 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73578 - unsigned int *m = (unsigned int *)(block - align);
73579 - slob_free(m, *m + align);
73580 - } else
73581 + slob_t *m = (slob_t *)(block - align);
73582 + slob_free(m, m[0].units + align);
73583 + } else {
73584 + clear_slob_page(sp);
73585 + free_slob_page(sp);
73586 + sp->size = 0;
73587 put_page(&sp->page);
73588 + }
73589 }
73590 EXPORT_SYMBOL(kfree);
73591
73592 +void check_object_size(const void *ptr, unsigned long n, bool to)
73593 +{
73594 +
73595 +#ifdef CONFIG_PAX_USERCOPY
73596 + struct slob_page *sp;
73597 + const slob_t *free;
73598 + const void *base;
73599 + unsigned long flags;
73600 + const char *type;
73601 +
73602 + if (!n)
73603 + return;
73604 +
73605 + type = "<null>";
73606 + if (ZERO_OR_NULL_PTR(ptr))
73607 + goto report;
73608 +
73609 + if (!virt_addr_valid(ptr))
73610 + return;
73611 +
73612 + type = "<process stack>";
73613 + sp = slob_page(ptr);
73614 + if (!PageSlab((struct page*)sp)) {
73615 + if (object_is_on_stack(ptr, n) == -1)
73616 + goto report;
73617 + return;
73618 + }
73619 +
73620 + type = "<slob>";
73621 + if (sp->size) {
73622 + base = page_address(&sp->page);
73623 + if (base <= ptr && n <= sp->size - (ptr - base))
73624 + return;
73625 + goto report;
73626 + }
73627 +
73628 + /* some tricky double walking to find the chunk */
73629 + spin_lock_irqsave(&slob_lock, flags);
73630 + base = (void *)((unsigned long)ptr & PAGE_MASK);
73631 + free = sp->free;
73632 +
73633 + while (!slob_last(free) && (void *)free <= ptr) {
73634 + base = free + slob_units(free);
73635 + free = slob_next(free);
73636 + }
73637 +
73638 + while (base < (void *)free) {
73639 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73640 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
73641 + int offset;
73642 +
73643 + if (ptr < base + align)
73644 + break;
73645 +
73646 + offset = ptr - base - align;
73647 + if (offset >= m) {
73648 + base += size;
73649 + continue;
73650 + }
73651 +
73652 + if (n > m - offset)
73653 + break;
73654 +
73655 + spin_unlock_irqrestore(&slob_lock, flags);
73656 + return;
73657 + }
73658 +
73659 + spin_unlock_irqrestore(&slob_lock, flags);
73660 +report:
73661 + pax_report_usercopy(ptr, n, to, type);
73662 +#endif
73663 +
73664 +}
73665 +EXPORT_SYMBOL(check_object_size);
73666 +
73667 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73668 size_t ksize(const void *block)
73669 {
73670 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
73671 sp = slob_page(block);
73672 if (is_slob_page(sp)) {
73673 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73674 - unsigned int *m = (unsigned int *)(block - align);
73675 - return SLOB_UNITS(*m) * SLOB_UNIT;
73676 + slob_t *m = (slob_t *)(block - align);
73677 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73678 } else
73679 - return sp->page.private;
73680 + return sp->size;
73681 }
73682 EXPORT_SYMBOL(ksize);
73683
73684 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73685 {
73686 struct kmem_cache *c;
73687
73688 +#ifdef CONFIG_PAX_USERCOPY
73689 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
73690 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73691 +#else
73692 c = slob_alloc(sizeof(struct kmem_cache),
73693 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73694 +#endif
73695
73696 if (c) {
73697 c->name = name;
73698 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73699
73700 lockdep_trace_alloc(flags);
73701
73702 +#ifdef CONFIG_PAX_USERCOPY
73703 + b = __kmalloc_node_align(c->size, flags, node, c->align);
73704 +#else
73705 if (c->size < PAGE_SIZE) {
73706 b = slob_alloc(c->size, flags, c->align, node);
73707 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73708 SLOB_UNITS(c->size) * SLOB_UNIT,
73709 flags, node);
73710 } else {
73711 + struct slob_page *sp;
73712 +
73713 b = slob_new_pages(flags, get_order(c->size), node);
73714 + sp = slob_page(b);
73715 + sp->size = c->size;
73716 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73717 PAGE_SIZE << get_order(c->size),
73718 flags, node);
73719 }
73720 +#endif
73721
73722 if (c->ctor)
73723 c->ctor(b);
73724 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73725
73726 static void __kmem_cache_free(void *b, int size)
73727 {
73728 - if (size < PAGE_SIZE)
73729 + struct slob_page *sp = slob_page(b);
73730 +
73731 + if (is_slob_page(sp))
73732 slob_free(b, size);
73733 - else
73734 + else {
73735 + clear_slob_page(sp);
73736 + free_slob_page(sp);
73737 + sp->size = 0;
73738 slob_free_pages(b, get_order(size));
73739 + }
73740 }
73741
73742 static void kmem_rcu_free(struct rcu_head *head)
73743 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73744
73745 void kmem_cache_free(struct kmem_cache *c, void *b)
73746 {
73747 + int size = c->size;
73748 +
73749 +#ifdef CONFIG_PAX_USERCOPY
73750 + if (size + c->align < PAGE_SIZE) {
73751 + size += c->align;
73752 + b -= c->align;
73753 + }
73754 +#endif
73755 +
73756 kmemleak_free_recursive(b, c->flags);
73757 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73758 struct slob_rcu *slob_rcu;
73759 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73760 - slob_rcu->size = c->size;
73761 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73762 + slob_rcu->size = size;
73763 call_rcu(&slob_rcu->head, kmem_rcu_free);
73764 } else {
73765 - __kmem_cache_free(b, c->size);
73766 + __kmem_cache_free(b, size);
73767 }
73768
73769 +#ifdef CONFIG_PAX_USERCOPY
73770 + trace_kfree(_RET_IP_, b);
73771 +#else
73772 trace_kmem_cache_free(_RET_IP_, b);
73773 +#endif
73774 +
73775 }
73776 EXPORT_SYMBOL(kmem_cache_free);
73777
73778 diff --git a/mm/slub.c b/mm/slub.c
73779 index 7c54fe8..ce9940d 100644
73780 --- a/mm/slub.c
73781 +++ b/mm/slub.c
73782 @@ -208,7 +208,7 @@ struct track {
73783
73784 enum track_item { TRACK_ALLOC, TRACK_FREE };
73785
73786 -#ifdef CONFIG_SYSFS
73787 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73788 static int sysfs_slab_add(struct kmem_cache *);
73789 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73790 static void sysfs_slab_remove(struct kmem_cache *);
73791 @@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
73792 if (!t->addr)
73793 return;
73794
73795 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73796 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73797 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73798 #ifdef CONFIG_STACKTRACE
73799 {
73800 @@ -2077,6 +2077,11 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
73801 goto new_slab;
73802 }
73803
73804 + /* must check again c->freelist in case of cpu migration or IRQ */
73805 + object = c->freelist;
73806 + if (object)
73807 + goto load_freelist;
73808 +
73809 stat(s, ALLOC_SLOWPATH);
73810
73811 do {
73812 @@ -2456,6 +2461,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73813
73814 page = virt_to_head_page(x);
73815
73816 + BUG_ON(!PageSlab(page));
73817 +
73818 slab_free(s, page, x, _RET_IP_);
73819
73820 trace_kmem_cache_free(_RET_IP_, x);
73821 @@ -2489,7 +2496,7 @@ static int slub_min_objects;
73822 * Merge control. If this is set then no merging of slab caches will occur.
73823 * (Could be removed. This was introduced to pacify the merge skeptics.)
73824 */
73825 -static int slub_nomerge;
73826 +static int slub_nomerge = 1;
73827
73828 /*
73829 * Calculate the order of allocation given an slab object size.
73830 @@ -2912,7 +2919,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73831 * list to avoid pounding the page allocator excessively.
73832 */
73833 set_min_partial(s, ilog2(s->size));
73834 - s->refcount = 1;
73835 + atomic_set(&s->refcount, 1);
73836 #ifdef CONFIG_NUMA
73837 s->remote_node_defrag_ratio = 1000;
73838 #endif
73839 @@ -3017,8 +3024,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73840 void kmem_cache_destroy(struct kmem_cache *s)
73841 {
73842 down_write(&slub_lock);
73843 - s->refcount--;
73844 - if (!s->refcount) {
73845 + if (atomic_dec_and_test(&s->refcount)) {
73846 list_del(&s->list);
73847 if (kmem_cache_close(s)) {
73848 printk(KERN_ERR "SLUB %s: %s called for cache that "
73849 @@ -3228,6 +3234,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73850 EXPORT_SYMBOL(__kmalloc_node);
73851 #endif
73852
73853 +void check_object_size(const void *ptr, unsigned long n, bool to)
73854 +{
73855 +
73856 +#ifdef CONFIG_PAX_USERCOPY
73857 + struct page *page;
73858 + struct kmem_cache *s = NULL;
73859 + unsigned long offset;
73860 + const char *type;
73861 +
73862 + if (!n)
73863 + return;
73864 +
73865 + type = "<null>";
73866 + if (ZERO_OR_NULL_PTR(ptr))
73867 + goto report;
73868 +
73869 + if (!virt_addr_valid(ptr))
73870 + return;
73871 +
73872 + page = virt_to_head_page(ptr);
73873 +
73874 + type = "<process stack>";
73875 + if (!PageSlab(page)) {
73876 + if (object_is_on_stack(ptr, n) == -1)
73877 + goto report;
73878 + return;
73879 + }
73880 +
73881 + s = page->slab;
73882 + type = s->name;
73883 + if (!(s->flags & SLAB_USERCOPY))
73884 + goto report;
73885 +
73886 + offset = (ptr - page_address(page)) % s->size;
73887 + if (offset <= s->objsize && n <= s->objsize - offset)
73888 + return;
73889 +
73890 +report:
73891 + pax_report_usercopy(ptr, n, to, type);
73892 +#endif
73893 +
73894 +}
73895 +EXPORT_SYMBOL(check_object_size);
73896 +
73897 size_t ksize(const void *object)
73898 {
73899 struct page *page;
73900 @@ -3502,7 +3552,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73901 int node;
73902
73903 list_add(&s->list, &slab_caches);
73904 - s->refcount = -1;
73905 + atomic_set(&s->refcount, -1);
73906
73907 for_each_node_state(node, N_NORMAL_MEMORY) {
73908 struct kmem_cache_node *n = get_node(s, node);
73909 @@ -3619,17 +3669,17 @@ void __init kmem_cache_init(void)
73910
73911 /* Caches that are not of the two-to-the-power-of size */
73912 if (KMALLOC_MIN_SIZE <= 32) {
73913 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73914 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73915 caches++;
73916 }
73917
73918 if (KMALLOC_MIN_SIZE <= 64) {
73919 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73920 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73921 caches++;
73922 }
73923
73924 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73925 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73926 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73927 caches++;
73928 }
73929
73930 @@ -3697,7 +3747,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73931 /*
73932 * We may have set a slab to be unmergeable during bootstrap.
73933 */
73934 - if (s->refcount < 0)
73935 + if (atomic_read(&s->refcount) < 0)
73936 return 1;
73937
73938 return 0;
73939 @@ -3756,7 +3806,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73940 down_write(&slub_lock);
73941 s = find_mergeable(size, align, flags, name, ctor);
73942 if (s) {
73943 - s->refcount++;
73944 + atomic_inc(&s->refcount);
73945 /*
73946 * Adjust the object sizes so that we clear
73947 * the complete object on kzalloc.
73948 @@ -3765,7 +3815,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73949 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73950
73951 if (sysfs_slab_alias(s, name)) {
73952 - s->refcount--;
73953 + atomic_dec(&s->refcount);
73954 goto err;
73955 }
73956 up_write(&slub_lock);
73957 @@ -3893,7 +3943,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73958 }
73959 #endif
73960
73961 -#ifdef CONFIG_SYSFS
73962 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73963 static int count_inuse(struct page *page)
73964 {
73965 return page->inuse;
73966 @@ -4280,12 +4330,12 @@ static void resiliency_test(void)
73967 validate_slab_cache(kmalloc_caches[9]);
73968 }
73969 #else
73970 -#ifdef CONFIG_SYSFS
73971 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73972 static void resiliency_test(void) {};
73973 #endif
73974 #endif
73975
73976 -#ifdef CONFIG_SYSFS
73977 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73978 enum slab_stat_type {
73979 SL_ALL, /* All slabs */
73980 SL_PARTIAL, /* Only partially allocated slabs */
73981 @@ -4495,7 +4545,7 @@ SLAB_ATTR_RO(ctor);
73982
73983 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73984 {
73985 - return sprintf(buf, "%d\n", s->refcount - 1);
73986 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73987 }
73988 SLAB_ATTR_RO(aliases);
73989
73990 @@ -5025,6 +5075,7 @@ static char *create_unique_id(struct kmem_cache *s)
73991 return name;
73992 }
73993
73994 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73995 static int sysfs_slab_add(struct kmem_cache *s)
73996 {
73997 int err;
73998 @@ -5087,6 +5138,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73999 kobject_del(&s->kobj);
74000 kobject_put(&s->kobj);
74001 }
74002 +#endif
74003
74004 /*
74005 * Need to buffer aliases during bootup until sysfs becomes
74006 @@ -5100,6 +5152,7 @@ struct saved_alias {
74007
74008 static struct saved_alias *alias_list;
74009
74010 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74011 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74012 {
74013 struct saved_alias *al;
74014 @@ -5122,6 +5175,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74015 alias_list = al;
74016 return 0;
74017 }
74018 +#endif
74019
74020 static int __init slab_sysfs_init(void)
74021 {
74022 @@ -5257,7 +5311,13 @@ static const struct file_operations proc_slabinfo_operations = {
74023
74024 static int __init slab_proc_init(void)
74025 {
74026 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
74027 + mode_t gr_mode = S_IRUGO;
74028 +
74029 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74030 + gr_mode = S_IRUSR;
74031 +#endif
74032 +
74033 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
74034 return 0;
74035 }
74036 module_init(slab_proc_init);
74037 diff --git a/mm/swap.c b/mm/swap.c
74038 index 87627f1..8a9eb34 100644
74039 --- a/mm/swap.c
74040 +++ b/mm/swap.c
74041 @@ -31,6 +31,7 @@
74042 #include <linux/backing-dev.h>
74043 #include <linux/memcontrol.h>
74044 #include <linux/gfp.h>
74045 +#include <linux/hugetlb.h>
74046
74047 #include "internal.h"
74048
74049 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
74050
74051 __page_cache_release(page);
74052 dtor = get_compound_page_dtor(page);
74053 + if (!PageHuge(page))
74054 + BUG_ON(dtor != free_compound_page);
74055 (*dtor)(page);
74056 }
74057
74058 diff --git a/mm/swapfile.c b/mm/swapfile.c
74059 index 17bc224..1677059 100644
74060 --- a/mm/swapfile.c
74061 +++ b/mm/swapfile.c
74062 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
74063
74064 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74065 /* Activity counter to indicate that a swapon or swapoff has occurred */
74066 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
74067 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74068
74069 static inline unsigned char swap_count(unsigned char ent)
74070 {
74071 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
74072 }
74073 filp_close(swap_file, NULL);
74074 err = 0;
74075 - atomic_inc(&proc_poll_event);
74076 + atomic_inc_unchecked(&proc_poll_event);
74077 wake_up_interruptible(&proc_poll_wait);
74078
74079 out_dput:
74080 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
74081
74082 poll_wait(file, &proc_poll_wait, wait);
74083
74084 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
74085 - seq->poll_event = atomic_read(&proc_poll_event);
74086 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74087 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74088 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74089 }
74090
74091 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74092 return ret;
74093
74094 seq = file->private_data;
74095 - seq->poll_event = atomic_read(&proc_poll_event);
74096 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74097 return 0;
74098 }
74099
74100 @@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74101 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74102
74103 mutex_unlock(&swapon_mutex);
74104 - atomic_inc(&proc_poll_event);
74105 + atomic_inc_unchecked(&proc_poll_event);
74106 wake_up_interruptible(&proc_poll_wait);
74107
74108 if (S_ISREG(inode->i_mode))
74109 diff --git a/mm/util.c b/mm/util.c
74110 index 88ea1bd..0f1dfdb 100644
74111 --- a/mm/util.c
74112 +++ b/mm/util.c
74113 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
74114 * allocated buffer. Use this if you don't want to free the buffer immediately
74115 * like, for example, with RCU.
74116 */
74117 +#undef __krealloc
74118 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
74119 {
74120 void *ret;
74121 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
74122 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
74123 * %NULL pointer, the object pointed to is freed.
74124 */
74125 +#undef krealloc
74126 void *krealloc(const void *p, size_t new_size, gfp_t flags)
74127 {
74128 void *ret;
74129 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
74130 void arch_pick_mmap_layout(struct mm_struct *mm)
74131 {
74132 mm->mmap_base = TASK_UNMAPPED_BASE;
74133 +
74134 +#ifdef CONFIG_PAX_RANDMMAP
74135 + if (mm->pax_flags & MF_PAX_RANDMMAP)
74136 + mm->mmap_base += mm->delta_mmap;
74137 +#endif
74138 +
74139 mm->get_unmapped_area = arch_get_unmapped_area;
74140 mm->unmap_area = arch_unmap_area;
74141 }
74142 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74143 index 3a65d6f7..39d5e33 100644
74144 --- a/mm/vmalloc.c
74145 +++ b/mm/vmalloc.c
74146 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74147
74148 pte = pte_offset_kernel(pmd, addr);
74149 do {
74150 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74151 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74152 +
74153 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74154 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74155 + BUG_ON(!pte_exec(*pte));
74156 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74157 + continue;
74158 + }
74159 +#endif
74160 +
74161 + {
74162 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74163 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74164 + }
74165 } while (pte++, addr += PAGE_SIZE, addr != end);
74166 }
74167
74168 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74169 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74170 {
74171 pte_t *pte;
74172 + int ret = -ENOMEM;
74173
74174 /*
74175 * nr is a running index into the array which helps higher level
74176 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74177 pte = pte_alloc_kernel(pmd, addr);
74178 if (!pte)
74179 return -ENOMEM;
74180 +
74181 + pax_open_kernel();
74182 do {
74183 struct page *page = pages[*nr];
74184
74185 - if (WARN_ON(!pte_none(*pte)))
74186 - return -EBUSY;
74187 - if (WARN_ON(!page))
74188 - return -ENOMEM;
74189 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74190 + if (pgprot_val(prot) & _PAGE_NX)
74191 +#endif
74192 +
74193 + if (WARN_ON(!pte_none(*pte))) {
74194 + ret = -EBUSY;
74195 + goto out;
74196 + }
74197 + if (WARN_ON(!page)) {
74198 + ret = -ENOMEM;
74199 + goto out;
74200 + }
74201 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74202 (*nr)++;
74203 } while (pte++, addr += PAGE_SIZE, addr != end);
74204 - return 0;
74205 + ret = 0;
74206 +out:
74207 + pax_close_kernel();
74208 + return ret;
74209 }
74210
74211 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74212 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74213 * and fall back on vmalloc() if that fails. Others
74214 * just put it in the vmalloc space.
74215 */
74216 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74217 +#ifdef CONFIG_MODULES
74218 +#ifdef MODULES_VADDR
74219 unsigned long addr = (unsigned long)x;
74220 if (addr >= MODULES_VADDR && addr < MODULES_END)
74221 return 1;
74222 #endif
74223 +
74224 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74225 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74226 + return 1;
74227 +#endif
74228 +
74229 +#endif
74230 +
74231 return is_vmalloc_addr(x);
74232 }
74233
74234 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74235
74236 if (!pgd_none(*pgd)) {
74237 pud_t *pud = pud_offset(pgd, addr);
74238 +#ifdef CONFIG_X86
74239 + if (!pud_large(*pud))
74240 +#endif
74241 if (!pud_none(*pud)) {
74242 pmd_t *pmd = pmd_offset(pud, addr);
74243 +#ifdef CONFIG_X86
74244 + if (!pmd_large(*pmd))
74245 +#endif
74246 if (!pmd_none(*pmd)) {
74247 pte_t *ptep, pte;
74248
74249 @@ -1290,10 +1330,20 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74250 unsigned long align, unsigned long flags, unsigned long start,
74251 unsigned long end, int node, gfp_t gfp_mask, void *caller)
74252 {
74253 - static struct vmap_area *va;
74254 + struct vmap_area *va;
74255 struct vm_struct *area;
74256
74257 BUG_ON(in_interrupt());
74258 +
74259 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74260 + if (flags & VM_KERNEXEC) {
74261 + if (start != VMALLOC_START || end != VMALLOC_END)
74262 + return NULL;
74263 + start = (unsigned long)MODULES_EXEC_VADDR;
74264 + end = (unsigned long)MODULES_EXEC_END;
74265 + }
74266 +#endif
74267 +
74268 if (flags & VM_IOREMAP) {
74269 int bit = fls(size);
74270
74271 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
74272 if (count > totalram_pages)
74273 return NULL;
74274
74275 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74276 + if (!(pgprot_val(prot) & _PAGE_NX))
74277 + flags |= VM_KERNEXEC;
74278 +#endif
74279 +
74280 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74281 __builtin_return_address(0));
74282 if (!area)
74283 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74284 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74285 return NULL;
74286
74287 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74288 + if (!(pgprot_val(prot) & _PAGE_NX))
74289 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74290 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74291 + else
74292 +#endif
74293 +
74294 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74295 start, end, node, gfp_mask, caller);
74296
74297 @@ -1674,6 +1736,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
74298 gfp_mask, prot, node, caller);
74299 }
74300
74301 +#undef __vmalloc
74302 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
74303 {
74304 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
74305 @@ -1697,6 +1760,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
74306 * For tight control over page level allocator and protection flags
74307 * use __vmalloc() instead.
74308 */
74309 +#undef vmalloc
74310 void *vmalloc(unsigned long size)
74311 {
74312 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
74313 @@ -1713,6 +1777,7 @@ EXPORT_SYMBOL(vmalloc);
74314 * For tight control over page level allocator and protection flags
74315 * use __vmalloc() instead.
74316 */
74317 +#undef vzalloc
74318 void *vzalloc(unsigned long size)
74319 {
74320 return __vmalloc_node_flags(size, -1,
74321 @@ -1727,6 +1792,7 @@ EXPORT_SYMBOL(vzalloc);
74322 * The resulting memory area is zeroed so it can be mapped to userspace
74323 * without leaking data.
74324 */
74325 +#undef vmalloc_user
74326 void *vmalloc_user(unsigned long size)
74327 {
74328 struct vm_struct *area;
74329 @@ -1754,6 +1820,7 @@ EXPORT_SYMBOL(vmalloc_user);
74330 * For tight control over page level allocator and protection flags
74331 * use __vmalloc() instead.
74332 */
74333 +#undef vmalloc_node
74334 void *vmalloc_node(unsigned long size, int node)
74335 {
74336 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
74337 @@ -1773,6 +1840,7 @@ EXPORT_SYMBOL(vmalloc_node);
74338 * For tight control over page level allocator and protection flags
74339 * use __vmalloc_node() instead.
74340 */
74341 +#undef vzalloc_node
74342 void *vzalloc_node(unsigned long size, int node)
74343 {
74344 return __vmalloc_node_flags(size, node,
74345 @@ -1795,10 +1863,10 @@ EXPORT_SYMBOL(vzalloc_node);
74346 * For tight control over page level allocator and protection flags
74347 * use __vmalloc() instead.
74348 */
74349 -
74350 +#undef vmalloc_exec
74351 void *vmalloc_exec(unsigned long size)
74352 {
74353 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74354 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74355 -1, __builtin_return_address(0));
74356 }
74357
74358 @@ -1817,6 +1885,7 @@ void *vmalloc_exec(unsigned long size)
74359 * Allocate enough 32bit PA addressable pages to cover @size from the
74360 * page level allocator and map them into contiguous kernel virtual space.
74361 */
74362 +#undef vmalloc_32
74363 void *vmalloc_32(unsigned long size)
74364 {
74365 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
74366 @@ -1831,6 +1900,7 @@ EXPORT_SYMBOL(vmalloc_32);
74367 * The resulting memory area is 32bit addressable and zeroed so it can be
74368 * mapped to userspace without leaking data.
74369 */
74370 +#undef vmalloc_32_user
74371 void *vmalloc_32_user(unsigned long size)
74372 {
74373 struct vm_struct *area;
74374 @@ -2093,6 +2163,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74375 unsigned long uaddr = vma->vm_start;
74376 unsigned long usize = vma->vm_end - vma->vm_start;
74377
74378 + BUG_ON(vma->vm_mirror);
74379 +
74380 if ((PAGE_SIZE-1) & (unsigned long)addr)
74381 return -EINVAL;
74382
74383 diff --git a/mm/vmstat.c b/mm/vmstat.c
74384 index d52b13d..381d1ac 100644
74385 --- a/mm/vmstat.c
74386 +++ b/mm/vmstat.c
74387 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74388 *
74389 * vm_stat contains the global counters
74390 */
74391 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
74392 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
74393 EXPORT_SYMBOL(vm_stat);
74394
74395 #ifdef CONFIG_SMP
74396 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74397 v = p->vm_stat_diff[i];
74398 p->vm_stat_diff[i] = 0;
74399 local_irq_restore(flags);
74400 - atomic_long_add(v, &zone->vm_stat[i]);
74401 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74402 global_diff[i] += v;
74403 #ifdef CONFIG_NUMA
74404 /* 3 seconds idle till flush */
74405 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74406
74407 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74408 if (global_diff[i])
74409 - atomic_long_add(global_diff[i], &vm_stat[i]);
74410 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74411 }
74412
74413 #endif
74414 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
74415 start_cpu_timer(cpu);
74416 #endif
74417 #ifdef CONFIG_PROC_FS
74418 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74419 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74420 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74421 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74422 + {
74423 + mode_t gr_mode = S_IRUGO;
74424 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74425 + gr_mode = S_IRUSR;
74426 +#endif
74427 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74428 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74429 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74430 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74431 +#else
74432 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74433 +#endif
74434 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74435 + }
74436 #endif
74437 return 0;
74438 }
74439 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74440 index 8970ba1..e3361fe 100644
74441 --- a/net/8021q/vlan.c
74442 +++ b/net/8021q/vlan.c
74443 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74444 err = -EPERM;
74445 if (!capable(CAP_NET_ADMIN))
74446 break;
74447 - if ((args.u.name_type >= 0) &&
74448 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74449 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74450 struct vlan_net *vn;
74451
74452 vn = net_generic(net, vlan_net_id);
74453 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74454 index fdfdb57..38d368c 100644
74455 --- a/net/9p/trans_fd.c
74456 +++ b/net/9p/trans_fd.c
74457 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74458 oldfs = get_fs();
74459 set_fs(get_ds());
74460 /* The cast to a user pointer is valid due to the set_fs() */
74461 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74462 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74463 set_fs(oldfs);
74464
74465 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74466 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
74467 index e317583..3c8aeaf 100644
74468 --- a/net/9p/trans_virtio.c
74469 +++ b/net/9p/trans_virtio.c
74470 @@ -327,7 +327,7 @@ req_retry_pinned:
74471 } else {
74472 char *pbuf;
74473 if (req->tc->pubuf)
74474 - pbuf = (__force char *) req->tc->pubuf;
74475 + pbuf = (char __force_kernel *) req->tc->pubuf;
74476 else
74477 pbuf = req->tc->pkbuf;
74478 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
74479 @@ -357,7 +357,7 @@ req_retry_pinned:
74480 } else {
74481 char *pbuf;
74482 if (req->tc->pubuf)
74483 - pbuf = (__force char *) req->tc->pubuf;
74484 + pbuf = (char __force_kernel *) req->tc->pubuf;
74485 else
74486 pbuf = req->tc->pkbuf;
74487
74488 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74489 index f41f026..fe76ea8 100644
74490 --- a/net/atm/atm_misc.c
74491 +++ b/net/atm/atm_misc.c
74492 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74493 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74494 return 1;
74495 atm_return(vcc, truesize);
74496 - atomic_inc(&vcc->stats->rx_drop);
74497 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74498 return 0;
74499 }
74500 EXPORT_SYMBOL(atm_charge);
74501 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74502 }
74503 }
74504 atm_return(vcc, guess);
74505 - atomic_inc(&vcc->stats->rx_drop);
74506 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74507 return NULL;
74508 }
74509 EXPORT_SYMBOL(atm_alloc_charge);
74510 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74511
74512 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74513 {
74514 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74515 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74516 __SONET_ITEMS
74517 #undef __HANDLE_ITEM
74518 }
74519 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74520
74521 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74522 {
74523 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74524 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74525 __SONET_ITEMS
74526 #undef __HANDLE_ITEM
74527 }
74528 diff --git a/net/atm/lec.h b/net/atm/lec.h
74529 index dfc0719..47c5322 100644
74530 --- a/net/atm/lec.h
74531 +++ b/net/atm/lec.h
74532 @@ -48,7 +48,7 @@ struct lane2_ops {
74533 const u8 *tlvs, u32 sizeoftlvs);
74534 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74535 const u8 *tlvs, u32 sizeoftlvs);
74536 -};
74537 +} __no_const;
74538
74539 /*
74540 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74541 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74542 index 0919a88..a23d54e 100644
74543 --- a/net/atm/mpc.h
74544 +++ b/net/atm/mpc.h
74545 @@ -33,7 +33,7 @@ struct mpoa_client {
74546 struct mpc_parameters parameters; /* parameters for this client */
74547
74548 const struct net_device_ops *old_ops;
74549 - struct net_device_ops new_ops;
74550 + net_device_ops_no_const new_ops;
74551 };
74552
74553
74554 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
74555 index d1b2d9a..7cc2219 100644
74556 --- a/net/atm/mpoa_caches.c
74557 +++ b/net/atm/mpoa_caches.c
74558 @@ -255,6 +255,8 @@ static void check_resolving_entries(struct mpoa_client *client)
74559 struct timeval now;
74560 struct k_message msg;
74561
74562 + pax_track_stack();
74563 +
74564 do_gettimeofday(&now);
74565
74566 read_lock_bh(&client->ingress_lock);
74567 diff --git a/net/atm/proc.c b/net/atm/proc.c
74568 index 0d020de..011c7bb 100644
74569 --- a/net/atm/proc.c
74570 +++ b/net/atm/proc.c
74571 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74572 const struct k_atm_aal_stats *stats)
74573 {
74574 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74575 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74576 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74577 - atomic_read(&stats->rx_drop));
74578 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74579 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74580 + atomic_read_unchecked(&stats->rx_drop));
74581 }
74582
74583 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74584 diff --git a/net/atm/resources.c b/net/atm/resources.c
74585 index 23f45ce..c748f1a 100644
74586 --- a/net/atm/resources.c
74587 +++ b/net/atm/resources.c
74588 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74589 static void copy_aal_stats(struct k_atm_aal_stats *from,
74590 struct atm_aal_stats *to)
74591 {
74592 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74593 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74594 __AAL_STAT_ITEMS
74595 #undef __HANDLE_ITEM
74596 }
74597 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74598 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74599 struct atm_aal_stats *to)
74600 {
74601 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74602 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74603 __AAL_STAT_ITEMS
74604 #undef __HANDLE_ITEM
74605 }
74606 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74607 index db7aacf..991e539 100644
74608 --- a/net/batman-adv/hard-interface.c
74609 +++ b/net/batman-adv/hard-interface.c
74610 @@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74611 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74612 dev_add_pack(&hard_iface->batman_adv_ptype);
74613
74614 - atomic_set(&hard_iface->seqno, 1);
74615 - atomic_set(&hard_iface->frag_seqno, 1);
74616 + atomic_set_unchecked(&hard_iface->seqno, 1);
74617 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74618 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74619 hard_iface->net_dev->name);
74620
74621 diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
74622 index 0f32c81..82d1895 100644
74623 --- a/net/batman-adv/routing.c
74624 +++ b/net/batman-adv/routing.c
74625 @@ -656,7 +656,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
74626 return;
74627
74628 /* could be changed by schedule_own_packet() */
74629 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
74630 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74631
74632 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
74633
74634 diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
74635 index 58d1447..2a66c8c 100644
74636 --- a/net/batman-adv/send.c
74637 +++ b/net/batman-adv/send.c
74638 @@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
74639
74640 /* change sequence number to network order */
74641 batman_packet->seqno =
74642 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
74643 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74644
74645 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
74646 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
74647 @@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
74648 else
74649 batman_packet->gw_flags = NO_FLAGS;
74650
74651 - atomic_inc(&hard_iface->seqno);
74652 + atomic_inc_unchecked(&hard_iface->seqno);
74653
74654 slide_own_bcast_window(hard_iface);
74655 send_time = own_send_time(bat_priv);
74656 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74657 index 05dd351..2ecd19b 100644
74658 --- a/net/batman-adv/soft-interface.c
74659 +++ b/net/batman-adv/soft-interface.c
74660 @@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74661
74662 /* set broadcast sequence number */
74663 bcast_packet->seqno =
74664 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74665 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74666
74667 add_bcast_packet_to_list(bat_priv, skb, 1);
74668
74669 @@ -824,7 +824,7 @@ struct net_device *softif_create(const char *name)
74670 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74671
74672 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74673 - atomic_set(&bat_priv->bcast_seqno, 1);
74674 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74675 atomic_set(&bat_priv->ttvn, 0);
74676 atomic_set(&bat_priv->tt_local_changes, 0);
74677 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74678 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74679 index 51a0db7..b8a62be 100644
74680 --- a/net/batman-adv/types.h
74681 +++ b/net/batman-adv/types.h
74682 @@ -38,8 +38,8 @@ struct hard_iface {
74683 int16_t if_num;
74684 char if_status;
74685 struct net_device *net_dev;
74686 - atomic_t seqno;
74687 - atomic_t frag_seqno;
74688 + atomic_unchecked_t seqno;
74689 + atomic_unchecked_t frag_seqno;
74690 unsigned char *packet_buff;
74691 int packet_len;
74692 struct kobject *hardif_obj;
74693 @@ -153,7 +153,7 @@ struct bat_priv {
74694 atomic_t orig_interval; /* uint */
74695 atomic_t hop_penalty; /* uint */
74696 atomic_t log_level; /* uint */
74697 - atomic_t bcast_seqno;
74698 + atomic_unchecked_t bcast_seqno;
74699 atomic_t bcast_queue_left;
74700 atomic_t batman_queue_left;
74701 atomic_t ttvn; /* tranlation table version number */
74702 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74703 index 32b125f..f1447e0 100644
74704 --- a/net/batman-adv/unicast.c
74705 +++ b/net/batman-adv/unicast.c
74706 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74707 frag1->flags = UNI_FRAG_HEAD | large_tail;
74708 frag2->flags = large_tail;
74709
74710 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74711 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74712 frag1->seqno = htons(seqno - 1);
74713 frag2->seqno = htons(seqno);
74714
74715 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74716 index ea7f031..0615edc 100644
74717 --- a/net/bluetooth/hci_conn.c
74718 +++ b/net/bluetooth/hci_conn.c
74719 @@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
74720 cp.handle = cpu_to_le16(conn->handle);
74721 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74722 cp.ediv = ediv;
74723 - memcpy(cp.rand, rand, sizeof(rand));
74724 + memcpy(cp.rand, rand, sizeof(cp.rand));
74725
74726 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
74727 }
74728 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74729 memset(&cp, 0, sizeof(cp));
74730
74731 cp.handle = cpu_to_le16(conn->handle);
74732 - memcpy(cp.ltk, ltk, sizeof(ltk));
74733 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74734
74735 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74736 }
74737 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
74738 index b3bdb48..7ad90ac 100644
74739 --- a/net/bluetooth/l2cap_core.c
74740 +++ b/net/bluetooth/l2cap_core.c
74741 @@ -2145,7 +2145,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74742 void *ptr = req->data;
74743 int type, olen;
74744 unsigned long val;
74745 - struct l2cap_conf_rfc rfc;
74746 + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
74747
74748 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
74749
74750 @@ -2169,8 +2169,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74751 break;
74752
74753 case L2CAP_CONF_RFC:
74754 - if (olen == sizeof(rfc))
74755 - memcpy(&rfc, (void *)val, olen);
74756 + if (olen != sizeof(rfc))
74757 + break;
74758 +
74759 + memcpy(&rfc, (void *)val, olen);
74760
74761 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74762 rfc.mode != chan->mode)
74763 @@ -2258,12 +2260,24 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
74764
74765 switch (type) {
74766 case L2CAP_CONF_RFC:
74767 - if (olen == sizeof(rfc))
74768 - memcpy(&rfc, (void *)val, olen);
74769 + if (olen != sizeof(rfc))
74770 + break;
74771 +
74772 + memcpy(&rfc, (void *)val, olen);
74773 goto done;
74774 }
74775 }
74776
74777 + /* Use sane default values in case a misbehaving remote device
74778 + * did not send an RFC option.
74779 + */
74780 + rfc.mode = chan->mode;
74781 + rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
74782 + rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
74783 + rfc.max_pdu_size = cpu_to_le16(chan->imtu);
74784 +
74785 + BT_ERR("Expected RFC option was not found, using defaults");
74786 +
74787 done:
74788 switch (rfc.mode) {
74789 case L2CAP_MODE_ERTM:
74790 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
74791 index e79ff75..215b57d 100644
74792 --- a/net/bridge/br_multicast.c
74793 +++ b/net/bridge/br_multicast.c
74794 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
74795 nexthdr = ip6h->nexthdr;
74796 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
74797
74798 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
74799 + if (nexthdr != IPPROTO_ICMPV6)
74800 return 0;
74801
74802 /* Okay, we found ICMPv6 header */
74803 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74804 index 5864cc4..94cab18 100644
74805 --- a/net/bridge/netfilter/ebtables.c
74806 +++ b/net/bridge/netfilter/ebtables.c
74807 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74808 tmp.valid_hooks = t->table->valid_hooks;
74809 }
74810 mutex_unlock(&ebt_mutex);
74811 - if (copy_to_user(user, &tmp, *len) != 0){
74812 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74813 BUGPRINT("c2u Didn't work\n");
74814 ret = -EFAULT;
74815 break;
74816 @@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_user(struct ebt_table *t,
74817 int ret;
74818 void __user *pos;
74819
74820 + pax_track_stack();
74821 +
74822 memset(&tinfo, 0, sizeof(tinfo));
74823
74824 if (cmd == EBT_SO_GET_ENTRIES) {
74825 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
74826 index a986280..13444a1 100644
74827 --- a/net/caif/caif_socket.c
74828 +++ b/net/caif/caif_socket.c
74829 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
74830 #ifdef CONFIG_DEBUG_FS
74831 struct debug_fs_counter {
74832 atomic_t caif_nr_socks;
74833 - atomic_t caif_sock_create;
74834 - atomic_t num_connect_req;
74835 - atomic_t num_connect_resp;
74836 - atomic_t num_connect_fail_resp;
74837 - atomic_t num_disconnect;
74838 - atomic_t num_remote_shutdown_ind;
74839 - atomic_t num_tx_flow_off_ind;
74840 - atomic_t num_tx_flow_on_ind;
74841 - atomic_t num_rx_flow_off;
74842 - atomic_t num_rx_flow_on;
74843 + atomic_unchecked_t caif_sock_create;
74844 + atomic_unchecked_t num_connect_req;
74845 + atomic_unchecked_t num_connect_resp;
74846 + atomic_unchecked_t num_connect_fail_resp;
74847 + atomic_unchecked_t num_disconnect;
74848 + atomic_unchecked_t num_remote_shutdown_ind;
74849 + atomic_unchecked_t num_tx_flow_off_ind;
74850 + atomic_unchecked_t num_tx_flow_on_ind;
74851 + atomic_unchecked_t num_rx_flow_off;
74852 + atomic_unchecked_t num_rx_flow_on;
74853 };
74854 static struct debug_fs_counter cnt;
74855 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74856 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74857 #define dbfs_atomic_dec(v) atomic_dec_return(v)
74858 #else
74859 #define dbfs_atomic_inc(v) 0
74860 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74861 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74862 sk_rcvbuf_lowwater(cf_sk));
74863 set_rx_flow_off(cf_sk);
74864 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74865 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74866 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74867 }
74868
74869 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74870 set_rx_flow_off(cf_sk);
74871 if (net_ratelimit())
74872 pr_debug("sending flow OFF due to rmem_schedule\n");
74873 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74874 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74875 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74876 }
74877 skb->dev = NULL;
74878 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
74879 switch (flow) {
74880 case CAIF_CTRLCMD_FLOW_ON_IND:
74881 /* OK from modem to start sending again */
74882 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74883 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74884 set_tx_flow_on(cf_sk);
74885 cf_sk->sk.sk_state_change(&cf_sk->sk);
74886 break;
74887
74888 case CAIF_CTRLCMD_FLOW_OFF_IND:
74889 /* Modem asks us to shut up */
74890 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74891 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74892 set_tx_flow_off(cf_sk);
74893 cf_sk->sk.sk_state_change(&cf_sk->sk);
74894 break;
74895 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74896 /* We're now connected */
74897 caif_client_register_refcnt(&cf_sk->layer,
74898 cfsk_hold, cfsk_put);
74899 - dbfs_atomic_inc(&cnt.num_connect_resp);
74900 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74901 cf_sk->sk.sk_state = CAIF_CONNECTED;
74902 set_tx_flow_on(cf_sk);
74903 cf_sk->sk.sk_state_change(&cf_sk->sk);
74904 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74905
74906 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74907 /* Connect request failed */
74908 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74909 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74910 cf_sk->sk.sk_err = ECONNREFUSED;
74911 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74912 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74913 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74914
74915 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74916 /* Modem has closed this connection, or device is down. */
74917 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74918 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74919 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74920 cf_sk->sk.sk_err = ECONNRESET;
74921 set_rx_flow_on(cf_sk);
74922 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
74923 return;
74924
74925 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74926 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
74927 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74928 set_rx_flow_on(cf_sk);
74929 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74930 }
74931 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
74932 /*ifindex = id of the interface.*/
74933 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74934
74935 - dbfs_atomic_inc(&cnt.num_connect_req);
74936 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74937 cf_sk->layer.receive = caif_sktrecv_cb;
74938
74939 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
74940 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
74941 spin_unlock_bh(&sk->sk_receive_queue.lock);
74942 sock->sk = NULL;
74943
74944 - dbfs_atomic_inc(&cnt.num_disconnect);
74945 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74946
74947 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
74948 if (cf_sk->debugfs_socket_dir != NULL)
74949 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
74950 cf_sk->conn_req.protocol = protocol;
74951 /* Increase the number of sockets created. */
74952 dbfs_atomic_inc(&cnt.caif_nr_socks);
74953 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
74954 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74955 #ifdef CONFIG_DEBUG_FS
74956 if (!IS_ERR(debugfsdir)) {
74957
74958 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74959 index e22671b..6598ea0 100644
74960 --- a/net/caif/cfctrl.c
74961 +++ b/net/caif/cfctrl.c
74962 @@ -9,6 +9,7 @@
74963 #include <linux/stddef.h>
74964 #include <linux/spinlock.h>
74965 #include <linux/slab.h>
74966 +#include <linux/sched.h>
74967 #include <net/caif/caif_layer.h>
74968 #include <net/caif/cfpkt.h>
74969 #include <net/caif/cfctrl.h>
74970 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
74971 dev_info.id = 0xff;
74972 memset(this, 0, sizeof(*this));
74973 cfsrvl_init(&this->serv, 0, &dev_info, false);
74974 - atomic_set(&this->req_seq_no, 1);
74975 - atomic_set(&this->rsp_seq_no, 1);
74976 + atomic_set_unchecked(&this->req_seq_no, 1);
74977 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74978 this->serv.layer.receive = cfctrl_recv;
74979 sprintf(this->serv.layer.name, "ctrl");
74980 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74981 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74982 struct cfctrl_request_info *req)
74983 {
74984 spin_lock_bh(&ctrl->info_list_lock);
74985 - atomic_inc(&ctrl->req_seq_no);
74986 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74987 + atomic_inc_unchecked(&ctrl->req_seq_no);
74988 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74989 list_add_tail(&req->list, &ctrl->list);
74990 spin_unlock_bh(&ctrl->info_list_lock);
74991 }
74992 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74993 if (p != first)
74994 pr_warn("Requests are not received in order\n");
74995
74996 - atomic_set(&ctrl->rsp_seq_no,
74997 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74998 p->sequence_no);
74999 list_del(&p->list);
75000 goto out;
75001 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
75002 struct cfctrl *cfctrl = container_obj(layer);
75003 struct cfctrl_request_info rsp, *req;
75004
75005 + pax_track_stack();
75006
75007 cfpkt_extr_head(pkt, &cmdrsp, 1);
75008 cmd = cmdrsp & CFCTRL_CMD_MASK;
75009 diff --git a/net/compat.c b/net/compat.c
75010 index c578d93..257fab7 100644
75011 --- a/net/compat.c
75012 +++ b/net/compat.c
75013 @@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
75014 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
75015 __get_user(kmsg->msg_flags, &umsg->msg_flags))
75016 return -EFAULT;
75017 - kmsg->msg_name = compat_ptr(tmp1);
75018 - kmsg->msg_iov = compat_ptr(tmp2);
75019 - kmsg->msg_control = compat_ptr(tmp3);
75020 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
75021 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
75022 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
75023 return 0;
75024 }
75025
75026 @@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75027
75028 if (kern_msg->msg_namelen) {
75029 if (mode == VERIFY_READ) {
75030 - int err = move_addr_to_kernel(kern_msg->msg_name,
75031 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
75032 kern_msg->msg_namelen,
75033 kern_address);
75034 if (err < 0)
75035 @@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75036 kern_msg->msg_name = NULL;
75037
75038 tot_len = iov_from_user_compat_to_kern(kern_iov,
75039 - (struct compat_iovec __user *)kern_msg->msg_iov,
75040 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
75041 kern_msg->msg_iovlen);
75042 if (tot_len >= 0)
75043 kern_msg->msg_iov = kern_iov;
75044 @@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75045
75046 #define CMSG_COMPAT_FIRSTHDR(msg) \
75047 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
75048 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
75049 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
75050 (struct compat_cmsghdr __user *)NULL)
75051
75052 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
75053 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
75054 (ucmlen) <= (unsigned long) \
75055 ((mhdr)->msg_controllen - \
75056 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
75057 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
75058
75059 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
75060 struct compat_cmsghdr __user *cmsg, int cmsg_len)
75061 {
75062 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
75063 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
75064 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
75065 msg->msg_controllen)
75066 return NULL;
75067 return (struct compat_cmsghdr __user *)ptr;
75068 @@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75069 {
75070 struct compat_timeval ctv;
75071 struct compat_timespec cts[3];
75072 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75073 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75074 struct compat_cmsghdr cmhdr;
75075 int cmlen;
75076
75077 @@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75078
75079 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
75080 {
75081 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75082 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75083 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
75084 int fdnum = scm->fp->count;
75085 struct file **fp = scm->fp->fp;
75086 @@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
75087 return -EFAULT;
75088 old_fs = get_fs();
75089 set_fs(KERNEL_DS);
75090 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
75091 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
75092 set_fs(old_fs);
75093
75094 return err;
75095 @@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
75096 len = sizeof(ktime);
75097 old_fs = get_fs();
75098 set_fs(KERNEL_DS);
75099 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
75100 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
75101 set_fs(old_fs);
75102
75103 if (!err) {
75104 @@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75105 case MCAST_JOIN_GROUP:
75106 case MCAST_LEAVE_GROUP:
75107 {
75108 - struct compat_group_req __user *gr32 = (void *)optval;
75109 + struct compat_group_req __user *gr32 = (void __user *)optval;
75110 struct group_req __user *kgr =
75111 compat_alloc_user_space(sizeof(struct group_req));
75112 u32 interface;
75113 @@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75114 case MCAST_BLOCK_SOURCE:
75115 case MCAST_UNBLOCK_SOURCE:
75116 {
75117 - struct compat_group_source_req __user *gsr32 = (void *)optval;
75118 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
75119 struct group_source_req __user *kgsr = compat_alloc_user_space(
75120 sizeof(struct group_source_req));
75121 u32 interface;
75122 @@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75123 }
75124 case MCAST_MSFILTER:
75125 {
75126 - struct compat_group_filter __user *gf32 = (void *)optval;
75127 + struct compat_group_filter __user *gf32 = (void __user *)optval;
75128 struct group_filter __user *kgf;
75129 u32 interface, fmode, numsrc;
75130
75131 @@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
75132 char __user *optval, int __user *optlen,
75133 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
75134 {
75135 - struct compat_group_filter __user *gf32 = (void *)optval;
75136 + struct compat_group_filter __user *gf32 = (void __user *)optval;
75137 struct group_filter __user *kgf;
75138 int __user *koptlen;
75139 u32 interface, fmode, numsrc;
75140 diff --git a/net/core/datagram.c b/net/core/datagram.c
75141 index 18ac112..fe95ed9 100644
75142 --- a/net/core/datagram.c
75143 +++ b/net/core/datagram.c
75144 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
75145 }
75146
75147 kfree_skb(skb);
75148 - atomic_inc(&sk->sk_drops);
75149 + atomic_inc_unchecked(&sk->sk_drops);
75150 sk_mem_reclaim_partial(sk);
75151
75152 return err;
75153 diff --git a/net/core/dev.c b/net/core/dev.c
75154 index ae5cf2d..2c950a1 100644
75155 --- a/net/core/dev.c
75156 +++ b/net/core/dev.c
75157 @@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const char *name)
75158 if (no_module && capable(CAP_NET_ADMIN))
75159 no_module = request_module("netdev-%s", name);
75160 if (no_module && capable(CAP_SYS_MODULE)) {
75161 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75162 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
75163 +#else
75164 if (!request_module("%s", name))
75165 pr_err("Loading kernel module for a network device "
75166 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
75167 "instead\n", name);
75168 +#endif
75169 }
75170 }
75171 EXPORT_SYMBOL(dev_load);
75172 @@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
75173
75174 struct dev_gso_cb {
75175 void (*destructor)(struct sk_buff *skb);
75176 -};
75177 +} __no_const;
75178
75179 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75180
75181 @@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
75182 }
75183 EXPORT_SYMBOL(netif_rx_ni);
75184
75185 -static void net_tx_action(struct softirq_action *h)
75186 +static void net_tx_action(void)
75187 {
75188 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75189
75190 @@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *napi)
75191 }
75192 EXPORT_SYMBOL(netif_napi_del);
75193
75194 -static void net_rx_action(struct softirq_action *h)
75195 +static void net_rx_action(void)
75196 {
75197 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75198 unsigned long time_limit = jiffies + 2;
75199 diff --git a/net/core/flow.c b/net/core/flow.c
75200 index d6968e5..1690d9d 100644
75201 --- a/net/core/flow.c
75202 +++ b/net/core/flow.c
75203 @@ -61,7 +61,7 @@ struct flow_cache {
75204 struct timer_list rnd_timer;
75205 };
75206
75207 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
75208 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75209 EXPORT_SYMBOL(flow_cache_genid);
75210 static struct flow_cache flow_cache_global;
75211 static struct kmem_cache *flow_cachep __read_mostly;
75212 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75213
75214 static int flow_entry_valid(struct flow_cache_entry *fle)
75215 {
75216 - if (atomic_read(&flow_cache_genid) != fle->genid)
75217 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75218 return 0;
75219 if (fle->object && !fle->object->ops->check(fle->object))
75220 return 0;
75221 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75222 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75223 fcp->hash_count++;
75224 }
75225 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75226 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75227 flo = fle->object;
75228 if (!flo)
75229 goto ret_object;
75230 @@ -280,7 +280,7 @@ nocache:
75231 }
75232 flo = resolver(net, key, family, dir, flo, ctx);
75233 if (fle) {
75234 - fle->genid = atomic_read(&flow_cache_genid);
75235 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
75236 if (!IS_ERR(flo))
75237 fle->object = flo;
75238 else
75239 diff --git a/net/core/iovec.c b/net/core/iovec.c
75240 index c40f27e..7f49254 100644
75241 --- a/net/core/iovec.c
75242 +++ b/net/core/iovec.c
75243 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75244 if (m->msg_namelen) {
75245 if (mode == VERIFY_READ) {
75246 void __user *namep;
75247 - namep = (void __user __force *) m->msg_name;
75248 + namep = (void __force_user *) m->msg_name;
75249 err = move_addr_to_kernel(namep, m->msg_namelen,
75250 address);
75251 if (err < 0)
75252 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75253 }
75254
75255 size = m->msg_iovlen * sizeof(struct iovec);
75256 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75257 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75258 return -EFAULT;
75259
75260 m->msg_iov = iov;
75261 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
75262 index 1683e5d..f3621f6 100644
75263 --- a/net/core/net-sysfs.c
75264 +++ b/net/core/net-sysfs.c
75265 @@ -664,11 +664,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
75266 if (count) {
75267 int i;
75268
75269 - if (count > 1<<30) {
75270 - /* Enforce a limit to prevent overflow */
75271 + if (count > INT_MAX)
75272 return -EINVAL;
75273 - }
75274 count = roundup_pow_of_two(count);
75275 + if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
75276 + / sizeof(struct rps_dev_flow)) {
75277 + /* Enforce a limit to prevent overflow */
75278 + return -EINVAL;
75279 + }
75280 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
75281 if (!table)
75282 return -ENOMEM;
75283 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75284 index 99d9e95..209bae2 100644
75285 --- a/net/core/rtnetlink.c
75286 +++ b/net/core/rtnetlink.c
75287 @@ -57,7 +57,7 @@ struct rtnl_link {
75288 rtnl_doit_func doit;
75289 rtnl_dumpit_func dumpit;
75290 rtnl_calcit_func calcit;
75291 -};
75292 +} __no_const;
75293
75294 static DEFINE_MUTEX(rtnl_mutex);
75295 static u16 min_ifinfo_dump_size;
75296 diff --git a/net/core/scm.c b/net/core/scm.c
75297 index 811b53f..5d6c343 100644
75298 --- a/net/core/scm.c
75299 +++ b/net/core/scm.c
75300 @@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
75301 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75302 {
75303 struct cmsghdr __user *cm
75304 - = (__force struct cmsghdr __user *)msg->msg_control;
75305 + = (struct cmsghdr __force_user *)msg->msg_control;
75306 struct cmsghdr cmhdr;
75307 int cmlen = CMSG_LEN(len);
75308 int err;
75309 @@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75310 err = -EFAULT;
75311 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75312 goto out;
75313 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75314 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75315 goto out;
75316 cmlen = CMSG_SPACE(len);
75317 if (msg->msg_controllen < cmlen)
75318 @@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
75319 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75320 {
75321 struct cmsghdr __user *cm
75322 - = (__force struct cmsghdr __user*)msg->msg_control;
75323 + = (struct cmsghdr __force_user *)msg->msg_control;
75324
75325 int fdmax = 0;
75326 int fdnum = scm->fp->count;
75327 @@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75328 if (fdnum < fdmax)
75329 fdmax = fdnum;
75330
75331 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75332 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75333 i++, cmfptr++)
75334 {
75335 int new_fd;
75336 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
75337 index 387703f..035abcf 100644
75338 --- a/net/core/skbuff.c
75339 +++ b/net/core/skbuff.c
75340 @@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
75341 struct sock *sk = skb->sk;
75342 int ret = 0;
75343
75344 + pax_track_stack();
75345 +
75346 if (splice_grow_spd(pipe, &spd))
75347 return -ENOMEM;
75348
75349 diff --git a/net/core/sock.c b/net/core/sock.c
75350 index 11d67b3..df26d4b 100644
75351 --- a/net/core/sock.c
75352 +++ b/net/core/sock.c
75353 @@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75354 */
75355 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
75356 (unsigned)sk->sk_rcvbuf) {
75357 - atomic_inc(&sk->sk_drops);
75358 + atomic_inc_unchecked(&sk->sk_drops);
75359 trace_sock_rcvqueue_full(sk, skb);
75360 return -ENOMEM;
75361 }
75362 @@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75363 return err;
75364
75365 if (!sk_rmem_schedule(sk, skb->truesize)) {
75366 - atomic_inc(&sk->sk_drops);
75367 + atomic_inc_unchecked(&sk->sk_drops);
75368 return -ENOBUFS;
75369 }
75370
75371 @@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75372 skb_dst_force(skb);
75373
75374 spin_lock_irqsave(&list->lock, flags);
75375 - skb->dropcount = atomic_read(&sk->sk_drops);
75376 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75377 __skb_queue_tail(list, skb);
75378 spin_unlock_irqrestore(&list->lock, flags);
75379
75380 @@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75381 skb->dev = NULL;
75382
75383 if (sk_rcvqueues_full(sk, skb)) {
75384 - atomic_inc(&sk->sk_drops);
75385 + atomic_inc_unchecked(&sk->sk_drops);
75386 goto discard_and_relse;
75387 }
75388 if (nested)
75389 @@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75390 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75391 } else if (sk_add_backlog(sk, skb)) {
75392 bh_unlock_sock(sk);
75393 - atomic_inc(&sk->sk_drops);
75394 + atomic_inc_unchecked(&sk->sk_drops);
75395 goto discard_and_relse;
75396 }
75397
75398 @@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75399 if (len > sizeof(peercred))
75400 len = sizeof(peercred);
75401 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75402 - if (copy_to_user(optval, &peercred, len))
75403 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75404 return -EFAULT;
75405 goto lenout;
75406 }
75407 @@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75408 return -ENOTCONN;
75409 if (lv < len)
75410 return -EINVAL;
75411 - if (copy_to_user(optval, address, len))
75412 + if (len > sizeof(address) || copy_to_user(optval, address, len))
75413 return -EFAULT;
75414 goto lenout;
75415 }
75416 @@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75417
75418 if (len > lv)
75419 len = lv;
75420 - if (copy_to_user(optval, &v, len))
75421 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
75422 return -EFAULT;
75423 lenout:
75424 if (put_user(len, optlen))
75425 @@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75426 */
75427 smp_wmb();
75428 atomic_set(&sk->sk_refcnt, 1);
75429 - atomic_set(&sk->sk_drops, 0);
75430 + atomic_set_unchecked(&sk->sk_drops, 0);
75431 }
75432 EXPORT_SYMBOL(sock_init_data);
75433
75434 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75435 index 02e75d1..9a57a7c 100644
75436 --- a/net/decnet/sysctl_net_decnet.c
75437 +++ b/net/decnet/sysctl_net_decnet.c
75438 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75439
75440 if (len > *lenp) len = *lenp;
75441
75442 - if (copy_to_user(buffer, addr, len))
75443 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
75444 return -EFAULT;
75445
75446 *lenp = len;
75447 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75448
75449 if (len > *lenp) len = *lenp;
75450
75451 - if (copy_to_user(buffer, devname, len))
75452 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
75453 return -EFAULT;
75454
75455 *lenp = len;
75456 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75457 index 39a2d29..f39c0fe 100644
75458 --- a/net/econet/Kconfig
75459 +++ b/net/econet/Kconfig
75460 @@ -4,7 +4,7 @@
75461
75462 config ECONET
75463 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75464 - depends on EXPERIMENTAL && INET
75465 + depends on EXPERIMENTAL && INET && BROKEN
75466 ---help---
75467 Econet is a fairly old and slow networking protocol mainly used by
75468 Acorn computers to access file and print servers. It uses native
75469 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75470 index 92fc5f6..b790d91 100644
75471 --- a/net/ipv4/fib_frontend.c
75472 +++ b/net/ipv4/fib_frontend.c
75473 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75474 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75475 fib_sync_up(dev);
75476 #endif
75477 - atomic_inc(&net->ipv4.dev_addr_genid);
75478 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75479 rt_cache_flush(dev_net(dev), -1);
75480 break;
75481 case NETDEV_DOWN:
75482 fib_del_ifaddr(ifa, NULL);
75483 - atomic_inc(&net->ipv4.dev_addr_genid);
75484 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75485 if (ifa->ifa_dev->ifa_list == NULL) {
75486 /* Last address was deleted from this interface.
75487 * Disable IP.
75488 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75489 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75490 fib_sync_up(dev);
75491 #endif
75492 - atomic_inc(&net->ipv4.dev_addr_genid);
75493 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75494 rt_cache_flush(dev_net(dev), -1);
75495 break;
75496 case NETDEV_DOWN:
75497 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75498 index 80106d8..232e898 100644
75499 --- a/net/ipv4/fib_semantics.c
75500 +++ b/net/ipv4/fib_semantics.c
75501 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75502 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75503 nh->nh_gw,
75504 nh->nh_parent->fib_scope);
75505 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75506 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75507
75508 return nh->nh_saddr;
75509 }
75510 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
75511 index 389a2e6..ac1c1de 100644
75512 --- a/net/ipv4/inet_diag.c
75513 +++ b/net/ipv4/inet_diag.c
75514 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
75515 r->idiag_retrans = 0;
75516
75517 r->id.idiag_if = sk->sk_bound_dev_if;
75518 +
75519 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75520 + r->id.idiag_cookie[0] = 0;
75521 + r->id.idiag_cookie[1] = 0;
75522 +#else
75523 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
75524 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75525 +#endif
75526
75527 r->id.idiag_sport = inet->inet_sport;
75528 r->id.idiag_dport = inet->inet_dport;
75529 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
75530 r->idiag_family = tw->tw_family;
75531 r->idiag_retrans = 0;
75532 r->id.idiag_if = tw->tw_bound_dev_if;
75533 +
75534 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75535 + r->id.idiag_cookie[0] = 0;
75536 + r->id.idiag_cookie[1] = 0;
75537 +#else
75538 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
75539 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
75540 +#endif
75541 +
75542 r->id.idiag_sport = tw->tw_sport;
75543 r->id.idiag_dport = tw->tw_dport;
75544 r->id.idiag_src[0] = tw->tw_rcv_saddr;
75545 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
75546 if (sk == NULL)
75547 goto unlock;
75548
75549 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75550 err = -ESTALE;
75551 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
75552 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
75553 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
75554 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
75555 goto out;
75556 +#endif
75557
75558 err = -ENOMEM;
75559 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
75560 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
75561 r->idiag_retrans = req->retrans;
75562
75563 r->id.idiag_if = sk->sk_bound_dev_if;
75564 +
75565 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75566 + r->id.idiag_cookie[0] = 0;
75567 + r->id.idiag_cookie[1] = 0;
75568 +#else
75569 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
75570 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
75571 +#endif
75572
75573 tmo = req->expires - jiffies;
75574 if (tmo < 0)
75575 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75576 index 984ec65..97ac518 100644
75577 --- a/net/ipv4/inet_hashtables.c
75578 +++ b/net/ipv4/inet_hashtables.c
75579 @@ -18,12 +18,15 @@
75580 #include <linux/sched.h>
75581 #include <linux/slab.h>
75582 #include <linux/wait.h>
75583 +#include <linux/security.h>
75584
75585 #include <net/inet_connection_sock.h>
75586 #include <net/inet_hashtables.h>
75587 #include <net/secure_seq.h>
75588 #include <net/ip.h>
75589
75590 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75591 +
75592 /*
75593 * Allocate and initialize a new local port bind bucket.
75594 * The bindhash mutex for snum's hash chain must be held here.
75595 @@ -530,6 +533,8 @@ ok:
75596 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75597 spin_unlock(&head->lock);
75598
75599 + gr_update_task_in_ip_table(current, inet_sk(sk));
75600 +
75601 if (tw) {
75602 inet_twsk_deschedule(tw, death_row);
75603 while (twrefcnt) {
75604 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75605 index 86f13c67..0bce60f 100644
75606 --- a/net/ipv4/inetpeer.c
75607 +++ b/net/ipv4/inetpeer.c
75608 @@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
75609 unsigned int sequence;
75610 int invalidated, gccnt = 0;
75611
75612 + pax_track_stack();
75613 +
75614 /* Attempt a lockless lookup first.
75615 * Because of a concurrent writer, we might not find an existing entry.
75616 */
75617 @@ -436,8 +438,8 @@ relookup:
75618 if (p) {
75619 p->daddr = *daddr;
75620 atomic_set(&p->refcnt, 1);
75621 - atomic_set(&p->rid, 0);
75622 - atomic_set(&p->ip_id_count,
75623 + atomic_set_unchecked(&p->rid, 0);
75624 + atomic_set_unchecked(&p->ip_id_count,
75625 (daddr->family == AF_INET) ?
75626 secure_ip_id(daddr->addr.a4) :
75627 secure_ipv6_id(daddr->addr.a6));
75628 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75629 index 0e0ab98..2ed7dd5 100644
75630 --- a/net/ipv4/ip_fragment.c
75631 +++ b/net/ipv4/ip_fragment.c
75632 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75633 return 0;
75634
75635 start = qp->rid;
75636 - end = atomic_inc_return(&peer->rid);
75637 + end = atomic_inc_return_unchecked(&peer->rid);
75638 qp->rid = end;
75639
75640 rc = qp->q.fragments && (end - start) > max;
75641 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75642 index 8905e92..0b179fb 100644
75643 --- a/net/ipv4/ip_sockglue.c
75644 +++ b/net/ipv4/ip_sockglue.c
75645 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75646 int val;
75647 int len;
75648
75649 + pax_track_stack();
75650 +
75651 if (level != SOL_IP)
75652 return -EOPNOTSUPP;
75653
75654 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75655 len = min_t(unsigned int, len, opt->optlen);
75656 if (put_user(len, optlen))
75657 return -EFAULT;
75658 - if (copy_to_user(optval, opt->__data, len))
75659 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75660 + copy_to_user(optval, opt->__data, len))
75661 return -EFAULT;
75662 return 0;
75663 }
75664 @@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75665 if (sk->sk_type != SOCK_STREAM)
75666 return -ENOPROTOOPT;
75667
75668 - msg.msg_control = optval;
75669 + msg.msg_control = (void __force_kernel *)optval;
75670 msg.msg_controllen = len;
75671 msg.msg_flags = flags;
75672
75673 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75674 index 004bb74..8d4a58c 100644
75675 --- a/net/ipv4/ipconfig.c
75676 +++ b/net/ipv4/ipconfig.c
75677 @@ -317,7 +317,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75678
75679 mm_segment_t oldfs = get_fs();
75680 set_fs(get_ds());
75681 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75682 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75683 set_fs(oldfs);
75684 return res;
75685 }
75686 @@ -328,7 +328,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75687
75688 mm_segment_t oldfs = get_fs();
75689 set_fs(get_ds());
75690 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75691 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75692 set_fs(oldfs);
75693 return res;
75694 }
75695 @@ -339,7 +339,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75696
75697 mm_segment_t oldfs = get_fs();
75698 set_fs(get_ds());
75699 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75700 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75701 set_fs(oldfs);
75702 return res;
75703 }
75704 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75705 index 076b7c8..9c8d038 100644
75706 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
75707 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75708 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
75709
75710 *len = 0;
75711
75712 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
75713 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
75714 if (*octets == NULL) {
75715 if (net_ratelimit())
75716 pr_notice("OOM in bsalg (%d)\n", __LINE__);
75717 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75718 index 39b403f..8e6a0a8 100644
75719 --- a/net/ipv4/ping.c
75720 +++ b/net/ipv4/ping.c
75721 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75722 sk_rmem_alloc_get(sp),
75723 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75724 atomic_read(&sp->sk_refcnt), sp,
75725 - atomic_read(&sp->sk_drops), len);
75726 + atomic_read_unchecked(&sp->sk_drops), len);
75727 }
75728
75729 static int ping_seq_show(struct seq_file *seq, void *v)
75730 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75731 index 61714bd..c9cee6d 100644
75732 --- a/net/ipv4/raw.c
75733 +++ b/net/ipv4/raw.c
75734 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75735 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75736 {
75737 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75738 - atomic_inc(&sk->sk_drops);
75739 + atomic_inc_unchecked(&sk->sk_drops);
75740 kfree_skb(skb);
75741 return NET_RX_DROP;
75742 }
75743 @@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
75744
75745 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75746 {
75747 + struct icmp_filter filter;
75748 +
75749 if (optlen > sizeof(struct icmp_filter))
75750 optlen = sizeof(struct icmp_filter);
75751 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75752 + if (copy_from_user(&filter, optval, optlen))
75753 return -EFAULT;
75754 + raw_sk(sk)->filter = filter;
75755 return 0;
75756 }
75757
75758 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75759 {
75760 int len, ret = -EFAULT;
75761 + struct icmp_filter filter;
75762
75763 if (get_user(len, optlen))
75764 goto out;
75765 @@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75766 if (len > sizeof(struct icmp_filter))
75767 len = sizeof(struct icmp_filter);
75768 ret = -EFAULT;
75769 - if (put_user(len, optlen) ||
75770 - copy_to_user(optval, &raw_sk(sk)->filter, len))
75771 + filter = raw_sk(sk)->filter;
75772 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75773 goto out;
75774 ret = 0;
75775 out: return ret;
75776 @@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75777 sk_wmem_alloc_get(sp),
75778 sk_rmem_alloc_get(sp),
75779 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75780 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75781 + atomic_read(&sp->sk_refcnt),
75782 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75783 + NULL,
75784 +#else
75785 + sp,
75786 +#endif
75787 + atomic_read_unchecked(&sp->sk_drops));
75788 }
75789
75790 static int raw_seq_show(struct seq_file *seq, void *v)
75791 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75792 index b563854..e03f8a6 100644
75793 --- a/net/ipv4/route.c
75794 +++ b/net/ipv4/route.c
75795 @@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75796
75797 static inline int rt_genid(struct net *net)
75798 {
75799 - return atomic_read(&net->ipv4.rt_genid);
75800 + return atomic_read_unchecked(&net->ipv4.rt_genid);
75801 }
75802
75803 #ifdef CONFIG_PROC_FS
75804 @@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
75805 unsigned char shuffle;
75806
75807 get_random_bytes(&shuffle, sizeof(shuffle));
75808 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75809 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75810 redirect_genid++;
75811 }
75812
75813 @@ -3015,7 +3015,7 @@ static int rt_fill_info(struct net *net,
75814 error = rt->dst.error;
75815 if (peer) {
75816 inet_peer_refcheck(rt->peer);
75817 - id = atomic_read(&peer->ip_id_count) & 0xffff;
75818 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75819 if (peer->tcp_ts_stamp) {
75820 ts = peer->tcp_ts;
75821 tsage = get_seconds() - peer->tcp_ts_stamp;
75822 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
75823 index 46febca..98b73a4 100644
75824 --- a/net/ipv4/tcp.c
75825 +++ b/net/ipv4/tcp.c
75826 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
75827 int val;
75828 int err = 0;
75829
75830 + pax_track_stack();
75831 +
75832 /* These are data/string values, all the others are ints */
75833 switch (optname) {
75834 case TCP_CONGESTION: {
75835 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
75836 struct tcp_sock *tp = tcp_sk(sk);
75837 int val, len;
75838
75839 + pax_track_stack();
75840 +
75841 if (get_user(len, optlen))
75842 return -EFAULT;
75843
75844 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75845 index 7963e03..c44f5d0 100644
75846 --- a/net/ipv4/tcp_ipv4.c
75847 +++ b/net/ipv4/tcp_ipv4.c
75848 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
75849 int sysctl_tcp_low_latency __read_mostly;
75850 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75851
75852 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75853 +extern int grsec_enable_blackhole;
75854 +#endif
75855
75856 #ifdef CONFIG_TCP_MD5SIG
75857 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
75858 @@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75859 return 0;
75860
75861 reset:
75862 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75863 + if (!grsec_enable_blackhole)
75864 +#endif
75865 tcp_v4_send_reset(rsk, skb);
75866 discard:
75867 kfree_skb(skb);
75868 @@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75869 TCP_SKB_CB(skb)->sacked = 0;
75870
75871 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75872 - if (!sk)
75873 + if (!sk) {
75874 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75875 + ret = 1;
75876 +#endif
75877 goto no_tcp_socket;
75878 -
75879 + }
75880 process:
75881 - if (sk->sk_state == TCP_TIME_WAIT)
75882 + if (sk->sk_state == TCP_TIME_WAIT) {
75883 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75884 + ret = 2;
75885 +#endif
75886 goto do_time_wait;
75887 + }
75888
75889 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75890 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75891 @@ -1739,6 +1752,10 @@ no_tcp_socket:
75892 bad_packet:
75893 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75894 } else {
75895 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75896 + if (!grsec_enable_blackhole || (ret == 1 &&
75897 + (skb->dev->flags & IFF_LOOPBACK)))
75898 +#endif
75899 tcp_v4_send_reset(NULL, skb);
75900 }
75901
75902 @@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
75903 0, /* non standard timer */
75904 0, /* open_requests have no inode */
75905 atomic_read(&sk->sk_refcnt),
75906 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75907 + NULL,
75908 +#else
75909 req,
75910 +#endif
75911 len);
75912 }
75913
75914 @@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75915 sock_i_uid(sk),
75916 icsk->icsk_probes_out,
75917 sock_i_ino(sk),
75918 - atomic_read(&sk->sk_refcnt), sk,
75919 + atomic_read(&sk->sk_refcnt),
75920 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75921 + NULL,
75922 +#else
75923 + sk,
75924 +#endif
75925 jiffies_to_clock_t(icsk->icsk_rto),
75926 jiffies_to_clock_t(icsk->icsk_ack.ato),
75927 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75928 @@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
75929 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75930 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75931 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75932 - atomic_read(&tw->tw_refcnt), tw, len);
75933 + atomic_read(&tw->tw_refcnt),
75934 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75935 + NULL,
75936 +#else
75937 + tw,
75938 +#endif
75939 + len);
75940 }
75941
75942 #define TMPSZ 150
75943 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75944 index 0ce3d06..e182e59 100644
75945 --- a/net/ipv4/tcp_minisocks.c
75946 +++ b/net/ipv4/tcp_minisocks.c
75947 @@ -27,6 +27,10 @@
75948 #include <net/inet_common.h>
75949 #include <net/xfrm.h>
75950
75951 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75952 +extern int grsec_enable_blackhole;
75953 +#endif
75954 +
75955 int sysctl_tcp_syncookies __read_mostly = 1;
75956 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75957
75958 @@ -750,6 +754,10 @@ listen_overflow:
75959
75960 embryonic_reset:
75961 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75962 +
75963 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75964 + if (!grsec_enable_blackhole)
75965 +#endif
75966 if (!(flg & TCP_FLAG_RST))
75967 req->rsk_ops->send_reset(sk, skb);
75968
75969 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
75970 index 882e0b0..2eba47f 100644
75971 --- a/net/ipv4/tcp_output.c
75972 +++ b/net/ipv4/tcp_output.c
75973 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
75974 int mss;
75975 int s_data_desired = 0;
75976
75977 + pax_track_stack();
75978 +
75979 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
75980 s_data_desired = cvp->s_data_desired;
75981 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
75982 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75983 index 85ee7eb..53277ab 100644
75984 --- a/net/ipv4/tcp_probe.c
75985 +++ b/net/ipv4/tcp_probe.c
75986 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75987 if (cnt + width >= len)
75988 break;
75989
75990 - if (copy_to_user(buf + cnt, tbuf, width))
75991 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75992 return -EFAULT;
75993 cnt += width;
75994 }
75995 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75996 index ecd44b0..b32fba6 100644
75997 --- a/net/ipv4/tcp_timer.c
75998 +++ b/net/ipv4/tcp_timer.c
75999 @@ -22,6 +22,10 @@
76000 #include <linux/gfp.h>
76001 #include <net/tcp.h>
76002
76003 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76004 +extern int grsec_lastack_retries;
76005 +#endif
76006 +
76007 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
76008 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
76009 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
76010 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
76011 }
76012 }
76013
76014 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76015 + if ((sk->sk_state == TCP_LAST_ACK) &&
76016 + (grsec_lastack_retries > 0) &&
76017 + (grsec_lastack_retries < retry_until))
76018 + retry_until = grsec_lastack_retries;
76019 +#endif
76020 +
76021 if (retransmits_timed_out(sk, retry_until,
76022 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
76023 /* Has it gone just too far? */
76024 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
76025 index 1b5a193..bd354b0 100644
76026 --- a/net/ipv4/udp.c
76027 +++ b/net/ipv4/udp.c
76028 @@ -86,6 +86,7 @@
76029 #include <linux/types.h>
76030 #include <linux/fcntl.h>
76031 #include <linux/module.h>
76032 +#include <linux/security.h>
76033 #include <linux/socket.h>
76034 #include <linux/sockios.h>
76035 #include <linux/igmp.h>
76036 @@ -108,6 +109,10 @@
76037 #include <trace/events/udp.h>
76038 #include "udp_impl.h"
76039
76040 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76041 +extern int grsec_enable_blackhole;
76042 +#endif
76043 +
76044 struct udp_table udp_table __read_mostly;
76045 EXPORT_SYMBOL(udp_table);
76046
76047 @@ -565,6 +570,9 @@ found:
76048 return s;
76049 }
76050
76051 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
76052 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
76053 +
76054 /*
76055 * This routine is called by the ICMP module when it gets some
76056 * sort of error condition. If err < 0 then the socket should
76057 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
76058 dport = usin->sin_port;
76059 if (dport == 0)
76060 return -EINVAL;
76061 +
76062 + err = gr_search_udp_sendmsg(sk, usin);
76063 + if (err)
76064 + return err;
76065 } else {
76066 if (sk->sk_state != TCP_ESTABLISHED)
76067 return -EDESTADDRREQ;
76068 +
76069 + err = gr_search_udp_sendmsg(sk, NULL);
76070 + if (err)
76071 + return err;
76072 +
76073 daddr = inet->inet_daddr;
76074 dport = inet->inet_dport;
76075 /* Open fast path for connected socket.
76076 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
76077 udp_lib_checksum_complete(skb)) {
76078 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76079 IS_UDPLITE(sk));
76080 - atomic_inc(&sk->sk_drops);
76081 + atomic_inc_unchecked(&sk->sk_drops);
76082 __skb_unlink(skb, rcvq);
76083 __skb_queue_tail(&list_kill, skb);
76084 }
76085 @@ -1185,6 +1202,10 @@ try_again:
76086 if (!skb)
76087 goto out;
76088
76089 + err = gr_search_udp_recvmsg(sk, skb);
76090 + if (err)
76091 + goto out_free;
76092 +
76093 ulen = skb->len - sizeof(struct udphdr);
76094 if (len > ulen)
76095 len = ulen;
76096 @@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76097
76098 drop:
76099 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76100 - atomic_inc(&sk->sk_drops);
76101 + atomic_inc_unchecked(&sk->sk_drops);
76102 kfree_skb(skb);
76103 return -1;
76104 }
76105 @@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76106 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
76107
76108 if (!skb1) {
76109 - atomic_inc(&sk->sk_drops);
76110 + atomic_inc_unchecked(&sk->sk_drops);
76111 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
76112 IS_UDPLITE(sk));
76113 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76114 @@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76115 goto csum_error;
76116
76117 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
76118 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76119 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76120 +#endif
76121 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
76122
76123 /*
76124 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
76125 sk_wmem_alloc_get(sp),
76126 sk_rmem_alloc_get(sp),
76127 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76128 - atomic_read(&sp->sk_refcnt), sp,
76129 - atomic_read(&sp->sk_drops), len);
76130 + atomic_read(&sp->sk_refcnt),
76131 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76132 + NULL,
76133 +#else
76134 + sp,
76135 +#endif
76136 + atomic_read_unchecked(&sp->sk_drops), len);
76137 }
76138
76139 int udp4_seq_show(struct seq_file *seq, void *v)
76140 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
76141 index 12368c5..fbf899f 100644
76142 --- a/net/ipv6/addrconf.c
76143 +++ b/net/ipv6/addrconf.c
76144 @@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
76145 p.iph.ihl = 5;
76146 p.iph.protocol = IPPROTO_IPV6;
76147 p.iph.ttl = 64;
76148 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
76149 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
76150
76151 if (ops->ndo_do_ioctl) {
76152 mm_segment_t oldfs = get_fs();
76153 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
76154 index 8a58e8c..8b5e631 100644
76155 --- a/net/ipv6/inet6_connection_sock.c
76156 +++ b/net/ipv6/inet6_connection_sock.c
76157 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
76158 #ifdef CONFIG_XFRM
76159 {
76160 struct rt6_info *rt = (struct rt6_info *)dst;
76161 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76162 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76163 }
76164 #endif
76165 }
76166 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
76167 #ifdef CONFIG_XFRM
76168 if (dst) {
76169 struct rt6_info *rt = (struct rt6_info *)dst;
76170 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76171 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76172 __sk_dst_reset(sk);
76173 dst = NULL;
76174 }
76175 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
76176 index 2fbda5f..26ed683 100644
76177 --- a/net/ipv6/ipv6_sockglue.c
76178 +++ b/net/ipv6/ipv6_sockglue.c
76179 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
76180 int val, valbool;
76181 int retv = -ENOPROTOOPT;
76182
76183 + pax_track_stack();
76184 +
76185 if (optval == NULL)
76186 val=0;
76187 else {
76188 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76189 int len;
76190 int val;
76191
76192 + pax_track_stack();
76193 +
76194 if (ip6_mroute_opt(optname))
76195 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
76196
76197 @@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76198 if (sk->sk_type != SOCK_STREAM)
76199 return -ENOPROTOOPT;
76200
76201 - msg.msg_control = optval;
76202 + msg.msg_control = (void __force_kernel *)optval;
76203 msg.msg_controllen = len;
76204 msg.msg_flags = flags;
76205
76206 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
76207 index 343852e..c92bd15 100644
76208 --- a/net/ipv6/raw.c
76209 +++ b/net/ipv6/raw.c
76210 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
76211 {
76212 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
76213 skb_checksum_complete(skb)) {
76214 - atomic_inc(&sk->sk_drops);
76215 + atomic_inc_unchecked(&sk->sk_drops);
76216 kfree_skb(skb);
76217 return NET_RX_DROP;
76218 }
76219 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76220 struct raw6_sock *rp = raw6_sk(sk);
76221
76222 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76223 - atomic_inc(&sk->sk_drops);
76224 + atomic_inc_unchecked(&sk->sk_drops);
76225 kfree_skb(skb);
76226 return NET_RX_DROP;
76227 }
76228 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76229
76230 if (inet->hdrincl) {
76231 if (skb_checksum_complete(skb)) {
76232 - atomic_inc(&sk->sk_drops);
76233 + atomic_inc_unchecked(&sk->sk_drops);
76234 kfree_skb(skb);
76235 return NET_RX_DROP;
76236 }
76237 @@ -601,7 +601,7 @@ out:
76238 return err;
76239 }
76240
76241 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76242 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
76243 struct flowi6 *fl6, struct dst_entry **dstp,
76244 unsigned int flags)
76245 {
76246 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
76247 u16 proto;
76248 int err;
76249
76250 + pax_track_stack();
76251 +
76252 /* Rough check on arithmetic overflow,
76253 better check is made in ip6_append_data().
76254 */
76255 @@ -909,12 +911,15 @@ do_confirm:
76256 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76257 char __user *optval, int optlen)
76258 {
76259 + struct icmp6_filter filter;
76260 +
76261 switch (optname) {
76262 case ICMPV6_FILTER:
76263 if (optlen > sizeof(struct icmp6_filter))
76264 optlen = sizeof(struct icmp6_filter);
76265 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76266 + if (copy_from_user(&filter, optval, optlen))
76267 return -EFAULT;
76268 + raw6_sk(sk)->filter = filter;
76269 return 0;
76270 default:
76271 return -ENOPROTOOPT;
76272 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76273 char __user *optval, int __user *optlen)
76274 {
76275 int len;
76276 + struct icmp6_filter filter;
76277
76278 switch (optname) {
76279 case ICMPV6_FILTER:
76280 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76281 len = sizeof(struct icmp6_filter);
76282 if (put_user(len, optlen))
76283 return -EFAULT;
76284 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76285 + filter = raw6_sk(sk)->filter;
76286 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
76287 return -EFAULT;
76288 return 0;
76289 default:
76290 @@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76291 0, 0L, 0,
76292 sock_i_uid(sp), 0,
76293 sock_i_ino(sp),
76294 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76295 + atomic_read(&sp->sk_refcnt),
76296 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76297 + NULL,
76298 +#else
76299 + sp,
76300 +#endif
76301 + atomic_read_unchecked(&sp->sk_drops));
76302 }
76303
76304 static int raw6_seq_show(struct seq_file *seq, void *v)
76305 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
76306 index 7b8fc57..c6185da 100644
76307 --- a/net/ipv6/tcp_ipv6.c
76308 +++ b/net/ipv6/tcp_ipv6.c
76309 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76310 }
76311 #endif
76312
76313 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76314 +extern int grsec_enable_blackhole;
76315 +#endif
76316 +
76317 static void tcp_v6_hash(struct sock *sk)
76318 {
76319 if (sk->sk_state != TCP_CLOSE) {
76320 @@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76321 return 0;
76322
76323 reset:
76324 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76325 + if (!grsec_enable_blackhole)
76326 +#endif
76327 tcp_v6_send_reset(sk, skb);
76328 discard:
76329 if (opt_skb)
76330 @@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
76331 TCP_SKB_CB(skb)->sacked = 0;
76332
76333 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76334 - if (!sk)
76335 + if (!sk) {
76336 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76337 + ret = 1;
76338 +#endif
76339 goto no_tcp_socket;
76340 + }
76341
76342 process:
76343 - if (sk->sk_state == TCP_TIME_WAIT)
76344 + if (sk->sk_state == TCP_TIME_WAIT) {
76345 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76346 + ret = 2;
76347 +#endif
76348 goto do_time_wait;
76349 + }
76350
76351 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76352 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76353 @@ -1779,6 +1794,10 @@ no_tcp_socket:
76354 bad_packet:
76355 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76356 } else {
76357 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76358 + if (!grsec_enable_blackhole || (ret == 1 &&
76359 + (skb->dev->flags & IFF_LOOPBACK)))
76360 +#endif
76361 tcp_v6_send_reset(NULL, skb);
76362 }
76363
76364 @@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file *seq,
76365 uid,
76366 0, /* non standard timer */
76367 0, /* open_requests have no inode */
76368 - 0, req);
76369 + 0,
76370 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76371 + NULL
76372 +#else
76373 + req
76374 +#endif
76375 + );
76376 }
76377
76378 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76379 @@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76380 sock_i_uid(sp),
76381 icsk->icsk_probes_out,
76382 sock_i_ino(sp),
76383 - atomic_read(&sp->sk_refcnt), sp,
76384 + atomic_read(&sp->sk_refcnt),
76385 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76386 + NULL,
76387 +#else
76388 + sp,
76389 +#endif
76390 jiffies_to_clock_t(icsk->icsk_rto),
76391 jiffies_to_clock_t(icsk->icsk_ack.ato),
76392 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76393 @@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76394 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76395 tw->tw_substate, 0, 0,
76396 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76397 - atomic_read(&tw->tw_refcnt), tw);
76398 + atomic_read(&tw->tw_refcnt),
76399 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76400 + NULL
76401 +#else
76402 + tw
76403 +#endif
76404 + );
76405 }
76406
76407 static int tcp6_seq_show(struct seq_file *seq, void *v)
76408 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76409 index bb95e8e..ae0ee80 100644
76410 --- a/net/ipv6/udp.c
76411 +++ b/net/ipv6/udp.c
76412 @@ -50,6 +50,10 @@
76413 #include <linux/seq_file.h>
76414 #include "udp_impl.h"
76415
76416 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76417 +extern int grsec_enable_blackhole;
76418 +#endif
76419 +
76420 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76421 {
76422 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76423 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76424
76425 return 0;
76426 drop:
76427 - atomic_inc(&sk->sk_drops);
76428 + atomic_inc_unchecked(&sk->sk_drops);
76429 drop_no_sk_drops_inc:
76430 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76431 kfree_skb(skb);
76432 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76433 continue;
76434 }
76435 drop:
76436 - atomic_inc(&sk->sk_drops);
76437 + atomic_inc_unchecked(&sk->sk_drops);
76438 UDP6_INC_STATS_BH(sock_net(sk),
76439 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76440 UDP6_INC_STATS_BH(sock_net(sk),
76441 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76442 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76443 proto == IPPROTO_UDPLITE);
76444
76445 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76446 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76447 +#endif
76448 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76449
76450 kfree_skb(skb);
76451 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76452 if (!sock_owned_by_user(sk))
76453 udpv6_queue_rcv_skb(sk, skb);
76454 else if (sk_add_backlog(sk, skb)) {
76455 - atomic_inc(&sk->sk_drops);
76456 + atomic_inc_unchecked(&sk->sk_drops);
76457 bh_unlock_sock(sk);
76458 sock_put(sk);
76459 goto discard;
76460 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76461 0, 0L, 0,
76462 sock_i_uid(sp), 0,
76463 sock_i_ino(sp),
76464 - atomic_read(&sp->sk_refcnt), sp,
76465 - atomic_read(&sp->sk_drops));
76466 + atomic_read(&sp->sk_refcnt),
76467 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76468 + NULL,
76469 +#else
76470 + sp,
76471 +#endif
76472 + atomic_read_unchecked(&sp->sk_drops));
76473 }
76474
76475 int udp6_seq_show(struct seq_file *seq, void *v)
76476 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76477 index b3cc8b3..baa02d0 100644
76478 --- a/net/irda/ircomm/ircomm_tty.c
76479 +++ b/net/irda/ircomm/ircomm_tty.c
76480 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76481 add_wait_queue(&self->open_wait, &wait);
76482
76483 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76484 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76485 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76486
76487 /* As far as I can see, we protect open_count - Jean II */
76488 spin_lock_irqsave(&self->spinlock, flags);
76489 if (!tty_hung_up_p(filp)) {
76490 extra_count = 1;
76491 - self->open_count--;
76492 + local_dec(&self->open_count);
76493 }
76494 spin_unlock_irqrestore(&self->spinlock, flags);
76495 - self->blocked_open++;
76496 + local_inc(&self->blocked_open);
76497
76498 while (1) {
76499 if (tty->termios->c_cflag & CBAUD) {
76500 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76501 }
76502
76503 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76504 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76505 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76506
76507 schedule();
76508 }
76509 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76510 if (extra_count) {
76511 /* ++ is not atomic, so this should be protected - Jean II */
76512 spin_lock_irqsave(&self->spinlock, flags);
76513 - self->open_count++;
76514 + local_inc(&self->open_count);
76515 spin_unlock_irqrestore(&self->spinlock, flags);
76516 }
76517 - self->blocked_open--;
76518 + local_dec(&self->blocked_open);
76519
76520 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76521 - __FILE__,__LINE__, tty->driver->name, self->open_count);
76522 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76523
76524 if (!retval)
76525 self->flags |= ASYNC_NORMAL_ACTIVE;
76526 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76527 }
76528 /* ++ is not atomic, so this should be protected - Jean II */
76529 spin_lock_irqsave(&self->spinlock, flags);
76530 - self->open_count++;
76531 + local_inc(&self->open_count);
76532
76533 tty->driver_data = self;
76534 self->tty = tty;
76535 spin_unlock_irqrestore(&self->spinlock, flags);
76536
76537 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76538 - self->line, self->open_count);
76539 + self->line, local_read(&self->open_count));
76540
76541 /* Not really used by us, but lets do it anyway */
76542 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76543 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76544 return;
76545 }
76546
76547 - if ((tty->count == 1) && (self->open_count != 1)) {
76548 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76549 /*
76550 * Uh, oh. tty->count is 1, which means that the tty
76551 * structure will be freed. state->count should always
76552 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76553 */
76554 IRDA_DEBUG(0, "%s(), bad serial port count; "
76555 "tty->count is 1, state->count is %d\n", __func__ ,
76556 - self->open_count);
76557 - self->open_count = 1;
76558 + local_read(&self->open_count));
76559 + local_set(&self->open_count, 1);
76560 }
76561
76562 - if (--self->open_count < 0) {
76563 + if (local_dec_return(&self->open_count) < 0) {
76564 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76565 - __func__, self->line, self->open_count);
76566 - self->open_count = 0;
76567 + __func__, self->line, local_read(&self->open_count));
76568 + local_set(&self->open_count, 0);
76569 }
76570 - if (self->open_count) {
76571 + if (local_read(&self->open_count)) {
76572 spin_unlock_irqrestore(&self->spinlock, flags);
76573
76574 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76575 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76576 tty->closing = 0;
76577 self->tty = NULL;
76578
76579 - if (self->blocked_open) {
76580 + if (local_read(&self->blocked_open)) {
76581 if (self->close_delay)
76582 schedule_timeout_interruptible(self->close_delay);
76583 wake_up_interruptible(&self->open_wait);
76584 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76585 spin_lock_irqsave(&self->spinlock, flags);
76586 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76587 self->tty = NULL;
76588 - self->open_count = 0;
76589 + local_set(&self->open_count, 0);
76590 spin_unlock_irqrestore(&self->spinlock, flags);
76591
76592 wake_up_interruptible(&self->open_wait);
76593 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76594 seq_putc(m, '\n');
76595
76596 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76597 - seq_printf(m, "Open count: %d\n", self->open_count);
76598 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76599 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76600 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76601
76602 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76603 index e2013e4..edfc1e3 100644
76604 --- a/net/iucv/af_iucv.c
76605 +++ b/net/iucv/af_iucv.c
76606 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct sock *sk)
76607
76608 write_lock_bh(&iucv_sk_list.lock);
76609
76610 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76611 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76612 while (__iucv_get_sock_by_name(name)) {
76613 sprintf(name, "%08x",
76614 - atomic_inc_return(&iucv_sk_list.autobind_name));
76615 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76616 }
76617
76618 write_unlock_bh(&iucv_sk_list.lock);
76619 diff --git a/net/key/af_key.c b/net/key/af_key.c
76620 index 1e733e9..c84de2f 100644
76621 --- a/net/key/af_key.c
76622 +++ b/net/key/af_key.c
76623 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
76624 struct xfrm_migrate m[XFRM_MAX_DEPTH];
76625 struct xfrm_kmaddress k;
76626
76627 + pax_track_stack();
76628 +
76629 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
76630 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
76631 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
76632 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76633 static u32 get_acqseq(void)
76634 {
76635 u32 res;
76636 - static atomic_t acqseq;
76637 + static atomic_unchecked_t acqseq;
76638
76639 do {
76640 - res = atomic_inc_return(&acqseq);
76641 + res = atomic_inc_return_unchecked(&acqseq);
76642 } while (!res);
76643 return res;
76644 }
76645 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
76646 index 956b7e4..f01d328 100644
76647 --- a/net/lapb/lapb_iface.c
76648 +++ b/net/lapb/lapb_iface.c
76649 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
76650 goto out;
76651
76652 lapb->dev = dev;
76653 - lapb->callbacks = *callbacks;
76654 + lapb->callbacks = callbacks;
76655
76656 __lapb_insert_cb(lapb);
76657
76658 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
76659
76660 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
76661 {
76662 - if (lapb->callbacks.connect_confirmation)
76663 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
76664 + if (lapb->callbacks->connect_confirmation)
76665 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
76666 }
76667
76668 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
76669 {
76670 - if (lapb->callbacks.connect_indication)
76671 - lapb->callbacks.connect_indication(lapb->dev, reason);
76672 + if (lapb->callbacks->connect_indication)
76673 + lapb->callbacks->connect_indication(lapb->dev, reason);
76674 }
76675
76676 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
76677 {
76678 - if (lapb->callbacks.disconnect_confirmation)
76679 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
76680 + if (lapb->callbacks->disconnect_confirmation)
76681 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
76682 }
76683
76684 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
76685 {
76686 - if (lapb->callbacks.disconnect_indication)
76687 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
76688 + if (lapb->callbacks->disconnect_indication)
76689 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
76690 }
76691
76692 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
76693 {
76694 - if (lapb->callbacks.data_indication)
76695 - return lapb->callbacks.data_indication(lapb->dev, skb);
76696 + if (lapb->callbacks->data_indication)
76697 + return lapb->callbacks->data_indication(lapb->dev, skb);
76698
76699 kfree_skb(skb);
76700 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
76701 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
76702 {
76703 int used = 0;
76704
76705 - if (lapb->callbacks.data_transmit) {
76706 - lapb->callbacks.data_transmit(lapb->dev, skb);
76707 + if (lapb->callbacks->data_transmit) {
76708 + lapb->callbacks->data_transmit(lapb->dev, skb);
76709 used = 1;
76710 }
76711
76712 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
76713 index a01d213..6a1f1ab 100644
76714 --- a/net/mac80211/debugfs_sta.c
76715 +++ b/net/mac80211/debugfs_sta.c
76716 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
76717 struct tid_ampdu_rx *tid_rx;
76718 struct tid_ampdu_tx *tid_tx;
76719
76720 + pax_track_stack();
76721 +
76722 rcu_read_lock();
76723
76724 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
76725 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
76726 struct sta_info *sta = file->private_data;
76727 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
76728
76729 + pax_track_stack();
76730 +
76731 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
76732 htc->ht_supported ? "" : "not ");
76733 if (htc->ht_supported) {
76734 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76735 index 9fab144..7f0fc14 100644
76736 --- a/net/mac80211/ieee80211_i.h
76737 +++ b/net/mac80211/ieee80211_i.h
76738 @@ -27,6 +27,7 @@
76739 #include <net/ieee80211_radiotap.h>
76740 #include <net/cfg80211.h>
76741 #include <net/mac80211.h>
76742 +#include <asm/local.h>
76743 #include "key.h"
76744 #include "sta_info.h"
76745
76746 @@ -754,7 +755,7 @@ struct ieee80211_local {
76747 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76748 spinlock_t queue_stop_reason_lock;
76749
76750 - int open_count;
76751 + local_t open_count;
76752 int monitors, cooked_mntrs;
76753 /* number of interfaces with corresponding FIF_ flags */
76754 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76755 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76756 index 556e7e6..120dcaf 100644
76757 --- a/net/mac80211/iface.c
76758 +++ b/net/mac80211/iface.c
76759 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76760 break;
76761 }
76762
76763 - if (local->open_count == 0) {
76764 + if (local_read(&local->open_count) == 0) {
76765 res = drv_start(local);
76766 if (res)
76767 goto err_del_bss;
76768 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76769 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76770
76771 if (!is_valid_ether_addr(dev->dev_addr)) {
76772 - if (!local->open_count)
76773 + if (!local_read(&local->open_count))
76774 drv_stop(local);
76775 return -EADDRNOTAVAIL;
76776 }
76777 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76778 mutex_unlock(&local->mtx);
76779
76780 if (coming_up)
76781 - local->open_count++;
76782 + local_inc(&local->open_count);
76783
76784 if (hw_reconf_flags) {
76785 ieee80211_hw_config(local, hw_reconf_flags);
76786 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76787 err_del_interface:
76788 drv_remove_interface(local, &sdata->vif);
76789 err_stop:
76790 - if (!local->open_count)
76791 + if (!local_read(&local->open_count))
76792 drv_stop(local);
76793 err_del_bss:
76794 sdata->bss = NULL;
76795 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76796 }
76797
76798 if (going_down)
76799 - local->open_count--;
76800 + local_dec(&local->open_count);
76801
76802 switch (sdata->vif.type) {
76803 case NL80211_IFTYPE_AP_VLAN:
76804 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76805
76806 ieee80211_recalc_ps(local, -1);
76807
76808 - if (local->open_count == 0) {
76809 + if (local_read(&local->open_count) == 0) {
76810 if (local->ops->napi_poll)
76811 napi_disable(&local->napi);
76812 ieee80211_clear_tx_pending(local);
76813 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76814 index 3d90dad..36884d5 100644
76815 --- a/net/mac80211/main.c
76816 +++ b/net/mac80211/main.c
76817 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76818 local->hw.conf.power_level = power;
76819 }
76820
76821 - if (changed && local->open_count) {
76822 + if (changed && local_read(&local->open_count)) {
76823 ret = drv_config(local, changed);
76824 /*
76825 * Goal:
76826 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
76827 index 0f48368..d48e688 100644
76828 --- a/net/mac80211/mlme.c
76829 +++ b/net/mac80211/mlme.c
76830 @@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
76831 bool have_higher_than_11mbit = false;
76832 u16 ap_ht_cap_flags;
76833
76834 + pax_track_stack();
76835 +
76836 /* AssocResp and ReassocResp have identical structure */
76837
76838 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
76839 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76840 index 6326d34..7225f61 100644
76841 --- a/net/mac80211/pm.c
76842 +++ b/net/mac80211/pm.c
76843 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76844 struct ieee80211_sub_if_data *sdata;
76845 struct sta_info *sta;
76846
76847 - if (!local->open_count)
76848 + if (!local_read(&local->open_count))
76849 goto suspend;
76850
76851 ieee80211_scan_cancel(local);
76852 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76853 cancel_work_sync(&local->dynamic_ps_enable_work);
76854 del_timer_sync(&local->dynamic_ps_timer);
76855
76856 - local->wowlan = wowlan && local->open_count;
76857 + local->wowlan = wowlan && local_read(&local->open_count);
76858 if (local->wowlan) {
76859 int err = drv_suspend(local, wowlan);
76860 if (err < 0) {
76861 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76862 }
76863
76864 /* stop hardware - this must stop RX */
76865 - if (local->open_count)
76866 + if (local_read(&local->open_count))
76867 ieee80211_stop_device(local);
76868
76869 suspend:
76870 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76871 index 3d5a2cb..b17ad48 100644
76872 --- a/net/mac80211/rate.c
76873 +++ b/net/mac80211/rate.c
76874 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76875
76876 ASSERT_RTNL();
76877
76878 - if (local->open_count)
76879 + if (local_read(&local->open_count))
76880 return -EBUSY;
76881
76882 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76883 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76884 index 4851e9e..d860e05 100644
76885 --- a/net/mac80211/rc80211_pid_debugfs.c
76886 +++ b/net/mac80211/rc80211_pid_debugfs.c
76887 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76888
76889 spin_unlock_irqrestore(&events->lock, status);
76890
76891 - if (copy_to_user(buf, pb, p))
76892 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76893 return -EFAULT;
76894
76895 return p;
76896 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76897 index fd031e8..84fbfcf 100644
76898 --- a/net/mac80211/util.c
76899 +++ b/net/mac80211/util.c
76900 @@ -1170,7 +1170,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76901 drv_set_coverage_class(local, hw->wiphy->coverage_class);
76902
76903 /* everything else happens only if HW was up & running */
76904 - if (!local->open_count)
76905 + if (!local_read(&local->open_count))
76906 goto wake_up;
76907
76908 /*
76909 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76910 index 32bff6d..d0cf986 100644
76911 --- a/net/netfilter/Kconfig
76912 +++ b/net/netfilter/Kconfig
76913 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
76914
76915 To compile it as a module, choose M here. If unsure, say N.
76916
76917 +config NETFILTER_XT_MATCH_GRADM
76918 + tristate '"gradm" match support'
76919 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76920 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76921 + ---help---
76922 + The gradm match allows to match on grsecurity RBAC being enabled.
76923 + It is useful when iptables rules are applied early on bootup to
76924 + prevent connections to the machine (except from a trusted host)
76925 + while the RBAC system is disabled.
76926 +
76927 config NETFILTER_XT_MATCH_HASHLIMIT
76928 tristate '"hashlimit" match support'
76929 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76930 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76931 index 1a02853..5d8c22e 100644
76932 --- a/net/netfilter/Makefile
76933 +++ b/net/netfilter/Makefile
76934 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
76935 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76936 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76937 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76938 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76939 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76940 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76941 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76942 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76943 index 12571fb..fb73976 100644
76944 --- a/net/netfilter/ipvs/ip_vs_conn.c
76945 +++ b/net/netfilter/ipvs/ip_vs_conn.c
76946 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76947 /* Increase the refcnt counter of the dest */
76948 atomic_inc(&dest->refcnt);
76949
76950 - conn_flags = atomic_read(&dest->conn_flags);
76951 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
76952 if (cp->protocol != IPPROTO_UDP)
76953 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76954 /* Bind with the destination and its corresponding transmitter */
76955 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76956 atomic_set(&cp->refcnt, 1);
76957
76958 atomic_set(&cp->n_control, 0);
76959 - atomic_set(&cp->in_pkts, 0);
76960 + atomic_set_unchecked(&cp->in_pkts, 0);
76961
76962 atomic_inc(&ipvs->conn_count);
76963 if (flags & IP_VS_CONN_F_NO_CPORT)
76964 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76965
76966 /* Don't drop the entry if its number of incoming packets is not
76967 located in [0, 8] */
76968 - i = atomic_read(&cp->in_pkts);
76969 + i = atomic_read_unchecked(&cp->in_pkts);
76970 if (i > 8 || i < 0) return 0;
76971
76972 if (!todrop_rate[i]) return 0;
76973 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76974 index 4f77bb1..5d0bc26 100644
76975 --- a/net/netfilter/ipvs/ip_vs_core.c
76976 +++ b/net/netfilter/ipvs/ip_vs_core.c
76977 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76978 ret = cp->packet_xmit(skb, cp, pd->pp);
76979 /* do not touch skb anymore */
76980
76981 - atomic_inc(&cp->in_pkts);
76982 + atomic_inc_unchecked(&cp->in_pkts);
76983 ip_vs_conn_put(cp);
76984 return ret;
76985 }
76986 @@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76987 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76988 pkts = sysctl_sync_threshold(ipvs);
76989 else
76990 - pkts = atomic_add_return(1, &cp->in_pkts);
76991 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76992
76993 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76994 cp->protocol == IPPROTO_SCTP) {
76995 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76996 index e3be48b..d658c8c 100644
76997 --- a/net/netfilter/ipvs/ip_vs_ctl.c
76998 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
76999 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
77000 ip_vs_rs_hash(ipvs, dest);
77001 write_unlock_bh(&ipvs->rs_lock);
77002 }
77003 - atomic_set(&dest->conn_flags, conn_flags);
77004 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
77005
77006 /* bind the service */
77007 if (!dest->svc) {
77008 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77009 " %-7s %-6d %-10d %-10d\n",
77010 &dest->addr.in6,
77011 ntohs(dest->port),
77012 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77013 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77014 atomic_read(&dest->weight),
77015 atomic_read(&dest->activeconns),
77016 atomic_read(&dest->inactconns));
77017 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77018 "%-7s %-6d %-10d %-10d\n",
77019 ntohl(dest->addr.ip),
77020 ntohs(dest->port),
77021 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77022 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77023 atomic_read(&dest->weight),
77024 atomic_read(&dest->activeconns),
77025 atomic_read(&dest->inactconns));
77026 @@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
77027 struct ip_vs_dest_user_kern udest;
77028 struct netns_ipvs *ipvs = net_ipvs(net);
77029
77030 + pax_track_stack();
77031 +
77032 if (!capable(CAP_NET_ADMIN))
77033 return -EPERM;
77034
77035 @@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
77036
77037 entry.addr = dest->addr.ip;
77038 entry.port = dest->port;
77039 - entry.conn_flags = atomic_read(&dest->conn_flags);
77040 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
77041 entry.weight = atomic_read(&dest->weight);
77042 entry.u_threshold = dest->u_threshold;
77043 entry.l_threshold = dest->l_threshold;
77044 @@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
77045 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
77046
77047 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
77048 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77049 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77050 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
77051 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
77052 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
77053 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
77054 index 3cdd479..116afa8 100644
77055 --- a/net/netfilter/ipvs/ip_vs_sync.c
77056 +++ b/net/netfilter/ipvs/ip_vs_sync.c
77057 @@ -649,7 +649,7 @@ control:
77058 * i.e only increment in_pkts for Templates.
77059 */
77060 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
77061 - int pkts = atomic_add_return(1, &cp->in_pkts);
77062 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77063
77064 if (pkts % sysctl_sync_period(ipvs) != 1)
77065 return;
77066 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
77067
77068 if (opt)
77069 memcpy(&cp->in_seq, opt, sizeof(*opt));
77070 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77071 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77072 cp->state = state;
77073 cp->old_state = cp->state;
77074 /*
77075 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
77076 index ee319a4..8a285ee 100644
77077 --- a/net/netfilter/ipvs/ip_vs_xmit.c
77078 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
77079 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
77080 else
77081 rc = NF_ACCEPT;
77082 /* do not touch skb anymore */
77083 - atomic_inc(&cp->in_pkts);
77084 + atomic_inc_unchecked(&cp->in_pkts);
77085 goto out;
77086 }
77087
77088 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
77089 else
77090 rc = NF_ACCEPT;
77091 /* do not touch skb anymore */
77092 - atomic_inc(&cp->in_pkts);
77093 + atomic_inc_unchecked(&cp->in_pkts);
77094 goto out;
77095 }
77096
77097 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
77098 index 7dec88a..0996ce3 100644
77099 --- a/net/netfilter/nf_conntrack_netlink.c
77100 +++ b/net/netfilter/nf_conntrack_netlink.c
77101 @@ -135,7 +135,7 @@ nla_put_failure:
77102 static inline int
77103 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
77104 {
77105 - long timeout = (ct->timeout.expires - jiffies) / HZ;
77106 + long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
77107
77108 if (timeout < 0)
77109 timeout = 0;
77110 @@ -1638,7 +1638,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
77111 const struct nf_conntrack_expect *exp)
77112 {
77113 struct nf_conn *master = exp->master;
77114 - long timeout = (exp->timeout.expires - jiffies) / HZ;
77115 + long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
77116 struct nf_conn_help *help;
77117
77118 if (timeout < 0)
77119 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
77120 index 2d8158a..5dca296 100644
77121 --- a/net/netfilter/nfnetlink_log.c
77122 +++ b/net/netfilter/nfnetlink_log.c
77123 @@ -70,7 +70,7 @@ struct nfulnl_instance {
77124 };
77125
77126 static DEFINE_SPINLOCK(instances_lock);
77127 -static atomic_t global_seq;
77128 +static atomic_unchecked_t global_seq;
77129
77130 #define INSTANCE_BUCKETS 16
77131 static struct hlist_head instance_table[INSTANCE_BUCKETS];
77132 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_instance *inst,
77133 /* global sequence number */
77134 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
77135 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
77136 - htonl(atomic_inc_return(&global_seq)));
77137 + htonl(atomic_inc_return_unchecked(&global_seq)));
77138
77139 if (data_len) {
77140 struct nlattr *nla;
77141 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
77142 new file mode 100644
77143 index 0000000..6905327
77144 --- /dev/null
77145 +++ b/net/netfilter/xt_gradm.c
77146 @@ -0,0 +1,51 @@
77147 +/*
77148 + * gradm match for netfilter
77149 + * Copyright © Zbigniew Krzystolik, 2010
77150 + *
77151 + * This program is free software; you can redistribute it and/or modify
77152 + * it under the terms of the GNU General Public License; either version
77153 + * 2 or 3 as published by the Free Software Foundation.
77154 + */
77155 +#include <linux/module.h>
77156 +#include <linux/moduleparam.h>
77157 +#include <linux/skbuff.h>
77158 +#include <linux/netfilter/x_tables.h>
77159 +#include <linux/grsecurity.h>
77160 +#include <linux/netfilter/xt_gradm.h>
77161 +
77162 +static bool
77163 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
77164 +{
77165 + const struct xt_gradm_mtinfo *info = par->matchinfo;
77166 + bool retval = false;
77167 + if (gr_acl_is_enabled())
77168 + retval = true;
77169 + return retval ^ info->invflags;
77170 +}
77171 +
77172 +static struct xt_match gradm_mt_reg __read_mostly = {
77173 + .name = "gradm",
77174 + .revision = 0,
77175 + .family = NFPROTO_UNSPEC,
77176 + .match = gradm_mt,
77177 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
77178 + .me = THIS_MODULE,
77179 +};
77180 +
77181 +static int __init gradm_mt_init(void)
77182 +{
77183 + return xt_register_match(&gradm_mt_reg);
77184 +}
77185 +
77186 +static void __exit gradm_mt_exit(void)
77187 +{
77188 + xt_unregister_match(&gradm_mt_reg);
77189 +}
77190 +
77191 +module_init(gradm_mt_init);
77192 +module_exit(gradm_mt_exit);
77193 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
77194 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
77195 +MODULE_LICENSE("GPL");
77196 +MODULE_ALIAS("ipt_gradm");
77197 +MODULE_ALIAS("ip6t_gradm");
77198 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
77199 index 42ecb71..8d687c0 100644
77200 --- a/net/netfilter/xt_statistic.c
77201 +++ b/net/netfilter/xt_statistic.c
77202 @@ -18,7 +18,7 @@
77203 #include <linux/netfilter/x_tables.h>
77204
77205 struct xt_statistic_priv {
77206 - atomic_t count;
77207 + atomic_unchecked_t count;
77208 } ____cacheline_aligned_in_smp;
77209
77210 MODULE_LICENSE("GPL");
77211 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
77212 break;
77213 case XT_STATISTIC_MODE_NTH:
77214 do {
77215 - oval = atomic_read(&info->master->count);
77216 + oval = atomic_read_unchecked(&info->master->count);
77217 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
77218 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
77219 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
77220 if (nval == 0)
77221 ret = !ret;
77222 break;
77223 @@ -63,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
77224 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
77225 if (info->master == NULL)
77226 return -ENOMEM;
77227 - atomic_set(&info->master->count, info->u.nth.count);
77228 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
77229
77230 return 0;
77231 }
77232 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
77233 index 0a4db02..604f748 100644
77234 --- a/net/netlink/af_netlink.c
77235 +++ b/net/netlink/af_netlink.c
77236 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
77237 sk->sk_error_report(sk);
77238 }
77239 }
77240 - atomic_inc(&sk->sk_drops);
77241 + atomic_inc_unchecked(&sk->sk_drops);
77242 }
77243
77244 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
77245 @@ -2000,7 +2000,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
77246 sk_wmem_alloc_get(s),
77247 nlk->cb,
77248 atomic_read(&s->sk_refcnt),
77249 - atomic_read(&s->sk_drops),
77250 + atomic_read_unchecked(&s->sk_drops),
77251 sock_i_ino(s)
77252 );
77253
77254 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
77255 index 732152f..60bb09e 100644
77256 --- a/net/netrom/af_netrom.c
77257 +++ b/net/netrom/af_netrom.c
77258 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
77259 struct sock *sk = sock->sk;
77260 struct nr_sock *nr = nr_sk(sk);
77261
77262 + memset(sax, 0, sizeof(*sax));
77263 lock_sock(sk);
77264 if (peer != 0) {
77265 if (sk->sk_state != TCP_ESTABLISHED) {
77266 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
77267 *uaddr_len = sizeof(struct full_sockaddr_ax25);
77268 } else {
77269 sax->fsa_ax25.sax25_family = AF_NETROM;
77270 - sax->fsa_ax25.sax25_ndigis = 0;
77271 sax->fsa_ax25.sax25_call = nr->source_addr;
77272 *uaddr_len = sizeof(struct sockaddr_ax25);
77273 }
77274 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
77275 index fabb4fa..37aaea0 100644
77276 --- a/net/packet/af_packet.c
77277 +++ b/net/packet/af_packet.c
77278 @@ -954,7 +954,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
77279
77280 spin_lock(&sk->sk_receive_queue.lock);
77281 po->stats.tp_packets++;
77282 - skb->dropcount = atomic_read(&sk->sk_drops);
77283 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
77284 __skb_queue_tail(&sk->sk_receive_queue, skb);
77285 spin_unlock(&sk->sk_receive_queue.lock);
77286 sk->sk_data_ready(sk, skb->len);
77287 @@ -963,7 +963,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
77288 drop_n_acct:
77289 spin_lock(&sk->sk_receive_queue.lock);
77290 po->stats.tp_drops++;
77291 - atomic_inc(&sk->sk_drops);
77292 + atomic_inc_unchecked(&sk->sk_drops);
77293 spin_unlock(&sk->sk_receive_queue.lock);
77294
77295 drop_n_restore:
77296 @@ -1691,8 +1691,12 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
77297 {
77298 struct packet_sock *po = pkt_sk(sk);
77299
77300 - if (po->fanout)
77301 + if (po->fanout) {
77302 + if (dev)
77303 + dev_put(dev);
77304 +
77305 return -EINVAL;
77306 + }
77307
77308 lock_sock(sk);
77309
77310 @@ -2479,7 +2483,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
77311 case PACKET_HDRLEN:
77312 if (len > sizeof(int))
77313 len = sizeof(int);
77314 - if (copy_from_user(&val, optval, len))
77315 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
77316 return -EFAULT;
77317 switch (val) {
77318 case TPACKET_V1:
77319 @@ -2526,7 +2530,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
77320
77321 if (put_user(len, optlen))
77322 return -EFAULT;
77323 - if (copy_to_user(optval, data, len))
77324 + if (len > sizeof(st) || copy_to_user(optval, data, len))
77325 return -EFAULT;
77326 return 0;
77327 }
77328 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
77329 index c6fffd9..a7ffa0c 100644
77330 --- a/net/phonet/af_phonet.c
77331 +++ b/net/phonet/af_phonet.c
77332 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
77333 {
77334 struct phonet_protocol *pp;
77335
77336 - if (protocol >= PHONET_NPROTO)
77337 + if (protocol < 0 || protocol >= PHONET_NPROTO)
77338 return NULL;
77339
77340 rcu_read_lock();
77341 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
77342 {
77343 int err = 0;
77344
77345 - if (protocol >= PHONET_NPROTO)
77346 + if (protocol < 0 || protocol >= PHONET_NPROTO)
77347 return -EINVAL;
77348
77349 err = proto_register(pp->prot, 1);
77350 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
77351 index f17fd84..edffce8 100644
77352 --- a/net/phonet/pep.c
77353 +++ b/net/phonet/pep.c
77354 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77355
77356 case PNS_PEP_CTRL_REQ:
77357 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
77358 - atomic_inc(&sk->sk_drops);
77359 + atomic_inc_unchecked(&sk->sk_drops);
77360 break;
77361 }
77362 __skb_pull(skb, 4);
77363 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77364 }
77365
77366 if (pn->rx_credits == 0) {
77367 - atomic_inc(&sk->sk_drops);
77368 + atomic_inc_unchecked(&sk->sk_drops);
77369 err = -ENOBUFS;
77370 break;
77371 }
77372 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
77373 }
77374
77375 if (pn->rx_credits == 0) {
77376 - atomic_inc(&sk->sk_drops);
77377 + atomic_inc_unchecked(&sk->sk_drops);
77378 err = NET_RX_DROP;
77379 break;
77380 }
77381 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
77382 index ab07711..9d4ac5d 100644
77383 --- a/net/phonet/socket.c
77384 +++ b/net/phonet/socket.c
77385 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
77386 pn->resource, sk->sk_state,
77387 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
77388 sock_i_uid(sk), sock_i_ino(sk),
77389 - atomic_read(&sk->sk_refcnt), sk,
77390 - atomic_read(&sk->sk_drops), &len);
77391 + atomic_read(&sk->sk_refcnt),
77392 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77393 + NULL,
77394 +#else
77395 + sk,
77396 +#endif
77397 + atomic_read_unchecked(&sk->sk_drops), &len);
77398 }
77399 seq_printf(seq, "%*s\n", 127 - len, "");
77400 return 0;
77401 diff --git a/net/rds/cong.c b/net/rds/cong.c
77402 index 6daaa49..fbf6af5 100644
77403 --- a/net/rds/cong.c
77404 +++ b/net/rds/cong.c
77405 @@ -77,7 +77,7 @@
77406 * finds that the saved generation number is smaller than the global generation
77407 * number, it wakes up the process.
77408 */
77409 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
77410 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
77411
77412 /*
77413 * Congestion monitoring
77414 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
77415 rdsdebug("waking map %p for %pI4\n",
77416 map, &map->m_addr);
77417 rds_stats_inc(s_cong_update_received);
77418 - atomic_inc(&rds_cong_generation);
77419 + atomic_inc_unchecked(&rds_cong_generation);
77420 if (waitqueue_active(&map->m_waitq))
77421 wake_up(&map->m_waitq);
77422 if (waitqueue_active(&rds_poll_waitq))
77423 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
77424
77425 int rds_cong_updated_since(unsigned long *recent)
77426 {
77427 - unsigned long gen = atomic_read(&rds_cong_generation);
77428 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
77429
77430 if (likely(*recent == gen))
77431 return 0;
77432 diff --git a/net/rds/ib.h b/net/rds/ib.h
77433 index edfaaaf..8c89879 100644
77434 --- a/net/rds/ib.h
77435 +++ b/net/rds/ib.h
77436 @@ -128,7 +128,7 @@ struct rds_ib_connection {
77437 /* sending acks */
77438 unsigned long i_ack_flags;
77439 #ifdef KERNEL_HAS_ATOMIC64
77440 - atomic64_t i_ack_next; /* next ACK to send */
77441 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77442 #else
77443 spinlock_t i_ack_lock; /* protect i_ack_next */
77444 u64 i_ack_next; /* next ACK to send */
77445 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
77446 index cd67026..0b9a54a 100644
77447 --- a/net/rds/ib_cm.c
77448 +++ b/net/rds/ib_cm.c
77449 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
77450 /* Clear the ACK state */
77451 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77452 #ifdef KERNEL_HAS_ATOMIC64
77453 - atomic64_set(&ic->i_ack_next, 0);
77454 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77455 #else
77456 ic->i_ack_next = 0;
77457 #endif
77458 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
77459 index e29e0ca..fa3a6a3 100644
77460 --- a/net/rds/ib_recv.c
77461 +++ b/net/rds/ib_recv.c
77462 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77463 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
77464 int ack_required)
77465 {
77466 - atomic64_set(&ic->i_ack_next, seq);
77467 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77468 if (ack_required) {
77469 smp_mb__before_clear_bit();
77470 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77471 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77472 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77473 smp_mb__after_clear_bit();
77474
77475 - return atomic64_read(&ic->i_ack_next);
77476 + return atomic64_read_unchecked(&ic->i_ack_next);
77477 }
77478 #endif
77479
77480 diff --git a/net/rds/iw.h b/net/rds/iw.h
77481 index 04ce3b1..48119a6 100644
77482 --- a/net/rds/iw.h
77483 +++ b/net/rds/iw.h
77484 @@ -134,7 +134,7 @@ struct rds_iw_connection {
77485 /* sending acks */
77486 unsigned long i_ack_flags;
77487 #ifdef KERNEL_HAS_ATOMIC64
77488 - atomic64_t i_ack_next; /* next ACK to send */
77489 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77490 #else
77491 spinlock_t i_ack_lock; /* protect i_ack_next */
77492 u64 i_ack_next; /* next ACK to send */
77493 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
77494 index 9556d28..f046d0e 100644
77495 --- a/net/rds/iw_cm.c
77496 +++ b/net/rds/iw_cm.c
77497 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
77498 /* Clear the ACK state */
77499 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77500 #ifdef KERNEL_HAS_ATOMIC64
77501 - atomic64_set(&ic->i_ack_next, 0);
77502 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77503 #else
77504 ic->i_ack_next = 0;
77505 #endif
77506 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
77507 index 4e1de17..d121708 100644
77508 --- a/net/rds/iw_rdma.c
77509 +++ b/net/rds/iw_rdma.c
77510 @@ -184,6 +184,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
77511 struct rdma_cm_id *pcm_id;
77512 int rc;
77513
77514 + pax_track_stack();
77515 +
77516 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
77517 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
77518
77519 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
77520 index 5e57347..3916042 100644
77521 --- a/net/rds/iw_recv.c
77522 +++ b/net/rds/iw_recv.c
77523 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77524 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
77525 int ack_required)
77526 {
77527 - atomic64_set(&ic->i_ack_next, seq);
77528 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77529 if (ack_required) {
77530 smp_mb__before_clear_bit();
77531 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77532 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77533 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77534 smp_mb__after_clear_bit();
77535
77536 - return atomic64_read(&ic->i_ack_next);
77537 + return atomic64_read_unchecked(&ic->i_ack_next);
77538 }
77539 #endif
77540
77541 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
77542 index 8e0a320..ee8e38f 100644
77543 --- a/net/rds/tcp.c
77544 +++ b/net/rds/tcp.c
77545 @@ -58,7 +58,7 @@ void rds_tcp_nonagle(struct socket *sock)
77546 int val = 1;
77547
77548 set_fs(KERNEL_DS);
77549 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
77550 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
77551 sizeof(val));
77552 set_fs(oldfs);
77553 }
77554 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
77555 index 1b4fd68..2234175 100644
77556 --- a/net/rds/tcp_send.c
77557 +++ b/net/rds/tcp_send.c
77558 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
77559
77560 oldfs = get_fs();
77561 set_fs(KERNEL_DS);
77562 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
77563 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
77564 sizeof(val));
77565 set_fs(oldfs);
77566 }
77567 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
77568 index 74c064c..fdec26f 100644
77569 --- a/net/rxrpc/af_rxrpc.c
77570 +++ b/net/rxrpc/af_rxrpc.c
77571 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
77572 __be32 rxrpc_epoch;
77573
77574 /* current debugging ID */
77575 -atomic_t rxrpc_debug_id;
77576 +atomic_unchecked_t rxrpc_debug_id;
77577
77578 /* count of skbs currently in use */
77579 atomic_t rxrpc_n_skbs;
77580 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
77581 index f99cfce..3682692 100644
77582 --- a/net/rxrpc/ar-ack.c
77583 +++ b/net/rxrpc/ar-ack.c
77584 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77585
77586 _enter("{%d,%d,%d,%d},",
77587 call->acks_hard, call->acks_unacked,
77588 - atomic_read(&call->sequence),
77589 + atomic_read_unchecked(&call->sequence),
77590 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
77591
77592 stop = 0;
77593 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77594
77595 /* each Tx packet has a new serial number */
77596 sp->hdr.serial =
77597 - htonl(atomic_inc_return(&call->conn->serial));
77598 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77599
77600 hdr = (struct rxrpc_header *) txb->head;
77601 hdr->serial = sp->hdr.serial;
77602 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77603 */
77604 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77605 {
77606 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77607 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77608 }
77609
77610 /*
77611 @@ -629,7 +629,7 @@ process_further:
77612
77613 latest = ntohl(sp->hdr.serial);
77614 hard = ntohl(ack.firstPacket);
77615 - tx = atomic_read(&call->sequence);
77616 + tx = atomic_read_unchecked(&call->sequence);
77617
77618 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77619 latest,
77620 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_struct *work)
77621 u32 abort_code = RX_PROTOCOL_ERROR;
77622 u8 *acks = NULL;
77623
77624 + pax_track_stack();
77625 +
77626 //printk("\n--------------------\n");
77627 _enter("{%d,%s,%lx} [%lu]",
77628 call->debug_id, rxrpc_call_states[call->state], call->events,
77629 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_struct *work)
77630 goto maybe_reschedule;
77631
77632 send_ACK_with_skew:
77633 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77634 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77635 ntohl(ack.serial));
77636 send_ACK:
77637 mtu = call->conn->trans->peer->if_mtu;
77638 @@ -1173,7 +1175,7 @@ send_ACK:
77639 ackinfo.rxMTU = htonl(5692);
77640 ackinfo.jumbo_max = htonl(4);
77641
77642 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77643 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77644 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77645 ntohl(hdr.serial),
77646 ntohs(ack.maxSkew),
77647 @@ -1191,7 +1193,7 @@ send_ACK:
77648 send_message:
77649 _debug("send message");
77650
77651 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77652 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77653 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77654 send_message_2:
77655
77656 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77657 index bf656c2..48f9d27 100644
77658 --- a/net/rxrpc/ar-call.c
77659 +++ b/net/rxrpc/ar-call.c
77660 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77661 spin_lock_init(&call->lock);
77662 rwlock_init(&call->state_lock);
77663 atomic_set(&call->usage, 1);
77664 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77665 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77666 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77667
77668 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77669 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77670 index 4106ca9..a338d7a 100644
77671 --- a/net/rxrpc/ar-connection.c
77672 +++ b/net/rxrpc/ar-connection.c
77673 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77674 rwlock_init(&conn->lock);
77675 spin_lock_init(&conn->state_lock);
77676 atomic_set(&conn->usage, 1);
77677 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77678 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77679 conn->avail_calls = RXRPC_MAXCALLS;
77680 conn->size_align = 4;
77681 conn->header_size = sizeof(struct rxrpc_header);
77682 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77683 index e7ed43a..6afa140 100644
77684 --- a/net/rxrpc/ar-connevent.c
77685 +++ b/net/rxrpc/ar-connevent.c
77686 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77687
77688 len = iov[0].iov_len + iov[1].iov_len;
77689
77690 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77691 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77692 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77693
77694 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77695 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77696 index 1a2b0633..e8d1382 100644
77697 --- a/net/rxrpc/ar-input.c
77698 +++ b/net/rxrpc/ar-input.c
77699 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77700 /* track the latest serial number on this connection for ACK packet
77701 * information */
77702 serial = ntohl(sp->hdr.serial);
77703 - hi_serial = atomic_read(&call->conn->hi_serial);
77704 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77705 while (serial > hi_serial)
77706 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77707 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77708 serial);
77709
77710 /* request ACK generation for any ACK or DATA packet that requests
77711 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77712 index 8e22bd3..f66d1c0 100644
77713 --- a/net/rxrpc/ar-internal.h
77714 +++ b/net/rxrpc/ar-internal.h
77715 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77716 int error; /* error code for local abort */
77717 int debug_id; /* debug ID for printks */
77718 unsigned call_counter; /* call ID counter */
77719 - atomic_t serial; /* packet serial number counter */
77720 - atomic_t hi_serial; /* highest serial number received */
77721 + atomic_unchecked_t serial; /* packet serial number counter */
77722 + atomic_unchecked_t hi_serial; /* highest serial number received */
77723 u8 avail_calls; /* number of calls available */
77724 u8 size_align; /* data size alignment (for security) */
77725 u8 header_size; /* rxrpc + security header size */
77726 @@ -346,7 +346,7 @@ struct rxrpc_call {
77727 spinlock_t lock;
77728 rwlock_t state_lock; /* lock for state transition */
77729 atomic_t usage;
77730 - atomic_t sequence; /* Tx data packet sequence counter */
77731 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77732 u32 abort_code; /* local/remote abort code */
77733 enum { /* current state of call */
77734 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77735 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77736 */
77737 extern atomic_t rxrpc_n_skbs;
77738 extern __be32 rxrpc_epoch;
77739 -extern atomic_t rxrpc_debug_id;
77740 +extern atomic_unchecked_t rxrpc_debug_id;
77741 extern struct workqueue_struct *rxrpc_workqueue;
77742
77743 /*
77744 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77745 index 87f7135..74d3703 100644
77746 --- a/net/rxrpc/ar-local.c
77747 +++ b/net/rxrpc/ar-local.c
77748 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77749 spin_lock_init(&local->lock);
77750 rwlock_init(&local->services_lock);
77751 atomic_set(&local->usage, 1);
77752 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77753 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77754 memcpy(&local->srx, srx, sizeof(*srx));
77755 }
77756
77757 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
77758 index 5f22e26..e5bd20f 100644
77759 --- a/net/rxrpc/ar-output.c
77760 +++ b/net/rxrpc/ar-output.c
77761 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
77762 sp->hdr.cid = call->cid;
77763 sp->hdr.callNumber = call->call_id;
77764 sp->hdr.seq =
77765 - htonl(atomic_inc_return(&call->sequence));
77766 + htonl(atomic_inc_return_unchecked(&call->sequence));
77767 sp->hdr.serial =
77768 - htonl(atomic_inc_return(&conn->serial));
77769 + htonl(atomic_inc_return_unchecked(&conn->serial));
77770 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
77771 sp->hdr.userStatus = 0;
77772 sp->hdr.securityIndex = conn->security_ix;
77773 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
77774 index 2754f09..b20e38f 100644
77775 --- a/net/rxrpc/ar-peer.c
77776 +++ b/net/rxrpc/ar-peer.c
77777 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
77778 INIT_LIST_HEAD(&peer->error_targets);
77779 spin_lock_init(&peer->lock);
77780 atomic_set(&peer->usage, 1);
77781 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
77782 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77783 memcpy(&peer->srx, srx, sizeof(*srx));
77784
77785 rxrpc_assess_MTU_size(peer);
77786 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
77787 index 38047f7..9f48511 100644
77788 --- a/net/rxrpc/ar-proc.c
77789 +++ b/net/rxrpc/ar-proc.c
77790 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
77791 atomic_read(&conn->usage),
77792 rxrpc_conn_states[conn->state],
77793 key_serial(conn->key),
77794 - atomic_read(&conn->serial),
77795 - atomic_read(&conn->hi_serial));
77796 + atomic_read_unchecked(&conn->serial),
77797 + atomic_read_unchecked(&conn->hi_serial));
77798
77799 return 0;
77800 }
77801 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
77802 index 92df566..87ec1bf 100644
77803 --- a/net/rxrpc/ar-transport.c
77804 +++ b/net/rxrpc/ar-transport.c
77805 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
77806 spin_lock_init(&trans->client_lock);
77807 rwlock_init(&trans->conn_lock);
77808 atomic_set(&trans->usage, 1);
77809 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
77810 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77811
77812 if (peer->srx.transport.family == AF_INET) {
77813 switch (peer->srx.transport_type) {
77814 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
77815 index 7635107..5000b71 100644
77816 --- a/net/rxrpc/rxkad.c
77817 +++ b/net/rxrpc/rxkad.c
77818 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
77819 u16 check;
77820 int nsg;
77821
77822 + pax_track_stack();
77823 +
77824 sp = rxrpc_skb(skb);
77825
77826 _enter("");
77827 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
77828 u16 check;
77829 int nsg;
77830
77831 + pax_track_stack();
77832 +
77833 _enter("");
77834
77835 sp = rxrpc_skb(skb);
77836 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
77837
77838 len = iov[0].iov_len + iov[1].iov_len;
77839
77840 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77841 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77842 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
77843
77844 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77845 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
77846
77847 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
77848
77849 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
77850 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77851 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
77852
77853 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
77854 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
77855 index 865e68f..bf81204 100644
77856 --- a/net/sctp/auth.c
77857 +++ b/net/sctp/auth.c
77858 @@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
77859 struct sctp_auth_bytes *key;
77860
77861 /* Verify that we are not going to overflow INT_MAX */
77862 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
77863 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
77864 return NULL;
77865
77866 /* Allocate the shared key */
77867 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
77868 index 05a6ce2..c8bf836 100644
77869 --- a/net/sctp/proc.c
77870 +++ b/net/sctp/proc.c
77871 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
77872 seq_printf(seq,
77873 "%8pK %8pK %-3d %-3d %-2d %-4d "
77874 "%4d %8d %8d %7d %5lu %-5d %5d ",
77875 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
77876 + assoc, sk,
77877 + sctp_sk(sk)->type, sk->sk_state,
77878 assoc->state, hash,
77879 assoc->assoc_id,
77880 assoc->sndbuf_used,
77881 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
77882 index 4760f4e..e44d3fb 100644
77883 --- a/net/sctp/socket.c
77884 +++ b/net/sctp/socket.c
77885 @@ -4573,7 +4573,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
77886 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
77887 if (space_left < addrlen)
77888 return -ENOMEM;
77889 - if (copy_to_user(to, &temp, addrlen))
77890 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
77891 return -EFAULT;
77892 to += addrlen;
77893 cnt++;
77894 diff --git a/net/socket.c b/net/socket.c
77895 index ffe92ca..8057b85 100644
77896 --- a/net/socket.c
77897 +++ b/net/socket.c
77898 @@ -88,6 +88,7 @@
77899 #include <linux/nsproxy.h>
77900 #include <linux/magic.h>
77901 #include <linux/slab.h>
77902 +#include <linux/in.h>
77903
77904 #include <asm/uaccess.h>
77905 #include <asm/unistd.h>
77906 @@ -105,6 +106,8 @@
77907 #include <linux/sockios.h>
77908 #include <linux/atalk.h>
77909
77910 +#include <linux/grsock.h>
77911 +
77912 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
77913 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
77914 unsigned long nr_segs, loff_t pos);
77915 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
77916 &sockfs_dentry_operations, SOCKFS_MAGIC);
77917 }
77918
77919 -static struct vfsmount *sock_mnt __read_mostly;
77920 +struct vfsmount *sock_mnt __read_mostly;
77921
77922 static struct file_system_type sock_fs_type = {
77923 .name = "sockfs",
77924 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
77925 return -EAFNOSUPPORT;
77926 if (type < 0 || type >= SOCK_MAX)
77927 return -EINVAL;
77928 + if (protocol < 0)
77929 + return -EINVAL;
77930
77931 /* Compatibility.
77932
77933 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
77934 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
77935 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
77936
77937 + if(!gr_search_socket(family, type, protocol)) {
77938 + retval = -EACCES;
77939 + goto out;
77940 + }
77941 +
77942 + if (gr_handle_sock_all(family, type, protocol)) {
77943 + retval = -EACCES;
77944 + goto out;
77945 + }
77946 +
77947 retval = sock_create(family, type, protocol, &sock);
77948 if (retval < 0)
77949 goto out;
77950 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77951 if (sock) {
77952 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
77953 if (err >= 0) {
77954 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
77955 + err = -EACCES;
77956 + goto error;
77957 + }
77958 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
77959 + if (err)
77960 + goto error;
77961 +
77962 err = security_socket_bind(sock,
77963 (struct sockaddr *)&address,
77964 addrlen);
77965 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77966 (struct sockaddr *)
77967 &address, addrlen);
77968 }
77969 +error:
77970 fput_light(sock->file, fput_needed);
77971 }
77972 return err;
77973 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
77974 if ((unsigned)backlog > somaxconn)
77975 backlog = somaxconn;
77976
77977 + if (gr_handle_sock_server_other(sock->sk)) {
77978 + err = -EPERM;
77979 + goto error;
77980 + }
77981 +
77982 + err = gr_search_listen(sock);
77983 + if (err)
77984 + goto error;
77985 +
77986 err = security_socket_listen(sock, backlog);
77987 if (!err)
77988 err = sock->ops->listen(sock, backlog);
77989
77990 +error:
77991 fput_light(sock->file, fput_needed);
77992 }
77993 return err;
77994 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77995 newsock->type = sock->type;
77996 newsock->ops = sock->ops;
77997
77998 + if (gr_handle_sock_server_other(sock->sk)) {
77999 + err = -EPERM;
78000 + sock_release(newsock);
78001 + goto out_put;
78002 + }
78003 +
78004 + err = gr_search_accept(sock);
78005 + if (err) {
78006 + sock_release(newsock);
78007 + goto out_put;
78008 + }
78009 +
78010 /*
78011 * We don't need try_module_get here, as the listening socket (sock)
78012 * has the protocol module (sock->ops->owner) held.
78013 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
78014 fd_install(newfd, newfile);
78015 err = newfd;
78016
78017 + gr_attach_curr_ip(newsock->sk);
78018 +
78019 out_put:
78020 fput_light(sock->file, fput_needed);
78021 out:
78022 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
78023 int, addrlen)
78024 {
78025 struct socket *sock;
78026 + struct sockaddr *sck;
78027 struct sockaddr_storage address;
78028 int err, fput_needed;
78029
78030 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
78031 if (err < 0)
78032 goto out_put;
78033
78034 + sck = (struct sockaddr *)&address;
78035 +
78036 + if (gr_handle_sock_client(sck)) {
78037 + err = -EACCES;
78038 + goto out_put;
78039 + }
78040 +
78041 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
78042 + if (err)
78043 + goto out_put;
78044 +
78045 err =
78046 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
78047 if (err)
78048 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
78049 unsigned char *ctl_buf = ctl;
78050 int err, ctl_len, iov_size, total_len;
78051
78052 + pax_track_stack();
78053 +
78054 err = -EFAULT;
78055 if (MSG_CMSG_COMPAT & flags) {
78056 if (get_compat_msghdr(msg_sys, msg_compat))
78057 @@ -1950,7 +2012,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
78058 * checking falls down on this.
78059 */
78060 if (copy_from_user(ctl_buf,
78061 - (void __user __force *)msg_sys->msg_control,
78062 + (void __force_user *)msg_sys->msg_control,
78063 ctl_len))
78064 goto out_freectl;
78065 msg_sys->msg_control = ctl_buf;
78066 @@ -2120,7 +2182,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
78067 * kernel msghdr to use the kernel address space)
78068 */
78069
78070 - uaddr = (__force void __user *)msg_sys->msg_name;
78071 + uaddr = (void __force_user *)msg_sys->msg_name;
78072 uaddr_len = COMPAT_NAMELEN(msg);
78073 if (MSG_CMSG_COMPAT & flags) {
78074 err = verify_compat_iovec(msg_sys, iov,
78075 @@ -2748,7 +2810,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
78076 }
78077
78078 ifr = compat_alloc_user_space(buf_size);
78079 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
78080 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
78081
78082 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
78083 return -EFAULT;
78084 @@ -2772,12 +2834,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
78085 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
78086
78087 if (copy_in_user(rxnfc, compat_rxnfc,
78088 - (void *)(&rxnfc->fs.m_ext + 1) -
78089 - (void *)rxnfc) ||
78090 + (void __user *)(&rxnfc->fs.m_ext + 1) -
78091 + (void __user *)rxnfc) ||
78092 copy_in_user(&rxnfc->fs.ring_cookie,
78093 &compat_rxnfc->fs.ring_cookie,
78094 - (void *)(&rxnfc->fs.location + 1) -
78095 - (void *)&rxnfc->fs.ring_cookie) ||
78096 + (void __user *)(&rxnfc->fs.location + 1) -
78097 + (void __user *)&rxnfc->fs.ring_cookie) ||
78098 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
78099 sizeof(rxnfc->rule_cnt)))
78100 return -EFAULT;
78101 @@ -2789,12 +2851,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
78102
78103 if (convert_out) {
78104 if (copy_in_user(compat_rxnfc, rxnfc,
78105 - (const void *)(&rxnfc->fs.m_ext + 1) -
78106 - (const void *)rxnfc) ||
78107 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
78108 + (const void __user *)rxnfc) ||
78109 copy_in_user(&compat_rxnfc->fs.ring_cookie,
78110 &rxnfc->fs.ring_cookie,
78111 - (const void *)(&rxnfc->fs.location + 1) -
78112 - (const void *)&rxnfc->fs.ring_cookie) ||
78113 + (const void __user *)(&rxnfc->fs.location + 1) -
78114 + (const void __user *)&rxnfc->fs.ring_cookie) ||
78115 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
78116 sizeof(rxnfc->rule_cnt)))
78117 return -EFAULT;
78118 @@ -2864,7 +2926,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
78119 old_fs = get_fs();
78120 set_fs(KERNEL_DS);
78121 err = dev_ioctl(net, cmd,
78122 - (struct ifreq __user __force *) &kifr);
78123 + (struct ifreq __force_user *) &kifr);
78124 set_fs(old_fs);
78125
78126 return err;
78127 @@ -2973,7 +3035,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
78128
78129 old_fs = get_fs();
78130 set_fs(KERNEL_DS);
78131 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
78132 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
78133 set_fs(old_fs);
78134
78135 if (cmd == SIOCGIFMAP && !err) {
78136 @@ -3078,7 +3140,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
78137 ret |= __get_user(rtdev, &(ur4->rt_dev));
78138 if (rtdev) {
78139 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
78140 - r4.rt_dev = (char __user __force *)devname;
78141 + r4.rt_dev = (char __force_user *)devname;
78142 devname[15] = 0;
78143 } else
78144 r4.rt_dev = NULL;
78145 @@ -3318,8 +3380,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
78146 int __user *uoptlen;
78147 int err;
78148
78149 - uoptval = (char __user __force *) optval;
78150 - uoptlen = (int __user __force *) optlen;
78151 + uoptval = (char __force_user *) optval;
78152 + uoptlen = (int __force_user *) optlen;
78153
78154 set_fs(KERNEL_DS);
78155 if (level == SOL_SOCKET)
78156 @@ -3339,7 +3401,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
78157 char __user *uoptval;
78158 int err;
78159
78160 - uoptval = (char __user __force *) optval;
78161 + uoptval = (char __force_user *) optval;
78162
78163 set_fs(KERNEL_DS);
78164 if (level == SOL_SOCKET)
78165 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
78166 index d12ffa5..0b5a6e2 100644
78167 --- a/net/sunrpc/sched.c
78168 +++ b/net/sunrpc/sched.c
78169 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
78170 #ifdef RPC_DEBUG
78171 static void rpc_task_set_debuginfo(struct rpc_task *task)
78172 {
78173 - static atomic_t rpc_pid;
78174 + static atomic_unchecked_t rpc_pid;
78175
78176 - task->tk_pid = atomic_inc_return(&rpc_pid);
78177 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
78178 }
78179 #else
78180 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
78181 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
78182 index 767d494..fe17e9d 100644
78183 --- a/net/sunrpc/svcsock.c
78184 +++ b/net/sunrpc/svcsock.c
78185 @@ -394,7 +394,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
78186 int buflen, unsigned int base)
78187 {
78188 size_t save_iovlen;
78189 - void __user *save_iovbase;
78190 + void *save_iovbase;
78191 unsigned int i;
78192 int ret;
78193
78194 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
78195 index 09af4fa..77110a9 100644
78196 --- a/net/sunrpc/xprtrdma/svc_rdma.c
78197 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
78198 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
78199 static unsigned int min_max_inline = 4096;
78200 static unsigned int max_max_inline = 65536;
78201
78202 -atomic_t rdma_stat_recv;
78203 -atomic_t rdma_stat_read;
78204 -atomic_t rdma_stat_write;
78205 -atomic_t rdma_stat_sq_starve;
78206 -atomic_t rdma_stat_rq_starve;
78207 -atomic_t rdma_stat_rq_poll;
78208 -atomic_t rdma_stat_rq_prod;
78209 -atomic_t rdma_stat_sq_poll;
78210 -atomic_t rdma_stat_sq_prod;
78211 +atomic_unchecked_t rdma_stat_recv;
78212 +atomic_unchecked_t rdma_stat_read;
78213 +atomic_unchecked_t rdma_stat_write;
78214 +atomic_unchecked_t rdma_stat_sq_starve;
78215 +atomic_unchecked_t rdma_stat_rq_starve;
78216 +atomic_unchecked_t rdma_stat_rq_poll;
78217 +atomic_unchecked_t rdma_stat_rq_prod;
78218 +atomic_unchecked_t rdma_stat_sq_poll;
78219 +atomic_unchecked_t rdma_stat_sq_prod;
78220
78221 /* Temporary NFS request map and context caches */
78222 struct kmem_cache *svc_rdma_map_cachep;
78223 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
78224 len -= *ppos;
78225 if (len > *lenp)
78226 len = *lenp;
78227 - if (len && copy_to_user(buffer, str_buf, len))
78228 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
78229 return -EFAULT;
78230 *lenp = len;
78231 *ppos += len;
78232 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
78233 {
78234 .procname = "rdma_stat_read",
78235 .data = &rdma_stat_read,
78236 - .maxlen = sizeof(atomic_t),
78237 + .maxlen = sizeof(atomic_unchecked_t),
78238 .mode = 0644,
78239 .proc_handler = read_reset_stat,
78240 },
78241 {
78242 .procname = "rdma_stat_recv",
78243 .data = &rdma_stat_recv,
78244 - .maxlen = sizeof(atomic_t),
78245 + .maxlen = sizeof(atomic_unchecked_t),
78246 .mode = 0644,
78247 .proc_handler = read_reset_stat,
78248 },
78249 {
78250 .procname = "rdma_stat_write",
78251 .data = &rdma_stat_write,
78252 - .maxlen = sizeof(atomic_t),
78253 + .maxlen = sizeof(atomic_unchecked_t),
78254 .mode = 0644,
78255 .proc_handler = read_reset_stat,
78256 },
78257 {
78258 .procname = "rdma_stat_sq_starve",
78259 .data = &rdma_stat_sq_starve,
78260 - .maxlen = sizeof(atomic_t),
78261 + .maxlen = sizeof(atomic_unchecked_t),
78262 .mode = 0644,
78263 .proc_handler = read_reset_stat,
78264 },
78265 {
78266 .procname = "rdma_stat_rq_starve",
78267 .data = &rdma_stat_rq_starve,
78268 - .maxlen = sizeof(atomic_t),
78269 + .maxlen = sizeof(atomic_unchecked_t),
78270 .mode = 0644,
78271 .proc_handler = read_reset_stat,
78272 },
78273 {
78274 .procname = "rdma_stat_rq_poll",
78275 .data = &rdma_stat_rq_poll,
78276 - .maxlen = sizeof(atomic_t),
78277 + .maxlen = sizeof(atomic_unchecked_t),
78278 .mode = 0644,
78279 .proc_handler = read_reset_stat,
78280 },
78281 {
78282 .procname = "rdma_stat_rq_prod",
78283 .data = &rdma_stat_rq_prod,
78284 - .maxlen = sizeof(atomic_t),
78285 + .maxlen = sizeof(atomic_unchecked_t),
78286 .mode = 0644,
78287 .proc_handler = read_reset_stat,
78288 },
78289 {
78290 .procname = "rdma_stat_sq_poll",
78291 .data = &rdma_stat_sq_poll,
78292 - .maxlen = sizeof(atomic_t),
78293 + .maxlen = sizeof(atomic_unchecked_t),
78294 .mode = 0644,
78295 .proc_handler = read_reset_stat,
78296 },
78297 {
78298 .procname = "rdma_stat_sq_prod",
78299 .data = &rdma_stat_sq_prod,
78300 - .maxlen = sizeof(atomic_t),
78301 + .maxlen = sizeof(atomic_unchecked_t),
78302 .mode = 0644,
78303 .proc_handler = read_reset_stat,
78304 },
78305 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78306 index df67211..c354b13 100644
78307 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78308 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78309 @@ -499,7 +499,7 @@ next_sge:
78310 svc_rdma_put_context(ctxt, 0);
78311 goto out;
78312 }
78313 - atomic_inc(&rdma_stat_read);
78314 + atomic_inc_unchecked(&rdma_stat_read);
78315
78316 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
78317 chl_map->ch[ch_no].count -= read_wr.num_sge;
78318 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
78319 dto_q);
78320 list_del_init(&ctxt->dto_q);
78321 } else {
78322 - atomic_inc(&rdma_stat_rq_starve);
78323 + atomic_inc_unchecked(&rdma_stat_rq_starve);
78324 clear_bit(XPT_DATA, &xprt->xpt_flags);
78325 ctxt = NULL;
78326 }
78327 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
78328 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
78329 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
78330 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
78331 - atomic_inc(&rdma_stat_recv);
78332 + atomic_inc_unchecked(&rdma_stat_recv);
78333
78334 /* Build up the XDR from the receive buffers. */
78335 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
78336 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78337 index 249a835..fb2794b 100644
78338 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78339 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78340 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
78341 write_wr.wr.rdma.remote_addr = to;
78342
78343 /* Post It */
78344 - atomic_inc(&rdma_stat_write);
78345 + atomic_inc_unchecked(&rdma_stat_write);
78346 if (svc_rdma_send(xprt, &write_wr))
78347 goto err;
78348 return 0;
78349 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
78350 index a385430..32254ea 100644
78351 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
78352 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
78353 @@ -299,7 +299,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
78354 return;
78355
78356 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
78357 - atomic_inc(&rdma_stat_rq_poll);
78358 + atomic_inc_unchecked(&rdma_stat_rq_poll);
78359
78360 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
78361 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
78362 @@ -321,7 +321,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
78363 }
78364
78365 if (ctxt)
78366 - atomic_inc(&rdma_stat_rq_prod);
78367 + atomic_inc_unchecked(&rdma_stat_rq_prod);
78368
78369 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
78370 /*
78371 @@ -393,7 +393,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78372 return;
78373
78374 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
78375 - atomic_inc(&rdma_stat_sq_poll);
78376 + atomic_inc_unchecked(&rdma_stat_sq_poll);
78377 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
78378 if (wc.status != IB_WC_SUCCESS)
78379 /* Close the transport */
78380 @@ -411,7 +411,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78381 }
78382
78383 if (ctxt)
78384 - atomic_inc(&rdma_stat_sq_prod);
78385 + atomic_inc_unchecked(&rdma_stat_sq_prod);
78386 }
78387
78388 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
78389 @@ -1273,7 +1273,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
78390 spin_lock_bh(&xprt->sc_lock);
78391 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
78392 spin_unlock_bh(&xprt->sc_lock);
78393 - atomic_inc(&rdma_stat_sq_starve);
78394 + atomic_inc_unchecked(&rdma_stat_sq_starve);
78395
78396 /* See if we can opportunistically reap SQ WR to make room */
78397 sq_cq_reap(xprt);
78398 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
78399 index ca84212..3aa338f 100644
78400 --- a/net/sysctl_net.c
78401 +++ b/net/sysctl_net.c
78402 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
78403 struct ctl_table *table)
78404 {
78405 /* Allow network administrator to have same access as root. */
78406 - if (capable(CAP_NET_ADMIN)) {
78407 + if (capable_nolog(CAP_NET_ADMIN)) {
78408 int mode = (table->mode >> 6) & 7;
78409 return (mode << 6) | (mode << 3) | mode;
78410 }
78411 diff --git a/net/tipc/link.c b/net/tipc/link.c
78412 index f89570c..016cf63 100644
78413 --- a/net/tipc/link.c
78414 +++ b/net/tipc/link.c
78415 @@ -1170,7 +1170,7 @@ static int link_send_sections_long(struct tipc_port *sender,
78416 struct tipc_msg fragm_hdr;
78417 struct sk_buff *buf, *buf_chain, *prev;
78418 u32 fragm_crs, fragm_rest, hsz, sect_rest;
78419 - const unchar *sect_crs;
78420 + const unchar __user *sect_crs;
78421 int curr_sect;
78422 u32 fragm_no;
78423
78424 @@ -1214,7 +1214,7 @@ again:
78425
78426 if (!sect_rest) {
78427 sect_rest = msg_sect[++curr_sect].iov_len;
78428 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
78429 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
78430 }
78431
78432 if (sect_rest < fragm_rest)
78433 @@ -1233,7 +1233,7 @@ error:
78434 }
78435 } else
78436 skb_copy_to_linear_data_offset(buf, fragm_crs,
78437 - sect_crs, sz);
78438 + (const void __force_kernel *)sect_crs, sz);
78439 sect_crs += sz;
78440 sect_rest -= sz;
78441 fragm_crs += sz;
78442 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
78443 index 83d5096..dcba497 100644
78444 --- a/net/tipc/msg.c
78445 +++ b/net/tipc/msg.c
78446 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
78447 msg_sect[cnt].iov_len);
78448 else
78449 skb_copy_to_linear_data_offset(*buf, pos,
78450 - msg_sect[cnt].iov_base,
78451 + (const void __force_kernel *)msg_sect[cnt].iov_base,
78452 msg_sect[cnt].iov_len);
78453 pos += msg_sect[cnt].iov_len;
78454 }
78455 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
78456 index 6cf7268..7a488ce 100644
78457 --- a/net/tipc/subscr.c
78458 +++ b/net/tipc/subscr.c
78459 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
78460 {
78461 struct iovec msg_sect;
78462
78463 - msg_sect.iov_base = (void *)&sub->evt;
78464 + msg_sect.iov_base = (void __force_user *)&sub->evt;
78465 msg_sect.iov_len = sizeof(struct tipc_event);
78466
78467 sub->evt.event = htohl(event, sub->swap);
78468 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
78469 index ec68e1c..fdd792f 100644
78470 --- a/net/unix/af_unix.c
78471 +++ b/net/unix/af_unix.c
78472 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
78473 err = -ECONNREFUSED;
78474 if (!S_ISSOCK(inode->i_mode))
78475 goto put_fail;
78476 +
78477 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
78478 + err = -EACCES;
78479 + goto put_fail;
78480 + }
78481 +
78482 u = unix_find_socket_byinode(inode);
78483 if (!u)
78484 goto put_fail;
78485 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
78486 if (u) {
78487 struct dentry *dentry;
78488 dentry = unix_sk(u)->dentry;
78489 +
78490 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
78491 + err = -EPERM;
78492 + sock_put(u);
78493 + goto fail;
78494 + }
78495 +
78496 if (dentry)
78497 touch_atime(unix_sk(u)->mnt, dentry);
78498 } else
78499 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
78500 err = security_path_mknod(&path, dentry, mode, 0);
78501 if (err)
78502 goto out_mknod_drop_write;
78503 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
78504 + err = -EACCES;
78505 + goto out_mknod_drop_write;
78506 + }
78507 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
78508 out_mknod_drop_write:
78509 mnt_drop_write(path.mnt);
78510 if (err)
78511 goto out_mknod_dput;
78512 +
78513 + gr_handle_create(dentry, path.mnt);
78514 +
78515 mutex_unlock(&path.dentry->d_inode->i_mutex);
78516 dput(path.dentry);
78517 path.dentry = dentry;
78518 diff --git a/net/wireless/core.h b/net/wireless/core.h
78519 index 8672e02..48782dd 100644
78520 --- a/net/wireless/core.h
78521 +++ b/net/wireless/core.h
78522 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
78523 struct mutex mtx;
78524
78525 /* rfkill support */
78526 - struct rfkill_ops rfkill_ops;
78527 + rfkill_ops_no_const rfkill_ops;
78528 struct rfkill *rfkill;
78529 struct work_struct rfkill_sync;
78530
78531 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
78532 index fdbc23c..212d53e 100644
78533 --- a/net/wireless/wext-core.c
78534 +++ b/net/wireless/wext-core.c
78535 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78536 */
78537
78538 /* Support for very large requests */
78539 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
78540 - (user_length > descr->max_tokens)) {
78541 + if (user_length > descr->max_tokens) {
78542 /* Allow userspace to GET more than max so
78543 * we can support any size GET requests.
78544 * There is still a limit : -ENOMEM.
78545 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78546 }
78547 }
78548
78549 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
78550 - /*
78551 - * If this is a GET, but not NOMAX, it means that the extra
78552 - * data is not bounded by userspace, but by max_tokens. Thus
78553 - * set the length to max_tokens. This matches the extra data
78554 - * allocation.
78555 - * The driver should fill it with the number of tokens it
78556 - * provided, and it may check iwp->length rather than having
78557 - * knowledge of max_tokens. If the driver doesn't change the
78558 - * iwp->length, this ioctl just copies back max_token tokens
78559 - * filled with zeroes. Hopefully the driver isn't claiming
78560 - * them to be valid data.
78561 - */
78562 - iwp->length = descr->max_tokens;
78563 - }
78564 -
78565 err = handler(dev, info, (union iwreq_data *) iwp, extra);
78566
78567 iwp->length += essid_compat;
78568 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
78569 index 7e088c0..dd3f206 100644
78570 --- a/net/xfrm/xfrm_policy.c
78571 +++ b/net/xfrm/xfrm_policy.c
78572 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
78573 {
78574 policy->walk.dead = 1;
78575
78576 - atomic_inc(&policy->genid);
78577 + atomic_inc_unchecked(&policy->genid);
78578
78579 if (del_timer(&policy->timer))
78580 xfrm_pol_put(policy);
78581 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
78582 hlist_add_head(&policy->bydst, chain);
78583 xfrm_pol_hold(policy);
78584 net->xfrm.policy_count[dir]++;
78585 - atomic_inc(&flow_cache_genid);
78586 + atomic_inc_unchecked(&flow_cache_genid);
78587 if (delpol)
78588 __xfrm_policy_unlink(delpol, dir);
78589 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
78590 @@ -1530,7 +1530,7 @@ free_dst:
78591 goto out;
78592 }
78593
78594 -static int inline
78595 +static inline int
78596 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78597 {
78598 if (!*target) {
78599 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78600 return 0;
78601 }
78602
78603 -static int inline
78604 +static inline int
78605 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78606 {
78607 #ifdef CONFIG_XFRM_SUB_POLICY
78608 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78609 #endif
78610 }
78611
78612 -static int inline
78613 +static inline int
78614 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78615 {
78616 #ifdef CONFIG_XFRM_SUB_POLICY
78617 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78618
78619 xdst->num_pols = num_pols;
78620 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78621 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78622 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78623
78624 return xdst;
78625 }
78626 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78627 if (xdst->xfrm_genid != dst->xfrm->genid)
78628 return 0;
78629 if (xdst->num_pols > 0 &&
78630 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78631 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78632 return 0;
78633
78634 mtu = dst_mtu(dst->child);
78635 @@ -2880,7 +2880,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78636 sizeof(pol->xfrm_vec[i].saddr));
78637 pol->xfrm_vec[i].encap_family = mp->new_family;
78638 /* flush bundles */
78639 - atomic_inc(&pol->genid);
78640 + atomic_inc_unchecked(&pol->genid);
78641 }
78642 }
78643
78644 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
78645 index 0256b8a..9341ef6 100644
78646 --- a/net/xfrm/xfrm_user.c
78647 +++ b/net/xfrm/xfrm_user.c
78648 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
78649 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
78650 int i;
78651
78652 + pax_track_stack();
78653 +
78654 if (xp->xfrm_nr == 0)
78655 return 0;
78656
78657 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
78658 int err;
78659 int n = 0;
78660
78661 + pax_track_stack();
78662 +
78663 if (attrs[XFRMA_MIGRATE] == NULL)
78664 return -EINVAL;
78665
78666 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78667 index a0fd502..a8e6e83 100644
78668 --- a/scripts/Makefile.build
78669 +++ b/scripts/Makefile.build
78670 @@ -109,7 +109,7 @@ endif
78671 endif
78672
78673 # Do not include host rules unless needed
78674 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78675 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
78676 include scripts/Makefile.host
78677 endif
78678
78679 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78680 index 686cb0d..9d653bf 100644
78681 --- a/scripts/Makefile.clean
78682 +++ b/scripts/Makefile.clean
78683 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78684 __clean-files := $(extra-y) $(always) \
78685 $(targets) $(clean-files) \
78686 $(host-progs) \
78687 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78688 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78689 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78690
78691 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78692
78693 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78694 index 1ac414f..a1c1451 100644
78695 --- a/scripts/Makefile.host
78696 +++ b/scripts/Makefile.host
78697 @@ -31,6 +31,7 @@
78698 # Note: Shared libraries consisting of C++ files are not supported
78699
78700 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78701 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78702
78703 # C code
78704 # Executables compiled from a single .c file
78705 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78706 # Shared libaries (only .c supported)
78707 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78708 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78709 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78710 # Remove .so files from "xxx-objs"
78711 host-cobjs := $(filter-out %.so,$(host-cobjs))
78712
78713 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78714 index 291228e..6c55203 100644
78715 --- a/scripts/basic/fixdep.c
78716 +++ b/scripts/basic/fixdep.c
78717 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78718 /*
78719 * Lookup a value in the configuration string.
78720 */
78721 -static int is_defined_config(const char *name, int len, unsigned int hash)
78722 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78723 {
78724 struct item *aux;
78725
78726 @@ -211,10 +211,10 @@ static void clear_config(void)
78727 /*
78728 * Record the use of a CONFIG_* word.
78729 */
78730 -static void use_config(const char *m, int slen)
78731 +static void use_config(const char *m, unsigned int slen)
78732 {
78733 unsigned int hash = strhash(m, slen);
78734 - int c, i;
78735 + unsigned int c, i;
78736
78737 if (is_defined_config(m, slen, hash))
78738 return;
78739 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78740
78741 static void parse_config_file(const char *map, size_t len)
78742 {
78743 - const int *end = (const int *) (map + len);
78744 + const unsigned int *end = (const unsigned int *) (map + len);
78745 /* start at +1, so that p can never be < map */
78746 - const int *m = (const int *) map + 1;
78747 + const unsigned int *m = (const unsigned int *) map + 1;
78748 const char *p, *q;
78749
78750 for (; m < end; m++) {
78751 @@ -405,7 +405,7 @@ static void print_deps(void)
78752 static void traps(void)
78753 {
78754 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78755 - int *p = (int *)test;
78756 + unsigned int *p = (unsigned int *)test;
78757
78758 if (*p != INT_CONF) {
78759 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
78760 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78761 new file mode 100644
78762 index 0000000..8729101
78763 --- /dev/null
78764 +++ b/scripts/gcc-plugin.sh
78765 @@ -0,0 +1,2 @@
78766 +#!/bin/sh
78767 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
78768 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78769 index e26e2fb..f84937b 100644
78770 --- a/scripts/mod/file2alias.c
78771 +++ b/scripts/mod/file2alias.c
78772 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
78773 unsigned long size, unsigned long id_size,
78774 void *symval)
78775 {
78776 - int i;
78777 + unsigned int i;
78778
78779 if (size % id_size || size < id_size) {
78780 if (cross_build != 0)
78781 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
78782 /* USB is special because the bcdDevice can be matched against a numeric range */
78783 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78784 static void do_usb_entry(struct usb_device_id *id,
78785 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78786 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78787 unsigned char range_lo, unsigned char range_hi,
78788 unsigned char max, struct module *mod)
78789 {
78790 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78791 {
78792 unsigned int devlo, devhi;
78793 unsigned char chi, clo, max;
78794 - int ndigits;
78795 + unsigned int ndigits;
78796
78797 id->match_flags = TO_NATIVE(id->match_flags);
78798 id->idVendor = TO_NATIVE(id->idVendor);
78799 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78800 for (i = 0; i < count; i++) {
78801 const char *id = (char *)devs[i].id;
78802 char acpi_id[sizeof(devs[0].id)];
78803 - int j;
78804 + unsigned int j;
78805
78806 buf_printf(&mod->dev_table_buf,
78807 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78808 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78809
78810 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78811 const char *id = (char *)card->devs[j].id;
78812 - int i2, j2;
78813 + unsigned int i2, j2;
78814 int dup = 0;
78815
78816 if (!id[0])
78817 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78818 /* add an individual alias for every device entry */
78819 if (!dup) {
78820 char acpi_id[sizeof(card->devs[0].id)];
78821 - int k;
78822 + unsigned int k;
78823
78824 buf_printf(&mod->dev_table_buf,
78825 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78826 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, const char *s)
78827 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
78828 char *alias)
78829 {
78830 - int i, j;
78831 + unsigned int i, j;
78832
78833 sprintf(alias, "dmi*");
78834
78835 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
78836 index a509ff8..5822633 100644
78837 --- a/scripts/mod/modpost.c
78838 +++ b/scripts/mod/modpost.c
78839 @@ -919,6 +919,7 @@ enum mismatch {
78840 ANY_INIT_TO_ANY_EXIT,
78841 ANY_EXIT_TO_ANY_INIT,
78842 EXPORT_TO_INIT_EXIT,
78843 + DATA_TO_TEXT
78844 };
78845
78846 struct sectioncheck {
78847 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
78848 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
78849 .mismatch = EXPORT_TO_INIT_EXIT,
78850 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
78851 +},
78852 +/* Do not reference code from writable data */
78853 +{
78854 + .fromsec = { DATA_SECTIONS, NULL },
78855 + .tosec = { TEXT_SECTIONS, NULL },
78856 + .mismatch = DATA_TO_TEXT
78857 }
78858 };
78859
78860 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
78861 continue;
78862 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
78863 continue;
78864 - if (sym->st_value == addr)
78865 - return sym;
78866 /* Find a symbol nearby - addr are maybe negative */
78867 d = sym->st_value - addr;
78868 + if (d == 0)
78869 + return sym;
78870 if (d < 0)
78871 d = addr - sym->st_value;
78872 if (d < distance) {
78873 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
78874 tosym, prl_to, prl_to, tosym);
78875 free(prl_to);
78876 break;
78877 + case DATA_TO_TEXT:
78878 +/*
78879 + fprintf(stderr,
78880 + "The variable %s references\n"
78881 + "the %s %s%s%s\n",
78882 + fromsym, to, sec2annotation(tosec), tosym, to_p);
78883 +*/
78884 + break;
78885 }
78886 fprintf(stderr, "\n");
78887 }
78888 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
78889 static void check_sec_ref(struct module *mod, const char *modname,
78890 struct elf_info *elf)
78891 {
78892 - int i;
78893 + unsigned int i;
78894 Elf_Shdr *sechdrs = elf->sechdrs;
78895
78896 /* Walk through all sections */
78897 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
78898 va_end(ap);
78899 }
78900
78901 -void buf_write(struct buffer *buf, const char *s, int len)
78902 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
78903 {
78904 if (buf->size - buf->pos < len) {
78905 buf->size += len + SZ;
78906 @@ -1966,7 +1981,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
78907 if (fstat(fileno(file), &st) < 0)
78908 goto close_write;
78909
78910 - if (st.st_size != b->pos)
78911 + if (st.st_size != (off_t)b->pos)
78912 goto close_write;
78913
78914 tmp = NOFAIL(malloc(b->pos));
78915 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
78916 index 2031119..b5433af 100644
78917 --- a/scripts/mod/modpost.h
78918 +++ b/scripts/mod/modpost.h
78919 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
78920
78921 struct buffer {
78922 char *p;
78923 - int pos;
78924 - int size;
78925 + unsigned int pos;
78926 + unsigned int size;
78927 };
78928
78929 void __attribute__((format(printf, 2, 3)))
78930 buf_printf(struct buffer *buf, const char *fmt, ...);
78931
78932 void
78933 -buf_write(struct buffer *buf, const char *s, int len);
78934 +buf_write(struct buffer *buf, const char *s, unsigned int len);
78935
78936 struct module {
78937 struct module *next;
78938 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
78939 index 9dfcd6d..099068e 100644
78940 --- a/scripts/mod/sumversion.c
78941 +++ b/scripts/mod/sumversion.c
78942 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
78943 goto out;
78944 }
78945
78946 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
78947 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
78948 warn("writing sum in %s failed: %s\n",
78949 filename, strerror(errno));
78950 goto out;
78951 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
78952 index 5c11312..72742b5 100644
78953 --- a/scripts/pnmtologo.c
78954 +++ b/scripts/pnmtologo.c
78955 @@ -237,14 +237,14 @@ static void write_header(void)
78956 fprintf(out, " * Linux logo %s\n", logoname);
78957 fputs(" */\n\n", out);
78958 fputs("#include <linux/linux_logo.h>\n\n", out);
78959 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
78960 + fprintf(out, "static unsigned char %s_data[] = {\n",
78961 logoname);
78962 }
78963
78964 static void write_footer(void)
78965 {
78966 fputs("\n};\n\n", out);
78967 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
78968 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
78969 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
78970 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
78971 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
78972 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
78973 fputs("\n};\n\n", out);
78974
78975 /* write logo clut */
78976 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
78977 + fprintf(out, "static unsigned char %s_clut[] = {\n",
78978 logoname);
78979 write_hex_cnt = 0;
78980 for (i = 0; i < logo_clutsize; i++) {
78981 diff --git a/security/Kconfig b/security/Kconfig
78982 index e0f08b5..649220f 100644
78983 --- a/security/Kconfig
78984 +++ b/security/Kconfig
78985 @@ -4,6 +4,626 @@
78986
78987 menu "Security options"
78988
78989 +source grsecurity/Kconfig
78990 +
78991 +menu "PaX"
78992 +
78993 + config ARCH_TRACK_EXEC_LIMIT
78994 + bool
78995 +
78996 + config PAX_KERNEXEC_PLUGIN
78997 + bool
78998 +
78999 + config PAX_PER_CPU_PGD
79000 + bool
79001 +
79002 + config TASK_SIZE_MAX_SHIFT
79003 + int
79004 + depends on X86_64
79005 + default 47 if !PAX_PER_CPU_PGD
79006 + default 42 if PAX_PER_CPU_PGD
79007 +
79008 + config PAX_ENABLE_PAE
79009 + bool
79010 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
79011 +
79012 +config PAX
79013 + bool "Enable various PaX features"
79014 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
79015 + help
79016 + This allows you to enable various PaX features. PaX adds
79017 + intrusion prevention mechanisms to the kernel that reduce
79018 + the risks posed by exploitable memory corruption bugs.
79019 +
79020 +menu "PaX Control"
79021 + depends on PAX
79022 +
79023 +config PAX_SOFTMODE
79024 + bool 'Support soft mode'
79025 + help
79026 + Enabling this option will allow you to run PaX in soft mode, that
79027 + is, PaX features will not be enforced by default, only on executables
79028 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
79029 + support as they are the only way to mark executables for soft mode use.
79030 +
79031 + Soft mode can be activated by using the "pax_softmode=1" kernel command
79032 + line option on boot. Furthermore you can control various PaX features
79033 + at runtime via the entries in /proc/sys/kernel/pax.
79034 +
79035 +config PAX_EI_PAX
79036 + bool 'Use legacy ELF header marking'
79037 + help
79038 + Enabling this option will allow you to control PaX features on
79039 + a per executable basis via the 'chpax' utility available at
79040 + http://pax.grsecurity.net/. The control flags will be read from
79041 + an otherwise reserved part of the ELF header. This marking has
79042 + numerous drawbacks (no support for soft-mode, toolchain does not
79043 + know about the non-standard use of the ELF header) therefore it
79044 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
79045 + support.
79046 +
79047 + If you have applications not marked by the PT_PAX_FLAGS ELF program
79048 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
79049 + option otherwise they will not get any protection.
79050 +
79051 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
79052 + support as well, they will override the legacy EI_PAX marks.
79053 +
79054 +config PAX_PT_PAX_FLAGS
79055 + bool 'Use ELF program header marking'
79056 + help
79057 + Enabling this option will allow you to control PaX features on
79058 + a per executable basis via the 'paxctl' utility available at
79059 + http://pax.grsecurity.net/. The control flags will be read from
79060 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
79061 + has the benefits of supporting both soft mode and being fully
79062 + integrated into the toolchain (the binutils patch is available
79063 + from http://pax.grsecurity.net).
79064 +
79065 + If you have applications not marked by the PT_PAX_FLAGS ELF program
79066 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
79067 + support otherwise they will not get any protection.
79068 +
79069 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
79070 + must make sure that the marks are the same if a binary has both marks.
79071 +
79072 + Note that if you enable the legacy EI_PAX marking support as well,
79073 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
79074 +
79075 +config PAX_XATTR_PAX_FLAGS
79076 + bool 'Use filesystem extended attributes marking'
79077 + depends on EXPERT
79078 + select CIFS_XATTR if CIFS
79079 + select EXT2_FS_XATTR if EXT2_FS
79080 + select EXT3_FS_XATTR if EXT3_FS
79081 + select EXT4_FS_XATTR if EXT4_FS
79082 + select JFFS2_FS_XATTR if JFFS2_FS
79083 + select REISERFS_FS_XATTR if REISERFS_FS
79084 + select SQUASHFS_XATTR if SQUASHFS
79085 + select TMPFS_XATTR if TMPFS
79086 + select UBIFS_FS_XATTR if UBIFS_FS
79087 + help
79088 + Enabling this option will allow you to control PaX features on
79089 + a per executable basis via the 'setfattr' utility. The control
79090 + flags will be read from the user.pax.flags extended attribute of
79091 + the file. This marking has the benefit of supporting binary-only
79092 + applications that self-check themselves (e.g., skype) and would
79093 + not tolerate chpax/paxctl changes. The main drawback is that
79094 + extended attributes are not supported by some filesystems (e.g.,
79095 + isofs, udf, vfat) so copying files through such filesystems will
79096 + lose the extended attributes and these PaX markings.
79097 +
79098 + If you have applications not marked by the PT_PAX_FLAGS ELF program
79099 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
79100 + support otherwise they will not get any protection.
79101 +
79102 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
79103 + must make sure that the marks are the same if a binary has both marks.
79104 +
79105 + Note that if you enable the legacy EI_PAX marking support as well,
79106 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
79107 +
79108 +choice
79109 + prompt 'MAC system integration'
79110 + default PAX_HAVE_ACL_FLAGS
79111 + help
79112 + Mandatory Access Control systems have the option of controlling
79113 + PaX flags on a per executable basis, choose the method supported
79114 + by your particular system.
79115 +
79116 + - "none": if your MAC system does not interact with PaX,
79117 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
79118 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
79119 +
79120 + NOTE: this option is for developers/integrators only.
79121 +
79122 + config PAX_NO_ACL_FLAGS
79123 + bool 'none'
79124 +
79125 + config PAX_HAVE_ACL_FLAGS
79126 + bool 'direct'
79127 +
79128 + config PAX_HOOK_ACL_FLAGS
79129 + bool 'hook'
79130 +endchoice
79131 +
79132 +endmenu
79133 +
79134 +menu "Non-executable pages"
79135 + depends on PAX
79136 +
79137 +config PAX_NOEXEC
79138 + bool "Enforce non-executable pages"
79139 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
79140 + help
79141 + By design some architectures do not allow for protecting memory
79142 + pages against execution or even if they do, Linux does not make
79143 + use of this feature. In practice this means that if a page is
79144 + readable (such as the stack or heap) it is also executable.
79145 +
79146 + There is a well known exploit technique that makes use of this
79147 + fact and a common programming mistake where an attacker can
79148 + introduce code of his choice somewhere in the attacked program's
79149 + memory (typically the stack or the heap) and then execute it.
79150 +
79151 + If the attacked program was running with different (typically
79152 + higher) privileges than that of the attacker, then he can elevate
79153 + his own privilege level (e.g. get a root shell, write to files for
79154 + which he does not have write access to, etc).
79155 +
79156 + Enabling this option will let you choose from various features
79157 + that prevent the injection and execution of 'foreign' code in
79158 + a program.
79159 +
79160 + This will also break programs that rely on the old behaviour and
79161 + expect that dynamically allocated memory via the malloc() family
79162 + of functions is executable (which it is not). Notable examples
79163 + are the XFree86 4.x server, the java runtime and wine.
79164 +
79165 +config PAX_PAGEEXEC
79166 + bool "Paging based non-executable pages"
79167 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
79168 + select S390_SWITCH_AMODE if S390
79169 + select S390_EXEC_PROTECT if S390
79170 + select ARCH_TRACK_EXEC_LIMIT if X86_32
79171 + help
79172 + This implementation is based on the paging feature of the CPU.
79173 + On i386 without hardware non-executable bit support there is a
79174 + variable but usually low performance impact, however on Intel's
79175 + P4 core based CPUs it is very high so you should not enable this
79176 + for kernels meant to be used on such CPUs.
79177 +
79178 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
79179 + with hardware non-executable bit support there is no performance
79180 + impact, on ppc the impact is negligible.
79181 +
79182 + Note that several architectures require various emulations due to
79183 + badly designed userland ABIs, this will cause a performance impact
79184 + but will disappear as soon as userland is fixed. For example, ppc
79185 + userland MUST have been built with secure-plt by a recent toolchain.
79186 +
79187 +config PAX_SEGMEXEC
79188 + bool "Segmentation based non-executable pages"
79189 + depends on PAX_NOEXEC && X86_32
79190 + help
79191 + This implementation is based on the segmentation feature of the
79192 + CPU and has a very small performance impact, however applications
79193 + will be limited to a 1.5 GB address space instead of the normal
79194 + 3 GB.
79195 +
79196 +config PAX_EMUTRAMP
79197 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
79198 + default y if PARISC
79199 + help
79200 + There are some programs and libraries that for one reason or
79201 + another attempt to execute special small code snippets from
79202 + non-executable memory pages. Most notable examples are the
79203 + signal handler return code generated by the kernel itself and
79204 + the GCC trampolines.
79205 +
79206 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
79207 + such programs will no longer work under your kernel.
79208 +
79209 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
79210 + utilities to enable trampoline emulation for the affected programs
79211 + yet still have the protection provided by the non-executable pages.
79212 +
79213 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
79214 + your system will not even boot.
79215 +
79216 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
79217 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
79218 + for the affected files.
79219 +
79220 + NOTE: enabling this feature *may* open up a loophole in the
79221 + protection provided by non-executable pages that an attacker
79222 + could abuse. Therefore the best solution is to not have any
79223 + files on your system that would require this option. This can
79224 + be achieved by not using libc5 (which relies on the kernel
79225 + signal handler return code) and not using or rewriting programs
79226 + that make use of the nested function implementation of GCC.
79227 + Skilled users can just fix GCC itself so that it implements
79228 + nested function calls in a way that does not interfere with PaX.
79229 +
79230 +config PAX_EMUSIGRT
79231 + bool "Automatically emulate sigreturn trampolines"
79232 + depends on PAX_EMUTRAMP && PARISC
79233 + default y
79234 + help
79235 + Enabling this option will have the kernel automatically detect
79236 + and emulate signal return trampolines executing on the stack
79237 + that would otherwise lead to task termination.
79238 +
79239 + This solution is intended as a temporary one for users with
79240 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
79241 + Modula-3 runtime, etc) or executables linked to such, basically
79242 + everything that does not specify its own SA_RESTORER function in
79243 + normal executable memory like glibc 2.1+ does.
79244 +
79245 + On parisc you MUST enable this option, otherwise your system will
79246 + not even boot.
79247 +
79248 + NOTE: this feature cannot be disabled on a per executable basis
79249 + and since it *does* open up a loophole in the protection provided
79250 + by non-executable pages, the best solution is to not have any
79251 + files on your system that would require this option.
79252 +
79253 +config PAX_MPROTECT
79254 + bool "Restrict mprotect()"
79255 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
79256 + help
79257 + Enabling this option will prevent programs from
79258 + - changing the executable status of memory pages that were
79259 + not originally created as executable,
79260 + - making read-only executable pages writable again,
79261 + - creating executable pages from anonymous memory,
79262 + - making read-only-after-relocations (RELRO) data pages writable again.
79263 +
79264 + You should say Y here to complete the protection provided by
79265 + the enforcement of non-executable pages.
79266 +
79267 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79268 + this feature on a per file basis.
79269 +
79270 +config PAX_MPROTECT_COMPAT
79271 + bool "Use legacy/compat protection demoting (read help)"
79272 + depends on PAX_MPROTECT
79273 + default n
79274 + help
79275 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
79276 + by sending the proper error code to the application. For some broken
79277 + userland, this can cause problems with Python or other applications. The
79278 + current implementation however allows for applications like clamav to
79279 + detect if JIT compilation/execution is allowed and to fall back gracefully
79280 + to an interpreter-based mode if it does not. While we encourage everyone
79281 + to use the current implementation as-is and push upstream to fix broken
79282 + userland (note that the RWX logging option can assist with this), in some
79283 + environments this may not be possible. Having to disable MPROTECT
79284 + completely on certain binaries reduces the security benefit of PaX,
79285 + so this option is provided for those environments to revert to the old
79286 + behavior.
79287 +
79288 +config PAX_ELFRELOCS
79289 + bool "Allow ELF text relocations (read help)"
79290 + depends on PAX_MPROTECT
79291 + default n
79292 + help
79293 + Non-executable pages and mprotect() restrictions are effective
79294 + in preventing the introduction of new executable code into an
79295 + attacked task's address space. There remain only two venues
79296 + for this kind of attack: if the attacker can execute already
79297 + existing code in the attacked task then he can either have it
79298 + create and mmap() a file containing his code or have it mmap()
79299 + an already existing ELF library that does not have position
79300 + independent code in it and use mprotect() on it to make it
79301 + writable and copy his code there. While protecting against
79302 + the former approach is beyond PaX, the latter can be prevented
79303 + by having only PIC ELF libraries on one's system (which do not
79304 + need to relocate their code). If you are sure this is your case,
79305 + as is the case with all modern Linux distributions, then leave
79306 + this option disabled. You should say 'n' here.
79307 +
79308 +config PAX_ETEXECRELOCS
79309 + bool "Allow ELF ET_EXEC text relocations"
79310 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
79311 + select PAX_ELFRELOCS
79312 + default y
79313 + help
79314 + On some architectures there are incorrectly created applications
79315 + that require text relocations and would not work without enabling
79316 + this option. If you are an alpha, ia64 or parisc user, you should
79317 + enable this option and disable it once you have made sure that
79318 + none of your applications need it.
79319 +
79320 +config PAX_EMUPLT
79321 + bool "Automatically emulate ELF PLT"
79322 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
79323 + default y
79324 + help
79325 + Enabling this option will have the kernel automatically detect
79326 + and emulate the Procedure Linkage Table entries in ELF files.
79327 + On some architectures such entries are in writable memory, and
79328 + become non-executable leading to task termination. Therefore
79329 + it is mandatory that you enable this option on alpha, parisc,
79330 + sparc and sparc64, otherwise your system would not even boot.
79331 +
79332 + NOTE: this feature *does* open up a loophole in the protection
79333 + provided by the non-executable pages, therefore the proper
79334 + solution is to modify the toolchain to produce a PLT that does
79335 + not need to be writable.
79336 +
79337 +config PAX_DLRESOLVE
79338 + bool 'Emulate old glibc resolver stub'
79339 + depends on PAX_EMUPLT && SPARC
79340 + default n
79341 + help
79342 + This option is needed if userland has an old glibc (before 2.4)
79343 + that puts a 'save' instruction into the runtime generated resolver
79344 + stub that needs special emulation.
79345 +
79346 +config PAX_KERNEXEC
79347 + bool "Enforce non-executable kernel pages"
79348 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
79349 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
79350 + select PAX_KERNEXEC_PLUGIN if X86_64
79351 + help
79352 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
79353 + that is, enabling this option will make it harder to inject
79354 + and execute 'foreign' code in kernel memory itself.
79355 +
79356 + Note that on x86_64 kernels there is a known regression when
79357 + this feature and KVM/VMX are both enabled in the host kernel.
79358 +
79359 +choice
79360 + prompt "Return Address Instrumentation Method"
79361 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
79362 + depends on PAX_KERNEXEC_PLUGIN
79363 + help
79364 + Select the method used to instrument function pointer dereferences.
79365 + Note that binary modules cannot be instrumented by this approach.
79366 +
79367 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
79368 + bool "bts"
79369 + help
79370 + This method is compatible with binary only modules but has
79371 + a higher runtime overhead.
79372 +
79373 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
79374 + bool "or"
79375 + depends on !PARAVIRT
79376 + help
79377 + This method is incompatible with binary only modules but has
79378 + a lower runtime overhead.
79379 +endchoice
79380 +
79381 +config PAX_KERNEXEC_PLUGIN_METHOD
79382 + string
79383 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
79384 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
79385 + default ""
79386 +
79387 +config PAX_KERNEXEC_MODULE_TEXT
79388 + int "Minimum amount of memory reserved for module code"
79389 + default "4"
79390 + depends on PAX_KERNEXEC && X86_32 && MODULES
79391 + help
79392 + Due to implementation details the kernel must reserve a fixed
79393 + amount of memory for module code at compile time that cannot be
79394 + changed at runtime. Here you can specify the minimum amount
79395 + in MB that will be reserved. Due to the same implementation
79396 + details this size will always be rounded up to the next 2/4 MB
79397 + boundary (depends on PAE) so the actually available memory for
79398 + module code will usually be more than this minimum.
79399 +
79400 + The default 4 MB should be enough for most users but if you have
79401 + an excessive number of modules (e.g., most distribution configs
79402 + compile many drivers as modules) or use huge modules such as
79403 + nvidia's kernel driver, you will need to adjust this amount.
79404 + A good rule of thumb is to look at your currently loaded kernel
79405 + modules and add up their sizes.
79406 +
79407 +endmenu
79408 +
79409 +menu "Address Space Layout Randomization"
79410 + depends on PAX
79411 +
79412 +config PAX_ASLR
79413 + bool "Address Space Layout Randomization"
79414 + help
79415 + Many if not most exploit techniques rely on the knowledge of
79416 + certain addresses in the attacked program. The following options
79417 + will allow the kernel to apply a certain amount of randomization
79418 + to specific parts of the program thereby forcing an attacker to
79419 + guess them in most cases. Any failed guess will most likely crash
79420 + the attacked program which allows the kernel to detect such attempts
79421 + and react on them. PaX itself provides no reaction mechanisms,
79422 + instead it is strongly encouraged that you make use of Nergal's
79423 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
79424 + (http://www.grsecurity.net/) built-in crash detection features or
79425 + develop one yourself.
79426 +
79427 + By saying Y here you can choose to randomize the following areas:
79428 + - top of the task's kernel stack
79429 + - top of the task's userland stack
79430 + - base address for mmap() requests that do not specify one
79431 + (this includes all libraries)
79432 + - base address of the main executable
79433 +
79434 + It is strongly recommended to say Y here as address space layout
79435 + randomization has negligible impact on performance yet it provides
79436 + a very effective protection.
79437 +
79438 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79439 + this feature on a per file basis.
79440 +
79441 +config PAX_RANDKSTACK
79442 + bool "Randomize kernel stack base"
79443 + depends on X86_TSC && X86
79444 + help
79445 + By saying Y here the kernel will randomize every task's kernel
79446 + stack on every system call. This will not only force an attacker
79447 + to guess it but also prevent him from making use of possible
79448 + leaked information about it.
79449 +
79450 + Since the kernel stack is a rather scarce resource, randomization
79451 + may cause unexpected stack overflows, therefore you should very
79452 + carefully test your system. Note that once enabled in the kernel
79453 + configuration, this feature cannot be disabled on a per file basis.
79454 +
79455 +config PAX_RANDUSTACK
79456 + bool "Randomize user stack base"
79457 + depends on PAX_ASLR
79458 + help
79459 + By saying Y here the kernel will randomize every task's userland
79460 + stack. The randomization is done in two steps where the second
79461 + one may apply a big amount of shift to the top of the stack and
79462 + cause problems for programs that want to use lots of memory (more
79463 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
79464 + For this reason the second step can be controlled by 'chpax' or
79465 + 'paxctl' on a per file basis.
79466 +
79467 +config PAX_RANDMMAP
79468 + bool "Randomize mmap() base"
79469 + depends on PAX_ASLR
79470 + help
79471 + By saying Y here the kernel will use a randomized base address for
79472 + mmap() requests that do not specify one themselves. As a result
79473 + all dynamically loaded libraries will appear at random addresses
79474 + and therefore be harder to exploit by a technique where an attacker
79475 + attempts to execute library code for his purposes (e.g. spawn a
79476 + shell from an exploited program that is running at an elevated
79477 + privilege level).
79478 +
79479 + Furthermore, if a program is relinked as a dynamic ELF file, its
79480 + base address will be randomized as well, completing the full
79481 + randomization of the address space layout. Attacking such programs
79482 + becomes a guess game. You can find an example of doing this at
79483 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
79484 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
79485 +
79486 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
79487 + feature on a per file basis.
79488 +
79489 +endmenu
79490 +
79491 +menu "Miscellaneous hardening features"
79492 +
79493 +config PAX_MEMORY_SANITIZE
79494 + bool "Sanitize all freed memory"
79495 + help
79496 + By saying Y here the kernel will erase memory pages as soon as they
79497 + are freed. This in turn reduces the lifetime of data stored in the
79498 + pages, making it less likely that sensitive information such as
79499 + passwords, cryptographic secrets, etc stay in memory for too long.
79500 +
79501 + This is especially useful for programs whose runtime is short, long
79502 + lived processes and the kernel itself benefit from this as long as
79503 + they operate on whole memory pages and ensure timely freeing of pages
79504 + that may hold sensitive information.
79505 +
79506 + The tradeoff is performance impact, on a single CPU system kernel
79507 + compilation sees a 3% slowdown, other systems and workloads may vary
79508 + and you are advised to test this feature on your expected workload
79509 + before deploying it.
79510 +
79511 + Note that this feature does not protect data stored in live pages,
79512 + e.g., process memory swapped to disk may stay there for a long time.
79513 +
79514 +config PAX_MEMORY_STACKLEAK
79515 + bool "Sanitize kernel stack"
79516 + depends on X86
79517 + help
79518 + By saying Y here the kernel will erase the kernel stack before it
79519 + returns from a system call. This in turn reduces the information
79520 + that a kernel stack leak bug can reveal.
79521 +
79522 + Note that such a bug can still leak information that was put on
79523 + the stack by the current system call (the one eventually triggering
79524 + the bug) but traces of earlier system calls on the kernel stack
79525 + cannot leak anymore.
79526 +
79527 + The tradeoff is performance impact: on a single CPU system kernel
79528 + compilation sees a 1% slowdown, other systems and workloads may vary
79529 + and you are advised to test this feature on your expected workload
79530 + before deploying it.
79531 +
79532 + Note: full support for this feature requires gcc with plugin support
79533 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
79534 + versions means that functions with large enough stack frames may
79535 + leave uninitialized memory behind that may be exposed to a later
79536 + syscall leaking the stack.
79537 +
79538 +config PAX_MEMORY_UDEREF
79539 + bool "Prevent invalid userland pointer dereference"
79540 + depends on X86 && !UML_X86 && !XEN
79541 + select PAX_PER_CPU_PGD if X86_64
79542 + help
79543 + By saying Y here the kernel will be prevented from dereferencing
79544 + userland pointers in contexts where the kernel expects only kernel
79545 + pointers. This is both a useful runtime debugging feature and a
79546 + security measure that prevents exploiting a class of kernel bugs.
79547 +
79548 + The tradeoff is that some virtualization solutions may experience
79549 + a huge slowdown and therefore you should not enable this feature
79550 + for kernels meant to run in such environments. Whether a given VM
79551 + solution is affected or not is best determined by simply trying it
79552 + out, the performance impact will be obvious right on boot as this
79553 + mechanism engages from very early on. A good rule of thumb is that
79554 + VMs running on CPUs without hardware virtualization support (i.e.,
79555 + the majority of IA-32 CPUs) will likely experience the slowdown.
79556 +
79557 +config PAX_REFCOUNT
79558 + bool "Prevent various kernel object reference counter overflows"
79559 + depends on GRKERNSEC && (X86 || SPARC64)
79560 + help
79561 + By saying Y here the kernel will detect and prevent overflowing
79562 + various (but not all) kinds of object reference counters. Such
79563 + overflows can normally occur due to bugs only and are often, if
79564 + not always, exploitable.
79565 +
79566 + The tradeoff is that data structures protected by an overflowed
79567 + refcount will never be freed and therefore will leak memory. Note
79568 + that this leak also happens even without this protection but in
79569 + that case the overflow can eventually trigger the freeing of the
79570 + data structure while it is still being used elsewhere, resulting
79571 + in the exploitable situation that this feature prevents.
79572 +
79573 + Since this has a negligible performance impact, you should enable
79574 + this feature.
79575 +
79576 +config PAX_USERCOPY
79577 + bool "Harden heap object copies between kernel and userland"
79578 + depends on X86 || PPC || SPARC || ARM
79579 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
79580 + help
79581 + By saying Y here the kernel will enforce the size of heap objects
79582 + when they are copied in either direction between the kernel and
79583 + userland, even if only a part of the heap object is copied.
79584 +
79585 + Specifically, this checking prevents information leaking from the
79586 + kernel heap during kernel to userland copies (if the kernel heap
79587 + object is otherwise fully initialized) and prevents kernel heap
79588 + overflows during userland to kernel copies.
79589 +
79590 + Note that the current implementation provides the strictest bounds
79591 + checks for the SLUB allocator.
79592 +
79593 + Enabling this option also enables per-slab cache protection against
79594 + data in a given cache being copied into/out of via userland
79595 + accessors. Though the whitelist of regions will be reduced over
79596 + time, it notably protects important data structures like task structs.
79597 +
79598 + If frame pointers are enabled on x86, this option will also restrict
79599 + copies into and out of the kernel stack to local variables within a
79600 + single frame.
79601 +
79602 + Since this has a negligible performance impact, you should enable
79603 + this feature.
79604 +
79605 +endmenu
79606 +
79607 +endmenu
79608 +
79609 config KEYS
79610 bool "Enable access key retention support"
79611 help
79612 @@ -167,7 +787,7 @@ config INTEL_TXT
79613 config LSM_MMAP_MIN_ADDR
79614 int "Low address space for LSM to protect from user allocation"
79615 depends on SECURITY && SECURITY_SELINUX
79616 - default 32768 if ARM
79617 + default 32768 if ALPHA || ARM || PARISC || SPARC32
79618 default 65536
79619 help
79620 This is the portion of low virtual memory which should be protected
79621 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
79622 index 3783202..1852837 100644
79623 --- a/security/apparmor/lsm.c
79624 +++ b/security/apparmor/lsm.c
79625 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
79626 return error;
79627 }
79628
79629 -static struct security_operations apparmor_ops = {
79630 +static struct security_operations apparmor_ops __read_only = {
79631 .name = "apparmor",
79632
79633 .ptrace_access_check = apparmor_ptrace_access_check,
79634 diff --git a/security/commoncap.c b/security/commoncap.c
79635 index a93b3b7..4410df9 100644
79636 --- a/security/commoncap.c
79637 +++ b/security/commoncap.c
79638 @@ -28,6 +28,7 @@
79639 #include <linux/prctl.h>
79640 #include <linux/securebits.h>
79641 #include <linux/user_namespace.h>
79642 +#include <net/sock.h>
79643
79644 /*
79645 * If a non-root user executes a setuid-root binary in
79646 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
79647
79648 int cap_netlink_recv(struct sk_buff *skb, int cap)
79649 {
79650 - if (!cap_raised(current_cap(), cap))
79651 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
79652 return -EPERM;
79653 return 0;
79654 }
79655 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79656 {
79657 const struct cred *cred = current_cred();
79658
79659 + if (gr_acl_enable_at_secure())
79660 + return 1;
79661 +
79662 if (cred->uid != 0) {
79663 if (bprm->cap_effective)
79664 return 1;
79665 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79666 index 08408bd..67e6e78 100644
79667 --- a/security/integrity/ima/ima.h
79668 +++ b/security/integrity/ima/ima.h
79669 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79670 extern spinlock_t ima_queue_lock;
79671
79672 struct ima_h_table {
79673 - atomic_long_t len; /* number of stored measurements in the list */
79674 - atomic_long_t violations;
79675 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79676 + atomic_long_unchecked_t violations;
79677 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79678 };
79679 extern struct ima_h_table ima_htable;
79680 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79681 index da36d2c..e1e1965 100644
79682 --- a/security/integrity/ima/ima_api.c
79683 +++ b/security/integrity/ima/ima_api.c
79684 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79685 int result;
79686
79687 /* can overflow, only indicator */
79688 - atomic_long_inc(&ima_htable.violations);
79689 + atomic_long_inc_unchecked(&ima_htable.violations);
79690
79691 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79692 if (!entry) {
79693 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79694 index ef21b96..d53e674 100644
79695 --- a/security/integrity/ima/ima_fs.c
79696 +++ b/security/integrity/ima/ima_fs.c
79697 @@ -28,12 +28,12 @@
79698 static int valid_policy = 1;
79699 #define TMPBUFLEN 12
79700 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79701 - loff_t *ppos, atomic_long_t *val)
79702 + loff_t *ppos, atomic_long_unchecked_t *val)
79703 {
79704 char tmpbuf[TMPBUFLEN];
79705 ssize_t len;
79706
79707 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79708 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79709 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79710 }
79711
79712 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79713 index 8e28f04..d5951b1 100644
79714 --- a/security/integrity/ima/ima_queue.c
79715 +++ b/security/integrity/ima/ima_queue.c
79716 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79717 INIT_LIST_HEAD(&qe->later);
79718 list_add_tail_rcu(&qe->later, &ima_measurements);
79719
79720 - atomic_long_inc(&ima_htable.len);
79721 + atomic_long_inc_unchecked(&ima_htable.len);
79722 key = ima_hash_key(entry->digest);
79723 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79724 return 0;
79725 diff --git a/security/keys/compat.c b/security/keys/compat.c
79726 index 338b510..a235861 100644
79727 --- a/security/keys/compat.c
79728 +++ b/security/keys/compat.c
79729 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79730 if (ret == 0)
79731 goto no_payload_free;
79732
79733 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79734 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79735
79736 if (iov != iovstack)
79737 kfree(iov);
79738 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79739 index eca5191..da9c7f0 100644
79740 --- a/security/keys/keyctl.c
79741 +++ b/security/keys/keyctl.c
79742 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79743 /*
79744 * Copy the iovec data from userspace
79745 */
79746 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79747 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79748 unsigned ioc)
79749 {
79750 for (; ioc > 0; ioc--) {
79751 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79752 * If successful, 0 will be returned.
79753 */
79754 long keyctl_instantiate_key_common(key_serial_t id,
79755 - const struct iovec *payload_iov,
79756 + const struct iovec __user *payload_iov,
79757 unsigned ioc,
79758 size_t plen,
79759 key_serial_t ringid)
79760 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
79761 [0].iov_len = plen
79762 };
79763
79764 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79765 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79766 }
79767
79768 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79769 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79770 if (ret == 0)
79771 goto no_payload_free;
79772
79773 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79774 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79775
79776 if (iov != iovstack)
79777 kfree(iov);
79778 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79779 index 30e242f..ec111ab 100644
79780 --- a/security/keys/keyring.c
79781 +++ b/security/keys/keyring.c
79782 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79783 ret = -EFAULT;
79784
79785 for (loop = 0; loop < klist->nkeys; loop++) {
79786 + key_serial_t serial;
79787 key = klist->keys[loop];
79788 + serial = key->serial;
79789
79790 tmp = sizeof(key_serial_t);
79791 if (tmp > buflen)
79792 tmp = buflen;
79793
79794 - if (copy_to_user(buffer,
79795 - &key->serial,
79796 - tmp) != 0)
79797 + if (copy_to_user(buffer, &serial, tmp))
79798 goto error;
79799
79800 buflen -= tmp;
79801 diff --git a/security/min_addr.c b/security/min_addr.c
79802 index f728728..6457a0c 100644
79803 --- a/security/min_addr.c
79804 +++ b/security/min_addr.c
79805 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
79806 */
79807 static void update_mmap_min_addr(void)
79808 {
79809 +#ifndef SPARC
79810 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
79811 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
79812 mmap_min_addr = dac_mmap_min_addr;
79813 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
79814 #else
79815 mmap_min_addr = dac_mmap_min_addr;
79816 #endif
79817 +#endif
79818 }
79819
79820 /*
79821 diff --git a/security/security.c b/security/security.c
79822 index d9e1533..91427f2 100644
79823 --- a/security/security.c
79824 +++ b/security/security.c
79825 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
79826 /* things that live in capability.c */
79827 extern void __init security_fixup_ops(struct security_operations *ops);
79828
79829 -static struct security_operations *security_ops;
79830 -static struct security_operations default_security_ops = {
79831 +static struct security_operations *security_ops __read_only;
79832 +static struct security_operations default_security_ops __read_only = {
79833 .name = "default",
79834 };
79835
79836 @@ -67,7 +67,9 @@ int __init security_init(void)
79837
79838 void reset_security_ops(void)
79839 {
79840 + pax_open_kernel();
79841 security_ops = &default_security_ops;
79842 + pax_close_kernel();
79843 }
79844
79845 /* Save user chosen LSM */
79846 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
79847 index 266a229..61bd553 100644
79848 --- a/security/selinux/hooks.c
79849 +++ b/security/selinux/hooks.c
79850 @@ -93,7 +93,6 @@
79851 #define NUM_SEL_MNT_OPTS 5
79852
79853 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
79854 -extern struct security_operations *security_ops;
79855
79856 /* SECMARK reference count */
79857 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
79858 @@ -5455,7 +5454,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
79859
79860 #endif
79861
79862 -static struct security_operations selinux_ops = {
79863 +static struct security_operations selinux_ops __read_only = {
79864 .name = "selinux",
79865
79866 .ptrace_access_check = selinux_ptrace_access_check,
79867 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
79868 index b43813c..74be837 100644
79869 --- a/security/selinux/include/xfrm.h
79870 +++ b/security/selinux/include/xfrm.h
79871 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
79872
79873 static inline void selinux_xfrm_notify_policyload(void)
79874 {
79875 - atomic_inc(&flow_cache_genid);
79876 + atomic_inc_unchecked(&flow_cache_genid);
79877 }
79878 #else
79879 static inline int selinux_xfrm_enabled(void)
79880 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
79881 index f6917bc..8e8713e 100644
79882 --- a/security/selinux/ss/services.c
79883 +++ b/security/selinux/ss/services.c
79884 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, size_t len)
79885 int rc = 0;
79886 struct policy_file file = { data, len }, *fp = &file;
79887
79888 + pax_track_stack();
79889 +
79890 if (!ss_initialized) {
79891 avtab_cache_init();
79892 rc = policydb_read(&policydb, fp);
79893 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
79894 index b9c5e14..20ab779 100644
79895 --- a/security/smack/smack_lsm.c
79896 +++ b/security/smack/smack_lsm.c
79897 @@ -3393,7 +3393,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
79898 return 0;
79899 }
79900
79901 -struct security_operations smack_ops = {
79902 +struct security_operations smack_ops __read_only = {
79903 .name = "smack",
79904
79905 .ptrace_access_check = smack_ptrace_access_check,
79906 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
79907 index f776400..f95b158c 100644
79908 --- a/security/tomoyo/tomoyo.c
79909 +++ b/security/tomoyo/tomoyo.c
79910 @@ -446,7 +446,7 @@ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
79911 * tomoyo_security_ops is a "struct security_operations" which is used for
79912 * registering TOMOYO.
79913 */
79914 -static struct security_operations tomoyo_security_ops = {
79915 +static struct security_operations tomoyo_security_ops __read_only = {
79916 .name = "tomoyo",
79917 .cred_alloc_blank = tomoyo_cred_alloc_blank,
79918 .cred_prepare = tomoyo_cred_prepare,
79919 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
79920 index 3687a6c..652565e 100644
79921 --- a/sound/aoa/codecs/onyx.c
79922 +++ b/sound/aoa/codecs/onyx.c
79923 @@ -54,7 +54,7 @@ struct onyx {
79924 spdif_locked:1,
79925 analog_locked:1,
79926 original_mute:2;
79927 - int open_count;
79928 + local_t open_count;
79929 struct codec_info *codec_info;
79930
79931 /* mutex serializes concurrent access to the device
79932 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
79933 struct onyx *onyx = cii->codec_data;
79934
79935 mutex_lock(&onyx->mutex);
79936 - onyx->open_count++;
79937 + local_inc(&onyx->open_count);
79938 mutex_unlock(&onyx->mutex);
79939
79940 return 0;
79941 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
79942 struct onyx *onyx = cii->codec_data;
79943
79944 mutex_lock(&onyx->mutex);
79945 - onyx->open_count--;
79946 - if (!onyx->open_count)
79947 + if (local_dec_and_test(&onyx->open_count))
79948 onyx->spdif_locked = onyx->analog_locked = 0;
79949 mutex_unlock(&onyx->mutex);
79950
79951 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
79952 index ffd2025..df062c9 100644
79953 --- a/sound/aoa/codecs/onyx.h
79954 +++ b/sound/aoa/codecs/onyx.h
79955 @@ -11,6 +11,7 @@
79956 #include <linux/i2c.h>
79957 #include <asm/pmac_low_i2c.h>
79958 #include <asm/prom.h>
79959 +#include <asm/local.h>
79960
79961 /* PCM3052 register definitions */
79962
79963 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
79964 index 23c34a0..a2673a5 100644
79965 --- a/sound/core/oss/pcm_oss.c
79966 +++ b/sound/core/oss/pcm_oss.c
79967 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
79968 if (in_kernel) {
79969 mm_segment_t fs;
79970 fs = snd_enter_user();
79971 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79972 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79973 snd_leave_user(fs);
79974 } else {
79975 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79976 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79977 }
79978 if (ret != -EPIPE && ret != -ESTRPIPE)
79979 break;
79980 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
79981 if (in_kernel) {
79982 mm_segment_t fs;
79983 fs = snd_enter_user();
79984 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79985 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79986 snd_leave_user(fs);
79987 } else {
79988 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79989 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79990 }
79991 if (ret == -EPIPE) {
79992 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
79993 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
79994 struct snd_pcm_plugin_channel *channels;
79995 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
79996 if (!in_kernel) {
79997 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
79998 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
79999 return -EFAULT;
80000 buf = runtime->oss.buffer;
80001 }
80002 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
80003 }
80004 } else {
80005 tmp = snd_pcm_oss_write2(substream,
80006 - (const char __force *)buf,
80007 + (const char __force_kernel *)buf,
80008 runtime->oss.period_bytes, 0);
80009 if (tmp <= 0)
80010 goto err;
80011 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
80012 struct snd_pcm_runtime *runtime = substream->runtime;
80013 snd_pcm_sframes_t frames, frames1;
80014 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
80015 - char __user *final_dst = (char __force __user *)buf;
80016 + char __user *final_dst = (char __force_user *)buf;
80017 if (runtime->oss.plugin_first) {
80018 struct snd_pcm_plugin_channel *channels;
80019 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
80020 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
80021 xfer += tmp;
80022 runtime->oss.buffer_used -= tmp;
80023 } else {
80024 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
80025 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
80026 runtime->oss.period_bytes, 0);
80027 if (tmp <= 0)
80028 goto err;
80029 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
80030 size1);
80031 size1 /= runtime->channels; /* frames */
80032 fs = snd_enter_user();
80033 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
80034 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
80035 snd_leave_user(fs);
80036 }
80037 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
80038 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
80039 index 91cdf94..4085161 100644
80040 --- a/sound/core/pcm_compat.c
80041 +++ b/sound/core/pcm_compat.c
80042 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
80043 int err;
80044
80045 fs = snd_enter_user();
80046 - err = snd_pcm_delay(substream, &delay);
80047 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
80048 snd_leave_user(fs);
80049 if (err < 0)
80050 return err;
80051 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
80052 index 1c6be91..c761a59 100644
80053 --- a/sound/core/pcm_native.c
80054 +++ b/sound/core/pcm_native.c
80055 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
80056 switch (substream->stream) {
80057 case SNDRV_PCM_STREAM_PLAYBACK:
80058 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
80059 - (void __user *)arg);
80060 + (void __force_user *)arg);
80061 break;
80062 case SNDRV_PCM_STREAM_CAPTURE:
80063 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
80064 - (void __user *)arg);
80065 + (void __force_user *)arg);
80066 break;
80067 default:
80068 result = -EINVAL;
80069 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
80070 index 1f99767..14636533 100644
80071 --- a/sound/core/seq/seq_device.c
80072 +++ b/sound/core/seq/seq_device.c
80073 @@ -63,7 +63,7 @@ struct ops_list {
80074 int argsize; /* argument size */
80075
80076 /* operators */
80077 - struct snd_seq_dev_ops ops;
80078 + struct snd_seq_dev_ops *ops;
80079
80080 /* registred devices */
80081 struct list_head dev_list; /* list of devices */
80082 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
80083
80084 mutex_lock(&ops->reg_mutex);
80085 /* copy driver operators */
80086 - ops->ops = *entry;
80087 + ops->ops = entry;
80088 ops->driver |= DRIVER_LOADED;
80089 ops->argsize = argsize;
80090
80091 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
80092 dev->name, ops->id, ops->argsize, dev->argsize);
80093 return -EINVAL;
80094 }
80095 - if (ops->ops.init_device(dev) >= 0) {
80096 + if (ops->ops->init_device(dev) >= 0) {
80097 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
80098 ops->num_init_devices++;
80099 } else {
80100 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
80101 dev->name, ops->id, ops->argsize, dev->argsize);
80102 return -EINVAL;
80103 }
80104 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
80105 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
80106 dev->status = SNDRV_SEQ_DEVICE_FREE;
80107 dev->driver_data = NULL;
80108 ops->num_init_devices--;
80109 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
80110 index 8539ab0..be8a121 100644
80111 --- a/sound/drivers/mts64.c
80112 +++ b/sound/drivers/mts64.c
80113 @@ -28,6 +28,7 @@
80114 #include <sound/initval.h>
80115 #include <sound/rawmidi.h>
80116 #include <sound/control.h>
80117 +#include <asm/local.h>
80118
80119 #define CARD_NAME "Miditerminal 4140"
80120 #define DRIVER_NAME "MTS64"
80121 @@ -66,7 +67,7 @@ struct mts64 {
80122 struct pardevice *pardev;
80123 int pardev_claimed;
80124
80125 - int open_count;
80126 + local_t open_count;
80127 int current_midi_output_port;
80128 int current_midi_input_port;
80129 u8 mode[MTS64_NUM_INPUT_PORTS];
80130 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
80131 {
80132 struct mts64 *mts = substream->rmidi->private_data;
80133
80134 - if (mts->open_count == 0) {
80135 + if (local_read(&mts->open_count) == 0) {
80136 /* We don't need a spinlock here, because this is just called
80137 if the device has not been opened before.
80138 So there aren't any IRQs from the device */
80139 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
80140
80141 msleep(50);
80142 }
80143 - ++(mts->open_count);
80144 + local_inc(&mts->open_count);
80145
80146 return 0;
80147 }
80148 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
80149 struct mts64 *mts = substream->rmidi->private_data;
80150 unsigned long flags;
80151
80152 - --(mts->open_count);
80153 - if (mts->open_count == 0) {
80154 + if (local_dec_return(&mts->open_count) == 0) {
80155 /* We need the spinlock_irqsave here because we can still
80156 have IRQs at this point */
80157 spin_lock_irqsave(&mts->lock, flags);
80158 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
80159
80160 msleep(500);
80161
80162 - } else if (mts->open_count < 0)
80163 - mts->open_count = 0;
80164 + } else if (local_read(&mts->open_count) < 0)
80165 + local_set(&mts->open_count, 0);
80166
80167 return 0;
80168 }
80169 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
80170 index f07e38d..7aae69a 100644
80171 --- a/sound/drivers/opl4/opl4_lib.c
80172 +++ b/sound/drivers/opl4/opl4_lib.c
80173 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
80174 MODULE_DESCRIPTION("OPL4 driver");
80175 MODULE_LICENSE("GPL");
80176
80177 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
80178 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
80179 {
80180 int timeout = 10;
80181 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
80182 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
80183 index f2b0ba2..429efc5 100644
80184 --- a/sound/drivers/portman2x4.c
80185 +++ b/sound/drivers/portman2x4.c
80186 @@ -47,6 +47,7 @@
80187 #include <sound/initval.h>
80188 #include <sound/rawmidi.h>
80189 #include <sound/control.h>
80190 +#include <asm/local.h>
80191
80192 #define CARD_NAME "Portman 2x4"
80193 #define DRIVER_NAME "portman"
80194 @@ -84,7 +85,7 @@ struct portman {
80195 struct pardevice *pardev;
80196 int pardev_claimed;
80197
80198 - int open_count;
80199 + local_t open_count;
80200 int mode[PORTMAN_NUM_INPUT_PORTS];
80201 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
80202 };
80203 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
80204 index 87657dd..a8268d4 100644
80205 --- a/sound/firewire/amdtp.c
80206 +++ b/sound/firewire/amdtp.c
80207 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
80208 ptr = s->pcm_buffer_pointer + data_blocks;
80209 if (ptr >= pcm->runtime->buffer_size)
80210 ptr -= pcm->runtime->buffer_size;
80211 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
80212 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
80213
80214 s->pcm_period_pointer += data_blocks;
80215 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
80216 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
80217 */
80218 void amdtp_out_stream_update(struct amdtp_out_stream *s)
80219 {
80220 - ACCESS_ONCE(s->source_node_id_field) =
80221 + ACCESS_ONCE_RW(s->source_node_id_field) =
80222 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
80223 }
80224 EXPORT_SYMBOL(amdtp_out_stream_update);
80225 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
80226 index 537a9cb..8e8c8e9 100644
80227 --- a/sound/firewire/amdtp.h
80228 +++ b/sound/firewire/amdtp.h
80229 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
80230 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
80231 struct snd_pcm_substream *pcm)
80232 {
80233 - ACCESS_ONCE(s->pcm) = pcm;
80234 + ACCESS_ONCE_RW(s->pcm) = pcm;
80235 }
80236
80237 /**
80238 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
80239 index 4400308..261e9f3 100644
80240 --- a/sound/firewire/isight.c
80241 +++ b/sound/firewire/isight.c
80242 @@ -97,7 +97,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
80243 ptr += count;
80244 if (ptr >= runtime->buffer_size)
80245 ptr -= runtime->buffer_size;
80246 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
80247 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
80248
80249 isight->period_counter += count;
80250 if (isight->period_counter >= runtime->period_size) {
80251 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
80252 if (err < 0)
80253 return err;
80254
80255 - ACCESS_ONCE(isight->pcm_active) = true;
80256 + ACCESS_ONCE_RW(isight->pcm_active) = true;
80257
80258 return 0;
80259 }
80260 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
80261 {
80262 struct isight *isight = substream->private_data;
80263
80264 - ACCESS_ONCE(isight->pcm_active) = false;
80265 + ACCESS_ONCE_RW(isight->pcm_active) = false;
80266
80267 mutex_lock(&isight->mutex);
80268 isight_stop_streaming(isight);
80269 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
80270
80271 switch (cmd) {
80272 case SNDRV_PCM_TRIGGER_START:
80273 - ACCESS_ONCE(isight->pcm_running) = true;
80274 + ACCESS_ONCE_RW(isight->pcm_running) = true;
80275 break;
80276 case SNDRV_PCM_TRIGGER_STOP:
80277 - ACCESS_ONCE(isight->pcm_running) = false;
80278 + ACCESS_ONCE_RW(isight->pcm_running) = false;
80279 break;
80280 default:
80281 return -EINVAL;
80282 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
80283 index fe79a16..4d9714e 100644
80284 --- a/sound/isa/cmi8330.c
80285 +++ b/sound/isa/cmi8330.c
80286 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
80287
80288 struct snd_pcm *pcm;
80289 struct snd_cmi8330_stream {
80290 - struct snd_pcm_ops ops;
80291 + snd_pcm_ops_no_const ops;
80292 snd_pcm_open_callback_t open;
80293 void *private_data; /* sb or wss */
80294 } streams[2];
80295 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
80296 index 733b014..56ce96f 100644
80297 --- a/sound/oss/sb_audio.c
80298 +++ b/sound/oss/sb_audio.c
80299 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
80300 buf16 = (signed short *)(localbuf + localoffs);
80301 while (c)
80302 {
80303 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80304 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80305 if (copy_from_user(lbuf8,
80306 userbuf+useroffs + p,
80307 locallen))
80308 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
80309 index 09d4648..cf234c7 100644
80310 --- a/sound/oss/swarm_cs4297a.c
80311 +++ b/sound/oss/swarm_cs4297a.c
80312 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
80313 {
80314 struct cs4297a_state *s;
80315 u32 pwr, id;
80316 - mm_segment_t fs;
80317 int rval;
80318 #ifndef CONFIG_BCM_CS4297A_CSWARM
80319 u64 cfg;
80320 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
80321 if (!rval) {
80322 char *sb1250_duart_present;
80323
80324 +#if 0
80325 + mm_segment_t fs;
80326 fs = get_fs();
80327 set_fs(KERNEL_DS);
80328 -#if 0
80329 val = SOUND_MASK_LINE;
80330 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
80331 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
80332 val = initvol[i].vol;
80333 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
80334 }
80335 + set_fs(fs);
80336 // cs4297a_write_ac97(s, 0x18, 0x0808);
80337 #else
80338 // cs4297a_write_ac97(s, 0x5e, 0x180);
80339 cs4297a_write_ac97(s, 0x02, 0x0808);
80340 cs4297a_write_ac97(s, 0x18, 0x0808);
80341 #endif
80342 - set_fs(fs);
80343
80344 list_add(&s->list, &cs4297a_devs);
80345
80346 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
80347 index 755f2b0..5c12361 100644
80348 --- a/sound/pci/hda/hda_codec.h
80349 +++ b/sound/pci/hda/hda_codec.h
80350 @@ -611,7 +611,7 @@ struct hda_bus_ops {
80351 /* notify power-up/down from codec to controller */
80352 void (*pm_notify)(struct hda_bus *bus);
80353 #endif
80354 -};
80355 +} __no_const;
80356
80357 /* template to pass to the bus constructor */
80358 struct hda_bus_template {
80359 @@ -713,6 +713,7 @@ struct hda_codec_ops {
80360 #endif
80361 void (*reboot_notify)(struct hda_codec *codec);
80362 };
80363 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
80364
80365 /* record for amp information cache */
80366 struct hda_cache_head {
80367 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
80368 struct snd_pcm_substream *substream);
80369 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
80370 struct snd_pcm_substream *substream);
80371 -};
80372 +} __no_const;
80373
80374 /* PCM information for each substream */
80375 struct hda_pcm_stream {
80376 @@ -801,7 +802,7 @@ struct hda_codec {
80377 const char *modelname; /* model name for preset */
80378
80379 /* set by patch */
80380 - struct hda_codec_ops patch_ops;
80381 + hda_codec_ops_no_const patch_ops;
80382
80383 /* PCM to create, set by patch_ops.build_pcms callback */
80384 unsigned int num_pcms;
80385 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
80386 index 0da778a..bc38b84 100644
80387 --- a/sound/pci/ice1712/ice1712.h
80388 +++ b/sound/pci/ice1712/ice1712.h
80389 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
80390 unsigned int mask_flags; /* total mask bits */
80391 struct snd_akm4xxx_ops {
80392 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
80393 - } ops;
80394 + } __no_const ops;
80395 };
80396
80397 struct snd_ice1712_spdif {
80398 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
80399 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80400 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80401 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80402 - } ops;
80403 + } __no_const ops;
80404 };
80405
80406
80407 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
80408 index f3260e6..4a285d8 100644
80409 --- a/sound/pci/ymfpci/ymfpci_main.c
80410 +++ b/sound/pci/ymfpci/ymfpci_main.c
80411 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
80412 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
80413 break;
80414 }
80415 - if (atomic_read(&chip->interrupt_sleep_count)) {
80416 - atomic_set(&chip->interrupt_sleep_count, 0);
80417 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80418 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80419 wake_up(&chip->interrupt_sleep);
80420 }
80421 __end:
80422 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
80423 continue;
80424 init_waitqueue_entry(&wait, current);
80425 add_wait_queue(&chip->interrupt_sleep, &wait);
80426 - atomic_inc(&chip->interrupt_sleep_count);
80427 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
80428 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
80429 remove_wait_queue(&chip->interrupt_sleep, &wait);
80430 }
80431 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
80432 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
80433 spin_unlock(&chip->reg_lock);
80434
80435 - if (atomic_read(&chip->interrupt_sleep_count)) {
80436 - atomic_set(&chip->interrupt_sleep_count, 0);
80437 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80438 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80439 wake_up(&chip->interrupt_sleep);
80440 }
80441 }
80442 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
80443 spin_lock_init(&chip->reg_lock);
80444 spin_lock_init(&chip->voice_lock);
80445 init_waitqueue_head(&chip->interrupt_sleep);
80446 - atomic_set(&chip->interrupt_sleep_count, 0);
80447 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80448 chip->card = card;
80449 chip->pci = pci;
80450 chip->irq = -1;
80451 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
80452 index 2879c88..224159e 100644
80453 --- a/sound/soc/soc-pcm.c
80454 +++ b/sound/soc/soc-pcm.c
80455 @@ -568,7 +568,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
80456 }
80457
80458 /* ASoC PCM operations */
80459 -static struct snd_pcm_ops soc_pcm_ops = {
80460 +static snd_pcm_ops_no_const soc_pcm_ops = {
80461 .open = soc_pcm_open,
80462 .close = soc_pcm_close,
80463 .hw_params = soc_pcm_hw_params,
80464 diff --git a/sound/usb/card.h b/sound/usb/card.h
80465 index ae4251d..0961361 100644
80466 --- a/sound/usb/card.h
80467 +++ b/sound/usb/card.h
80468 @@ -44,6 +44,7 @@ struct snd_urb_ops {
80469 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80470 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80471 };
80472 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
80473
80474 struct snd_usb_substream {
80475 struct snd_usb_stream *stream;
80476 @@ -93,7 +94,7 @@ struct snd_usb_substream {
80477 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
80478 spinlock_t lock;
80479
80480 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
80481 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
80482 };
80483
80484 struct snd_usb_stream {
80485 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
80486 new file mode 100644
80487 index 0000000..b044b80
80488 --- /dev/null
80489 +++ b/tools/gcc/Makefile
80490 @@ -0,0 +1,21 @@
80491 +#CC := gcc
80492 +#PLUGIN_SOURCE_FILES := pax_plugin.c
80493 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
80494 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
80495 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
80496 +
80497 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
80498 +
80499 +hostlibs-y := constify_plugin.so
80500 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
80501 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
80502 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
80503 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
80504 +
80505 +always := $(hostlibs-y)
80506 +
80507 +constify_plugin-objs := constify_plugin.o
80508 +stackleak_plugin-objs := stackleak_plugin.o
80509 +kallocstat_plugin-objs := kallocstat_plugin.o
80510 +kernexec_plugin-objs := kernexec_plugin.o
80511 +checker_plugin-objs := checker_plugin.o
80512 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
80513 new file mode 100644
80514 index 0000000..d41b5af
80515 --- /dev/null
80516 +++ b/tools/gcc/checker_plugin.c
80517 @@ -0,0 +1,171 @@
80518 +/*
80519 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80520 + * Licensed under the GPL v2
80521 + *
80522 + * Note: the choice of the license means that the compilation process is
80523 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80524 + * but for the kernel it doesn't matter since it doesn't link against
80525 + * any of the gcc libraries
80526 + *
80527 + * gcc plugin to implement various sparse (source code checker) features
80528 + *
80529 + * TODO:
80530 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
80531 + *
80532 + * BUGS:
80533 + * - none known
80534 + */
80535 +#include "gcc-plugin.h"
80536 +#include "config.h"
80537 +#include "system.h"
80538 +#include "coretypes.h"
80539 +#include "tree.h"
80540 +#include "tree-pass.h"
80541 +#include "flags.h"
80542 +#include "intl.h"
80543 +#include "toplev.h"
80544 +#include "plugin.h"
80545 +//#include "expr.h" where are you...
80546 +#include "diagnostic.h"
80547 +#include "plugin-version.h"
80548 +#include "tm.h"
80549 +#include "function.h"
80550 +#include "basic-block.h"
80551 +#include "gimple.h"
80552 +#include "rtl.h"
80553 +#include "emit-rtl.h"
80554 +#include "tree-flow.h"
80555 +#include "target.h"
80556 +
80557 +extern void c_register_addr_space (const char *str, addr_space_t as);
80558 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
80559 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
80560 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
80561 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
80562 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
80563 +
80564 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80565 +extern rtx emit_move_insn(rtx x, rtx y);
80566 +
80567 +int plugin_is_GPL_compatible;
80568 +
80569 +static struct plugin_info checker_plugin_info = {
80570 + .version = "201111150100",
80571 +};
80572 +
80573 +#define ADDR_SPACE_KERNEL 0
80574 +#define ADDR_SPACE_FORCE_KERNEL 1
80575 +#define ADDR_SPACE_USER 2
80576 +#define ADDR_SPACE_FORCE_USER 3
80577 +#define ADDR_SPACE_IOMEM 0
80578 +#define ADDR_SPACE_FORCE_IOMEM 0
80579 +#define ADDR_SPACE_PERCPU 0
80580 +#define ADDR_SPACE_FORCE_PERCPU 0
80581 +#define ADDR_SPACE_RCU 0
80582 +#define ADDR_SPACE_FORCE_RCU 0
80583 +
80584 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
80585 +{
80586 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
80587 +}
80588 +
80589 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
80590 +{
80591 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
80592 +}
80593 +
80594 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
80595 +{
80596 + return default_addr_space_valid_pointer_mode(mode, as);
80597 +}
80598 +
80599 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
80600 +{
80601 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
80602 +}
80603 +
80604 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
80605 +{
80606 + return default_addr_space_legitimize_address(x, oldx, mode, as);
80607 +}
80608 +
80609 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
80610 +{
80611 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
80612 + return true;
80613 +
80614 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
80615 + return true;
80616 +
80617 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
80618 + return true;
80619 +
80620 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
80621 + return true;
80622 +
80623 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
80624 + return true;
80625 +
80626 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
80627 + return true;
80628 +
80629 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
80630 + return true;
80631 +
80632 + return subset == superset;
80633 +}
80634 +
80635 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
80636 +{
80637 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80638 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80639 +
80640 + return op;
80641 +}
80642 +
80643 +static void register_checker_address_spaces(void *event_data, void *data)
80644 +{
80645 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80646 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80647 + c_register_addr_space("__user", ADDR_SPACE_USER);
80648 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80649 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80650 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80651 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80652 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80653 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80654 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80655 +
80656 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80657 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80658 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80659 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80660 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80661 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80662 + targetm.addr_space.convert = checker_addr_space_convert;
80663 +}
80664 +
80665 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80666 +{
80667 + const char * const plugin_name = plugin_info->base_name;
80668 + const int argc = plugin_info->argc;
80669 + const struct plugin_argument * const argv = plugin_info->argv;
80670 + int i;
80671 +
80672 + if (!plugin_default_version_check(version, &gcc_version)) {
80673 + error(G_("incompatible gcc/plugin versions"));
80674 + return 1;
80675 + }
80676 +
80677 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80678 +
80679 + for (i = 0; i < argc; ++i)
80680 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80681 +
80682 + if (TARGET_64BIT == 0)
80683 + return 0;
80684 +
80685 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80686 +
80687 + return 0;
80688 +}
80689 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
80690 new file mode 100644
80691 index 0000000..704a564
80692 --- /dev/null
80693 +++ b/tools/gcc/constify_plugin.c
80694 @@ -0,0 +1,303 @@
80695 +/*
80696 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
80697 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
80698 + * Licensed under the GPL v2, or (at your option) v3
80699 + *
80700 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
80701 + *
80702 + * Homepage:
80703 + * http://www.grsecurity.net/~ephox/const_plugin/
80704 + *
80705 + * Usage:
80706 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
80707 + * $ gcc -fplugin=constify_plugin.so test.c -O2
80708 + */
80709 +
80710 +#include "gcc-plugin.h"
80711 +#include "config.h"
80712 +#include "system.h"
80713 +#include "coretypes.h"
80714 +#include "tree.h"
80715 +#include "tree-pass.h"
80716 +#include "flags.h"
80717 +#include "intl.h"
80718 +#include "toplev.h"
80719 +#include "plugin.h"
80720 +#include "diagnostic.h"
80721 +#include "plugin-version.h"
80722 +#include "tm.h"
80723 +#include "function.h"
80724 +#include "basic-block.h"
80725 +#include "gimple.h"
80726 +#include "rtl.h"
80727 +#include "emit-rtl.h"
80728 +#include "tree-flow.h"
80729 +
80730 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
80731 +
80732 +int plugin_is_GPL_compatible;
80733 +
80734 +static struct plugin_info const_plugin_info = {
80735 + .version = "201111150100",
80736 + .help = "no-constify\tturn off constification\n",
80737 +};
80738 +
80739 +static void constify_type(tree type);
80740 +static bool walk_struct(tree node);
80741 +
80742 +static tree deconstify_type(tree old_type)
80743 +{
80744 + tree new_type, field;
80745 +
80746 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
80747 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
80748 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
80749 + DECL_FIELD_CONTEXT(field) = new_type;
80750 + TYPE_READONLY(new_type) = 0;
80751 + C_TYPE_FIELDS_READONLY(new_type) = 0;
80752 + return new_type;
80753 +}
80754 +
80755 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80756 +{
80757 + tree type;
80758 +
80759 + *no_add_attrs = true;
80760 + if (TREE_CODE(*node) == FUNCTION_DECL) {
80761 + error("%qE attribute does not apply to functions", name);
80762 + return NULL_TREE;
80763 + }
80764 +
80765 + if (TREE_CODE(*node) == VAR_DECL) {
80766 + error("%qE attribute does not apply to variables", name);
80767 + return NULL_TREE;
80768 + }
80769 +
80770 + if (TYPE_P(*node)) {
80771 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
80772 + *no_add_attrs = false;
80773 + else
80774 + error("%qE attribute applies to struct and union types only", name);
80775 + return NULL_TREE;
80776 + }
80777 +
80778 + type = TREE_TYPE(*node);
80779 +
80780 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
80781 + error("%qE attribute applies to struct and union types only", name);
80782 + return NULL_TREE;
80783 + }
80784 +
80785 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
80786 + error("%qE attribute is already applied to the type", name);
80787 + return NULL_TREE;
80788 + }
80789 +
80790 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
80791 + error("%qE attribute used on type that is not constified", name);
80792 + return NULL_TREE;
80793 + }
80794 +
80795 + if (TREE_CODE(*node) == TYPE_DECL) {
80796 + TREE_TYPE(*node) = deconstify_type(type);
80797 + TREE_READONLY(*node) = 0;
80798 + return NULL_TREE;
80799 + }
80800 +
80801 + return NULL_TREE;
80802 +}
80803 +
80804 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80805 +{
80806 + *no_add_attrs = true;
80807 + if (!TYPE_P(*node)) {
80808 + error("%qE attribute applies to types only", name);
80809 + return NULL_TREE;
80810 + }
80811 +
80812 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
80813 + error("%qE attribute applies to struct and union types only", name);
80814 + return NULL_TREE;
80815 + }
80816 +
80817 + *no_add_attrs = false;
80818 + constify_type(*node);
80819 + return NULL_TREE;
80820 +}
80821 +
80822 +static struct attribute_spec no_const_attr = {
80823 + .name = "no_const",
80824 + .min_length = 0,
80825 + .max_length = 0,
80826 + .decl_required = false,
80827 + .type_required = false,
80828 + .function_type_required = false,
80829 + .handler = handle_no_const_attribute,
80830 +#if BUILDING_GCC_VERSION >= 4007
80831 + .affects_type_identity = true
80832 +#endif
80833 +};
80834 +
80835 +static struct attribute_spec do_const_attr = {
80836 + .name = "do_const",
80837 + .min_length = 0,
80838 + .max_length = 0,
80839 + .decl_required = false,
80840 + .type_required = false,
80841 + .function_type_required = false,
80842 + .handler = handle_do_const_attribute,
80843 +#if BUILDING_GCC_VERSION >= 4007
80844 + .affects_type_identity = true
80845 +#endif
80846 +};
80847 +
80848 +static void register_attributes(void *event_data, void *data)
80849 +{
80850 + register_attribute(&no_const_attr);
80851 + register_attribute(&do_const_attr);
80852 +}
80853 +
80854 +static void constify_type(tree type)
80855 +{
80856 + TYPE_READONLY(type) = 1;
80857 + C_TYPE_FIELDS_READONLY(type) = 1;
80858 +}
80859 +
80860 +static bool is_fptr(tree field)
80861 +{
80862 + tree ptr = TREE_TYPE(field);
80863 +
80864 + if (TREE_CODE(ptr) != POINTER_TYPE)
80865 + return false;
80866 +
80867 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
80868 +}
80869 +
80870 +static bool walk_struct(tree node)
80871 +{
80872 + tree field;
80873 +
80874 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
80875 + return false;
80876 +
80877 + if (TYPE_FIELDS(node) == NULL_TREE)
80878 + return false;
80879 +
80880 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
80881 + tree type = TREE_TYPE(field);
80882 + enum tree_code code = TREE_CODE(type);
80883 + if (code == RECORD_TYPE || code == UNION_TYPE) {
80884 + if (!(walk_struct(type)))
80885 + return false;
80886 + } else if (!is_fptr(field) && !TREE_READONLY(field))
80887 + return false;
80888 + }
80889 + return true;
80890 +}
80891 +
80892 +static void finish_type(void *event_data, void *data)
80893 +{
80894 + tree type = (tree)event_data;
80895 +
80896 + if (type == NULL_TREE)
80897 + return;
80898 +
80899 + if (TYPE_READONLY(type))
80900 + return;
80901 +
80902 + if (walk_struct(type))
80903 + constify_type(type);
80904 +}
80905 +
80906 +static unsigned int check_local_variables(void);
80907 +
80908 +struct gimple_opt_pass pass_local_variable = {
80909 + {
80910 + .type = GIMPLE_PASS,
80911 + .name = "check_local_variables",
80912 + .gate = NULL,
80913 + .execute = check_local_variables,
80914 + .sub = NULL,
80915 + .next = NULL,
80916 + .static_pass_number = 0,
80917 + .tv_id = TV_NONE,
80918 + .properties_required = 0,
80919 + .properties_provided = 0,
80920 + .properties_destroyed = 0,
80921 + .todo_flags_start = 0,
80922 + .todo_flags_finish = 0
80923 + }
80924 +};
80925 +
80926 +static unsigned int check_local_variables(void)
80927 +{
80928 + tree var;
80929 + referenced_var_iterator rvi;
80930 +
80931 +#if BUILDING_GCC_VERSION == 4005
80932 + FOR_EACH_REFERENCED_VAR(var, rvi) {
80933 +#else
80934 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
80935 +#endif
80936 + tree type = TREE_TYPE(var);
80937 +
80938 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
80939 + continue;
80940 +
80941 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80942 + continue;
80943 +
80944 + if (!TYPE_READONLY(type))
80945 + continue;
80946 +
80947 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
80948 +// continue;
80949 +
80950 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
80951 +// continue;
80952 +
80953 + if (walk_struct(type)) {
80954 + error("constified variable %qE cannot be local", var);
80955 + return 1;
80956 + }
80957 + }
80958 + return 0;
80959 +}
80960 +
80961 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80962 +{
80963 + const char * const plugin_name = plugin_info->base_name;
80964 + const int argc = plugin_info->argc;
80965 + const struct plugin_argument * const argv = plugin_info->argv;
80966 + int i;
80967 + bool constify = true;
80968 +
80969 + struct register_pass_info local_variable_pass_info = {
80970 + .pass = &pass_local_variable.pass,
80971 + .reference_pass_name = "*referenced_vars",
80972 + .ref_pass_instance_number = 0,
80973 + .pos_op = PASS_POS_INSERT_AFTER
80974 + };
80975 +
80976 + if (!plugin_default_version_check(version, &gcc_version)) {
80977 + error(G_("incompatible gcc/plugin versions"));
80978 + return 1;
80979 + }
80980 +
80981 + for (i = 0; i < argc; ++i) {
80982 + if (!(strcmp(argv[i].key, "no-constify"))) {
80983 + constify = false;
80984 + continue;
80985 + }
80986 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80987 + }
80988 +
80989 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
80990 + if (constify) {
80991 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
80992 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
80993 + }
80994 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
80995 +
80996 + return 0;
80997 +}
80998 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
80999 new file mode 100644
81000 index 0000000..a5eabce
81001 --- /dev/null
81002 +++ b/tools/gcc/kallocstat_plugin.c
81003 @@ -0,0 +1,167 @@
81004 +/*
81005 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81006 + * Licensed under the GPL v2
81007 + *
81008 + * Note: the choice of the license means that the compilation process is
81009 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81010 + * but for the kernel it doesn't matter since it doesn't link against
81011 + * any of the gcc libraries
81012 + *
81013 + * gcc plugin to find the distribution of k*alloc sizes
81014 + *
81015 + * TODO:
81016 + *
81017 + * BUGS:
81018 + * - none known
81019 + */
81020 +#include "gcc-plugin.h"
81021 +#include "config.h"
81022 +#include "system.h"
81023 +#include "coretypes.h"
81024 +#include "tree.h"
81025 +#include "tree-pass.h"
81026 +#include "flags.h"
81027 +#include "intl.h"
81028 +#include "toplev.h"
81029 +#include "plugin.h"
81030 +//#include "expr.h" where are you...
81031 +#include "diagnostic.h"
81032 +#include "plugin-version.h"
81033 +#include "tm.h"
81034 +#include "function.h"
81035 +#include "basic-block.h"
81036 +#include "gimple.h"
81037 +#include "rtl.h"
81038 +#include "emit-rtl.h"
81039 +
81040 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81041 +
81042 +int plugin_is_GPL_compatible;
81043 +
81044 +static const char * const kalloc_functions[] = {
81045 + "__kmalloc",
81046 + "kmalloc",
81047 + "kmalloc_large",
81048 + "kmalloc_node",
81049 + "kmalloc_order",
81050 + "kmalloc_order_trace",
81051 + "kmalloc_slab",
81052 + "kzalloc",
81053 + "kzalloc_node",
81054 +};
81055 +
81056 +static struct plugin_info kallocstat_plugin_info = {
81057 + .version = "201111150100",
81058 +};
81059 +
81060 +static unsigned int execute_kallocstat(void);
81061 +
81062 +static struct gimple_opt_pass kallocstat_pass = {
81063 + .pass = {
81064 + .type = GIMPLE_PASS,
81065 + .name = "kallocstat",
81066 + .gate = NULL,
81067 + .execute = execute_kallocstat,
81068 + .sub = NULL,
81069 + .next = NULL,
81070 + .static_pass_number = 0,
81071 + .tv_id = TV_NONE,
81072 + .properties_required = 0,
81073 + .properties_provided = 0,
81074 + .properties_destroyed = 0,
81075 + .todo_flags_start = 0,
81076 + .todo_flags_finish = 0
81077 + }
81078 +};
81079 +
81080 +static bool is_kalloc(const char *fnname)
81081 +{
81082 + size_t i;
81083 +
81084 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
81085 + if (!strcmp(fnname, kalloc_functions[i]))
81086 + return true;
81087 + return false;
81088 +}
81089 +
81090 +static unsigned int execute_kallocstat(void)
81091 +{
81092 + basic_block bb;
81093 +
81094 + // 1. loop through BBs and GIMPLE statements
81095 + FOR_EACH_BB(bb) {
81096 + gimple_stmt_iterator gsi;
81097 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81098 + // gimple match:
81099 + tree fndecl, size;
81100 + gimple call_stmt;
81101 + const char *fnname;
81102 +
81103 + // is it a call
81104 + call_stmt = gsi_stmt(gsi);
81105 + if (!is_gimple_call(call_stmt))
81106 + continue;
81107 + fndecl = gimple_call_fndecl(call_stmt);
81108 + if (fndecl == NULL_TREE)
81109 + continue;
81110 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
81111 + continue;
81112 +
81113 + // is it a call to k*alloc
81114 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
81115 + if (!is_kalloc(fnname))
81116 + continue;
81117 +
81118 + // is the size arg the result of a simple const assignment
81119 + size = gimple_call_arg(call_stmt, 0);
81120 + while (true) {
81121 + gimple def_stmt;
81122 + expanded_location xloc;
81123 + size_t size_val;
81124 +
81125 + if (TREE_CODE(size) != SSA_NAME)
81126 + break;
81127 + def_stmt = SSA_NAME_DEF_STMT(size);
81128 + if (!def_stmt || !is_gimple_assign(def_stmt))
81129 + break;
81130 + if (gimple_num_ops(def_stmt) != 2)
81131 + break;
81132 + size = gimple_assign_rhs1(def_stmt);
81133 + if (!TREE_CONSTANT(size))
81134 + continue;
81135 + xloc = expand_location(gimple_location(def_stmt));
81136 + if (!xloc.file)
81137 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
81138 + size_val = TREE_INT_CST_LOW(size);
81139 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
81140 + break;
81141 + }
81142 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81143 +//debug_tree(gimple_call_fn(call_stmt));
81144 +//print_node(stderr, "pax", fndecl, 4);
81145 + }
81146 + }
81147 +
81148 + return 0;
81149 +}
81150 +
81151 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81152 +{
81153 + const char * const plugin_name = plugin_info->base_name;
81154 + struct register_pass_info kallocstat_pass_info = {
81155 + .pass = &kallocstat_pass.pass,
81156 + .reference_pass_name = "ssa",
81157 + .ref_pass_instance_number = 0,
81158 + .pos_op = PASS_POS_INSERT_AFTER
81159 + };
81160 +
81161 + if (!plugin_default_version_check(version, &gcc_version)) {
81162 + error(G_("incompatible gcc/plugin versions"));
81163 + return 1;
81164 + }
81165 +
81166 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
81167 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
81168 +
81169 + return 0;
81170 +}
81171 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
81172 new file mode 100644
81173 index 0000000..51f747e
81174 --- /dev/null
81175 +++ b/tools/gcc/kernexec_plugin.c
81176 @@ -0,0 +1,348 @@
81177 +/*
81178 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81179 + * Licensed under the GPL v2
81180 + *
81181 + * Note: the choice of the license means that the compilation process is
81182 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81183 + * but for the kernel it doesn't matter since it doesn't link against
81184 + * any of the gcc libraries
81185 + *
81186 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
81187 + *
81188 + * TODO:
81189 + *
81190 + * BUGS:
81191 + * - none known
81192 + */
81193 +#include "gcc-plugin.h"
81194 +#include "config.h"
81195 +#include "system.h"
81196 +#include "coretypes.h"
81197 +#include "tree.h"
81198 +#include "tree-pass.h"
81199 +#include "flags.h"
81200 +#include "intl.h"
81201 +#include "toplev.h"
81202 +#include "plugin.h"
81203 +//#include "expr.h" where are you...
81204 +#include "diagnostic.h"
81205 +#include "plugin-version.h"
81206 +#include "tm.h"
81207 +#include "function.h"
81208 +#include "basic-block.h"
81209 +#include "gimple.h"
81210 +#include "rtl.h"
81211 +#include "emit-rtl.h"
81212 +#include "tree-flow.h"
81213 +
81214 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81215 +extern rtx emit_move_insn(rtx x, rtx y);
81216 +
81217 +int plugin_is_GPL_compatible;
81218 +
81219 +static struct plugin_info kernexec_plugin_info = {
81220 + .version = "201111291120",
81221 + .help = "method=[bts|or]\tinstrumentation method\n"
81222 +};
81223 +
81224 +static unsigned int execute_kernexec_fptr(void);
81225 +static unsigned int execute_kernexec_retaddr(void);
81226 +static bool kernexec_cmodel_check(void);
81227 +
81228 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
81229 +static void (*kernexec_instrument_retaddr)(rtx);
81230 +
81231 +static struct gimple_opt_pass kernexec_fptr_pass = {
81232 + .pass = {
81233 + .type = GIMPLE_PASS,
81234 + .name = "kernexec_fptr",
81235 + .gate = kernexec_cmodel_check,
81236 + .execute = execute_kernexec_fptr,
81237 + .sub = NULL,
81238 + .next = NULL,
81239 + .static_pass_number = 0,
81240 + .tv_id = TV_NONE,
81241 + .properties_required = 0,
81242 + .properties_provided = 0,
81243 + .properties_destroyed = 0,
81244 + .todo_flags_start = 0,
81245 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81246 + }
81247 +};
81248 +
81249 +static struct rtl_opt_pass kernexec_retaddr_pass = {
81250 + .pass = {
81251 + .type = RTL_PASS,
81252 + .name = "kernexec_retaddr",
81253 + .gate = kernexec_cmodel_check,
81254 + .execute = execute_kernexec_retaddr,
81255 + .sub = NULL,
81256 + .next = NULL,
81257 + .static_pass_number = 0,
81258 + .tv_id = TV_NONE,
81259 + .properties_required = 0,
81260 + .properties_provided = 0,
81261 + .properties_destroyed = 0,
81262 + .todo_flags_start = 0,
81263 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
81264 + }
81265 +};
81266 +
81267 +static bool kernexec_cmodel_check(void)
81268 +{
81269 + tree section;
81270 +
81271 + if (ix86_cmodel != CM_KERNEL)
81272 + return false;
81273 +
81274 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
81275 + if (!section || !TREE_VALUE(section))
81276 + return true;
81277 +
81278 + section = TREE_VALUE(TREE_VALUE(section));
81279 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
81280 + return true;
81281 +
81282 + return false;
81283 +}
81284 +
81285 +/*
81286 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
81287 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
81288 + */
81289 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
81290 +{
81291 + gimple assign_intptr, assign_new_fptr, call_stmt;
81292 + tree intptr, old_fptr, new_fptr, kernexec_mask;
81293 +
81294 + call_stmt = gsi_stmt(gsi);
81295 + old_fptr = gimple_call_fn(call_stmt);
81296 +
81297 + // create temporary unsigned long variable used for bitops and cast fptr to it
81298 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
81299 + add_referenced_var(intptr);
81300 + mark_sym_for_renaming(intptr);
81301 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
81302 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
81303 + update_stmt(assign_intptr);
81304 +
81305 + // apply logical or to temporary unsigned long and bitmask
81306 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
81307 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
81308 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
81309 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
81310 + update_stmt(assign_intptr);
81311 +
81312 + // cast temporary unsigned long back to a temporary fptr variable
81313 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
81314 + add_referenced_var(new_fptr);
81315 + mark_sym_for_renaming(new_fptr);
81316 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
81317 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
81318 + update_stmt(assign_new_fptr);
81319 +
81320 + // replace call stmt fn with the new fptr
81321 + gimple_call_set_fn(call_stmt, new_fptr);
81322 + update_stmt(call_stmt);
81323 +}
81324 +
81325 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
81326 +{
81327 + gimple asm_or_stmt, call_stmt;
81328 + tree old_fptr, new_fptr, input, output;
81329 + VEC(tree, gc) *inputs = NULL;
81330 + VEC(tree, gc) *outputs = NULL;
81331 +
81332 + call_stmt = gsi_stmt(gsi);
81333 + old_fptr = gimple_call_fn(call_stmt);
81334 +
81335 + // create temporary fptr variable
81336 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
81337 + add_referenced_var(new_fptr);
81338 + mark_sym_for_renaming(new_fptr);
81339 +
81340 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
81341 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
81342 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
81343 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
81344 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
81345 + VEC_safe_push(tree, gc, inputs, input);
81346 + VEC_safe_push(tree, gc, outputs, output);
81347 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
81348 + gimple_asm_set_volatile(asm_or_stmt, true);
81349 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
81350 + update_stmt(asm_or_stmt);
81351 +
81352 + // replace call stmt fn with the new fptr
81353 + gimple_call_set_fn(call_stmt, new_fptr);
81354 + update_stmt(call_stmt);
81355 +}
81356 +
81357 +/*
81358 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
81359 + */
81360 +static unsigned int execute_kernexec_fptr(void)
81361 +{
81362 + basic_block bb;
81363 + gimple_stmt_iterator gsi;
81364 +
81365 + // 1. loop through BBs and GIMPLE statements
81366 + FOR_EACH_BB(bb) {
81367 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81368 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
81369 + tree fn;
81370 + gimple call_stmt;
81371 +
81372 + // is it a call ...
81373 + call_stmt = gsi_stmt(gsi);
81374 + if (!is_gimple_call(call_stmt))
81375 + continue;
81376 + fn = gimple_call_fn(call_stmt);
81377 + if (TREE_CODE(fn) == ADDR_EXPR)
81378 + continue;
81379 + if (TREE_CODE(fn) != SSA_NAME)
81380 + gcc_unreachable();
81381 +
81382 + // ... through a function pointer
81383 + fn = SSA_NAME_VAR(fn);
81384 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
81385 + continue;
81386 + fn = TREE_TYPE(fn);
81387 + if (TREE_CODE(fn) != POINTER_TYPE)
81388 + continue;
81389 + fn = TREE_TYPE(fn);
81390 + if (TREE_CODE(fn) != FUNCTION_TYPE)
81391 + continue;
81392 +
81393 + kernexec_instrument_fptr(gsi);
81394 +
81395 +//debug_tree(gimple_call_fn(call_stmt));
81396 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81397 + }
81398 + }
81399 +
81400 + return 0;
81401 +}
81402 +
81403 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
81404 +static void kernexec_instrument_retaddr_bts(rtx insn)
81405 +{
81406 + rtx btsq;
81407 + rtvec argvec, constraintvec, labelvec;
81408 + int line;
81409 +
81410 + // create asm volatile("btsq $63,(%%rsp)":::)
81411 + argvec = rtvec_alloc(0);
81412 + constraintvec = rtvec_alloc(0);
81413 + labelvec = rtvec_alloc(0);
81414 + line = expand_location(RTL_LOCATION(insn)).line;
81415 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81416 + MEM_VOLATILE_P(btsq) = 1;
81417 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
81418 + emit_insn_before(btsq, insn);
81419 +}
81420 +
81421 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
81422 +static void kernexec_instrument_retaddr_or(rtx insn)
81423 +{
81424 + rtx orq;
81425 + rtvec argvec, constraintvec, labelvec;
81426 + int line;
81427 +
81428 + // create asm volatile("orq %%r10,(%%rsp)":::)
81429 + argvec = rtvec_alloc(0);
81430 + constraintvec = rtvec_alloc(0);
81431 + labelvec = rtvec_alloc(0);
81432 + line = expand_location(RTL_LOCATION(insn)).line;
81433 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81434 + MEM_VOLATILE_P(orq) = 1;
81435 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
81436 + emit_insn_before(orq, insn);
81437 +}
81438 +
81439 +/*
81440 + * find all asm level function returns and forcibly set the highest bit of the return address
81441 + */
81442 +static unsigned int execute_kernexec_retaddr(void)
81443 +{
81444 + rtx insn;
81445 +
81446 + // 1. find function returns
81447 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81448 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
81449 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
81450 + rtx body;
81451 +
81452 + // is it a retn
81453 + if (!JUMP_P(insn))
81454 + continue;
81455 + body = PATTERN(insn);
81456 + if (GET_CODE(body) == PARALLEL)
81457 + body = XVECEXP(body, 0, 0);
81458 + if (GET_CODE(body) != RETURN)
81459 + continue;
81460 + kernexec_instrument_retaddr(insn);
81461 + }
81462 +
81463 +// print_simple_rtl(stderr, get_insns());
81464 +// print_rtl(stderr, get_insns());
81465 +
81466 + return 0;
81467 +}
81468 +
81469 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81470 +{
81471 + const char * const plugin_name = plugin_info->base_name;
81472 + const int argc = plugin_info->argc;
81473 + const struct plugin_argument * const argv = plugin_info->argv;
81474 + int i;
81475 + struct register_pass_info kernexec_fptr_pass_info = {
81476 + .pass = &kernexec_fptr_pass.pass,
81477 + .reference_pass_name = "ssa",
81478 + .ref_pass_instance_number = 0,
81479 + .pos_op = PASS_POS_INSERT_AFTER
81480 + };
81481 + struct register_pass_info kernexec_retaddr_pass_info = {
81482 + .pass = &kernexec_retaddr_pass.pass,
81483 + .reference_pass_name = "pro_and_epilogue",
81484 + .ref_pass_instance_number = 0,
81485 + .pos_op = PASS_POS_INSERT_AFTER
81486 + };
81487 +
81488 + if (!plugin_default_version_check(version, &gcc_version)) {
81489 + error(G_("incompatible gcc/plugin versions"));
81490 + return 1;
81491 + }
81492 +
81493 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
81494 +
81495 + if (TARGET_64BIT == 0)
81496 + return 0;
81497 +
81498 + for (i = 0; i < argc; ++i) {
81499 + if (!strcmp(argv[i].key, "method")) {
81500 + if (!argv[i].value) {
81501 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81502 + continue;
81503 + }
81504 + if (!strcmp(argv[i].value, "bts")) {
81505 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
81506 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
81507 + } else if (!strcmp(argv[i].value, "or")) {
81508 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
81509 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
81510 + fix_register("r10", 1, 1);
81511 + } else
81512 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81513 + continue;
81514 + }
81515 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81516 + }
81517 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
81518 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
81519 +
81520 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
81521 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
81522 +
81523 + return 0;
81524 +}
81525 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
81526 new file mode 100644
81527 index 0000000..d44f37c
81528 --- /dev/null
81529 +++ b/tools/gcc/stackleak_plugin.c
81530 @@ -0,0 +1,291 @@
81531 +/*
81532 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81533 + * Licensed under the GPL v2
81534 + *
81535 + * Note: the choice of the license means that the compilation process is
81536 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81537 + * but for the kernel it doesn't matter since it doesn't link against
81538 + * any of the gcc libraries
81539 + *
81540 + * gcc plugin to help implement various PaX features
81541 + *
81542 + * - track lowest stack pointer
81543 + *
81544 + * TODO:
81545 + * - initialize all local variables
81546 + *
81547 + * BUGS:
81548 + * - none known
81549 + */
81550 +#include "gcc-plugin.h"
81551 +#include "config.h"
81552 +#include "system.h"
81553 +#include "coretypes.h"
81554 +#include "tree.h"
81555 +#include "tree-pass.h"
81556 +#include "flags.h"
81557 +#include "intl.h"
81558 +#include "toplev.h"
81559 +#include "plugin.h"
81560 +//#include "expr.h" where are you...
81561 +#include "diagnostic.h"
81562 +#include "plugin-version.h"
81563 +#include "tm.h"
81564 +#include "function.h"
81565 +#include "basic-block.h"
81566 +#include "gimple.h"
81567 +#include "rtl.h"
81568 +#include "emit-rtl.h"
81569 +
81570 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81571 +
81572 +int plugin_is_GPL_compatible;
81573 +
81574 +static int track_frame_size = -1;
81575 +static const char track_function[] = "pax_track_stack";
81576 +static const char check_function[] = "pax_check_alloca";
81577 +static bool init_locals;
81578 +
81579 +static struct plugin_info stackleak_plugin_info = {
81580 + .version = "201111150100",
81581 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
81582 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
81583 +};
81584 +
81585 +static bool gate_stackleak_track_stack(void);
81586 +static unsigned int execute_stackleak_tree_instrument(void);
81587 +static unsigned int execute_stackleak_final(void);
81588 +
81589 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
81590 + .pass = {
81591 + .type = GIMPLE_PASS,
81592 + .name = "stackleak_tree_instrument",
81593 + .gate = gate_stackleak_track_stack,
81594 + .execute = execute_stackleak_tree_instrument,
81595 + .sub = NULL,
81596 + .next = NULL,
81597 + .static_pass_number = 0,
81598 + .tv_id = TV_NONE,
81599 + .properties_required = PROP_gimple_leh | PROP_cfg,
81600 + .properties_provided = 0,
81601 + .properties_destroyed = 0,
81602 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
81603 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
81604 + }
81605 +};
81606 +
81607 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
81608 + .pass = {
81609 + .type = RTL_PASS,
81610 + .name = "stackleak_final",
81611 + .gate = gate_stackleak_track_stack,
81612 + .execute = execute_stackleak_final,
81613 + .sub = NULL,
81614 + .next = NULL,
81615 + .static_pass_number = 0,
81616 + .tv_id = TV_NONE,
81617 + .properties_required = 0,
81618 + .properties_provided = 0,
81619 + .properties_destroyed = 0,
81620 + .todo_flags_start = 0,
81621 + .todo_flags_finish = TODO_dump_func
81622 + }
81623 +};
81624 +
81625 +static bool gate_stackleak_track_stack(void)
81626 +{
81627 + return track_frame_size >= 0;
81628 +}
81629 +
81630 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
81631 +{
81632 + gimple check_alloca;
81633 + tree fndecl, fntype, alloca_size;
81634 +
81635 + // insert call to void pax_check_alloca(unsigned long size)
81636 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
81637 + fndecl = build_fn_decl(check_function, fntype);
81638 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
81639 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
81640 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
81641 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
81642 +}
81643 +
81644 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
81645 +{
81646 + gimple track_stack;
81647 + tree fndecl, fntype;
81648 +
81649 + // insert call to void pax_track_stack(void)
81650 + fntype = build_function_type_list(void_type_node, NULL_TREE);
81651 + fndecl = build_fn_decl(track_function, fntype);
81652 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
81653 + track_stack = gimple_build_call(fndecl, 0);
81654 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
81655 +}
81656 +
81657 +#if BUILDING_GCC_VERSION == 4005
81658 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
81659 +{
81660 + tree fndecl;
81661 +
81662 + if (!is_gimple_call(stmt))
81663 + return false;
81664 + fndecl = gimple_call_fndecl(stmt);
81665 + if (!fndecl)
81666 + return false;
81667 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
81668 + return false;
81669 +// print_node(stderr, "pax", fndecl, 4);
81670 + return DECL_FUNCTION_CODE(fndecl) == code;
81671 +}
81672 +#endif
81673 +
81674 +static bool is_alloca(gimple stmt)
81675 +{
81676 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
81677 + return true;
81678 +
81679 +#if BUILDING_GCC_VERSION >= 4007
81680 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
81681 + return true;
81682 +#endif
81683 +
81684 + return false;
81685 +}
81686 +
81687 +static unsigned int execute_stackleak_tree_instrument(void)
81688 +{
81689 + basic_block bb, entry_bb;
81690 + bool prologue_instrumented = false;
81691 +
81692 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
81693 +
81694 + // 1. loop through BBs and GIMPLE statements
81695 + FOR_EACH_BB(bb) {
81696 + gimple_stmt_iterator gsi;
81697 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81698 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
81699 + if (!is_alloca(gsi_stmt(gsi)))
81700 + continue;
81701 +
81702 + // 2. insert stack overflow check before each __builtin_alloca call
81703 + stackleak_check_alloca(gsi);
81704 +
81705 + // 3. insert track call after each __builtin_alloca call
81706 + stackleak_add_instrumentation(gsi);
81707 + if (bb == entry_bb)
81708 + prologue_instrumented = true;
81709 + }
81710 + }
81711 +
81712 + // 4. insert track call at the beginning
81713 + if (!prologue_instrumented) {
81714 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
81715 + if (dom_info_available_p(CDI_DOMINATORS))
81716 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
81717 + stackleak_add_instrumentation(gsi_start_bb(bb));
81718 + }
81719 +
81720 + return 0;
81721 +}
81722 +
81723 +static unsigned int execute_stackleak_final(void)
81724 +{
81725 + rtx insn;
81726 +
81727 + if (cfun->calls_alloca)
81728 + return 0;
81729 +
81730 + // keep calls only if function frame is big enough
81731 + if (get_frame_size() >= track_frame_size)
81732 + return 0;
81733 +
81734 + // 1. find pax_track_stack calls
81735 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81736 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
81737 + rtx body;
81738 +
81739 + if (!CALL_P(insn))
81740 + continue;
81741 + body = PATTERN(insn);
81742 + if (GET_CODE(body) != CALL)
81743 + continue;
81744 + body = XEXP(body, 0);
81745 + if (GET_CODE(body) != MEM)
81746 + continue;
81747 + body = XEXP(body, 0);
81748 + if (GET_CODE(body) != SYMBOL_REF)
81749 + continue;
81750 + if (strcmp(XSTR(body, 0), track_function))
81751 + continue;
81752 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
81753 + // 2. delete call
81754 + insn = delete_insn_and_edges(insn);
81755 +#if BUILDING_GCC_VERSION >= 4007
81756 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
81757 + insn = delete_insn_and_edges(insn);
81758 +#endif
81759 + }
81760 +
81761 +// print_simple_rtl(stderr, get_insns());
81762 +// print_rtl(stderr, get_insns());
81763 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
81764 +
81765 + return 0;
81766 +}
81767 +
81768 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81769 +{
81770 + const char * const plugin_name = plugin_info->base_name;
81771 + const int argc = plugin_info->argc;
81772 + const struct plugin_argument * const argv = plugin_info->argv;
81773 + int i;
81774 + struct register_pass_info stackleak_tree_instrument_pass_info = {
81775 + .pass = &stackleak_tree_instrument_pass.pass,
81776 +// .reference_pass_name = "tree_profile",
81777 + .reference_pass_name = "optimized",
81778 + .ref_pass_instance_number = 0,
81779 + .pos_op = PASS_POS_INSERT_AFTER
81780 + };
81781 + struct register_pass_info stackleak_final_pass_info = {
81782 + .pass = &stackleak_final_rtl_opt_pass.pass,
81783 + .reference_pass_name = "final",
81784 + .ref_pass_instance_number = 0,
81785 + .pos_op = PASS_POS_INSERT_BEFORE
81786 + };
81787 +
81788 + if (!plugin_default_version_check(version, &gcc_version)) {
81789 + error(G_("incompatible gcc/plugin versions"));
81790 + return 1;
81791 + }
81792 +
81793 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
81794 +
81795 + for (i = 0; i < argc; ++i) {
81796 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
81797 + if (!argv[i].value) {
81798 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81799 + continue;
81800 + }
81801 + track_frame_size = atoi(argv[i].value);
81802 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
81803 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81804 + continue;
81805 + }
81806 + if (!strcmp(argv[i].key, "initialize-locals")) {
81807 + if (argv[i].value) {
81808 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81809 + continue;
81810 + }
81811 + init_locals = true;
81812 + continue;
81813 + }
81814 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81815 + }
81816 +
81817 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
81818 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
81819 +
81820 + return 0;
81821 +}
81822 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
81823 index 6789d78..4afd019 100644
81824 --- a/tools/perf/util/include/asm/alternative-asm.h
81825 +++ b/tools/perf/util/include/asm/alternative-asm.h
81826 @@ -5,4 +5,7 @@
81827
81828 #define altinstruction_entry #
81829
81830 + .macro pax_force_retaddr rip=0, reload=0
81831 + .endm
81832 +
81833 #endif
81834 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
81835 index af0f22f..9a7d479 100644
81836 --- a/usr/gen_init_cpio.c
81837 +++ b/usr/gen_init_cpio.c
81838 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
81839 int retval;
81840 int rc = -1;
81841 int namesize;
81842 - int i;
81843 + unsigned int i;
81844
81845 mode |= S_IFREG;
81846
81847 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
81848 *env_var = *expanded = '\0';
81849 strncat(env_var, start + 2, end - start - 2);
81850 strncat(expanded, new_location, start - new_location);
81851 - strncat(expanded, getenv(env_var), PATH_MAX);
81852 - strncat(expanded, end + 1, PATH_MAX);
81853 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
81854 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
81855 strncpy(new_location, expanded, PATH_MAX);
81856 + new_location[PATH_MAX] = 0;
81857 } else
81858 break;
81859 }
81860 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
81861 index aefdda3..8e8fbb9 100644
81862 --- a/virt/kvm/kvm_main.c
81863 +++ b/virt/kvm/kvm_main.c
81864 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
81865
81866 static cpumask_var_t cpus_hardware_enabled;
81867 static int kvm_usage_count = 0;
81868 -static atomic_t hardware_enable_failed;
81869 +static atomic_unchecked_t hardware_enable_failed;
81870
81871 struct kmem_cache *kvm_vcpu_cache;
81872 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
81873 @@ -2266,7 +2266,7 @@ static void hardware_enable_nolock(void *junk)
81874
81875 if (r) {
81876 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
81877 - atomic_inc(&hardware_enable_failed);
81878 + atomic_inc_unchecked(&hardware_enable_failed);
81879 printk(KERN_INFO "kvm: enabling virtualization on "
81880 "CPU%d failed\n", cpu);
81881 }
81882 @@ -2320,10 +2320,10 @@ static int hardware_enable_all(void)
81883
81884 kvm_usage_count++;
81885 if (kvm_usage_count == 1) {
81886 - atomic_set(&hardware_enable_failed, 0);
81887 + atomic_set_unchecked(&hardware_enable_failed, 0);
81888 on_each_cpu(hardware_enable_nolock, NULL, 1);
81889
81890 - if (atomic_read(&hardware_enable_failed)) {
81891 + if (atomic_read_unchecked(&hardware_enable_failed)) {
81892 hardware_disable_all_nolock();
81893 r = -EBUSY;
81894 }
81895 @@ -2588,7 +2588,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
81896 kvm_arch_vcpu_put(vcpu);
81897 }
81898
81899 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81900 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81901 struct module *module)
81902 {
81903 int r;
81904 @@ -2651,7 +2651,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81905 if (!vcpu_align)
81906 vcpu_align = __alignof__(struct kvm_vcpu);
81907 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
81908 - 0, NULL);
81909 + SLAB_USERCOPY, NULL);
81910 if (!kvm_vcpu_cache) {
81911 r = -ENOMEM;
81912 goto out_free_3;
81913 @@ -2661,9 +2661,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81914 if (r)
81915 goto out_free;
81916
81917 - kvm_chardev_ops.owner = module;
81918 - kvm_vm_fops.owner = module;
81919 - kvm_vcpu_fops.owner = module;
81920 + pax_open_kernel();
81921 + *(void **)&kvm_chardev_ops.owner = module;
81922 + *(void **)&kvm_vm_fops.owner = module;
81923 + *(void **)&kvm_vcpu_fops.owner = module;
81924 + pax_close_kernel();
81925
81926 r = misc_register(&kvm_dev);
81927 if (r) {