]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.1.4-201112082139.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.1.4-201112082139.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index d6e6724..a024ce8 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index 7f8a93b..4435dc9 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,42 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234 +endif
235 +ifdef CONFIG_CHECKER_PLUGIN
236 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238 +endif
239 +endif
240 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242 +gcc-plugins:
243 + $(Q)$(MAKE) $(build)=tools/gcc
244 +else
245 +gcc-plugins:
246 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
247 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
248 +else
249 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
250 +endif
251 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
252 +endif
253 +endif
254 +
255 include $(srctree)/arch/$(SRCARCH)/Makefile
256
257 ifneq ($(CONFIG_FRAME_WARN),0)
258 @@ -708,7 +745,7 @@ export mod_strip_cmd
259
260
261 ifeq ($(KBUILD_EXTMOD),)
262 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
263 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
264
265 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
266 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
267 @@ -932,6 +969,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
268
269 # The actual objects are generated when descending,
270 # make sure no implicit rule kicks in
271 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
272 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
273
274 # Handle descending into subdirectories listed in $(vmlinux-dirs)
275 @@ -941,7 +979,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
276 # Error messages still appears in the original language
277
278 PHONY += $(vmlinux-dirs)
279 -$(vmlinux-dirs): prepare scripts
280 +$(vmlinux-dirs): gcc-plugins prepare scripts
281 $(Q)$(MAKE) $(build)=$@
282
283 # Store (new) KERNELRELASE string in include/config/kernel.release
284 @@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
285 $(Q)$(MAKE) $(build)=. missing-syscalls
286
287 # All the preparing..
288 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
289 prepare: prepare0
290
291 # Generate some files
292 @@ -1087,6 +1126,7 @@ all: modules
293 # using awk while concatenating to the final file.
294
295 PHONY += modules
296 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
297 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
298 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
299 @$(kecho) ' Building modules, stage 2.';
300 @@ -1102,7 +1142,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
301
302 # Target to prepare building external modules
303 PHONY += modules_prepare
304 -modules_prepare: prepare scripts
305 +modules_prepare: gcc-plugins prepare scripts
306
307 # Target to install modules
308 PHONY += modules_install
309 @@ -1198,7 +1238,7 @@ distclean: mrproper
310 @find $(srctree) $(RCS_FIND_IGNORE) \
311 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
312 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
313 - -o -name '.*.rej' -o -size 0 \
314 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
315 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
316 -type f -print | xargs rm -f
317
318 @@ -1360,6 +1400,7 @@ PHONY += $(module-dirs) modules
319 $(module-dirs): crmodverdir $(objtree)/Module.symvers
320 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
321
322 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
323 modules: $(module-dirs)
324 @$(kecho) ' Building modules, stage 2.';
325 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
326 @@ -1486,17 +1527,19 @@ else
327 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
328 endif
329
330 -%.s: %.c prepare scripts FORCE
331 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
332 +%.s: %.c gcc-plugins prepare scripts FORCE
333 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
334 %.i: %.c prepare scripts FORCE
335 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
336 -%.o: %.c prepare scripts FORCE
337 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
338 +%.o: %.c gcc-plugins prepare scripts FORCE
339 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
340 %.lst: %.c prepare scripts FORCE
341 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
342 -%.s: %.S prepare scripts FORCE
343 +%.s: %.S gcc-plugins prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.o: %.S prepare scripts FORCE
346 +%.o: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 %.symtypes: %.c prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 @@ -1506,11 +1549,13 @@ endif
351 $(cmd_crmodverdir)
352 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
353 $(build)=$(build-dir)
354 -%/: prepare scripts FORCE
355 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
356 +%/: gcc-plugins prepare scripts FORCE
357 $(cmd_crmodverdir)
358 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
359 $(build)=$(build-dir)
360 -%.ko: prepare scripts FORCE
361 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
362 +%.ko: gcc-plugins prepare scripts FORCE
363 $(cmd_crmodverdir)
364 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
365 $(build)=$(build-dir) $(@:.ko=.o)
366 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
367 index da5449e..7418343 100644
368 --- a/arch/alpha/include/asm/elf.h
369 +++ b/arch/alpha/include/asm/elf.h
370 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
371
372 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
373
374 +#ifdef CONFIG_PAX_ASLR
375 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
376 +
377 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
378 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
379 +#endif
380 +
381 /* $0 is set by ld.so to a pointer to a function which might be
382 registered using atexit. This provides a mean for the dynamic
383 linker to call DT_FINI functions for shared libraries that have
384 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
385 index de98a73..bd4f1f8 100644
386 --- a/arch/alpha/include/asm/pgtable.h
387 +++ b/arch/alpha/include/asm/pgtable.h
388 @@ -101,6 +101,17 @@ struct vm_area_struct;
389 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
390 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
391 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
392 +
393 +#ifdef CONFIG_PAX_PAGEEXEC
394 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
395 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
396 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
397 +#else
398 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
399 +# define PAGE_COPY_NOEXEC PAGE_COPY
400 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
401 +#endif
402 +
403 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
404
405 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
406 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
407 index 2fd00b7..cfd5069 100644
408 --- a/arch/alpha/kernel/module.c
409 +++ b/arch/alpha/kernel/module.c
410 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
411
412 /* The small sections were sorted to the end of the segment.
413 The following should definitely cover them. */
414 - gp = (u64)me->module_core + me->core_size - 0x8000;
415 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
416 got = sechdrs[me->arch.gotsecindex].sh_addr;
417
418 for (i = 0; i < n; i++) {
419 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
420 index 01e8715..be0e80f 100644
421 --- a/arch/alpha/kernel/osf_sys.c
422 +++ b/arch/alpha/kernel/osf_sys.c
423 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
424 /* At this point: (!vma || addr < vma->vm_end). */
425 if (limit - len < addr)
426 return -ENOMEM;
427 - if (!vma || addr + len <= vma->vm_start)
428 + if (check_heap_stack_gap(vma, addr, len))
429 return addr;
430 addr = vma->vm_end;
431 vma = vma->vm_next;
432 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
433 merely specific addresses, but regions of memory -- perhaps
434 this feature should be incorporated into all ports? */
435
436 +#ifdef CONFIG_PAX_RANDMMAP
437 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
438 +#endif
439 +
440 if (addr) {
441 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
442 if (addr != (unsigned long) -ENOMEM)
443 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
444 }
445
446 /* Next, try allocating at TASK_UNMAPPED_BASE. */
447 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
448 - len, limit);
449 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
450 +
451 if (addr != (unsigned long) -ENOMEM)
452 return addr;
453
454 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
455 index fadd5f8..904e73a 100644
456 --- a/arch/alpha/mm/fault.c
457 +++ b/arch/alpha/mm/fault.c
458 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
459 __reload_thread(pcb);
460 }
461
462 +#ifdef CONFIG_PAX_PAGEEXEC
463 +/*
464 + * PaX: decide what to do with offenders (regs->pc = fault address)
465 + *
466 + * returns 1 when task should be killed
467 + * 2 when patched PLT trampoline was detected
468 + * 3 when unpatched PLT trampoline was detected
469 + */
470 +static int pax_handle_fetch_fault(struct pt_regs *regs)
471 +{
472 +
473 +#ifdef CONFIG_PAX_EMUPLT
474 + int err;
475 +
476 + do { /* PaX: patched PLT emulation #1 */
477 + unsigned int ldah, ldq, jmp;
478 +
479 + err = get_user(ldah, (unsigned int *)regs->pc);
480 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
481 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
482 +
483 + if (err)
484 + break;
485 +
486 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
487 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
488 + jmp == 0x6BFB0000U)
489 + {
490 + unsigned long r27, addr;
491 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
492 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
493 +
494 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
495 + err = get_user(r27, (unsigned long *)addr);
496 + if (err)
497 + break;
498 +
499 + regs->r27 = r27;
500 + regs->pc = r27;
501 + return 2;
502 + }
503 + } while (0);
504 +
505 + do { /* PaX: patched PLT emulation #2 */
506 + unsigned int ldah, lda, br;
507 +
508 + err = get_user(ldah, (unsigned int *)regs->pc);
509 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
510 + err |= get_user(br, (unsigned int *)(regs->pc+8));
511 +
512 + if (err)
513 + break;
514 +
515 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
516 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
517 + (br & 0xFFE00000U) == 0xC3E00000U)
518 + {
519 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
520 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
521 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
522 +
523 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
524 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
525 + return 2;
526 + }
527 + } while (0);
528 +
529 + do { /* PaX: unpatched PLT emulation */
530 + unsigned int br;
531 +
532 + err = get_user(br, (unsigned int *)regs->pc);
533 +
534 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
535 + unsigned int br2, ldq, nop, jmp;
536 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
537 +
538 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
539 + err = get_user(br2, (unsigned int *)addr);
540 + err |= get_user(ldq, (unsigned int *)(addr+4));
541 + err |= get_user(nop, (unsigned int *)(addr+8));
542 + err |= get_user(jmp, (unsigned int *)(addr+12));
543 + err |= get_user(resolver, (unsigned long *)(addr+16));
544 +
545 + if (err)
546 + break;
547 +
548 + if (br2 == 0xC3600000U &&
549 + ldq == 0xA77B000CU &&
550 + nop == 0x47FF041FU &&
551 + jmp == 0x6B7B0000U)
552 + {
553 + regs->r28 = regs->pc+4;
554 + regs->r27 = addr+16;
555 + regs->pc = resolver;
556 + return 3;
557 + }
558 + }
559 + } while (0);
560 +#endif
561 +
562 + return 1;
563 +}
564 +
565 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
566 +{
567 + unsigned long i;
568 +
569 + printk(KERN_ERR "PAX: bytes at PC: ");
570 + for (i = 0; i < 5; i++) {
571 + unsigned int c;
572 + if (get_user(c, (unsigned int *)pc+i))
573 + printk(KERN_CONT "???????? ");
574 + else
575 + printk(KERN_CONT "%08x ", c);
576 + }
577 + printk("\n");
578 +}
579 +#endif
580
581 /*
582 * This routine handles page faults. It determines the address,
583 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
584 good_area:
585 si_code = SEGV_ACCERR;
586 if (cause < 0) {
587 - if (!(vma->vm_flags & VM_EXEC))
588 + if (!(vma->vm_flags & VM_EXEC)) {
589 +
590 +#ifdef CONFIG_PAX_PAGEEXEC
591 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
592 + goto bad_area;
593 +
594 + up_read(&mm->mmap_sem);
595 + switch (pax_handle_fetch_fault(regs)) {
596 +
597 +#ifdef CONFIG_PAX_EMUPLT
598 + case 2:
599 + case 3:
600 + return;
601 +#endif
602 +
603 + }
604 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
605 + do_group_exit(SIGKILL);
606 +#else
607 goto bad_area;
608 +#endif
609 +
610 + }
611 } else if (!cause) {
612 /* Allow reads even for write-only mappings */
613 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
614 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
615 index 86976d0..8a57797 100644
616 --- a/arch/arm/include/asm/atomic.h
617 +++ b/arch/arm/include/asm/atomic.h
618 @@ -239,6 +239,14 @@ typedef struct {
619 u64 __aligned(8) counter;
620 } atomic64_t;
621
622 +#ifdef CONFIG_PAX_REFCOUNT
623 +typedef struct {
624 + u64 __aligned(8) counter;
625 +} atomic64_unchecked_t;
626 +#else
627 +typedef atomic64_t atomic64_unchecked_t;
628 +#endif
629 +
630 #define ATOMIC64_INIT(i) { (i) }
631
632 static inline u64 atomic64_read(atomic64_t *v)
633 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
634 index 0e9ce8d..6ef1e03 100644
635 --- a/arch/arm/include/asm/elf.h
636 +++ b/arch/arm/include/asm/elf.h
637 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
638 the loader. We need to make sure that it is out of the way of the program
639 that it will "exec", and that there is sufficient room for the brk. */
640
641 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
642 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
643 +
644 +#ifdef CONFIG_PAX_ASLR
645 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
646 +
647 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
648 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
649 +#endif
650
651 /* When the program starts, a1 contains a pointer to a function to be
652 registered with atexit, as per the SVR4 ABI. A value of 0 means we
653 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
654 extern void elf_set_personality(const struct elf32_hdr *);
655 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
656
657 -struct mm_struct;
658 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
659 -#define arch_randomize_brk arch_randomize_brk
660 -
661 extern int vectors_user_mapping(void);
662 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
663 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
664 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
665 index e51b1e8..32a3113 100644
666 --- a/arch/arm/include/asm/kmap_types.h
667 +++ b/arch/arm/include/asm/kmap_types.h
668 @@ -21,6 +21,7 @@ enum km_type {
669 KM_L1_CACHE,
670 KM_L2_CACHE,
671 KM_KDB,
672 + KM_CLEARPAGE,
673 KM_TYPE_NR
674 };
675
676 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
677 index b293616..96310e5 100644
678 --- a/arch/arm/include/asm/uaccess.h
679 +++ b/arch/arm/include/asm/uaccess.h
680 @@ -22,6 +22,8 @@
681 #define VERIFY_READ 0
682 #define VERIFY_WRITE 1
683
684 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
685 +
686 /*
687 * The exception table consists of pairs of addresses: the first is the
688 * address of an instruction that is allowed to fault, and the second is
689 @@ -387,8 +389,23 @@ do { \
690
691
692 #ifdef CONFIG_MMU
693 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
694 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
695 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
696 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
697 +
698 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
699 +{
700 + if (!__builtin_constant_p(n))
701 + check_object_size(to, n, false);
702 + return ___copy_from_user(to, from, n);
703 +}
704 +
705 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
706 +{
707 + if (!__builtin_constant_p(n))
708 + check_object_size(from, n, true);
709 + return ___copy_to_user(to, from, n);
710 +}
711 +
712 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
713 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
714 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
715 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
716
717 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
718 {
719 + if ((long)n < 0)
720 + return n;
721 +
722 if (access_ok(VERIFY_READ, from, n))
723 n = __copy_from_user(to, from, n);
724 else /* security hole - plug it */
725 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
726
727 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
728 {
729 + if ((long)n < 0)
730 + return n;
731 +
732 if (access_ok(VERIFY_WRITE, to, n))
733 n = __copy_to_user(to, from, n);
734 return n;
735 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
736 index aeef960..2966009 100644
737 --- a/arch/arm/kernel/armksyms.c
738 +++ b/arch/arm/kernel/armksyms.c
739 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
740 #ifdef CONFIG_MMU
741 EXPORT_SYMBOL(copy_page);
742
743 -EXPORT_SYMBOL(__copy_from_user);
744 -EXPORT_SYMBOL(__copy_to_user);
745 +EXPORT_SYMBOL(___copy_from_user);
746 +EXPORT_SYMBOL(___copy_to_user);
747 EXPORT_SYMBOL(__clear_user);
748
749 EXPORT_SYMBOL(__get_user_1);
750 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
751 index 1a347f4..8b4c8a1 100644
752 --- a/arch/arm/kernel/process.c
753 +++ b/arch/arm/kernel/process.c
754 @@ -28,7 +28,6 @@
755 #include <linux/tick.h>
756 #include <linux/utsname.h>
757 #include <linux/uaccess.h>
758 -#include <linux/random.h>
759 #include <linux/hw_breakpoint.h>
760 #include <linux/cpuidle.h>
761
762 @@ -481,12 +480,6 @@ unsigned long get_wchan(struct task_struct *p)
763 return 0;
764 }
765
766 -unsigned long arch_randomize_brk(struct mm_struct *mm)
767 -{
768 - unsigned long range_end = mm->brk + 0x02000000;
769 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
770 -}
771 -
772 #ifdef CONFIG_MMU
773 /*
774 * The vectors page is always readable from user space for the
775 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
776 index bc9f9da..c75d826 100644
777 --- a/arch/arm/kernel/traps.c
778 +++ b/arch/arm/kernel/traps.c
779 @@ -257,6 +257,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
780
781 static DEFINE_SPINLOCK(die_lock);
782
783 +extern void gr_handle_kernel_exploit(void);
784 +
785 /*
786 * This function is protected against re-entrancy.
787 */
788 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs *regs, int err)
789 panic("Fatal exception in interrupt");
790 if (panic_on_oops)
791 panic("Fatal exception");
792 +
793 + gr_handle_kernel_exploit();
794 +
795 if (ret != NOTIFY_STOP)
796 do_exit(SIGSEGV);
797 }
798 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
799 index 66a477a..bee61d3 100644
800 --- a/arch/arm/lib/copy_from_user.S
801 +++ b/arch/arm/lib/copy_from_user.S
802 @@ -16,7 +16,7 @@
803 /*
804 * Prototype:
805 *
806 - * size_t __copy_from_user(void *to, const void *from, size_t n)
807 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
808 *
809 * Purpose:
810 *
811 @@ -84,11 +84,11 @@
812
813 .text
814
815 -ENTRY(__copy_from_user)
816 +ENTRY(___copy_from_user)
817
818 #include "copy_template.S"
819
820 -ENDPROC(__copy_from_user)
821 +ENDPROC(___copy_from_user)
822
823 .pushsection .fixup,"ax"
824 .align 0
825 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
826 index d066df6..df28194 100644
827 --- a/arch/arm/lib/copy_to_user.S
828 +++ b/arch/arm/lib/copy_to_user.S
829 @@ -16,7 +16,7 @@
830 /*
831 * Prototype:
832 *
833 - * size_t __copy_to_user(void *to, const void *from, size_t n)
834 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
835 *
836 * Purpose:
837 *
838 @@ -88,11 +88,11 @@
839 .text
840
841 ENTRY(__copy_to_user_std)
842 -WEAK(__copy_to_user)
843 +WEAK(___copy_to_user)
844
845 #include "copy_template.S"
846
847 -ENDPROC(__copy_to_user)
848 +ENDPROC(___copy_to_user)
849 ENDPROC(__copy_to_user_std)
850
851 .pushsection .fixup,"ax"
852 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
853 index d0ece2a..5ae2f39 100644
854 --- a/arch/arm/lib/uaccess.S
855 +++ b/arch/arm/lib/uaccess.S
856 @@ -20,7 +20,7 @@
857
858 #define PAGE_SHIFT 12
859
860 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
861 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
862 * Purpose : copy a block to user memory from kernel memory
863 * Params : to - user memory
864 * : from - kernel memory
865 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
866 sub r2, r2, ip
867 b .Lc2u_dest_aligned
868
869 -ENTRY(__copy_to_user)
870 +ENTRY(___copy_to_user)
871 stmfd sp!, {r2, r4 - r7, lr}
872 cmp r2, #4
873 blt .Lc2u_not_enough
874 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
875 ldrgtb r3, [r1], #0
876 USER( T(strgtb) r3, [r0], #1) @ May fault
877 b .Lc2u_finished
878 -ENDPROC(__copy_to_user)
879 +ENDPROC(___copy_to_user)
880
881 .pushsection .fixup,"ax"
882 .align 0
883 9001: ldmfd sp!, {r0, r4 - r7, pc}
884 .popsection
885
886 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
887 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
888 * Purpose : copy a block from user memory to kernel memory
889 * Params : to - kernel memory
890 * : from - user memory
891 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
892 sub r2, r2, ip
893 b .Lcfu_dest_aligned
894
895 -ENTRY(__copy_from_user)
896 +ENTRY(___copy_from_user)
897 stmfd sp!, {r0, r2, r4 - r7, lr}
898 cmp r2, #4
899 blt .Lcfu_not_enough
900 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
901 USER( T(ldrgtb) r3, [r1], #1) @ May fault
902 strgtb r3, [r0], #1
903 b .Lcfu_finished
904 -ENDPROC(__copy_from_user)
905 +ENDPROC(___copy_from_user)
906
907 .pushsection .fixup,"ax"
908 .align 0
909 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
910 index 8b9b136..70d5100 100644
911 --- a/arch/arm/lib/uaccess_with_memcpy.c
912 +++ b/arch/arm/lib/uaccess_with_memcpy.c
913 @@ -103,7 +103,7 @@ out:
914 }
915
916 unsigned long
917 -__copy_to_user(void __user *to, const void *from, unsigned long n)
918 +___copy_to_user(void __user *to, const void *from, unsigned long n)
919 {
920 /*
921 * This test is stubbed out of the main function above to keep
922 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
923 index 2b2d51c..0127490 100644
924 --- a/arch/arm/mach-ux500/mbox-db5500.c
925 +++ b/arch/arm/mach-ux500/mbox-db5500.c
926 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
927 return sprintf(buf, "0x%X\n", mbox_value);
928 }
929
930 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
931 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
932
933 static int mbox_show(struct seq_file *s, void *data)
934 {
935 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
936 index 3b5ea68..42fc9af 100644
937 --- a/arch/arm/mm/fault.c
938 +++ b/arch/arm/mm/fault.c
939 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
940 }
941 #endif
942
943 +#ifdef CONFIG_PAX_PAGEEXEC
944 + if (fsr & FSR_LNX_PF) {
945 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
946 + do_group_exit(SIGKILL);
947 + }
948 +#endif
949 +
950 tsk->thread.address = addr;
951 tsk->thread.error_code = fsr;
952 tsk->thread.trap_no = 14;
953 @@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
954 }
955 #endif /* CONFIG_MMU */
956
957 +#ifdef CONFIG_PAX_PAGEEXEC
958 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
959 +{
960 + long i;
961 +
962 + printk(KERN_ERR "PAX: bytes at PC: ");
963 + for (i = 0; i < 20; i++) {
964 + unsigned char c;
965 + if (get_user(c, (__force unsigned char __user *)pc+i))
966 + printk(KERN_CONT "?? ");
967 + else
968 + printk(KERN_CONT "%02x ", c);
969 + }
970 + printk("\n");
971 +
972 + printk(KERN_ERR "PAX: bytes at SP-4: ");
973 + for (i = -1; i < 20; i++) {
974 + unsigned long c;
975 + if (get_user(c, (__force unsigned long __user *)sp+i))
976 + printk(KERN_CONT "???????? ");
977 + else
978 + printk(KERN_CONT "%08lx ", c);
979 + }
980 + printk("\n");
981 +}
982 +#endif
983 +
984 /*
985 * First Level Translation Fault Handler
986 *
987 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
988 index 74be05f..f605b8c 100644
989 --- a/arch/arm/mm/mmap.c
990 +++ b/arch/arm/mm/mmap.c
991 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
992 if (len > TASK_SIZE)
993 return -ENOMEM;
994
995 +#ifdef CONFIG_PAX_RANDMMAP
996 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
997 +#endif
998 +
999 if (addr) {
1000 if (do_align)
1001 addr = COLOUR_ALIGN(addr, pgoff);
1002 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1003 addr = PAGE_ALIGN(addr);
1004
1005 vma = find_vma(mm, addr);
1006 - if (TASK_SIZE - len >= addr &&
1007 - (!vma || addr + len <= vma->vm_start))
1008 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1009 return addr;
1010 }
1011 if (len > mm->cached_hole_size) {
1012 - start_addr = addr = mm->free_area_cache;
1013 + start_addr = addr = mm->free_area_cache;
1014 } else {
1015 - start_addr = addr = TASK_UNMAPPED_BASE;
1016 - mm->cached_hole_size = 0;
1017 + start_addr = addr = mm->mmap_base;
1018 + mm->cached_hole_size = 0;
1019 }
1020 /* 8 bits of randomness in 20 address space bits */
1021 if ((current->flags & PF_RANDOMIZE) &&
1022 @@ -100,14 +103,14 @@ full_search:
1023 * Start a new search - just in case we missed
1024 * some holes.
1025 */
1026 - if (start_addr != TASK_UNMAPPED_BASE) {
1027 - start_addr = addr = TASK_UNMAPPED_BASE;
1028 + if (start_addr != mm->mmap_base) {
1029 + start_addr = addr = mm->mmap_base;
1030 mm->cached_hole_size = 0;
1031 goto full_search;
1032 }
1033 return -ENOMEM;
1034 }
1035 - if (!vma || addr + len <= vma->vm_start) {
1036 + if (check_heap_stack_gap(vma, addr, len)) {
1037 /*
1038 * Remember the place where we stopped the search:
1039 */
1040 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1041 index 3b3159b..425ea94 100644
1042 --- a/arch/avr32/include/asm/elf.h
1043 +++ b/arch/avr32/include/asm/elf.h
1044 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1045 the loader. We need to make sure that it is out of the way of the program
1046 that it will "exec", and that there is sufficient room for the brk. */
1047
1048 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1049 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1050
1051 +#ifdef CONFIG_PAX_ASLR
1052 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1053 +
1054 +#define PAX_DELTA_MMAP_LEN 15
1055 +#define PAX_DELTA_STACK_LEN 15
1056 +#endif
1057
1058 /* This yields a mask that user programs can use to figure out what
1059 instruction set this CPU supports. This could be done in user space,
1060 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1061 index b7f5c68..556135c 100644
1062 --- a/arch/avr32/include/asm/kmap_types.h
1063 +++ b/arch/avr32/include/asm/kmap_types.h
1064 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1065 D(11) KM_IRQ1,
1066 D(12) KM_SOFTIRQ0,
1067 D(13) KM_SOFTIRQ1,
1068 -D(14) KM_TYPE_NR
1069 +D(14) KM_CLEARPAGE,
1070 +D(15) KM_TYPE_NR
1071 };
1072
1073 #undef D
1074 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1075 index f7040a1..db9f300 100644
1076 --- a/arch/avr32/mm/fault.c
1077 +++ b/arch/avr32/mm/fault.c
1078 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1079
1080 int exception_trace = 1;
1081
1082 +#ifdef CONFIG_PAX_PAGEEXEC
1083 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1084 +{
1085 + unsigned long i;
1086 +
1087 + printk(KERN_ERR "PAX: bytes at PC: ");
1088 + for (i = 0; i < 20; i++) {
1089 + unsigned char c;
1090 + if (get_user(c, (unsigned char *)pc+i))
1091 + printk(KERN_CONT "???????? ");
1092 + else
1093 + printk(KERN_CONT "%02x ", c);
1094 + }
1095 + printk("\n");
1096 +}
1097 +#endif
1098 +
1099 /*
1100 * This routine handles page faults. It determines the address and the
1101 * problem, and then passes it off to one of the appropriate routines.
1102 @@ -156,6 +173,16 @@ bad_area:
1103 up_read(&mm->mmap_sem);
1104
1105 if (user_mode(regs)) {
1106 +
1107 +#ifdef CONFIG_PAX_PAGEEXEC
1108 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1109 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1110 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1111 + do_group_exit(SIGKILL);
1112 + }
1113 + }
1114 +#endif
1115 +
1116 if (exception_trace && printk_ratelimit())
1117 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1118 "sp %08lx ecr %lu\n",
1119 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1120 index f8e16b2..c73ff79 100644
1121 --- a/arch/frv/include/asm/kmap_types.h
1122 +++ b/arch/frv/include/asm/kmap_types.h
1123 @@ -23,6 +23,7 @@ enum km_type {
1124 KM_IRQ1,
1125 KM_SOFTIRQ0,
1126 KM_SOFTIRQ1,
1127 + KM_CLEARPAGE,
1128 KM_TYPE_NR
1129 };
1130
1131 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1132 index 385fd30..6c3d97e 100644
1133 --- a/arch/frv/mm/elf-fdpic.c
1134 +++ b/arch/frv/mm/elf-fdpic.c
1135 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1136 if (addr) {
1137 addr = PAGE_ALIGN(addr);
1138 vma = find_vma(current->mm, addr);
1139 - if (TASK_SIZE - len >= addr &&
1140 - (!vma || addr + len <= vma->vm_start))
1141 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1142 goto success;
1143 }
1144
1145 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1146 for (; vma; vma = vma->vm_next) {
1147 if (addr > limit)
1148 break;
1149 - if (addr + len <= vma->vm_start)
1150 + if (check_heap_stack_gap(vma, addr, len))
1151 goto success;
1152 addr = vma->vm_end;
1153 }
1154 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1155 for (; vma; vma = vma->vm_next) {
1156 if (addr > limit)
1157 break;
1158 - if (addr + len <= vma->vm_start)
1159 + if (check_heap_stack_gap(vma, addr, len))
1160 goto success;
1161 addr = vma->vm_end;
1162 }
1163 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1164 index b5298eb..67c6e62 100644
1165 --- a/arch/ia64/include/asm/elf.h
1166 +++ b/arch/ia64/include/asm/elf.h
1167 @@ -42,6 +42,13 @@
1168 */
1169 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1170
1171 +#ifdef CONFIG_PAX_ASLR
1172 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1173 +
1174 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1175 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1176 +#endif
1177 +
1178 #define PT_IA_64_UNWIND 0x70000001
1179
1180 /* IA-64 relocations: */
1181 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1182 index 1a97af3..7529d31 100644
1183 --- a/arch/ia64/include/asm/pgtable.h
1184 +++ b/arch/ia64/include/asm/pgtable.h
1185 @@ -12,7 +12,7 @@
1186 * David Mosberger-Tang <davidm@hpl.hp.com>
1187 */
1188
1189 -
1190 +#include <linux/const.h>
1191 #include <asm/mman.h>
1192 #include <asm/page.h>
1193 #include <asm/processor.h>
1194 @@ -143,6 +143,17 @@
1195 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1196 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1197 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1198 +
1199 +#ifdef CONFIG_PAX_PAGEEXEC
1200 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1201 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1202 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1203 +#else
1204 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1205 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1206 +# define PAGE_COPY_NOEXEC PAGE_COPY
1207 +#endif
1208 +
1209 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1210 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1211 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1212 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1213 index b77768d..e0795eb 100644
1214 --- a/arch/ia64/include/asm/spinlock.h
1215 +++ b/arch/ia64/include/asm/spinlock.h
1216 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1217 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1218
1219 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1220 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1221 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1222 }
1223
1224 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1225 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1226 index 449c8c0..432a3d2 100644
1227 --- a/arch/ia64/include/asm/uaccess.h
1228 +++ b/arch/ia64/include/asm/uaccess.h
1229 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1230 const void *__cu_from = (from); \
1231 long __cu_len = (n); \
1232 \
1233 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1234 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1235 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1236 __cu_len; \
1237 })
1238 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1239 long __cu_len = (n); \
1240 \
1241 __chk_user_ptr(__cu_from); \
1242 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1243 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1244 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1245 __cu_len; \
1246 })
1247 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1248 index 24603be..948052d 100644
1249 --- a/arch/ia64/kernel/module.c
1250 +++ b/arch/ia64/kernel/module.c
1251 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1252 void
1253 module_free (struct module *mod, void *module_region)
1254 {
1255 - if (mod && mod->arch.init_unw_table &&
1256 - module_region == mod->module_init) {
1257 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1258 unw_remove_unwind_table(mod->arch.init_unw_table);
1259 mod->arch.init_unw_table = NULL;
1260 }
1261 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1262 }
1263
1264 static inline int
1265 +in_init_rx (const struct module *mod, uint64_t addr)
1266 +{
1267 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1268 +}
1269 +
1270 +static inline int
1271 +in_init_rw (const struct module *mod, uint64_t addr)
1272 +{
1273 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1274 +}
1275 +
1276 +static inline int
1277 in_init (const struct module *mod, uint64_t addr)
1278 {
1279 - return addr - (uint64_t) mod->module_init < mod->init_size;
1280 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1281 +}
1282 +
1283 +static inline int
1284 +in_core_rx (const struct module *mod, uint64_t addr)
1285 +{
1286 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1287 +}
1288 +
1289 +static inline int
1290 +in_core_rw (const struct module *mod, uint64_t addr)
1291 +{
1292 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1293 }
1294
1295 static inline int
1296 in_core (const struct module *mod, uint64_t addr)
1297 {
1298 - return addr - (uint64_t) mod->module_core < mod->core_size;
1299 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1300 }
1301
1302 static inline int
1303 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1304 break;
1305
1306 case RV_BDREL:
1307 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1308 + if (in_init_rx(mod, val))
1309 + val -= (uint64_t) mod->module_init_rx;
1310 + else if (in_init_rw(mod, val))
1311 + val -= (uint64_t) mod->module_init_rw;
1312 + else if (in_core_rx(mod, val))
1313 + val -= (uint64_t) mod->module_core_rx;
1314 + else if (in_core_rw(mod, val))
1315 + val -= (uint64_t) mod->module_core_rw;
1316 break;
1317
1318 case RV_LTV:
1319 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1320 * addresses have been selected...
1321 */
1322 uint64_t gp;
1323 - if (mod->core_size > MAX_LTOFF)
1324 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1325 /*
1326 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1327 * at the end of the module.
1328 */
1329 - gp = mod->core_size - MAX_LTOFF / 2;
1330 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1331 else
1332 - gp = mod->core_size / 2;
1333 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1334 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1335 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1336 mod->arch.gp = gp;
1337 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1338 }
1339 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1340 index 609d500..7dde2a8 100644
1341 --- a/arch/ia64/kernel/sys_ia64.c
1342 +++ b/arch/ia64/kernel/sys_ia64.c
1343 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1344 if (REGION_NUMBER(addr) == RGN_HPAGE)
1345 addr = 0;
1346 #endif
1347 +
1348 +#ifdef CONFIG_PAX_RANDMMAP
1349 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1350 + addr = mm->free_area_cache;
1351 + else
1352 +#endif
1353 +
1354 if (!addr)
1355 addr = mm->free_area_cache;
1356
1357 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1358 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1359 /* At this point: (!vma || addr < vma->vm_end). */
1360 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1361 - if (start_addr != TASK_UNMAPPED_BASE) {
1362 + if (start_addr != mm->mmap_base) {
1363 /* Start a new search --- just in case we missed some holes. */
1364 - addr = TASK_UNMAPPED_BASE;
1365 + addr = mm->mmap_base;
1366 goto full_search;
1367 }
1368 return -ENOMEM;
1369 }
1370 - if (!vma || addr + len <= vma->vm_start) {
1371 + if (check_heap_stack_gap(vma, addr, len)) {
1372 /* Remember the address where we stopped this search: */
1373 mm->free_area_cache = addr + len;
1374 return addr;
1375 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1376 index 53c0ba0..2accdde 100644
1377 --- a/arch/ia64/kernel/vmlinux.lds.S
1378 +++ b/arch/ia64/kernel/vmlinux.lds.S
1379 @@ -199,7 +199,7 @@ SECTIONS {
1380 /* Per-cpu data: */
1381 . = ALIGN(PERCPU_PAGE_SIZE);
1382 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1383 - __phys_per_cpu_start = __per_cpu_load;
1384 + __phys_per_cpu_start = per_cpu_load;
1385 /*
1386 * ensure percpu data fits
1387 * into percpu page size
1388 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1389 index 20b3593..1ce77f0 100644
1390 --- a/arch/ia64/mm/fault.c
1391 +++ b/arch/ia64/mm/fault.c
1392 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1393 return pte_present(pte);
1394 }
1395
1396 +#ifdef CONFIG_PAX_PAGEEXEC
1397 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1398 +{
1399 + unsigned long i;
1400 +
1401 + printk(KERN_ERR "PAX: bytes at PC: ");
1402 + for (i = 0; i < 8; i++) {
1403 + unsigned int c;
1404 + if (get_user(c, (unsigned int *)pc+i))
1405 + printk(KERN_CONT "???????? ");
1406 + else
1407 + printk(KERN_CONT "%08x ", c);
1408 + }
1409 + printk("\n");
1410 +}
1411 +#endif
1412 +
1413 void __kprobes
1414 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1415 {
1416 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1417 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1418 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1419
1420 - if ((vma->vm_flags & mask) != mask)
1421 + if ((vma->vm_flags & mask) != mask) {
1422 +
1423 +#ifdef CONFIG_PAX_PAGEEXEC
1424 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1425 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1426 + goto bad_area;
1427 +
1428 + up_read(&mm->mmap_sem);
1429 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1430 + do_group_exit(SIGKILL);
1431 + }
1432 +#endif
1433 +
1434 goto bad_area;
1435
1436 + }
1437 +
1438 /*
1439 * If for any reason at all we couldn't handle the fault, make
1440 * sure we exit gracefully rather than endlessly redo the
1441 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1442 index 5ca674b..e0e1b70 100644
1443 --- a/arch/ia64/mm/hugetlbpage.c
1444 +++ b/arch/ia64/mm/hugetlbpage.c
1445 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1446 /* At this point: (!vmm || addr < vmm->vm_end). */
1447 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1448 return -ENOMEM;
1449 - if (!vmm || (addr + len) <= vmm->vm_start)
1450 + if (check_heap_stack_gap(vmm, addr, len))
1451 return addr;
1452 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1453 }
1454 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1455 index 00cb0e2..2ad8024 100644
1456 --- a/arch/ia64/mm/init.c
1457 +++ b/arch/ia64/mm/init.c
1458 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1459 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1460 vma->vm_end = vma->vm_start + PAGE_SIZE;
1461 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1462 +
1463 +#ifdef CONFIG_PAX_PAGEEXEC
1464 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1465 + vma->vm_flags &= ~VM_EXEC;
1466 +
1467 +#ifdef CONFIG_PAX_MPROTECT
1468 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1469 + vma->vm_flags &= ~VM_MAYEXEC;
1470 +#endif
1471 +
1472 + }
1473 +#endif
1474 +
1475 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1476 down_write(&current->mm->mmap_sem);
1477 if (insert_vm_struct(current->mm, vma)) {
1478 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1479 index 82abd15..d95ae5d 100644
1480 --- a/arch/m32r/lib/usercopy.c
1481 +++ b/arch/m32r/lib/usercopy.c
1482 @@ -14,6 +14,9 @@
1483 unsigned long
1484 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1485 {
1486 + if ((long)n < 0)
1487 + return n;
1488 +
1489 prefetch(from);
1490 if (access_ok(VERIFY_WRITE, to, n))
1491 __copy_user(to,from,n);
1492 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1493 unsigned long
1494 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1495 {
1496 + if ((long)n < 0)
1497 + return n;
1498 +
1499 prefetchw(to);
1500 if (access_ok(VERIFY_READ, from, n))
1501 __copy_user_zeroing(to,from,n);
1502 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1503 index 455c0ac..ad65fbe 100644
1504 --- a/arch/mips/include/asm/elf.h
1505 +++ b/arch/mips/include/asm/elf.h
1506 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1507 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1508 #endif
1509
1510 +#ifdef CONFIG_PAX_ASLR
1511 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1512 +
1513 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1514 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1515 +#endif
1516 +
1517 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1518 struct linux_binprm;
1519 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1520 int uses_interp);
1521
1522 -struct mm_struct;
1523 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1524 -#define arch_randomize_brk arch_randomize_brk
1525 -
1526 #endif /* _ASM_ELF_H */
1527 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1528 index e59cd1a..8e329d6 100644
1529 --- a/arch/mips/include/asm/page.h
1530 +++ b/arch/mips/include/asm/page.h
1531 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1532 #ifdef CONFIG_CPU_MIPS32
1533 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1534 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1535 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1536 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1537 #else
1538 typedef struct { unsigned long long pte; } pte_t;
1539 #define pte_val(x) ((x).pte)
1540 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1541 index 6018c80..7c37203 100644
1542 --- a/arch/mips/include/asm/system.h
1543 +++ b/arch/mips/include/asm/system.h
1544 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1545 */
1546 #define __ARCH_WANT_UNLOCKED_CTXSW
1547
1548 -extern unsigned long arch_align_stack(unsigned long sp);
1549 +#define arch_align_stack(x) ((x) & ~0xfUL)
1550
1551 #endif /* _ASM_SYSTEM_H */
1552 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1553 index 9fdd8bc..4bd7f1a 100644
1554 --- a/arch/mips/kernel/binfmt_elfn32.c
1555 +++ b/arch/mips/kernel/binfmt_elfn32.c
1556 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1557 #undef ELF_ET_DYN_BASE
1558 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1559
1560 +#ifdef CONFIG_PAX_ASLR
1561 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1562 +
1563 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1564 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1565 +#endif
1566 +
1567 #include <asm/processor.h>
1568 #include <linux/module.h>
1569 #include <linux/elfcore.h>
1570 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1571 index ff44823..97f8906 100644
1572 --- a/arch/mips/kernel/binfmt_elfo32.c
1573 +++ b/arch/mips/kernel/binfmt_elfo32.c
1574 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1575 #undef ELF_ET_DYN_BASE
1576 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1577
1578 +#ifdef CONFIG_PAX_ASLR
1579 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1580 +
1581 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1582 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1583 +#endif
1584 +
1585 #include <asm/processor.h>
1586
1587 /*
1588 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1589 index b30cb25..454c0a9 100644
1590 --- a/arch/mips/kernel/process.c
1591 +++ b/arch/mips/kernel/process.c
1592 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1593 out:
1594 return pc;
1595 }
1596 -
1597 -/*
1598 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1599 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1600 - */
1601 -unsigned long arch_align_stack(unsigned long sp)
1602 -{
1603 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1604 - sp -= get_random_int() & ~PAGE_MASK;
1605 -
1606 - return sp & ALMASK;
1607 -}
1608 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1609 index 937cf33..adb39bb 100644
1610 --- a/arch/mips/mm/fault.c
1611 +++ b/arch/mips/mm/fault.c
1612 @@ -28,6 +28,23 @@
1613 #include <asm/highmem.h> /* For VMALLOC_END */
1614 #include <linux/kdebug.h>
1615
1616 +#ifdef CONFIG_PAX_PAGEEXEC
1617 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1618 +{
1619 + unsigned long i;
1620 +
1621 + printk(KERN_ERR "PAX: bytes at PC: ");
1622 + for (i = 0; i < 5; i++) {
1623 + unsigned int c;
1624 + if (get_user(c, (unsigned int *)pc+i))
1625 + printk(KERN_CONT "???????? ");
1626 + else
1627 + printk(KERN_CONT "%08x ", c);
1628 + }
1629 + printk("\n");
1630 +}
1631 +#endif
1632 +
1633 /*
1634 * This routine handles page faults. It determines the address,
1635 * and the problem, and then passes it off to one of the appropriate
1636 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1637 index 302d779..7d35bf8 100644
1638 --- a/arch/mips/mm/mmap.c
1639 +++ b/arch/mips/mm/mmap.c
1640 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1641 do_color_align = 1;
1642
1643 /* requesting a specific address */
1644 +
1645 +#ifdef CONFIG_PAX_RANDMMAP
1646 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1647 +#endif
1648 +
1649 if (addr) {
1650 if (do_color_align)
1651 addr = COLOUR_ALIGN(addr, pgoff);
1652 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1653 addr = PAGE_ALIGN(addr);
1654
1655 vma = find_vma(mm, addr);
1656 - if (TASK_SIZE - len >= addr &&
1657 - (!vma || addr + len <= vma->vm_start))
1658 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1659 return addr;
1660 }
1661
1662 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1663 /* At this point: (!vma || addr < vma->vm_end). */
1664 if (TASK_SIZE - len < addr)
1665 return -ENOMEM;
1666 - if (!vma || addr + len <= vma->vm_start)
1667 + if (check_heap_stack_gap(vmm, addr, len))
1668 return addr;
1669 addr = vma->vm_end;
1670 if (do_color_align)
1671 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1672 /* make sure it can fit in the remaining address space */
1673 if (likely(addr > len)) {
1674 vma = find_vma(mm, addr - len);
1675 - if (!vma || addr <= vma->vm_start) {
1676 + if (check_heap_stack_gap(vmm, addr - len, len))
1677 /* cache the address as a hint for next time */
1678 return mm->free_area_cache = addr - len;
1679 }
1680 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1681 * return with success:
1682 */
1683 vma = find_vma(mm, addr);
1684 - if (likely(!vma || addr + len <= vma->vm_start)) {
1685 + if (check_heap_stack_gap(vmm, addr, len)) {
1686 /* cache the address as a hint for next time */
1687 return mm->free_area_cache = addr;
1688 }
1689 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1690 mm->unmap_area = arch_unmap_area_topdown;
1691 }
1692 }
1693 -
1694 -static inline unsigned long brk_rnd(void)
1695 -{
1696 - unsigned long rnd = get_random_int();
1697 -
1698 - rnd = rnd << PAGE_SHIFT;
1699 - /* 8MB for 32bit, 256MB for 64bit */
1700 - if (TASK_IS_32BIT_ADDR)
1701 - rnd = rnd & 0x7ffffful;
1702 - else
1703 - rnd = rnd & 0xffffffful;
1704 -
1705 - return rnd;
1706 -}
1707 -
1708 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1709 -{
1710 - unsigned long base = mm->brk;
1711 - unsigned long ret;
1712 -
1713 - ret = PAGE_ALIGN(base + brk_rnd());
1714 -
1715 - if (ret < mm->brk)
1716 - return mm->brk;
1717 -
1718 - return ret;
1719 -}
1720 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1721 index 19f6cb1..6c78cf2 100644
1722 --- a/arch/parisc/include/asm/elf.h
1723 +++ b/arch/parisc/include/asm/elf.h
1724 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1725
1726 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1727
1728 +#ifdef CONFIG_PAX_ASLR
1729 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1730 +
1731 +#define PAX_DELTA_MMAP_LEN 16
1732 +#define PAX_DELTA_STACK_LEN 16
1733 +#endif
1734 +
1735 /* This yields a mask that user programs can use to figure out what
1736 instruction set this CPU supports. This could be done in user space,
1737 but it's not easy, and we've already done it here. */
1738 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1739 index 22dadeb..f6c2be4 100644
1740 --- a/arch/parisc/include/asm/pgtable.h
1741 +++ b/arch/parisc/include/asm/pgtable.h
1742 @@ -210,6 +210,17 @@ struct vm_area_struct;
1743 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1744 #define PAGE_COPY PAGE_EXECREAD
1745 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1746 +
1747 +#ifdef CONFIG_PAX_PAGEEXEC
1748 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1749 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1750 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1751 +#else
1752 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1753 +# define PAGE_COPY_NOEXEC PAGE_COPY
1754 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1755 +#endif
1756 +
1757 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1758 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1759 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1760 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1761 index 5e34ccf..672bc9c 100644
1762 --- a/arch/parisc/kernel/module.c
1763 +++ b/arch/parisc/kernel/module.c
1764 @@ -98,16 +98,38 @@
1765
1766 /* three functions to determine where in the module core
1767 * or init pieces the location is */
1768 +static inline int in_init_rx(struct module *me, void *loc)
1769 +{
1770 + return (loc >= me->module_init_rx &&
1771 + loc < (me->module_init_rx + me->init_size_rx));
1772 +}
1773 +
1774 +static inline int in_init_rw(struct module *me, void *loc)
1775 +{
1776 + return (loc >= me->module_init_rw &&
1777 + loc < (me->module_init_rw + me->init_size_rw));
1778 +}
1779 +
1780 static inline int in_init(struct module *me, void *loc)
1781 {
1782 - return (loc >= me->module_init &&
1783 - loc <= (me->module_init + me->init_size));
1784 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1785 +}
1786 +
1787 +static inline int in_core_rx(struct module *me, void *loc)
1788 +{
1789 + return (loc >= me->module_core_rx &&
1790 + loc < (me->module_core_rx + me->core_size_rx));
1791 +}
1792 +
1793 +static inline int in_core_rw(struct module *me, void *loc)
1794 +{
1795 + return (loc >= me->module_core_rw &&
1796 + loc < (me->module_core_rw + me->core_size_rw));
1797 }
1798
1799 static inline int in_core(struct module *me, void *loc)
1800 {
1801 - return (loc >= me->module_core &&
1802 - loc <= (me->module_core + me->core_size));
1803 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1804 }
1805
1806 static inline int in_local(struct module *me, void *loc)
1807 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1808 }
1809
1810 /* align things a bit */
1811 - me->core_size = ALIGN(me->core_size, 16);
1812 - me->arch.got_offset = me->core_size;
1813 - me->core_size += gots * sizeof(struct got_entry);
1814 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1815 + me->arch.got_offset = me->core_size_rw;
1816 + me->core_size_rw += gots * sizeof(struct got_entry);
1817
1818 - me->core_size = ALIGN(me->core_size, 16);
1819 - me->arch.fdesc_offset = me->core_size;
1820 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1821 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1822 + me->arch.fdesc_offset = me->core_size_rw;
1823 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1824
1825 me->arch.got_max = gots;
1826 me->arch.fdesc_max = fdescs;
1827 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1828
1829 BUG_ON(value == 0);
1830
1831 - got = me->module_core + me->arch.got_offset;
1832 + got = me->module_core_rw + me->arch.got_offset;
1833 for (i = 0; got[i].addr; i++)
1834 if (got[i].addr == value)
1835 goto out;
1836 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1837 #ifdef CONFIG_64BIT
1838 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1839 {
1840 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1841 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1842
1843 if (!value) {
1844 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1845 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1846
1847 /* Create new one */
1848 fdesc->addr = value;
1849 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1850 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1851 return (Elf_Addr)fdesc;
1852 }
1853 #endif /* CONFIG_64BIT */
1854 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1855
1856 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1857 end = table + sechdrs[me->arch.unwind_section].sh_size;
1858 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1859 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1860
1861 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1862 me->arch.unwind_section, table, end, gp);
1863 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1864 index c9b9322..02d8940 100644
1865 --- a/arch/parisc/kernel/sys_parisc.c
1866 +++ b/arch/parisc/kernel/sys_parisc.c
1867 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1868 /* At this point: (!vma || addr < vma->vm_end). */
1869 if (TASK_SIZE - len < addr)
1870 return -ENOMEM;
1871 - if (!vma || addr + len <= vma->vm_start)
1872 + if (check_heap_stack_gap(vma, addr, len))
1873 return addr;
1874 addr = vma->vm_end;
1875 }
1876 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1877 /* At this point: (!vma || addr < vma->vm_end). */
1878 if (TASK_SIZE - len < addr)
1879 return -ENOMEM;
1880 - if (!vma || addr + len <= vma->vm_start)
1881 + if (check_heap_stack_gap(vma, addr, len))
1882 return addr;
1883 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1884 if (addr < vma->vm_end) /* handle wraparound */
1885 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1886 if (flags & MAP_FIXED)
1887 return addr;
1888 if (!addr)
1889 - addr = TASK_UNMAPPED_BASE;
1890 + addr = current->mm->mmap_base;
1891
1892 if (filp) {
1893 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1894 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1895 index f19e660..414fe24 100644
1896 --- a/arch/parisc/kernel/traps.c
1897 +++ b/arch/parisc/kernel/traps.c
1898 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1899
1900 down_read(&current->mm->mmap_sem);
1901 vma = find_vma(current->mm,regs->iaoq[0]);
1902 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1903 - && (vma->vm_flags & VM_EXEC)) {
1904 -
1905 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1906 fault_address = regs->iaoq[0];
1907 fault_space = regs->iasq[0];
1908
1909 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1910 index 18162ce..94de376 100644
1911 --- a/arch/parisc/mm/fault.c
1912 +++ b/arch/parisc/mm/fault.c
1913 @@ -15,6 +15,7 @@
1914 #include <linux/sched.h>
1915 #include <linux/interrupt.h>
1916 #include <linux/module.h>
1917 +#include <linux/unistd.h>
1918
1919 #include <asm/uaccess.h>
1920 #include <asm/traps.h>
1921 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1922 static unsigned long
1923 parisc_acctyp(unsigned long code, unsigned int inst)
1924 {
1925 - if (code == 6 || code == 16)
1926 + if (code == 6 || code == 7 || code == 16)
1927 return VM_EXEC;
1928
1929 switch (inst & 0xf0000000) {
1930 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1931 }
1932 #endif
1933
1934 +#ifdef CONFIG_PAX_PAGEEXEC
1935 +/*
1936 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1937 + *
1938 + * returns 1 when task should be killed
1939 + * 2 when rt_sigreturn trampoline was detected
1940 + * 3 when unpatched PLT trampoline was detected
1941 + */
1942 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1943 +{
1944 +
1945 +#ifdef CONFIG_PAX_EMUPLT
1946 + int err;
1947 +
1948 + do { /* PaX: unpatched PLT emulation */
1949 + unsigned int bl, depwi;
1950 +
1951 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1952 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1953 +
1954 + if (err)
1955 + break;
1956 +
1957 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1958 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1959 +
1960 + err = get_user(ldw, (unsigned int *)addr);
1961 + err |= get_user(bv, (unsigned int *)(addr+4));
1962 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1963 +
1964 + if (err)
1965 + break;
1966 +
1967 + if (ldw == 0x0E801096U &&
1968 + bv == 0xEAC0C000U &&
1969 + ldw2 == 0x0E881095U)
1970 + {
1971 + unsigned int resolver, map;
1972 +
1973 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1974 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1975 + if (err)
1976 + break;
1977 +
1978 + regs->gr[20] = instruction_pointer(regs)+8;
1979 + regs->gr[21] = map;
1980 + regs->gr[22] = resolver;
1981 + regs->iaoq[0] = resolver | 3UL;
1982 + regs->iaoq[1] = regs->iaoq[0] + 4;
1983 + return 3;
1984 + }
1985 + }
1986 + } while (0);
1987 +#endif
1988 +
1989 +#ifdef CONFIG_PAX_EMUTRAMP
1990 +
1991 +#ifndef CONFIG_PAX_EMUSIGRT
1992 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1993 + return 1;
1994 +#endif
1995 +
1996 + do { /* PaX: rt_sigreturn emulation */
1997 + unsigned int ldi1, ldi2, bel, nop;
1998 +
1999 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2000 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2001 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2002 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2003 +
2004 + if (err)
2005 + break;
2006 +
2007 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2008 + ldi2 == 0x3414015AU &&
2009 + bel == 0xE4008200U &&
2010 + nop == 0x08000240U)
2011 + {
2012 + regs->gr[25] = (ldi1 & 2) >> 1;
2013 + regs->gr[20] = __NR_rt_sigreturn;
2014 + regs->gr[31] = regs->iaoq[1] + 16;
2015 + regs->sr[0] = regs->iasq[1];
2016 + regs->iaoq[0] = 0x100UL;
2017 + regs->iaoq[1] = regs->iaoq[0] + 4;
2018 + regs->iasq[0] = regs->sr[2];
2019 + regs->iasq[1] = regs->sr[2];
2020 + return 2;
2021 + }
2022 + } while (0);
2023 +#endif
2024 +
2025 + return 1;
2026 +}
2027 +
2028 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2029 +{
2030 + unsigned long i;
2031 +
2032 + printk(KERN_ERR "PAX: bytes at PC: ");
2033 + for (i = 0; i < 5; i++) {
2034 + unsigned int c;
2035 + if (get_user(c, (unsigned int *)pc+i))
2036 + printk(KERN_CONT "???????? ");
2037 + else
2038 + printk(KERN_CONT "%08x ", c);
2039 + }
2040 + printk("\n");
2041 +}
2042 +#endif
2043 +
2044 int fixup_exception(struct pt_regs *regs)
2045 {
2046 const struct exception_table_entry *fix;
2047 @@ -192,8 +303,33 @@ good_area:
2048
2049 acc_type = parisc_acctyp(code,regs->iir);
2050
2051 - if ((vma->vm_flags & acc_type) != acc_type)
2052 + if ((vma->vm_flags & acc_type) != acc_type) {
2053 +
2054 +#ifdef CONFIG_PAX_PAGEEXEC
2055 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2056 + (address & ~3UL) == instruction_pointer(regs))
2057 + {
2058 + up_read(&mm->mmap_sem);
2059 + switch (pax_handle_fetch_fault(regs)) {
2060 +
2061 +#ifdef CONFIG_PAX_EMUPLT
2062 + case 3:
2063 + return;
2064 +#endif
2065 +
2066 +#ifdef CONFIG_PAX_EMUTRAMP
2067 + case 2:
2068 + return;
2069 +#endif
2070 +
2071 + }
2072 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2073 + do_group_exit(SIGKILL);
2074 + }
2075 +#endif
2076 +
2077 goto bad_area;
2078 + }
2079
2080 /*
2081 * If for any reason at all we couldn't handle the fault, make
2082 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2083 index 3bf9cca..e7457d0 100644
2084 --- a/arch/powerpc/include/asm/elf.h
2085 +++ b/arch/powerpc/include/asm/elf.h
2086 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2087 the loader. We need to make sure that it is out of the way of the program
2088 that it will "exec", and that there is sufficient room for the brk. */
2089
2090 -extern unsigned long randomize_et_dyn(unsigned long base);
2091 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2092 +#define ELF_ET_DYN_BASE (0x20000000)
2093 +
2094 +#ifdef CONFIG_PAX_ASLR
2095 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2096 +
2097 +#ifdef __powerpc64__
2098 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2099 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2100 +#else
2101 +#define PAX_DELTA_MMAP_LEN 15
2102 +#define PAX_DELTA_STACK_LEN 15
2103 +#endif
2104 +#endif
2105
2106 /*
2107 * Our registers are always unsigned longs, whether we're a 32 bit
2108 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2109 (0x7ff >> (PAGE_SHIFT - 12)) : \
2110 (0x3ffff >> (PAGE_SHIFT - 12)))
2111
2112 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2113 -#define arch_randomize_brk arch_randomize_brk
2114 -
2115 #endif /* __KERNEL__ */
2116
2117 /*
2118 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2119 index bca8fdc..61e9580 100644
2120 --- a/arch/powerpc/include/asm/kmap_types.h
2121 +++ b/arch/powerpc/include/asm/kmap_types.h
2122 @@ -27,6 +27,7 @@ enum km_type {
2123 KM_PPC_SYNC_PAGE,
2124 KM_PPC_SYNC_ICACHE,
2125 KM_KDB,
2126 + KM_CLEARPAGE,
2127 KM_TYPE_NR
2128 };
2129
2130 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2131 index d4a7f64..451de1c 100644
2132 --- a/arch/powerpc/include/asm/mman.h
2133 +++ b/arch/powerpc/include/asm/mman.h
2134 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2135 }
2136 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2137
2138 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2139 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2140 {
2141 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2142 }
2143 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2144 index 2cd664e..1d2e8a7 100644
2145 --- a/arch/powerpc/include/asm/page.h
2146 +++ b/arch/powerpc/include/asm/page.h
2147 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2148 * and needs to be executable. This means the whole heap ends
2149 * up being executable.
2150 */
2151 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2152 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2153 +#define VM_DATA_DEFAULT_FLAGS32 \
2154 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2155 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2156
2157 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2158 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2159 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2160 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2161 #endif
2162
2163 +#define ktla_ktva(addr) (addr)
2164 +#define ktva_ktla(addr) (addr)
2165 +
2166 #ifndef __ASSEMBLY__
2167
2168 #undef STRICT_MM_TYPECHECKS
2169 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2170 index 9356262..ea96148 100644
2171 --- a/arch/powerpc/include/asm/page_64.h
2172 +++ b/arch/powerpc/include/asm/page_64.h
2173 @@ -155,15 +155,18 @@ do { \
2174 * stack by default, so in the absence of a PT_GNU_STACK program header
2175 * we turn execute permission off.
2176 */
2177 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2178 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2179 +#define VM_STACK_DEFAULT_FLAGS32 \
2180 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2181 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2182
2183 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2184 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2185
2186 +#ifndef CONFIG_PAX_PAGEEXEC
2187 #define VM_STACK_DEFAULT_FLAGS \
2188 (is_32bit_task() ? \
2189 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2190 +#endif
2191
2192 #include <asm-generic/getorder.h>
2193
2194 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2195 index 88b0bd9..e32bc67 100644
2196 --- a/arch/powerpc/include/asm/pgtable.h
2197 +++ b/arch/powerpc/include/asm/pgtable.h
2198 @@ -2,6 +2,7 @@
2199 #define _ASM_POWERPC_PGTABLE_H
2200 #ifdef __KERNEL__
2201
2202 +#include <linux/const.h>
2203 #ifndef __ASSEMBLY__
2204 #include <asm/processor.h> /* For TASK_SIZE */
2205 #include <asm/mmu.h>
2206 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2207 index 4aad413..85d86bf 100644
2208 --- a/arch/powerpc/include/asm/pte-hash32.h
2209 +++ b/arch/powerpc/include/asm/pte-hash32.h
2210 @@ -21,6 +21,7 @@
2211 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2212 #define _PAGE_USER 0x004 /* usermode access allowed */
2213 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2214 +#define _PAGE_EXEC _PAGE_GUARDED
2215 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2216 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2217 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2218 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2219 index 559da19..7e5835c 100644
2220 --- a/arch/powerpc/include/asm/reg.h
2221 +++ b/arch/powerpc/include/asm/reg.h
2222 @@ -212,6 +212,7 @@
2223 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2224 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2225 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2226 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2227 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2228 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2229 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2230 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2231 index e30a13d..2b7d994 100644
2232 --- a/arch/powerpc/include/asm/system.h
2233 +++ b/arch/powerpc/include/asm/system.h
2234 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2235 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2236 #endif
2237
2238 -extern unsigned long arch_align_stack(unsigned long sp);
2239 +#define arch_align_stack(x) ((x) & ~0xfUL)
2240
2241 /* Used in very early kernel initialization. */
2242 extern unsigned long reloc_offset(void);
2243 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2244 index bd0fb84..a42a14b 100644
2245 --- a/arch/powerpc/include/asm/uaccess.h
2246 +++ b/arch/powerpc/include/asm/uaccess.h
2247 @@ -13,6 +13,8 @@
2248 #define VERIFY_READ 0
2249 #define VERIFY_WRITE 1
2250
2251 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2252 +
2253 /*
2254 * The fs value determines whether argument validity checking should be
2255 * performed or not. If get_fs() == USER_DS, checking is performed, with
2256 @@ -327,52 +329,6 @@ do { \
2257 extern unsigned long __copy_tofrom_user(void __user *to,
2258 const void __user *from, unsigned long size);
2259
2260 -#ifndef __powerpc64__
2261 -
2262 -static inline unsigned long copy_from_user(void *to,
2263 - const void __user *from, unsigned long n)
2264 -{
2265 - unsigned long over;
2266 -
2267 - if (access_ok(VERIFY_READ, from, n))
2268 - return __copy_tofrom_user((__force void __user *)to, from, n);
2269 - if ((unsigned long)from < TASK_SIZE) {
2270 - over = (unsigned long)from + n - TASK_SIZE;
2271 - return __copy_tofrom_user((__force void __user *)to, from,
2272 - n - over) + over;
2273 - }
2274 - return n;
2275 -}
2276 -
2277 -static inline unsigned long copy_to_user(void __user *to,
2278 - const void *from, unsigned long n)
2279 -{
2280 - unsigned long over;
2281 -
2282 - if (access_ok(VERIFY_WRITE, to, n))
2283 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2284 - if ((unsigned long)to < TASK_SIZE) {
2285 - over = (unsigned long)to + n - TASK_SIZE;
2286 - return __copy_tofrom_user(to, (__force void __user *)from,
2287 - n - over) + over;
2288 - }
2289 - return n;
2290 -}
2291 -
2292 -#else /* __powerpc64__ */
2293 -
2294 -#define __copy_in_user(to, from, size) \
2295 - __copy_tofrom_user((to), (from), (size))
2296 -
2297 -extern unsigned long copy_from_user(void *to, const void __user *from,
2298 - unsigned long n);
2299 -extern unsigned long copy_to_user(void __user *to, const void *from,
2300 - unsigned long n);
2301 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2302 - unsigned long n);
2303 -
2304 -#endif /* __powerpc64__ */
2305 -
2306 static inline unsigned long __copy_from_user_inatomic(void *to,
2307 const void __user *from, unsigned long n)
2308 {
2309 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2310 if (ret == 0)
2311 return 0;
2312 }
2313 +
2314 + if (!__builtin_constant_p(n))
2315 + check_object_size(to, n, false);
2316 +
2317 return __copy_tofrom_user((__force void __user *)to, from, n);
2318 }
2319
2320 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2321 if (ret == 0)
2322 return 0;
2323 }
2324 +
2325 + if (!__builtin_constant_p(n))
2326 + check_object_size(from, n, true);
2327 +
2328 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2329 }
2330
2331 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2332 return __copy_to_user_inatomic(to, from, size);
2333 }
2334
2335 +#ifndef __powerpc64__
2336 +
2337 +static inline unsigned long __must_check copy_from_user(void *to,
2338 + const void __user *from, unsigned long n)
2339 +{
2340 + unsigned long over;
2341 +
2342 + if ((long)n < 0)
2343 + return n;
2344 +
2345 + if (access_ok(VERIFY_READ, from, n)) {
2346 + if (!__builtin_constant_p(n))
2347 + check_object_size(to, n, false);
2348 + return __copy_tofrom_user((__force void __user *)to, from, n);
2349 + }
2350 + if ((unsigned long)from < TASK_SIZE) {
2351 + over = (unsigned long)from + n - TASK_SIZE;
2352 + if (!__builtin_constant_p(n - over))
2353 + check_object_size(to, n - over, false);
2354 + return __copy_tofrom_user((__force void __user *)to, from,
2355 + n - over) + over;
2356 + }
2357 + return n;
2358 +}
2359 +
2360 +static inline unsigned long __must_check copy_to_user(void __user *to,
2361 + const void *from, unsigned long n)
2362 +{
2363 + unsigned long over;
2364 +
2365 + if ((long)n < 0)
2366 + return n;
2367 +
2368 + if (access_ok(VERIFY_WRITE, to, n)) {
2369 + if (!__builtin_constant_p(n))
2370 + check_object_size(from, n, true);
2371 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2372 + }
2373 + if ((unsigned long)to < TASK_SIZE) {
2374 + over = (unsigned long)to + n - TASK_SIZE;
2375 + if (!__builtin_constant_p(n))
2376 + check_object_size(from, n - over, true);
2377 + return __copy_tofrom_user(to, (__force void __user *)from,
2378 + n - over) + over;
2379 + }
2380 + return n;
2381 +}
2382 +
2383 +#else /* __powerpc64__ */
2384 +
2385 +#define __copy_in_user(to, from, size) \
2386 + __copy_tofrom_user((to), (from), (size))
2387 +
2388 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2389 +{
2390 + if ((long)n < 0 || n > INT_MAX)
2391 + return n;
2392 +
2393 + if (!__builtin_constant_p(n))
2394 + check_object_size(to, n, false);
2395 +
2396 + if (likely(access_ok(VERIFY_READ, from, n)))
2397 + n = __copy_from_user(to, from, n);
2398 + else
2399 + memset(to, 0, n);
2400 + return n;
2401 +}
2402 +
2403 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2404 +{
2405 + if ((long)n < 0 || n > INT_MAX)
2406 + return n;
2407 +
2408 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2409 + if (!__builtin_constant_p(n))
2410 + check_object_size(from, n, true);
2411 + n = __copy_to_user(to, from, n);
2412 + }
2413 + return n;
2414 +}
2415 +
2416 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2417 + unsigned long n);
2418 +
2419 +#endif /* __powerpc64__ */
2420 +
2421 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2422
2423 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2424 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2425 index 429983c..7af363b 100644
2426 --- a/arch/powerpc/kernel/exceptions-64e.S
2427 +++ b/arch/powerpc/kernel/exceptions-64e.S
2428 @@ -587,6 +587,7 @@ storage_fault_common:
2429 std r14,_DAR(r1)
2430 std r15,_DSISR(r1)
2431 addi r3,r1,STACK_FRAME_OVERHEAD
2432 + bl .save_nvgprs
2433 mr r4,r14
2434 mr r5,r15
2435 ld r14,PACA_EXGEN+EX_R14(r13)
2436 @@ -596,8 +597,7 @@ storage_fault_common:
2437 cmpdi r3,0
2438 bne- 1f
2439 b .ret_from_except_lite
2440 -1: bl .save_nvgprs
2441 - mr r5,r3
2442 +1: mr r5,r3
2443 addi r3,r1,STACK_FRAME_OVERHEAD
2444 ld r4,_DAR(r1)
2445 bl .bad_page_fault
2446 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2447 index 41b02c7..05e76fb 100644
2448 --- a/arch/powerpc/kernel/exceptions-64s.S
2449 +++ b/arch/powerpc/kernel/exceptions-64s.S
2450 @@ -1014,10 +1014,10 @@ handle_page_fault:
2451 11: ld r4,_DAR(r1)
2452 ld r5,_DSISR(r1)
2453 addi r3,r1,STACK_FRAME_OVERHEAD
2454 + bl .save_nvgprs
2455 bl .do_page_fault
2456 cmpdi r3,0
2457 beq+ 13f
2458 - bl .save_nvgprs
2459 mr r5,r3
2460 addi r3,r1,STACK_FRAME_OVERHEAD
2461 lwz r4,_DAR(r1)
2462 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2463 index 0b6d796..d760ddb 100644
2464 --- a/arch/powerpc/kernel/module_32.c
2465 +++ b/arch/powerpc/kernel/module_32.c
2466 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2467 me->arch.core_plt_section = i;
2468 }
2469 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2470 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2471 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2472 return -ENOEXEC;
2473 }
2474
2475 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2476
2477 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2478 /* Init, or core PLT? */
2479 - if (location >= mod->module_core
2480 - && location < mod->module_core + mod->core_size)
2481 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2482 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2483 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2484 - else
2485 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2486 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2487 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2488 + else {
2489 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2490 + return ~0UL;
2491 + }
2492
2493 /* Find this entry, or if that fails, the next avail. entry */
2494 while (entry->jump[0]) {
2495 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2496 index 8f53954..a704ad6 100644
2497 --- a/arch/powerpc/kernel/process.c
2498 +++ b/arch/powerpc/kernel/process.c
2499 @@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2500 * Lookup NIP late so we have the best change of getting the
2501 * above info out without failing
2502 */
2503 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2504 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2505 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2506 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2507 #endif
2508 show_stack(current, (unsigned long *) regs->gpr[1]);
2509 if (!user_mode(regs))
2510 @@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2511 newsp = stack[0];
2512 ip = stack[STACK_FRAME_LR_SAVE];
2513 if (!firstframe || ip != lr) {
2514 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2515 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2516 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2517 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2518 - printk(" (%pS)",
2519 + printk(" (%pA)",
2520 (void *)current->ret_stack[curr_frame].ret);
2521 curr_frame--;
2522 }
2523 @@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2524 struct pt_regs *regs = (struct pt_regs *)
2525 (sp + STACK_FRAME_OVERHEAD);
2526 lr = regs->link;
2527 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2528 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2529 regs->trap, (void *)regs->nip, (void *)lr);
2530 firstframe = 1;
2531 }
2532 @@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2533 }
2534
2535 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2536 -
2537 -unsigned long arch_align_stack(unsigned long sp)
2538 -{
2539 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2540 - sp -= get_random_int() & ~PAGE_MASK;
2541 - return sp & ~0xf;
2542 -}
2543 -
2544 -static inline unsigned long brk_rnd(void)
2545 -{
2546 - unsigned long rnd = 0;
2547 -
2548 - /* 8MB for 32bit, 1GB for 64bit */
2549 - if (is_32bit_task())
2550 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2551 - else
2552 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2553 -
2554 - return rnd << PAGE_SHIFT;
2555 -}
2556 -
2557 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2558 -{
2559 - unsigned long base = mm->brk;
2560 - unsigned long ret;
2561 -
2562 -#ifdef CONFIG_PPC_STD_MMU_64
2563 - /*
2564 - * If we are using 1TB segments and we are allowed to randomise
2565 - * the heap, we can put it above 1TB so it is backed by a 1TB
2566 - * segment. Otherwise the heap will be in the bottom 1TB
2567 - * which always uses 256MB segments and this may result in a
2568 - * performance penalty.
2569 - */
2570 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2571 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2572 -#endif
2573 -
2574 - ret = PAGE_ALIGN(base + brk_rnd());
2575 -
2576 - if (ret < mm->brk)
2577 - return mm->brk;
2578 -
2579 - return ret;
2580 -}
2581 -
2582 -unsigned long randomize_et_dyn(unsigned long base)
2583 -{
2584 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2585 -
2586 - if (ret < base)
2587 - return base;
2588 -
2589 - return ret;
2590 -}
2591 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2592 index 78b76dc..7f232ef 100644
2593 --- a/arch/powerpc/kernel/signal_32.c
2594 +++ b/arch/powerpc/kernel/signal_32.c
2595 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2596 /* Save user registers on the stack */
2597 frame = &rt_sf->uc.uc_mcontext;
2598 addr = frame;
2599 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2600 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2601 if (save_user_regs(regs, frame, 0, 1))
2602 goto badframe;
2603 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2604 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2605 index e91c736..742ec06 100644
2606 --- a/arch/powerpc/kernel/signal_64.c
2607 +++ b/arch/powerpc/kernel/signal_64.c
2608 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2609 current->thread.fpscr.val = 0;
2610
2611 /* Set up to return from userspace. */
2612 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2613 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2614 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2615 } else {
2616 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2617 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2618 index f19d977..8ac286e 100644
2619 --- a/arch/powerpc/kernel/traps.c
2620 +++ b/arch/powerpc/kernel/traps.c
2621 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2622 static inline void pmac_backlight_unblank(void) { }
2623 #endif
2624
2625 +extern void gr_handle_kernel_exploit(void);
2626 +
2627 int die(const char *str, struct pt_regs *regs, long err)
2628 {
2629 static struct {
2630 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2631 if (panic_on_oops)
2632 panic("Fatal exception");
2633
2634 + gr_handle_kernel_exploit();
2635 +
2636 oops_exit();
2637 do_exit(err);
2638
2639 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2640 index 142ab10..236e61a 100644
2641 --- a/arch/powerpc/kernel/vdso.c
2642 +++ b/arch/powerpc/kernel/vdso.c
2643 @@ -36,6 +36,7 @@
2644 #include <asm/firmware.h>
2645 #include <asm/vdso.h>
2646 #include <asm/vdso_datapage.h>
2647 +#include <asm/mman.h>
2648
2649 #include "setup.h"
2650
2651 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2652 vdso_base = VDSO32_MBASE;
2653 #endif
2654
2655 - current->mm->context.vdso_base = 0;
2656 + current->mm->context.vdso_base = ~0UL;
2657
2658 /* vDSO has a problem and was disabled, just don't "enable" it for the
2659 * process
2660 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2661 vdso_base = get_unmapped_area(NULL, vdso_base,
2662 (vdso_pages << PAGE_SHIFT) +
2663 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2664 - 0, 0);
2665 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2666 if (IS_ERR_VALUE(vdso_base)) {
2667 rc = vdso_base;
2668 goto fail_mmapsem;
2669 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2670 index 5eea6f3..5d10396 100644
2671 --- a/arch/powerpc/lib/usercopy_64.c
2672 +++ b/arch/powerpc/lib/usercopy_64.c
2673 @@ -9,22 +9,6 @@
2674 #include <linux/module.h>
2675 #include <asm/uaccess.h>
2676
2677 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2678 -{
2679 - if (likely(access_ok(VERIFY_READ, from, n)))
2680 - n = __copy_from_user(to, from, n);
2681 - else
2682 - memset(to, 0, n);
2683 - return n;
2684 -}
2685 -
2686 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2687 -{
2688 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2689 - n = __copy_to_user(to, from, n);
2690 - return n;
2691 -}
2692 -
2693 unsigned long copy_in_user(void __user *to, const void __user *from,
2694 unsigned long n)
2695 {
2696 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2697 return n;
2698 }
2699
2700 -EXPORT_SYMBOL(copy_from_user);
2701 -EXPORT_SYMBOL(copy_to_user);
2702 EXPORT_SYMBOL(copy_in_user);
2703
2704 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2705 index 5efe8c9..db9ceef 100644
2706 --- a/arch/powerpc/mm/fault.c
2707 +++ b/arch/powerpc/mm/fault.c
2708 @@ -32,6 +32,10 @@
2709 #include <linux/perf_event.h>
2710 #include <linux/magic.h>
2711 #include <linux/ratelimit.h>
2712 +#include <linux/slab.h>
2713 +#include <linux/pagemap.h>
2714 +#include <linux/compiler.h>
2715 +#include <linux/unistd.h>
2716
2717 #include <asm/firmware.h>
2718 #include <asm/page.h>
2719 @@ -43,6 +47,7 @@
2720 #include <asm/tlbflush.h>
2721 #include <asm/siginfo.h>
2722 #include <mm/mmu_decl.h>
2723 +#include <asm/ptrace.h>
2724
2725 #ifdef CONFIG_KPROBES
2726 static inline int notify_page_fault(struct pt_regs *regs)
2727 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2728 }
2729 #endif
2730
2731 +#ifdef CONFIG_PAX_PAGEEXEC
2732 +/*
2733 + * PaX: decide what to do with offenders (regs->nip = fault address)
2734 + *
2735 + * returns 1 when task should be killed
2736 + */
2737 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2738 +{
2739 + return 1;
2740 +}
2741 +
2742 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2743 +{
2744 + unsigned long i;
2745 +
2746 + printk(KERN_ERR "PAX: bytes at PC: ");
2747 + for (i = 0; i < 5; i++) {
2748 + unsigned int c;
2749 + if (get_user(c, (unsigned int __user *)pc+i))
2750 + printk(KERN_CONT "???????? ");
2751 + else
2752 + printk(KERN_CONT "%08x ", c);
2753 + }
2754 + printk("\n");
2755 +}
2756 +#endif
2757 +
2758 /*
2759 * Check whether the instruction at regs->nip is a store using
2760 * an update addressing form which will update r1.
2761 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2762 * indicate errors in DSISR but can validly be set in SRR1.
2763 */
2764 if (trap == 0x400)
2765 - error_code &= 0x48200000;
2766 + error_code &= 0x58200000;
2767 else
2768 is_write = error_code & DSISR_ISSTORE;
2769 #else
2770 @@ -259,7 +291,7 @@ good_area:
2771 * "undefined". Of those that can be set, this is the only
2772 * one which seems bad.
2773 */
2774 - if (error_code & 0x10000000)
2775 + if (error_code & DSISR_GUARDED)
2776 /* Guarded storage error. */
2777 goto bad_area;
2778 #endif /* CONFIG_8xx */
2779 @@ -274,7 +306,7 @@ good_area:
2780 * processors use the same I/D cache coherency mechanism
2781 * as embedded.
2782 */
2783 - if (error_code & DSISR_PROTFAULT)
2784 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2785 goto bad_area;
2786 #endif /* CONFIG_PPC_STD_MMU */
2787
2788 @@ -343,6 +375,23 @@ bad_area:
2789 bad_area_nosemaphore:
2790 /* User mode accesses cause a SIGSEGV */
2791 if (user_mode(regs)) {
2792 +
2793 +#ifdef CONFIG_PAX_PAGEEXEC
2794 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2795 +#ifdef CONFIG_PPC_STD_MMU
2796 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2797 +#else
2798 + if (is_exec && regs->nip == address) {
2799 +#endif
2800 + switch (pax_handle_fetch_fault(regs)) {
2801 + }
2802 +
2803 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2804 + do_group_exit(SIGKILL);
2805 + }
2806 + }
2807 +#endif
2808 +
2809 _exception(SIGSEGV, regs, code, address);
2810 return 0;
2811 }
2812 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2813 index 5a783d8..c23e14b 100644
2814 --- a/arch/powerpc/mm/mmap_64.c
2815 +++ b/arch/powerpc/mm/mmap_64.c
2816 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2817 */
2818 if (mmap_is_legacy()) {
2819 mm->mmap_base = TASK_UNMAPPED_BASE;
2820 +
2821 +#ifdef CONFIG_PAX_RANDMMAP
2822 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2823 + mm->mmap_base += mm->delta_mmap;
2824 +#endif
2825 +
2826 mm->get_unmapped_area = arch_get_unmapped_area;
2827 mm->unmap_area = arch_unmap_area;
2828 } else {
2829 mm->mmap_base = mmap_base();
2830 +
2831 +#ifdef CONFIG_PAX_RANDMMAP
2832 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2833 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2834 +#endif
2835 +
2836 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2837 mm->unmap_area = arch_unmap_area_topdown;
2838 }
2839 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2840 index ba51948..23009d9 100644
2841 --- a/arch/powerpc/mm/slice.c
2842 +++ b/arch/powerpc/mm/slice.c
2843 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2844 if ((mm->task_size - len) < addr)
2845 return 0;
2846 vma = find_vma(mm, addr);
2847 - return (!vma || (addr + len) <= vma->vm_start);
2848 + return check_heap_stack_gap(vma, addr, len);
2849 }
2850
2851 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2852 @@ -256,7 +256,7 @@ full_search:
2853 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2854 continue;
2855 }
2856 - if (!vma || addr + len <= vma->vm_start) {
2857 + if (check_heap_stack_gap(vma, addr, len)) {
2858 /*
2859 * Remember the place where we stopped the search:
2860 */
2861 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2862 }
2863 }
2864
2865 - addr = mm->mmap_base;
2866 - while (addr > len) {
2867 + if (mm->mmap_base < len)
2868 + addr = -ENOMEM;
2869 + else
2870 + addr = mm->mmap_base - len;
2871 +
2872 + while (!IS_ERR_VALUE(addr)) {
2873 /* Go down by chunk size */
2874 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2875 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2876
2877 /* Check for hit with different page size */
2878 mask = slice_range_to_mask(addr, len);
2879 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2880 * return with success:
2881 */
2882 vma = find_vma(mm, addr);
2883 - if (!vma || (addr + len) <= vma->vm_start) {
2884 + if (check_heap_stack_gap(vma, addr, len)) {
2885 /* remember the address as a hint for next time */
2886 if (use_cache)
2887 mm->free_area_cache = addr;
2888 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2889 mm->cached_hole_size = vma->vm_start - addr;
2890
2891 /* try just below the current vma->vm_start */
2892 - addr = vma->vm_start;
2893 + addr = skip_heap_stack_gap(vma, len);
2894 }
2895
2896 /*
2897 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2898 if (fixed && addr > (mm->task_size - len))
2899 return -EINVAL;
2900
2901 +#ifdef CONFIG_PAX_RANDMMAP
2902 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2903 + addr = 0;
2904 +#endif
2905 +
2906 /* If hint, make sure it matches our alignment restrictions */
2907 if (!fixed && addr) {
2908 addr = _ALIGN_UP(addr, 1ul << pshift);
2909 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2910 index 547f1a6..3fff354 100644
2911 --- a/arch/s390/include/asm/elf.h
2912 +++ b/arch/s390/include/asm/elf.h
2913 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2914 the loader. We need to make sure that it is out of the way of the program
2915 that it will "exec", and that there is sufficient room for the brk. */
2916
2917 -extern unsigned long randomize_et_dyn(unsigned long base);
2918 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2919 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2920 +
2921 +#ifdef CONFIG_PAX_ASLR
2922 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2923 +
2924 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2925 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2926 +#endif
2927
2928 /* This yields a mask that user programs can use to figure out what
2929 instruction set this CPU supports. */
2930 @@ -211,7 +217,4 @@ struct linux_binprm;
2931 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2932 int arch_setup_additional_pages(struct linux_binprm *, int);
2933
2934 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2935 -#define arch_randomize_brk arch_randomize_brk
2936 -
2937 #endif
2938 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2939 index 6582f69..b69906f 100644
2940 --- a/arch/s390/include/asm/system.h
2941 +++ b/arch/s390/include/asm/system.h
2942 @@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *command);
2943 extern void (*_machine_halt)(void);
2944 extern void (*_machine_power_off)(void);
2945
2946 -extern unsigned long arch_align_stack(unsigned long sp);
2947 +#define arch_align_stack(x) ((x) & ~0xfUL)
2948
2949 static inline int tprot(unsigned long addr)
2950 {
2951 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2952 index 2b23885..e136e31 100644
2953 --- a/arch/s390/include/asm/uaccess.h
2954 +++ b/arch/s390/include/asm/uaccess.h
2955 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2956 copy_to_user(void __user *to, const void *from, unsigned long n)
2957 {
2958 might_fault();
2959 +
2960 + if ((long)n < 0)
2961 + return n;
2962 +
2963 if (access_ok(VERIFY_WRITE, to, n))
2964 n = __copy_to_user(to, from, n);
2965 return n;
2966 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2967 static inline unsigned long __must_check
2968 __copy_from_user(void *to, const void __user *from, unsigned long n)
2969 {
2970 + if ((long)n < 0)
2971 + return n;
2972 +
2973 if (__builtin_constant_p(n) && (n <= 256))
2974 return uaccess.copy_from_user_small(n, from, to);
2975 else
2976 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2977 unsigned int sz = __compiletime_object_size(to);
2978
2979 might_fault();
2980 +
2981 + if ((long)n < 0)
2982 + return n;
2983 +
2984 if (unlikely(sz != -1 && sz < n)) {
2985 copy_from_user_overflow();
2986 return n;
2987 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2988 index dfcb343..eda788a 100644
2989 --- a/arch/s390/kernel/module.c
2990 +++ b/arch/s390/kernel/module.c
2991 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2992
2993 /* Increase core size by size of got & plt and set start
2994 offsets for got and plt. */
2995 - me->core_size = ALIGN(me->core_size, 4);
2996 - me->arch.got_offset = me->core_size;
2997 - me->core_size += me->arch.got_size;
2998 - me->arch.plt_offset = me->core_size;
2999 - me->core_size += me->arch.plt_size;
3000 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3001 + me->arch.got_offset = me->core_size_rw;
3002 + me->core_size_rw += me->arch.got_size;
3003 + me->arch.plt_offset = me->core_size_rx;
3004 + me->core_size_rx += me->arch.plt_size;
3005 return 0;
3006 }
3007
3008 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3009 if (info->got_initialized == 0) {
3010 Elf_Addr *gotent;
3011
3012 - gotent = me->module_core + me->arch.got_offset +
3013 + gotent = me->module_core_rw + me->arch.got_offset +
3014 info->got_offset;
3015 *gotent = val;
3016 info->got_initialized = 1;
3017 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3018 else if (r_type == R_390_GOTENT ||
3019 r_type == R_390_GOTPLTENT)
3020 *(unsigned int *) loc =
3021 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3022 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3023 else if (r_type == R_390_GOT64 ||
3024 r_type == R_390_GOTPLT64)
3025 *(unsigned long *) loc = val;
3026 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3027 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3028 if (info->plt_initialized == 0) {
3029 unsigned int *ip;
3030 - ip = me->module_core + me->arch.plt_offset +
3031 + ip = me->module_core_rx + me->arch.plt_offset +
3032 info->plt_offset;
3033 #ifndef CONFIG_64BIT
3034 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3035 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3036 val - loc + 0xffffUL < 0x1ffffeUL) ||
3037 (r_type == R_390_PLT32DBL &&
3038 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3039 - val = (Elf_Addr) me->module_core +
3040 + val = (Elf_Addr) me->module_core_rx +
3041 me->arch.plt_offset +
3042 info->plt_offset;
3043 val += rela->r_addend - loc;
3044 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3045 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3046 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3047 val = val + rela->r_addend -
3048 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3049 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3050 if (r_type == R_390_GOTOFF16)
3051 *(unsigned short *) loc = val;
3052 else if (r_type == R_390_GOTOFF32)
3053 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3054 break;
3055 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3056 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3057 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3058 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3059 rela->r_addend - loc;
3060 if (r_type == R_390_GOTPC)
3061 *(unsigned int *) loc = val;
3062 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3063 index 541a750..8739853 100644
3064 --- a/arch/s390/kernel/process.c
3065 +++ b/arch/s390/kernel/process.c
3066 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_struct *p)
3067 }
3068 return 0;
3069 }
3070 -
3071 -unsigned long arch_align_stack(unsigned long sp)
3072 -{
3073 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3074 - sp -= get_random_int() & ~PAGE_MASK;
3075 - return sp & ~0xf;
3076 -}
3077 -
3078 -static inline unsigned long brk_rnd(void)
3079 -{
3080 - /* 8MB for 32bit, 1GB for 64bit */
3081 - if (is_32bit_task())
3082 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3083 - else
3084 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3085 -}
3086 -
3087 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3088 -{
3089 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3090 -
3091 - if (ret < mm->brk)
3092 - return mm->brk;
3093 - return ret;
3094 -}
3095 -
3096 -unsigned long randomize_et_dyn(unsigned long base)
3097 -{
3098 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3099 -
3100 - if (!(current->flags & PF_RANDOMIZE))
3101 - return base;
3102 - if (ret < base)
3103 - return base;
3104 - return ret;
3105 -}
3106 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3107 index 7b371c3..ad06cf1 100644
3108 --- a/arch/s390/kernel/setup.c
3109 +++ b/arch/s390/kernel/setup.c
3110 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *p)
3111 }
3112 early_param("mem", early_parse_mem);
3113
3114 -unsigned int user_mode = HOME_SPACE_MODE;
3115 +unsigned int user_mode = SECONDARY_SPACE_MODE;
3116 EXPORT_SYMBOL_GPL(user_mode);
3117
3118 static int set_amode_and_uaccess(unsigned long user_amode,
3119 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3120 index c9a9f7f..60d0315 100644
3121 --- a/arch/s390/mm/mmap.c
3122 +++ b/arch/s390/mm/mmap.c
3123 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3124 */
3125 if (mmap_is_legacy()) {
3126 mm->mmap_base = TASK_UNMAPPED_BASE;
3127 +
3128 +#ifdef CONFIG_PAX_RANDMMAP
3129 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3130 + mm->mmap_base += mm->delta_mmap;
3131 +#endif
3132 +
3133 mm->get_unmapped_area = arch_get_unmapped_area;
3134 mm->unmap_area = arch_unmap_area;
3135 } else {
3136 mm->mmap_base = mmap_base();
3137 +
3138 +#ifdef CONFIG_PAX_RANDMMAP
3139 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3140 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3141 +#endif
3142 +
3143 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3144 mm->unmap_area = arch_unmap_area_topdown;
3145 }
3146 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3147 */
3148 if (mmap_is_legacy()) {
3149 mm->mmap_base = TASK_UNMAPPED_BASE;
3150 +
3151 +#ifdef CONFIG_PAX_RANDMMAP
3152 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3153 + mm->mmap_base += mm->delta_mmap;
3154 +#endif
3155 +
3156 mm->get_unmapped_area = s390_get_unmapped_area;
3157 mm->unmap_area = arch_unmap_area;
3158 } else {
3159 mm->mmap_base = mmap_base();
3160 +
3161 +#ifdef CONFIG_PAX_RANDMMAP
3162 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3163 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3164 +#endif
3165 +
3166 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3167 mm->unmap_area = arch_unmap_area_topdown;
3168 }
3169 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3170 index 589d5c7..669e274 100644
3171 --- a/arch/score/include/asm/system.h
3172 +++ b/arch/score/include/asm/system.h
3173 @@ -17,7 +17,7 @@ do { \
3174 #define finish_arch_switch(prev) do {} while (0)
3175
3176 typedef void (*vi_handler_t)(void);
3177 -extern unsigned long arch_align_stack(unsigned long sp);
3178 +#define arch_align_stack(x) (x)
3179
3180 #define mb() barrier()
3181 #define rmb() barrier()
3182 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3183 index 25d0803..d6c8e36 100644
3184 --- a/arch/score/kernel/process.c
3185 +++ b/arch/score/kernel/process.c
3186 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3187
3188 return task_pt_regs(task)->cp0_epc;
3189 }
3190 -
3191 -unsigned long arch_align_stack(unsigned long sp)
3192 -{
3193 - return sp;
3194 -}
3195 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3196 index afeb710..d1d1289 100644
3197 --- a/arch/sh/mm/mmap.c
3198 +++ b/arch/sh/mm/mmap.c
3199 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3200 addr = PAGE_ALIGN(addr);
3201
3202 vma = find_vma(mm, addr);
3203 - if (TASK_SIZE - len >= addr &&
3204 - (!vma || addr + len <= vma->vm_start))
3205 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3206 return addr;
3207 }
3208
3209 @@ -106,7 +105,7 @@ full_search:
3210 }
3211 return -ENOMEM;
3212 }
3213 - if (likely(!vma || addr + len <= vma->vm_start)) {
3214 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3215 /*
3216 * Remember the place where we stopped the search:
3217 */
3218 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 addr = PAGE_ALIGN(addr);
3220
3221 vma = find_vma(mm, addr);
3222 - if (TASK_SIZE - len >= addr &&
3223 - (!vma || addr + len <= vma->vm_start))
3224 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3225 return addr;
3226 }
3227
3228 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3229 /* make sure it can fit in the remaining address space */
3230 if (likely(addr > len)) {
3231 vma = find_vma(mm, addr-len);
3232 - if (!vma || addr <= vma->vm_start) {
3233 + if (check_heap_stack_gap(vma, addr - len, len)) {
3234 /* remember the address as a hint for next time */
3235 return (mm->free_area_cache = addr-len);
3236 }
3237 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3238 if (unlikely(mm->mmap_base < len))
3239 goto bottomup;
3240
3241 - addr = mm->mmap_base-len;
3242 - if (do_colour_align)
3243 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3244 + addr = mm->mmap_base - len;
3245
3246 do {
3247 + if (do_colour_align)
3248 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3249 /*
3250 * Lookup failure means no vma is above this address,
3251 * else if new region fits below vma->vm_start,
3252 * return with success:
3253 */
3254 vma = find_vma(mm, addr);
3255 - if (likely(!vma || addr+len <= vma->vm_start)) {
3256 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3257 /* remember the address as a hint for next time */
3258 return (mm->free_area_cache = addr);
3259 }
3260 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3261 mm->cached_hole_size = vma->vm_start - addr;
3262
3263 /* try just below the current vma->vm_start */
3264 - addr = vma->vm_start-len;
3265 - if (do_colour_align)
3266 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3267 - } while (likely(len < vma->vm_start));
3268 + addr = skip_heap_stack_gap(vma, len);
3269 + } while (!IS_ERR_VALUE(addr));
3270
3271 bottomup:
3272 /*
3273 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3274 index ad1fb5d..fc5315b 100644
3275 --- a/arch/sparc/Makefile
3276 +++ b/arch/sparc/Makefile
3277 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3278 # Export what is needed by arch/sparc/boot/Makefile
3279 export VMLINUX_INIT VMLINUX_MAIN
3280 VMLINUX_INIT := $(head-y) $(init-y)
3281 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3282 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3283 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3284 VMLINUX_MAIN += $(drivers-y) $(net-y)
3285
3286 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3287 index 9f421df..b81fc12 100644
3288 --- a/arch/sparc/include/asm/atomic_64.h
3289 +++ b/arch/sparc/include/asm/atomic_64.h
3290 @@ -14,18 +14,40 @@
3291 #define ATOMIC64_INIT(i) { (i) }
3292
3293 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3294 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3295 +{
3296 + return v->counter;
3297 +}
3298 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3299 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3300 +{
3301 + return v->counter;
3302 +}
3303
3304 #define atomic_set(v, i) (((v)->counter) = i)
3305 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3306 +{
3307 + v->counter = i;
3308 +}
3309 #define atomic64_set(v, i) (((v)->counter) = i)
3310 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3311 +{
3312 + v->counter = i;
3313 +}
3314
3315 extern void atomic_add(int, atomic_t *);
3316 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3317 extern void atomic64_add(long, atomic64_t *);
3318 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3319 extern void atomic_sub(int, atomic_t *);
3320 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3321 extern void atomic64_sub(long, atomic64_t *);
3322 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3323
3324 extern int atomic_add_ret(int, atomic_t *);
3325 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3326 extern long atomic64_add_ret(long, atomic64_t *);
3327 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3328 extern int atomic_sub_ret(int, atomic_t *);
3329 extern long atomic64_sub_ret(long, atomic64_t *);
3330
3331 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3332 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3333
3334 #define atomic_inc_return(v) atomic_add_ret(1, v)
3335 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3336 +{
3337 + return atomic_add_ret_unchecked(1, v);
3338 +}
3339 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3340 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3341 +{
3342 + return atomic64_add_ret_unchecked(1, v);
3343 +}
3344
3345 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3346 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3347
3348 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3349 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3350 +{
3351 + return atomic_add_ret_unchecked(i, v);
3352 +}
3353 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3354 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3355 +{
3356 + return atomic64_add_ret_unchecked(i, v);
3357 +}
3358
3359 /*
3360 * atomic_inc_and_test - increment and test
3361 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3362 * other cases.
3363 */
3364 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3365 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3366 +{
3367 + return atomic_inc_return_unchecked(v) == 0;
3368 +}
3369 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3370
3371 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3372 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3373 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3374
3375 #define atomic_inc(v) atomic_add(1, v)
3376 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3377 +{
3378 + atomic_add_unchecked(1, v);
3379 +}
3380 #define atomic64_inc(v) atomic64_add(1, v)
3381 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3382 +{
3383 + atomic64_add_unchecked(1, v);
3384 +}
3385
3386 #define atomic_dec(v) atomic_sub(1, v)
3387 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3388 +{
3389 + atomic_sub_unchecked(1, v);
3390 +}
3391 #define atomic64_dec(v) atomic64_sub(1, v)
3392 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3393 +{
3394 + atomic64_sub_unchecked(1, v);
3395 +}
3396
3397 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3398 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3399
3400 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3401 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3402 +{
3403 + return cmpxchg(&v->counter, old, new);
3404 +}
3405 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3406 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3407 +{
3408 + return xchg(&v->counter, new);
3409 +}
3410
3411 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3412 {
3413 - int c, old;
3414 + int c, old, new;
3415 c = atomic_read(v);
3416 for (;;) {
3417 - if (unlikely(c == (u)))
3418 + if (unlikely(c == u))
3419 break;
3420 - old = atomic_cmpxchg((v), c, c + (a));
3421 +
3422 + asm volatile("addcc %2, %0, %0\n"
3423 +
3424 +#ifdef CONFIG_PAX_REFCOUNT
3425 + "tvs %%icc, 6\n"
3426 +#endif
3427 +
3428 + : "=r" (new)
3429 + : "0" (c), "ir" (a)
3430 + : "cc");
3431 +
3432 + old = atomic_cmpxchg(v, c, new);
3433 if (likely(old == c))
3434 break;
3435 c = old;
3436 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3437 #define atomic64_cmpxchg(v, o, n) \
3438 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3439 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3440 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3441 +{
3442 + return xchg(&v->counter, new);
3443 +}
3444
3445 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3446 {
3447 - long c, old;
3448 + long c, old, new;
3449 c = atomic64_read(v);
3450 for (;;) {
3451 - if (unlikely(c == (u)))
3452 + if (unlikely(c == u))
3453 break;
3454 - old = atomic64_cmpxchg((v), c, c + (a));
3455 +
3456 + asm volatile("addcc %2, %0, %0\n"
3457 +
3458 +#ifdef CONFIG_PAX_REFCOUNT
3459 + "tvs %%xcc, 6\n"
3460 +#endif
3461 +
3462 + : "=r" (new)
3463 + : "0" (c), "ir" (a)
3464 + : "cc");
3465 +
3466 + old = atomic64_cmpxchg(v, c, new);
3467 if (likely(old == c))
3468 break;
3469 c = old;
3470 }
3471 - return c != (u);
3472 + return c != u;
3473 }
3474
3475 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3476 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3477 index 69358b5..17b4745 100644
3478 --- a/arch/sparc/include/asm/cache.h
3479 +++ b/arch/sparc/include/asm/cache.h
3480 @@ -10,7 +10,7 @@
3481 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3482
3483 #define L1_CACHE_SHIFT 5
3484 -#define L1_CACHE_BYTES 32
3485 +#define L1_CACHE_BYTES 32UL
3486
3487 #ifdef CONFIG_SPARC32
3488 #define SMP_CACHE_BYTES_SHIFT 5
3489 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3490 index 4269ca6..e3da77f 100644
3491 --- a/arch/sparc/include/asm/elf_32.h
3492 +++ b/arch/sparc/include/asm/elf_32.h
3493 @@ -114,6 +114,13 @@ typedef struct {
3494
3495 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3496
3497 +#ifdef CONFIG_PAX_ASLR
3498 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3499 +
3500 +#define PAX_DELTA_MMAP_LEN 16
3501 +#define PAX_DELTA_STACK_LEN 16
3502 +#endif
3503 +
3504 /* This yields a mask that user programs can use to figure out what
3505 instruction set this cpu supports. This can NOT be done in userspace
3506 on Sparc. */
3507 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3508 index 7df8b7f..4946269 100644
3509 --- a/arch/sparc/include/asm/elf_64.h
3510 +++ b/arch/sparc/include/asm/elf_64.h
3511 @@ -180,6 +180,13 @@ typedef struct {
3512 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3513 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3514
3515 +#ifdef CONFIG_PAX_ASLR
3516 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3517 +
3518 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3519 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3520 +#endif
3521 +
3522 extern unsigned long sparc64_elf_hwcap;
3523 #define ELF_HWCAP sparc64_elf_hwcap
3524
3525 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3526 index 5b31a8e..1d92567 100644
3527 --- a/arch/sparc/include/asm/pgtable_32.h
3528 +++ b/arch/sparc/include/asm/pgtable_32.h
3529 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3530 BTFIXUPDEF_INT(page_none)
3531 BTFIXUPDEF_INT(page_copy)
3532 BTFIXUPDEF_INT(page_readonly)
3533 +
3534 +#ifdef CONFIG_PAX_PAGEEXEC
3535 +BTFIXUPDEF_INT(page_shared_noexec)
3536 +BTFIXUPDEF_INT(page_copy_noexec)
3537 +BTFIXUPDEF_INT(page_readonly_noexec)
3538 +#endif
3539 +
3540 BTFIXUPDEF_INT(page_kernel)
3541
3542 #define PMD_SHIFT SUN4C_PMD_SHIFT
3543 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3544 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3545 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3546
3547 +#ifdef CONFIG_PAX_PAGEEXEC
3548 +extern pgprot_t PAGE_SHARED_NOEXEC;
3549 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3550 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3551 +#else
3552 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3553 +# define PAGE_COPY_NOEXEC PAGE_COPY
3554 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3555 +#endif
3556 +
3557 extern unsigned long page_kernel;
3558
3559 #ifdef MODULE
3560 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3561 index f6ae2b2..b03ffc7 100644
3562 --- a/arch/sparc/include/asm/pgtsrmmu.h
3563 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3564 @@ -115,6 +115,13 @@
3565 SRMMU_EXEC | SRMMU_REF)
3566 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3567 SRMMU_EXEC | SRMMU_REF)
3568 +
3569 +#ifdef CONFIG_PAX_PAGEEXEC
3570 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3571 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3572 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3573 +#endif
3574 +
3575 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3576 SRMMU_DIRTY | SRMMU_REF)
3577
3578 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3579 index 9689176..63c18ea 100644
3580 --- a/arch/sparc/include/asm/spinlock_64.h
3581 +++ b/arch/sparc/include/asm/spinlock_64.h
3582 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3583
3584 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3585
3586 -static void inline arch_read_lock(arch_rwlock_t *lock)
3587 +static inline void arch_read_lock(arch_rwlock_t *lock)
3588 {
3589 unsigned long tmp1, tmp2;
3590
3591 __asm__ __volatile__ (
3592 "1: ldsw [%2], %0\n"
3593 " brlz,pn %0, 2f\n"
3594 -"4: add %0, 1, %1\n"
3595 +"4: addcc %0, 1, %1\n"
3596 +
3597 +#ifdef CONFIG_PAX_REFCOUNT
3598 +" tvs %%icc, 6\n"
3599 +#endif
3600 +
3601 " cas [%2], %0, %1\n"
3602 " cmp %0, %1\n"
3603 " bne,pn %%icc, 1b\n"
3604 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3605 " .previous"
3606 : "=&r" (tmp1), "=&r" (tmp2)
3607 : "r" (lock)
3608 - : "memory");
3609 + : "memory", "cc");
3610 }
3611
3612 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3613 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3614 {
3615 int tmp1, tmp2;
3616
3617 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3618 "1: ldsw [%2], %0\n"
3619 " brlz,a,pn %0, 2f\n"
3620 " mov 0, %0\n"
3621 -" add %0, 1, %1\n"
3622 +" addcc %0, 1, %1\n"
3623 +
3624 +#ifdef CONFIG_PAX_REFCOUNT
3625 +" tvs %%icc, 6\n"
3626 +#endif
3627 +
3628 " cas [%2], %0, %1\n"
3629 " cmp %0, %1\n"
3630 " bne,pn %%icc, 1b\n"
3631 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3632 return tmp1;
3633 }
3634
3635 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3636 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3637 {
3638 unsigned long tmp1, tmp2;
3639
3640 __asm__ __volatile__(
3641 "1: lduw [%2], %0\n"
3642 -" sub %0, 1, %1\n"
3643 +" subcc %0, 1, %1\n"
3644 +
3645 +#ifdef CONFIG_PAX_REFCOUNT
3646 +" tvs %%icc, 6\n"
3647 +#endif
3648 +
3649 " cas [%2], %0, %1\n"
3650 " cmp %0, %1\n"
3651 " bne,pn %%xcc, 1b\n"
3652 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3653 : "memory");
3654 }
3655
3656 -static void inline arch_write_lock(arch_rwlock_t *lock)
3657 +static inline void arch_write_lock(arch_rwlock_t *lock)
3658 {
3659 unsigned long mask, tmp1, tmp2;
3660
3661 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3662 : "memory");
3663 }
3664
3665 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3666 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3667 {
3668 __asm__ __volatile__(
3669 " stw %%g0, [%0]"
3670 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3671 : "memory");
3672 }
3673
3674 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3675 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3676 {
3677 unsigned long mask, tmp1, tmp2, result;
3678
3679 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3680 index fa57532..e1a4c53 100644
3681 --- a/arch/sparc/include/asm/thread_info_32.h
3682 +++ b/arch/sparc/include/asm/thread_info_32.h
3683 @@ -50,6 +50,8 @@ struct thread_info {
3684 unsigned long w_saved;
3685
3686 struct restart_block restart_block;
3687 +
3688 + unsigned long lowest_stack;
3689 };
3690
3691 /*
3692 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3693 index 60d86be..952dea1 100644
3694 --- a/arch/sparc/include/asm/thread_info_64.h
3695 +++ b/arch/sparc/include/asm/thread_info_64.h
3696 @@ -63,6 +63,8 @@ struct thread_info {
3697 struct pt_regs *kern_una_regs;
3698 unsigned int kern_una_insn;
3699
3700 + unsigned long lowest_stack;
3701 +
3702 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3703 };
3704
3705 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3706 index e88fbe5..96b0ce5 100644
3707 --- a/arch/sparc/include/asm/uaccess.h
3708 +++ b/arch/sparc/include/asm/uaccess.h
3709 @@ -1,5 +1,13 @@
3710 #ifndef ___ASM_SPARC_UACCESS_H
3711 #define ___ASM_SPARC_UACCESS_H
3712 +
3713 +#ifdef __KERNEL__
3714 +#ifndef __ASSEMBLY__
3715 +#include <linux/types.h>
3716 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3717 +#endif
3718 +#endif
3719 +
3720 #if defined(__sparc__) && defined(__arch64__)
3721 #include <asm/uaccess_64.h>
3722 #else
3723 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3724 index 8303ac4..07f333d 100644
3725 --- a/arch/sparc/include/asm/uaccess_32.h
3726 +++ b/arch/sparc/include/asm/uaccess_32.h
3727 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3728
3729 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3730 {
3731 - if (n && __access_ok((unsigned long) to, n))
3732 + if ((long)n < 0)
3733 + return n;
3734 +
3735 + if (n && __access_ok((unsigned long) to, n)) {
3736 + if (!__builtin_constant_p(n))
3737 + check_object_size(from, n, true);
3738 return __copy_user(to, (__force void __user *) from, n);
3739 - else
3740 + } else
3741 return n;
3742 }
3743
3744 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3745 {
3746 + if ((long)n < 0)
3747 + return n;
3748 +
3749 + if (!__builtin_constant_p(n))
3750 + check_object_size(from, n, true);
3751 +
3752 return __copy_user(to, (__force void __user *) from, n);
3753 }
3754
3755 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3756 {
3757 - if (n && __access_ok((unsigned long) from, n))
3758 + if ((long)n < 0)
3759 + return n;
3760 +
3761 + if (n && __access_ok((unsigned long) from, n)) {
3762 + if (!__builtin_constant_p(n))
3763 + check_object_size(to, n, false);
3764 return __copy_user((__force void __user *) to, from, n);
3765 - else
3766 + } else
3767 return n;
3768 }
3769
3770 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3771 {
3772 + if ((long)n < 0)
3773 + return n;
3774 +
3775 return __copy_user((__force void __user *) to, from, n);
3776 }
3777
3778 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3779 index 3e1449f..5293a0e 100644
3780 --- a/arch/sparc/include/asm/uaccess_64.h
3781 +++ b/arch/sparc/include/asm/uaccess_64.h
3782 @@ -10,6 +10,7 @@
3783 #include <linux/compiler.h>
3784 #include <linux/string.h>
3785 #include <linux/thread_info.h>
3786 +#include <linux/kernel.h>
3787 #include <asm/asi.h>
3788 #include <asm/system.h>
3789 #include <asm/spitfire.h>
3790 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3791 static inline unsigned long __must_check
3792 copy_from_user(void *to, const void __user *from, unsigned long size)
3793 {
3794 - unsigned long ret = ___copy_from_user(to, from, size);
3795 + unsigned long ret;
3796
3797 + if ((long)size < 0 || size > INT_MAX)
3798 + return size;
3799 +
3800 + if (!__builtin_constant_p(size))
3801 + check_object_size(to, size, false);
3802 +
3803 + ret = ___copy_from_user(to, from, size);
3804 if (unlikely(ret))
3805 ret = copy_from_user_fixup(to, from, size);
3806
3807 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3808 static inline unsigned long __must_check
3809 copy_to_user(void __user *to, const void *from, unsigned long size)
3810 {
3811 - unsigned long ret = ___copy_to_user(to, from, size);
3812 + unsigned long ret;
3813 +
3814 + if ((long)size < 0 || size > INT_MAX)
3815 + return size;
3816 +
3817 + if (!__builtin_constant_p(size))
3818 + check_object_size(from, size, true);
3819
3820 + ret = ___copy_to_user(to, from, size);
3821 if (unlikely(ret))
3822 ret = copy_to_user_fixup(to, from, size);
3823 return ret;
3824 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3825 index cb85458..e063f17 100644
3826 --- a/arch/sparc/kernel/Makefile
3827 +++ b/arch/sparc/kernel/Makefile
3828 @@ -3,7 +3,7 @@
3829 #
3830
3831 asflags-y := -ansi
3832 -ccflags-y := -Werror
3833 +#ccflags-y := -Werror
3834
3835 extra-y := head_$(BITS).o
3836 extra-y += init_task.o
3837 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3838 index f793742..4d880af 100644
3839 --- a/arch/sparc/kernel/process_32.c
3840 +++ b/arch/sparc/kernel/process_32.c
3841 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3842 rw->ins[4], rw->ins[5],
3843 rw->ins[6],
3844 rw->ins[7]);
3845 - printk("%pS\n", (void *) rw->ins[7]);
3846 + printk("%pA\n", (void *) rw->ins[7]);
3847 rw = (struct reg_window32 *) rw->ins[6];
3848 }
3849 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3850 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3851
3852 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3853 r->psr, r->pc, r->npc, r->y, print_tainted());
3854 - printk("PC: <%pS>\n", (void *) r->pc);
3855 + printk("PC: <%pA>\n", (void *) r->pc);
3856 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3857 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3858 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3859 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3860 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3861 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3862 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3863 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3864
3865 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3866 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3867 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3868 rw = (struct reg_window32 *) fp;
3869 pc = rw->ins[7];
3870 printk("[%08lx : ", pc);
3871 - printk("%pS ] ", (void *) pc);
3872 + printk("%pA ] ", (void *) pc);
3873 fp = rw->ins[6];
3874 } while (++count < 16);
3875 printk("\n");
3876 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3877 index d959cd0..7b42812 100644
3878 --- a/arch/sparc/kernel/process_64.c
3879 +++ b/arch/sparc/kernel/process_64.c
3880 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3881 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3882 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3883 if (regs->tstate & TSTATE_PRIV)
3884 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3885 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3886 }
3887
3888 void show_regs(struct pt_regs *regs)
3889 {
3890 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3891 regs->tpc, regs->tnpc, regs->y, print_tainted());
3892 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3893 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3894 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3895 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3896 regs->u_regs[3]);
3897 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3898 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3899 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3900 regs->u_regs[15]);
3901 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3902 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3903 show_regwindow(regs);
3904 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3905 }
3906 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3907 ((tp && tp->task) ? tp->task->pid : -1));
3908
3909 if (gp->tstate & TSTATE_PRIV) {
3910 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3911 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3912 (void *) gp->tpc,
3913 (void *) gp->o7,
3914 (void *) gp->i7,
3915 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3916 index 42b282f..28ce9f2 100644
3917 --- a/arch/sparc/kernel/sys_sparc_32.c
3918 +++ b/arch/sparc/kernel/sys_sparc_32.c
3919 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3920 if (ARCH_SUN4C && len > 0x20000000)
3921 return -ENOMEM;
3922 if (!addr)
3923 - addr = TASK_UNMAPPED_BASE;
3924 + addr = current->mm->mmap_base;
3925
3926 if (flags & MAP_SHARED)
3927 addr = COLOUR_ALIGN(addr);
3928 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3929 }
3930 if (TASK_SIZE - PAGE_SIZE - len < addr)
3931 return -ENOMEM;
3932 - if (!vmm || addr + len <= vmm->vm_start)
3933 + if (check_heap_stack_gap(vmm, addr, len))
3934 return addr;
3935 addr = vmm->vm_end;
3936 if (flags & MAP_SHARED)
3937 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3938 index 908b47a..aa9e584 100644
3939 --- a/arch/sparc/kernel/sys_sparc_64.c
3940 +++ b/arch/sparc/kernel/sys_sparc_64.c
3941 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3942 /* We do not accept a shared mapping if it would violate
3943 * cache aliasing constraints.
3944 */
3945 - if ((flags & MAP_SHARED) &&
3946 + if ((filp || (flags & MAP_SHARED)) &&
3947 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3948 return -EINVAL;
3949 return addr;
3950 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3951 if (filp || (flags & MAP_SHARED))
3952 do_color_align = 1;
3953
3954 +#ifdef CONFIG_PAX_RANDMMAP
3955 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3956 +#endif
3957 +
3958 if (addr) {
3959 if (do_color_align)
3960 addr = COLOUR_ALIGN(addr, pgoff);
3961 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3962 addr = PAGE_ALIGN(addr);
3963
3964 vma = find_vma(mm, addr);
3965 - if (task_size - len >= addr &&
3966 - (!vma || addr + len <= vma->vm_start))
3967 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3968 return addr;
3969 }
3970
3971 if (len > mm->cached_hole_size) {
3972 - start_addr = addr = mm->free_area_cache;
3973 + start_addr = addr = mm->free_area_cache;
3974 } else {
3975 - start_addr = addr = TASK_UNMAPPED_BASE;
3976 + start_addr = addr = mm->mmap_base;
3977 mm->cached_hole_size = 0;
3978 }
3979
3980 @@ -174,14 +177,14 @@ full_search:
3981 vma = find_vma(mm, VA_EXCLUDE_END);
3982 }
3983 if (unlikely(task_size < addr)) {
3984 - if (start_addr != TASK_UNMAPPED_BASE) {
3985 - start_addr = addr = TASK_UNMAPPED_BASE;
3986 + if (start_addr != mm->mmap_base) {
3987 + start_addr = addr = mm->mmap_base;
3988 mm->cached_hole_size = 0;
3989 goto full_search;
3990 }
3991 return -ENOMEM;
3992 }
3993 - if (likely(!vma || addr + len <= vma->vm_start)) {
3994 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3995 /*
3996 * Remember the place where we stopped the search:
3997 */
3998 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3999 /* We do not accept a shared mapping if it would violate
4000 * cache aliasing constraints.
4001 */
4002 - if ((flags & MAP_SHARED) &&
4003 + if ((filp || (flags & MAP_SHARED)) &&
4004 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4005 return -EINVAL;
4006 return addr;
4007 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 addr = PAGE_ALIGN(addr);
4009
4010 vma = find_vma(mm, addr);
4011 - if (task_size - len >= addr &&
4012 - (!vma || addr + len <= vma->vm_start))
4013 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4014 return addr;
4015 }
4016
4017 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4018 /* make sure it can fit in the remaining address space */
4019 if (likely(addr > len)) {
4020 vma = find_vma(mm, addr-len);
4021 - if (!vma || addr <= vma->vm_start) {
4022 + if (check_heap_stack_gap(vma, addr - len, len)) {
4023 /* remember the address as a hint for next time */
4024 return (mm->free_area_cache = addr-len);
4025 }
4026 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4027 if (unlikely(mm->mmap_base < len))
4028 goto bottomup;
4029
4030 - addr = mm->mmap_base-len;
4031 - if (do_color_align)
4032 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4033 + addr = mm->mmap_base - len;
4034
4035 do {
4036 + if (do_color_align)
4037 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4038 /*
4039 * Lookup failure means no vma is above this address,
4040 * else if new region fits below vma->vm_start,
4041 * return with success:
4042 */
4043 vma = find_vma(mm, addr);
4044 - if (likely(!vma || addr+len <= vma->vm_start)) {
4045 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4046 /* remember the address as a hint for next time */
4047 return (mm->free_area_cache = addr);
4048 }
4049 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4050 mm->cached_hole_size = vma->vm_start - addr;
4051
4052 /* try just below the current vma->vm_start */
4053 - addr = vma->vm_start-len;
4054 - if (do_color_align)
4055 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4056 - } while (likely(len < vma->vm_start));
4057 + addr = skip_heap_stack_gap(vma, len);
4058 + } while (!IS_ERR_VALUE(addr));
4059
4060 bottomup:
4061 /*
4062 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4063 gap == RLIM_INFINITY ||
4064 sysctl_legacy_va_layout) {
4065 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4066 +
4067 +#ifdef CONFIG_PAX_RANDMMAP
4068 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4069 + mm->mmap_base += mm->delta_mmap;
4070 +#endif
4071 +
4072 mm->get_unmapped_area = arch_get_unmapped_area;
4073 mm->unmap_area = arch_unmap_area;
4074 } else {
4075 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 gap = (task_size / 6 * 5);
4077
4078 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4079 +
4080 +#ifdef CONFIG_PAX_RANDMMAP
4081 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4082 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4083 +#endif
4084 +
4085 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4086 mm->unmap_area = arch_unmap_area_topdown;
4087 }
4088 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4089 index c0490c7..84959d1 100644
4090 --- a/arch/sparc/kernel/traps_32.c
4091 +++ b/arch/sparc/kernel/traps_32.c
4092 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
4093 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4094 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4095
4096 +extern void gr_handle_kernel_exploit(void);
4097 +
4098 void die_if_kernel(char *str, struct pt_regs *regs)
4099 {
4100 static int die_counter;
4101 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4102 count++ < 30 &&
4103 (((unsigned long) rw) >= PAGE_OFFSET) &&
4104 !(((unsigned long) rw) & 0x7)) {
4105 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4106 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4107 (void *) rw->ins[7]);
4108 rw = (struct reg_window32 *)rw->ins[6];
4109 }
4110 }
4111 printk("Instruction DUMP:");
4112 instruction_dump ((unsigned long *) regs->pc);
4113 - if(regs->psr & PSR_PS)
4114 + if(regs->psr & PSR_PS) {
4115 + gr_handle_kernel_exploit();
4116 do_exit(SIGKILL);
4117 + }
4118 do_exit(SIGSEGV);
4119 }
4120
4121 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4122 index 0cbdaa4..438e4c9 100644
4123 --- a/arch/sparc/kernel/traps_64.c
4124 +++ b/arch/sparc/kernel/traps_64.c
4125 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4126 i + 1,
4127 p->trapstack[i].tstate, p->trapstack[i].tpc,
4128 p->trapstack[i].tnpc, p->trapstack[i].tt);
4129 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4130 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4131 }
4132 }
4133
4134 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4135
4136 lvl -= 0x100;
4137 if (regs->tstate & TSTATE_PRIV) {
4138 +
4139 +#ifdef CONFIG_PAX_REFCOUNT
4140 + if (lvl == 6)
4141 + pax_report_refcount_overflow(regs);
4142 +#endif
4143 +
4144 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4145 die_if_kernel(buffer, regs);
4146 }
4147 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4148 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4149 {
4150 char buffer[32];
4151 -
4152 +
4153 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4154 0, lvl, SIGTRAP) == NOTIFY_STOP)
4155 return;
4156
4157 +#ifdef CONFIG_PAX_REFCOUNT
4158 + if (lvl == 6)
4159 + pax_report_refcount_overflow(regs);
4160 +#endif
4161 +
4162 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4163
4164 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4165 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4166 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4167 printk("%s" "ERROR(%d): ",
4168 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4169 - printk("TPC<%pS>\n", (void *) regs->tpc);
4170 + printk("TPC<%pA>\n", (void *) regs->tpc);
4171 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4172 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4173 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4174 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4175 smp_processor_id(),
4176 (type & 0x1) ? 'I' : 'D',
4177 regs->tpc);
4178 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4179 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4180 panic("Irrecoverable Cheetah+ parity error.");
4181 }
4182
4183 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4184 smp_processor_id(),
4185 (type & 0x1) ? 'I' : 'D',
4186 regs->tpc);
4187 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4188 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4189 }
4190
4191 struct sun4v_error_entry {
4192 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4193
4194 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4195 regs->tpc, tl);
4196 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4197 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4198 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4199 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4200 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4201 (void *) regs->u_regs[UREG_I7]);
4202 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4203 "pte[%lx] error[%lx]\n",
4204 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4205
4206 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4207 regs->tpc, tl);
4208 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4209 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4210 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4211 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4212 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4213 (void *) regs->u_regs[UREG_I7]);
4214 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4215 "pte[%lx] error[%lx]\n",
4216 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4217 fp = (unsigned long)sf->fp + STACK_BIAS;
4218 }
4219
4220 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4221 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4222 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4223 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4224 int index = tsk->curr_ret_stack;
4225 if (tsk->ret_stack && index >= graph) {
4226 pc = tsk->ret_stack[index - graph].ret;
4227 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4228 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4229 graph++;
4230 }
4231 }
4232 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4233 return (struct reg_window *) (fp + STACK_BIAS);
4234 }
4235
4236 +extern void gr_handle_kernel_exploit(void);
4237 +
4238 void die_if_kernel(char *str, struct pt_regs *regs)
4239 {
4240 static int die_counter;
4241 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4242 while (rw &&
4243 count++ < 30 &&
4244 kstack_valid(tp, (unsigned long) rw)) {
4245 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4246 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4247 (void *) rw->ins[7]);
4248
4249 rw = kernel_stack_up(rw);
4250 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4251 }
4252 user_instruction_dump ((unsigned int __user *) regs->tpc);
4253 }
4254 - if (regs->tstate & TSTATE_PRIV)
4255 + if (regs->tstate & TSTATE_PRIV) {
4256 + gr_handle_kernel_exploit();
4257 do_exit(SIGKILL);
4258 + }
4259 do_exit(SIGSEGV);
4260 }
4261 EXPORT_SYMBOL(die_if_kernel);
4262 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4263 index 76e4ac1..78f8bb1 100644
4264 --- a/arch/sparc/kernel/unaligned_64.c
4265 +++ b/arch/sparc/kernel/unaligned_64.c
4266 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4267 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4268
4269 if (__ratelimit(&ratelimit)) {
4270 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4271 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4272 regs->tpc, (void *) regs->tpc);
4273 }
4274 }
4275 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4276 index a3fc437..fea9957 100644
4277 --- a/arch/sparc/lib/Makefile
4278 +++ b/arch/sparc/lib/Makefile
4279 @@ -2,7 +2,7 @@
4280 #
4281
4282 asflags-y := -ansi -DST_DIV0=0x02
4283 -ccflags-y := -Werror
4284 +#ccflags-y := -Werror
4285
4286 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4287 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4288 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4289 index 59186e0..f747d7a 100644
4290 --- a/arch/sparc/lib/atomic_64.S
4291 +++ b/arch/sparc/lib/atomic_64.S
4292 @@ -18,7 +18,12 @@
4293 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4294 BACKOFF_SETUP(%o2)
4295 1: lduw [%o1], %g1
4296 - add %g1, %o0, %g7
4297 + addcc %g1, %o0, %g7
4298 +
4299 +#ifdef CONFIG_PAX_REFCOUNT
4300 + tvs %icc, 6
4301 +#endif
4302 +
4303 cas [%o1], %g1, %g7
4304 cmp %g1, %g7
4305 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4306 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4307 2: BACKOFF_SPIN(%o2, %o3, 1b)
4308 .size atomic_add, .-atomic_add
4309
4310 + .globl atomic_add_unchecked
4311 + .type atomic_add_unchecked,#function
4312 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4313 + BACKOFF_SETUP(%o2)
4314 +1: lduw [%o1], %g1
4315 + add %g1, %o0, %g7
4316 + cas [%o1], %g1, %g7
4317 + cmp %g1, %g7
4318 + bne,pn %icc, 2f
4319 + nop
4320 + retl
4321 + nop
4322 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4323 + .size atomic_add_unchecked, .-atomic_add_unchecked
4324 +
4325 .globl atomic_sub
4326 .type atomic_sub,#function
4327 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4328 BACKOFF_SETUP(%o2)
4329 1: lduw [%o1], %g1
4330 - sub %g1, %o0, %g7
4331 + subcc %g1, %o0, %g7
4332 +
4333 +#ifdef CONFIG_PAX_REFCOUNT
4334 + tvs %icc, 6
4335 +#endif
4336 +
4337 cas [%o1], %g1, %g7
4338 cmp %g1, %g7
4339 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4340 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4341 2: BACKOFF_SPIN(%o2, %o3, 1b)
4342 .size atomic_sub, .-atomic_sub
4343
4344 + .globl atomic_sub_unchecked
4345 + .type atomic_sub_unchecked,#function
4346 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4347 + BACKOFF_SETUP(%o2)
4348 +1: lduw [%o1], %g1
4349 + sub %g1, %o0, %g7
4350 + cas [%o1], %g1, %g7
4351 + cmp %g1, %g7
4352 + bne,pn %icc, 2f
4353 + nop
4354 + retl
4355 + nop
4356 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4357 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4358 +
4359 .globl atomic_add_ret
4360 .type atomic_add_ret,#function
4361 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4362 BACKOFF_SETUP(%o2)
4363 1: lduw [%o1], %g1
4364 - add %g1, %o0, %g7
4365 + addcc %g1, %o0, %g7
4366 +
4367 +#ifdef CONFIG_PAX_REFCOUNT
4368 + tvs %icc, 6
4369 +#endif
4370 +
4371 cas [%o1], %g1, %g7
4372 cmp %g1, %g7
4373 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4374 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4375 2: BACKOFF_SPIN(%o2, %o3, 1b)
4376 .size atomic_add_ret, .-atomic_add_ret
4377
4378 + .globl atomic_add_ret_unchecked
4379 + .type atomic_add_ret_unchecked,#function
4380 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4381 + BACKOFF_SETUP(%o2)
4382 +1: lduw [%o1], %g1
4383 + addcc %g1, %o0, %g7
4384 + cas [%o1], %g1, %g7
4385 + cmp %g1, %g7
4386 + bne,pn %icc, 2f
4387 + add %g7, %o0, %g7
4388 + sra %g7, 0, %o0
4389 + retl
4390 + nop
4391 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4392 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4393 +
4394 .globl atomic_sub_ret
4395 .type atomic_sub_ret,#function
4396 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4397 BACKOFF_SETUP(%o2)
4398 1: lduw [%o1], %g1
4399 - sub %g1, %o0, %g7
4400 + subcc %g1, %o0, %g7
4401 +
4402 +#ifdef CONFIG_PAX_REFCOUNT
4403 + tvs %icc, 6
4404 +#endif
4405 +
4406 cas [%o1], %g1, %g7
4407 cmp %g1, %g7
4408 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4409 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4410 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4411 BACKOFF_SETUP(%o2)
4412 1: ldx [%o1], %g1
4413 - add %g1, %o0, %g7
4414 + addcc %g1, %o0, %g7
4415 +
4416 +#ifdef CONFIG_PAX_REFCOUNT
4417 + tvs %xcc, 6
4418 +#endif
4419 +
4420 casx [%o1], %g1, %g7
4421 cmp %g1, %g7
4422 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4423 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4424 2: BACKOFF_SPIN(%o2, %o3, 1b)
4425 .size atomic64_add, .-atomic64_add
4426
4427 + .globl atomic64_add_unchecked
4428 + .type atomic64_add_unchecked,#function
4429 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4430 + BACKOFF_SETUP(%o2)
4431 +1: ldx [%o1], %g1
4432 + addcc %g1, %o0, %g7
4433 + casx [%o1], %g1, %g7
4434 + cmp %g1, %g7
4435 + bne,pn %xcc, 2f
4436 + nop
4437 + retl
4438 + nop
4439 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4440 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4441 +
4442 .globl atomic64_sub
4443 .type atomic64_sub,#function
4444 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4445 BACKOFF_SETUP(%o2)
4446 1: ldx [%o1], %g1
4447 - sub %g1, %o0, %g7
4448 + subcc %g1, %o0, %g7
4449 +
4450 +#ifdef CONFIG_PAX_REFCOUNT
4451 + tvs %xcc, 6
4452 +#endif
4453 +
4454 casx [%o1], %g1, %g7
4455 cmp %g1, %g7
4456 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4457 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4458 2: BACKOFF_SPIN(%o2, %o3, 1b)
4459 .size atomic64_sub, .-atomic64_sub
4460
4461 + .globl atomic64_sub_unchecked
4462 + .type atomic64_sub_unchecked,#function
4463 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4464 + BACKOFF_SETUP(%o2)
4465 +1: ldx [%o1], %g1
4466 + subcc %g1, %o0, %g7
4467 + casx [%o1], %g1, %g7
4468 + cmp %g1, %g7
4469 + bne,pn %xcc, 2f
4470 + nop
4471 + retl
4472 + nop
4473 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4474 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4475 +
4476 .globl atomic64_add_ret
4477 .type atomic64_add_ret,#function
4478 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4479 BACKOFF_SETUP(%o2)
4480 1: ldx [%o1], %g1
4481 - add %g1, %o0, %g7
4482 + addcc %g1, %o0, %g7
4483 +
4484 +#ifdef CONFIG_PAX_REFCOUNT
4485 + tvs %xcc, 6
4486 +#endif
4487 +
4488 casx [%o1], %g1, %g7
4489 cmp %g1, %g7
4490 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4491 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4492 2: BACKOFF_SPIN(%o2, %o3, 1b)
4493 .size atomic64_add_ret, .-atomic64_add_ret
4494
4495 + .globl atomic64_add_ret_unchecked
4496 + .type atomic64_add_ret_unchecked,#function
4497 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4498 + BACKOFF_SETUP(%o2)
4499 +1: ldx [%o1], %g1
4500 + addcc %g1, %o0, %g7
4501 + casx [%o1], %g1, %g7
4502 + cmp %g1, %g7
4503 + bne,pn %xcc, 2f
4504 + add %g7, %o0, %g7
4505 + mov %g7, %o0
4506 + retl
4507 + nop
4508 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4509 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4510 +
4511 .globl atomic64_sub_ret
4512 .type atomic64_sub_ret,#function
4513 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4514 BACKOFF_SETUP(%o2)
4515 1: ldx [%o1], %g1
4516 - sub %g1, %o0, %g7
4517 + subcc %g1, %o0, %g7
4518 +
4519 +#ifdef CONFIG_PAX_REFCOUNT
4520 + tvs %xcc, 6
4521 +#endif
4522 +
4523 casx [%o1], %g1, %g7
4524 cmp %g1, %g7
4525 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4526 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4527 index 1b30bb3..b4a16c7 100644
4528 --- a/arch/sparc/lib/ksyms.c
4529 +++ b/arch/sparc/lib/ksyms.c
4530 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4531
4532 /* Atomic counter implementation. */
4533 EXPORT_SYMBOL(atomic_add);
4534 +EXPORT_SYMBOL(atomic_add_unchecked);
4535 EXPORT_SYMBOL(atomic_add_ret);
4536 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4537 EXPORT_SYMBOL(atomic_sub);
4538 +EXPORT_SYMBOL(atomic_sub_unchecked);
4539 EXPORT_SYMBOL(atomic_sub_ret);
4540 EXPORT_SYMBOL(atomic64_add);
4541 +EXPORT_SYMBOL(atomic64_add_unchecked);
4542 EXPORT_SYMBOL(atomic64_add_ret);
4543 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4544 EXPORT_SYMBOL(atomic64_sub);
4545 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4546 EXPORT_SYMBOL(atomic64_sub_ret);
4547
4548 /* Atomic bit operations. */
4549 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4550 index e3cda21..a68e4cb 100644
4551 --- a/arch/sparc/mm/Makefile
4552 +++ b/arch/sparc/mm/Makefile
4553 @@ -2,7 +2,7 @@
4554 #
4555
4556 asflags-y := -ansi
4557 -ccflags-y := -Werror
4558 +#ccflags-y := -Werror
4559
4560 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4561 obj-y += fault_$(BITS).o
4562 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4563 index aa1c1b1..f93e28f 100644
4564 --- a/arch/sparc/mm/fault_32.c
4565 +++ b/arch/sparc/mm/fault_32.c
4566 @@ -22,6 +22,9 @@
4567 #include <linux/interrupt.h>
4568 #include <linux/module.h>
4569 #include <linux/kdebug.h>
4570 +#include <linux/slab.h>
4571 +#include <linux/pagemap.h>
4572 +#include <linux/compiler.h>
4573
4574 #include <asm/system.h>
4575 #include <asm/page.h>
4576 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4577 return safe_compute_effective_address(regs, insn);
4578 }
4579
4580 +#ifdef CONFIG_PAX_PAGEEXEC
4581 +#ifdef CONFIG_PAX_DLRESOLVE
4582 +static void pax_emuplt_close(struct vm_area_struct *vma)
4583 +{
4584 + vma->vm_mm->call_dl_resolve = 0UL;
4585 +}
4586 +
4587 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4588 +{
4589 + unsigned int *kaddr;
4590 +
4591 + vmf->page = alloc_page(GFP_HIGHUSER);
4592 + if (!vmf->page)
4593 + return VM_FAULT_OOM;
4594 +
4595 + kaddr = kmap(vmf->page);
4596 + memset(kaddr, 0, PAGE_SIZE);
4597 + kaddr[0] = 0x9DE3BFA8U; /* save */
4598 + flush_dcache_page(vmf->page);
4599 + kunmap(vmf->page);
4600 + return VM_FAULT_MAJOR;
4601 +}
4602 +
4603 +static const struct vm_operations_struct pax_vm_ops = {
4604 + .close = pax_emuplt_close,
4605 + .fault = pax_emuplt_fault
4606 +};
4607 +
4608 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4609 +{
4610 + int ret;
4611 +
4612 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4613 + vma->vm_mm = current->mm;
4614 + vma->vm_start = addr;
4615 + vma->vm_end = addr + PAGE_SIZE;
4616 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4617 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4618 + vma->vm_ops = &pax_vm_ops;
4619 +
4620 + ret = insert_vm_struct(current->mm, vma);
4621 + if (ret)
4622 + return ret;
4623 +
4624 + ++current->mm->total_vm;
4625 + return 0;
4626 +}
4627 +#endif
4628 +
4629 +/*
4630 + * PaX: decide what to do with offenders (regs->pc = fault address)
4631 + *
4632 + * returns 1 when task should be killed
4633 + * 2 when patched PLT trampoline was detected
4634 + * 3 when unpatched PLT trampoline was detected
4635 + */
4636 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4637 +{
4638 +
4639 +#ifdef CONFIG_PAX_EMUPLT
4640 + int err;
4641 +
4642 + do { /* PaX: patched PLT emulation #1 */
4643 + unsigned int sethi1, sethi2, jmpl;
4644 +
4645 + err = get_user(sethi1, (unsigned int *)regs->pc);
4646 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4647 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4648 +
4649 + if (err)
4650 + break;
4651 +
4652 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4653 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4654 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4655 + {
4656 + unsigned int addr;
4657 +
4658 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4659 + addr = regs->u_regs[UREG_G1];
4660 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4661 + regs->pc = addr;
4662 + regs->npc = addr+4;
4663 + return 2;
4664 + }
4665 + } while (0);
4666 +
4667 + { /* PaX: patched PLT emulation #2 */
4668 + unsigned int ba;
4669 +
4670 + err = get_user(ba, (unsigned int *)regs->pc);
4671 +
4672 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4673 + unsigned int addr;
4674 +
4675 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4676 + regs->pc = addr;
4677 + regs->npc = addr+4;
4678 + return 2;
4679 + }
4680 + }
4681 +
4682 + do { /* PaX: patched PLT emulation #3 */
4683 + unsigned int sethi, jmpl, nop;
4684 +
4685 + err = get_user(sethi, (unsigned int *)regs->pc);
4686 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4687 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4688 +
4689 + if (err)
4690 + break;
4691 +
4692 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4693 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4694 + nop == 0x01000000U)
4695 + {
4696 + unsigned int addr;
4697 +
4698 + addr = (sethi & 0x003FFFFFU) << 10;
4699 + regs->u_regs[UREG_G1] = addr;
4700 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4701 + regs->pc = addr;
4702 + regs->npc = addr+4;
4703 + return 2;
4704 + }
4705 + } while (0);
4706 +
4707 + do { /* PaX: unpatched PLT emulation step 1 */
4708 + unsigned int sethi, ba, nop;
4709 +
4710 + err = get_user(sethi, (unsigned int *)regs->pc);
4711 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4712 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4713 +
4714 + if (err)
4715 + break;
4716 +
4717 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4718 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4719 + nop == 0x01000000U)
4720 + {
4721 + unsigned int addr, save, call;
4722 +
4723 + if ((ba & 0xFFC00000U) == 0x30800000U)
4724 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4725 + else
4726 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4727 +
4728 + err = get_user(save, (unsigned int *)addr);
4729 + err |= get_user(call, (unsigned int *)(addr+4));
4730 + err |= get_user(nop, (unsigned int *)(addr+8));
4731 + if (err)
4732 + break;
4733 +
4734 +#ifdef CONFIG_PAX_DLRESOLVE
4735 + if (save == 0x9DE3BFA8U &&
4736 + (call & 0xC0000000U) == 0x40000000U &&
4737 + nop == 0x01000000U)
4738 + {
4739 + struct vm_area_struct *vma;
4740 + unsigned long call_dl_resolve;
4741 +
4742 + down_read(&current->mm->mmap_sem);
4743 + call_dl_resolve = current->mm->call_dl_resolve;
4744 + up_read(&current->mm->mmap_sem);
4745 + if (likely(call_dl_resolve))
4746 + goto emulate;
4747 +
4748 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4749 +
4750 + down_write(&current->mm->mmap_sem);
4751 + if (current->mm->call_dl_resolve) {
4752 + call_dl_resolve = current->mm->call_dl_resolve;
4753 + up_write(&current->mm->mmap_sem);
4754 + if (vma)
4755 + kmem_cache_free(vm_area_cachep, vma);
4756 + goto emulate;
4757 + }
4758 +
4759 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4760 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4761 + up_write(&current->mm->mmap_sem);
4762 + if (vma)
4763 + kmem_cache_free(vm_area_cachep, vma);
4764 + return 1;
4765 + }
4766 +
4767 + if (pax_insert_vma(vma, call_dl_resolve)) {
4768 + up_write(&current->mm->mmap_sem);
4769 + kmem_cache_free(vm_area_cachep, vma);
4770 + return 1;
4771 + }
4772 +
4773 + current->mm->call_dl_resolve = call_dl_resolve;
4774 + up_write(&current->mm->mmap_sem);
4775 +
4776 +emulate:
4777 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4778 + regs->pc = call_dl_resolve;
4779 + regs->npc = addr+4;
4780 + return 3;
4781 + }
4782 +#endif
4783 +
4784 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4785 + if ((save & 0xFFC00000U) == 0x05000000U &&
4786 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4787 + nop == 0x01000000U)
4788 + {
4789 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4790 + regs->u_regs[UREG_G2] = addr + 4;
4791 + addr = (save & 0x003FFFFFU) << 10;
4792 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4793 + regs->pc = addr;
4794 + regs->npc = addr+4;
4795 + return 3;
4796 + }
4797 + }
4798 + } while (0);
4799 +
4800 + do { /* PaX: unpatched PLT emulation step 2 */
4801 + unsigned int save, call, nop;
4802 +
4803 + err = get_user(save, (unsigned int *)(regs->pc-4));
4804 + err |= get_user(call, (unsigned int *)regs->pc);
4805 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4806 + if (err)
4807 + break;
4808 +
4809 + if (save == 0x9DE3BFA8U &&
4810 + (call & 0xC0000000U) == 0x40000000U &&
4811 + nop == 0x01000000U)
4812 + {
4813 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4814 +
4815 + regs->u_regs[UREG_RETPC] = regs->pc;
4816 + regs->pc = dl_resolve;
4817 + regs->npc = dl_resolve+4;
4818 + return 3;
4819 + }
4820 + } while (0);
4821 +#endif
4822 +
4823 + return 1;
4824 +}
4825 +
4826 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4827 +{
4828 + unsigned long i;
4829 +
4830 + printk(KERN_ERR "PAX: bytes at PC: ");
4831 + for (i = 0; i < 8; i++) {
4832 + unsigned int c;
4833 + if (get_user(c, (unsigned int *)pc+i))
4834 + printk(KERN_CONT "???????? ");
4835 + else
4836 + printk(KERN_CONT "%08x ", c);
4837 + }
4838 + printk("\n");
4839 +}
4840 +#endif
4841 +
4842 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4843 int text_fault)
4844 {
4845 @@ -281,6 +546,24 @@ good_area:
4846 if(!(vma->vm_flags & VM_WRITE))
4847 goto bad_area;
4848 } else {
4849 +
4850 +#ifdef CONFIG_PAX_PAGEEXEC
4851 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4852 + up_read(&mm->mmap_sem);
4853 + switch (pax_handle_fetch_fault(regs)) {
4854 +
4855 +#ifdef CONFIG_PAX_EMUPLT
4856 + case 2:
4857 + case 3:
4858 + return;
4859 +#endif
4860 +
4861 + }
4862 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4863 + do_group_exit(SIGKILL);
4864 + }
4865 +#endif
4866 +
4867 /* Allow reads even for write-only mappings */
4868 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4869 goto bad_area;
4870 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4871 index 504c062..6fcb9c6 100644
4872 --- a/arch/sparc/mm/fault_64.c
4873 +++ b/arch/sparc/mm/fault_64.c
4874 @@ -21,6 +21,9 @@
4875 #include <linux/kprobes.h>
4876 #include <linux/kdebug.h>
4877 #include <linux/percpu.h>
4878 +#include <linux/slab.h>
4879 +#include <linux/pagemap.h>
4880 +#include <linux/compiler.h>
4881
4882 #include <asm/page.h>
4883 #include <asm/pgtable.h>
4884 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4885 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4886 regs->tpc);
4887 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4888 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4889 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4890 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4891 dump_stack();
4892 unhandled_fault(regs->tpc, current, regs);
4893 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4894 show_regs(regs);
4895 }
4896
4897 +#ifdef CONFIG_PAX_PAGEEXEC
4898 +#ifdef CONFIG_PAX_DLRESOLVE
4899 +static void pax_emuplt_close(struct vm_area_struct *vma)
4900 +{
4901 + vma->vm_mm->call_dl_resolve = 0UL;
4902 +}
4903 +
4904 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4905 +{
4906 + unsigned int *kaddr;
4907 +
4908 + vmf->page = alloc_page(GFP_HIGHUSER);
4909 + if (!vmf->page)
4910 + return VM_FAULT_OOM;
4911 +
4912 + kaddr = kmap(vmf->page);
4913 + memset(kaddr, 0, PAGE_SIZE);
4914 + kaddr[0] = 0x9DE3BFA8U; /* save */
4915 + flush_dcache_page(vmf->page);
4916 + kunmap(vmf->page);
4917 + return VM_FAULT_MAJOR;
4918 +}
4919 +
4920 +static const struct vm_operations_struct pax_vm_ops = {
4921 + .close = pax_emuplt_close,
4922 + .fault = pax_emuplt_fault
4923 +};
4924 +
4925 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4926 +{
4927 + int ret;
4928 +
4929 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4930 + vma->vm_mm = current->mm;
4931 + vma->vm_start = addr;
4932 + vma->vm_end = addr + PAGE_SIZE;
4933 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4934 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4935 + vma->vm_ops = &pax_vm_ops;
4936 +
4937 + ret = insert_vm_struct(current->mm, vma);
4938 + if (ret)
4939 + return ret;
4940 +
4941 + ++current->mm->total_vm;
4942 + return 0;
4943 +}
4944 +#endif
4945 +
4946 +/*
4947 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4948 + *
4949 + * returns 1 when task should be killed
4950 + * 2 when patched PLT trampoline was detected
4951 + * 3 when unpatched PLT trampoline was detected
4952 + */
4953 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4954 +{
4955 +
4956 +#ifdef CONFIG_PAX_EMUPLT
4957 + int err;
4958 +
4959 + do { /* PaX: patched PLT emulation #1 */
4960 + unsigned int sethi1, sethi2, jmpl;
4961 +
4962 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4963 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4964 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4965 +
4966 + if (err)
4967 + break;
4968 +
4969 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4970 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4971 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4972 + {
4973 + unsigned long addr;
4974 +
4975 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4976 + addr = regs->u_regs[UREG_G1];
4977 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4978 +
4979 + if (test_thread_flag(TIF_32BIT))
4980 + addr &= 0xFFFFFFFFUL;
4981 +
4982 + regs->tpc = addr;
4983 + regs->tnpc = addr+4;
4984 + return 2;
4985 + }
4986 + } while (0);
4987 +
4988 + { /* PaX: patched PLT emulation #2 */
4989 + unsigned int ba;
4990 +
4991 + err = get_user(ba, (unsigned int *)regs->tpc);
4992 +
4993 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4994 + unsigned long addr;
4995 +
4996 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4997 +
4998 + if (test_thread_flag(TIF_32BIT))
4999 + addr &= 0xFFFFFFFFUL;
5000 +
5001 + regs->tpc = addr;
5002 + regs->tnpc = addr+4;
5003 + return 2;
5004 + }
5005 + }
5006 +
5007 + do { /* PaX: patched PLT emulation #3 */
5008 + unsigned int sethi, jmpl, nop;
5009 +
5010 + err = get_user(sethi, (unsigned int *)regs->tpc);
5011 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5012 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5013 +
5014 + if (err)
5015 + break;
5016 +
5017 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5018 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5019 + nop == 0x01000000U)
5020 + {
5021 + unsigned long addr;
5022 +
5023 + addr = (sethi & 0x003FFFFFU) << 10;
5024 + regs->u_regs[UREG_G1] = addr;
5025 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5026 +
5027 + if (test_thread_flag(TIF_32BIT))
5028 + addr &= 0xFFFFFFFFUL;
5029 +
5030 + regs->tpc = addr;
5031 + regs->tnpc = addr+4;
5032 + return 2;
5033 + }
5034 + } while (0);
5035 +
5036 + do { /* PaX: patched PLT emulation #4 */
5037 + unsigned int sethi, mov1, call, mov2;
5038 +
5039 + err = get_user(sethi, (unsigned int *)regs->tpc);
5040 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5041 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5042 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5043 +
5044 + if (err)
5045 + break;
5046 +
5047 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5048 + mov1 == 0x8210000FU &&
5049 + (call & 0xC0000000U) == 0x40000000U &&
5050 + mov2 == 0x9E100001U)
5051 + {
5052 + unsigned long addr;
5053 +
5054 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5055 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5056 +
5057 + if (test_thread_flag(TIF_32BIT))
5058 + addr &= 0xFFFFFFFFUL;
5059 +
5060 + regs->tpc = addr;
5061 + regs->tnpc = addr+4;
5062 + return 2;
5063 + }
5064 + } while (0);
5065 +
5066 + do { /* PaX: patched PLT emulation #5 */
5067 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5068 +
5069 + err = get_user(sethi, (unsigned int *)regs->tpc);
5070 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5071 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5072 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5073 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5074 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5075 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5076 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5077 +
5078 + if (err)
5079 + break;
5080 +
5081 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5082 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5083 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5084 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5085 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5086 + sllx == 0x83287020U &&
5087 + jmpl == 0x81C04005U &&
5088 + nop == 0x01000000U)
5089 + {
5090 + unsigned long addr;
5091 +
5092 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5093 + regs->u_regs[UREG_G1] <<= 32;
5094 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5095 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5096 + regs->tpc = addr;
5097 + regs->tnpc = addr+4;
5098 + return 2;
5099 + }
5100 + } while (0);
5101 +
5102 + do { /* PaX: patched PLT emulation #6 */
5103 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5104 +
5105 + err = get_user(sethi, (unsigned int *)regs->tpc);
5106 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5107 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5108 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5109 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5110 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5111 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5112 +
5113 + if (err)
5114 + break;
5115 +
5116 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5117 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5118 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5119 + sllx == 0x83287020U &&
5120 + (or & 0xFFFFE000U) == 0x8A116000U &&
5121 + jmpl == 0x81C04005U &&
5122 + nop == 0x01000000U)
5123 + {
5124 + unsigned long addr;
5125 +
5126 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5127 + regs->u_regs[UREG_G1] <<= 32;
5128 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5129 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5130 + regs->tpc = addr;
5131 + regs->tnpc = addr+4;
5132 + return 2;
5133 + }
5134 + } while (0);
5135 +
5136 + do { /* PaX: unpatched PLT emulation step 1 */
5137 + unsigned int sethi, ba, nop;
5138 +
5139 + err = get_user(sethi, (unsigned int *)regs->tpc);
5140 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5141 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5142 +
5143 + if (err)
5144 + break;
5145 +
5146 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5147 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5148 + nop == 0x01000000U)
5149 + {
5150 + unsigned long addr;
5151 + unsigned int save, call;
5152 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5153 +
5154 + if ((ba & 0xFFC00000U) == 0x30800000U)
5155 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5156 + else
5157 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5158 +
5159 + if (test_thread_flag(TIF_32BIT))
5160 + addr &= 0xFFFFFFFFUL;
5161 +
5162 + err = get_user(save, (unsigned int *)addr);
5163 + err |= get_user(call, (unsigned int *)(addr+4));
5164 + err |= get_user(nop, (unsigned int *)(addr+8));
5165 + if (err)
5166 + break;
5167 +
5168 +#ifdef CONFIG_PAX_DLRESOLVE
5169 + if (save == 0x9DE3BFA8U &&
5170 + (call & 0xC0000000U) == 0x40000000U &&
5171 + nop == 0x01000000U)
5172 + {
5173 + struct vm_area_struct *vma;
5174 + unsigned long call_dl_resolve;
5175 +
5176 + down_read(&current->mm->mmap_sem);
5177 + call_dl_resolve = current->mm->call_dl_resolve;
5178 + up_read(&current->mm->mmap_sem);
5179 + if (likely(call_dl_resolve))
5180 + goto emulate;
5181 +
5182 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5183 +
5184 + down_write(&current->mm->mmap_sem);
5185 + if (current->mm->call_dl_resolve) {
5186 + call_dl_resolve = current->mm->call_dl_resolve;
5187 + up_write(&current->mm->mmap_sem);
5188 + if (vma)
5189 + kmem_cache_free(vm_area_cachep, vma);
5190 + goto emulate;
5191 + }
5192 +
5193 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5194 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5195 + up_write(&current->mm->mmap_sem);
5196 + if (vma)
5197 + kmem_cache_free(vm_area_cachep, vma);
5198 + return 1;
5199 + }
5200 +
5201 + if (pax_insert_vma(vma, call_dl_resolve)) {
5202 + up_write(&current->mm->mmap_sem);
5203 + kmem_cache_free(vm_area_cachep, vma);
5204 + return 1;
5205 + }
5206 +
5207 + current->mm->call_dl_resolve = call_dl_resolve;
5208 + up_write(&current->mm->mmap_sem);
5209 +
5210 +emulate:
5211 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5212 + regs->tpc = call_dl_resolve;
5213 + regs->tnpc = addr+4;
5214 + return 3;
5215 + }
5216 +#endif
5217 +
5218 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5219 + if ((save & 0xFFC00000U) == 0x05000000U &&
5220 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5221 + nop == 0x01000000U)
5222 + {
5223 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5224 + regs->u_regs[UREG_G2] = addr + 4;
5225 + addr = (save & 0x003FFFFFU) << 10;
5226 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5227 +
5228 + if (test_thread_flag(TIF_32BIT))
5229 + addr &= 0xFFFFFFFFUL;
5230 +
5231 + regs->tpc = addr;
5232 + regs->tnpc = addr+4;
5233 + return 3;
5234 + }
5235 +
5236 + /* PaX: 64-bit PLT stub */
5237 + err = get_user(sethi1, (unsigned int *)addr);
5238 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5239 + err |= get_user(or1, (unsigned int *)(addr+8));
5240 + err |= get_user(or2, (unsigned int *)(addr+12));
5241 + err |= get_user(sllx, (unsigned int *)(addr+16));
5242 + err |= get_user(add, (unsigned int *)(addr+20));
5243 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5244 + err |= get_user(nop, (unsigned int *)(addr+28));
5245 + if (err)
5246 + break;
5247 +
5248 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5249 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5250 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5251 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5252 + sllx == 0x89293020U &&
5253 + add == 0x8A010005U &&
5254 + jmpl == 0x89C14000U &&
5255 + nop == 0x01000000U)
5256 + {
5257 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5258 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5259 + regs->u_regs[UREG_G4] <<= 32;
5260 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5261 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5262 + regs->u_regs[UREG_G4] = addr + 24;
5263 + addr = regs->u_regs[UREG_G5];
5264 + regs->tpc = addr;
5265 + regs->tnpc = addr+4;
5266 + return 3;
5267 + }
5268 + }
5269 + } while (0);
5270 +
5271 +#ifdef CONFIG_PAX_DLRESOLVE
5272 + do { /* PaX: unpatched PLT emulation step 2 */
5273 + unsigned int save, call, nop;
5274 +
5275 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5276 + err |= get_user(call, (unsigned int *)regs->tpc);
5277 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5278 + if (err)
5279 + break;
5280 +
5281 + if (save == 0x9DE3BFA8U &&
5282 + (call & 0xC0000000U) == 0x40000000U &&
5283 + nop == 0x01000000U)
5284 + {
5285 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5286 +
5287 + if (test_thread_flag(TIF_32BIT))
5288 + dl_resolve &= 0xFFFFFFFFUL;
5289 +
5290 + regs->u_regs[UREG_RETPC] = regs->tpc;
5291 + regs->tpc = dl_resolve;
5292 + regs->tnpc = dl_resolve+4;
5293 + return 3;
5294 + }
5295 + } while (0);
5296 +#endif
5297 +
5298 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5299 + unsigned int sethi, ba, nop;
5300 +
5301 + err = get_user(sethi, (unsigned int *)regs->tpc);
5302 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5303 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5304 +
5305 + if (err)
5306 + break;
5307 +
5308 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5309 + (ba & 0xFFF00000U) == 0x30600000U &&
5310 + nop == 0x01000000U)
5311 + {
5312 + unsigned long addr;
5313 +
5314 + addr = (sethi & 0x003FFFFFU) << 10;
5315 + regs->u_regs[UREG_G1] = addr;
5316 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5317 +
5318 + if (test_thread_flag(TIF_32BIT))
5319 + addr &= 0xFFFFFFFFUL;
5320 +
5321 + regs->tpc = addr;
5322 + regs->tnpc = addr+4;
5323 + return 2;
5324 + }
5325 + } while (0);
5326 +
5327 +#endif
5328 +
5329 + return 1;
5330 +}
5331 +
5332 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5333 +{
5334 + unsigned long i;
5335 +
5336 + printk(KERN_ERR "PAX: bytes at PC: ");
5337 + for (i = 0; i < 8; i++) {
5338 + unsigned int c;
5339 + if (get_user(c, (unsigned int *)pc+i))
5340 + printk(KERN_CONT "???????? ");
5341 + else
5342 + printk(KERN_CONT "%08x ", c);
5343 + }
5344 + printk("\n");
5345 +}
5346 +#endif
5347 +
5348 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5349 {
5350 struct mm_struct *mm = current->mm;
5351 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5352 if (!vma)
5353 goto bad_area;
5354
5355 +#ifdef CONFIG_PAX_PAGEEXEC
5356 + /* PaX: detect ITLB misses on non-exec pages */
5357 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5358 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5359 + {
5360 + if (address != regs->tpc)
5361 + goto good_area;
5362 +
5363 + up_read(&mm->mmap_sem);
5364 + switch (pax_handle_fetch_fault(regs)) {
5365 +
5366 +#ifdef CONFIG_PAX_EMUPLT
5367 + case 2:
5368 + case 3:
5369 + return;
5370 +#endif
5371 +
5372 + }
5373 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5374 + do_group_exit(SIGKILL);
5375 + }
5376 +#endif
5377 +
5378 /* Pure DTLB misses do not tell us whether the fault causing
5379 * load/store/atomic was a write or not, it only says that there
5380 * was no match. So in such a case we (carefully) read the
5381 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5382 index f4e9764..5682724 100644
5383 --- a/arch/sparc/mm/hugetlbpage.c
5384 +++ b/arch/sparc/mm/hugetlbpage.c
5385 @@ -68,7 +68,7 @@ full_search:
5386 }
5387 return -ENOMEM;
5388 }
5389 - if (likely(!vma || addr + len <= vma->vm_start)) {
5390 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5391 /*
5392 * Remember the place where we stopped the search:
5393 */
5394 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5395 /* make sure it can fit in the remaining address space */
5396 if (likely(addr > len)) {
5397 vma = find_vma(mm, addr-len);
5398 - if (!vma || addr <= vma->vm_start) {
5399 + if (check_heap_stack_gap(vma, addr - len, len)) {
5400 /* remember the address as a hint for next time */
5401 return (mm->free_area_cache = addr-len);
5402 }
5403 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5404 if (unlikely(mm->mmap_base < len))
5405 goto bottomup;
5406
5407 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5408 + addr = mm->mmap_base - len;
5409
5410 do {
5411 + addr &= HPAGE_MASK;
5412 /*
5413 * Lookup failure means no vma is above this address,
5414 * else if new region fits below vma->vm_start,
5415 * return with success:
5416 */
5417 vma = find_vma(mm, addr);
5418 - if (likely(!vma || addr+len <= vma->vm_start)) {
5419 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5420 /* remember the address as a hint for next time */
5421 return (mm->free_area_cache = addr);
5422 }
5423 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5424 mm->cached_hole_size = vma->vm_start - addr;
5425
5426 /* try just below the current vma->vm_start */
5427 - addr = (vma->vm_start-len) & HPAGE_MASK;
5428 - } while (likely(len < vma->vm_start));
5429 + addr = skip_heap_stack_gap(vma, len);
5430 + } while (!IS_ERR_VALUE(addr));
5431
5432 bottomup:
5433 /*
5434 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5435 if (addr) {
5436 addr = ALIGN(addr, HPAGE_SIZE);
5437 vma = find_vma(mm, addr);
5438 - if (task_size - len >= addr &&
5439 - (!vma || addr + len <= vma->vm_start))
5440 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5441 return addr;
5442 }
5443 if (mm->get_unmapped_area == arch_get_unmapped_area)
5444 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5445 index 7b00de6..78239f4 100644
5446 --- a/arch/sparc/mm/init_32.c
5447 +++ b/arch/sparc/mm/init_32.c
5448 @@ -316,6 +316,9 @@ extern void device_scan(void);
5449 pgprot_t PAGE_SHARED __read_mostly;
5450 EXPORT_SYMBOL(PAGE_SHARED);
5451
5452 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5453 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5454 +
5455 void __init paging_init(void)
5456 {
5457 switch(sparc_cpu_model) {
5458 @@ -344,17 +347,17 @@ void __init paging_init(void)
5459
5460 /* Initialize the protection map with non-constant, MMU dependent values. */
5461 protection_map[0] = PAGE_NONE;
5462 - protection_map[1] = PAGE_READONLY;
5463 - protection_map[2] = PAGE_COPY;
5464 - protection_map[3] = PAGE_COPY;
5465 + protection_map[1] = PAGE_READONLY_NOEXEC;
5466 + protection_map[2] = PAGE_COPY_NOEXEC;
5467 + protection_map[3] = PAGE_COPY_NOEXEC;
5468 protection_map[4] = PAGE_READONLY;
5469 protection_map[5] = PAGE_READONLY;
5470 protection_map[6] = PAGE_COPY;
5471 protection_map[7] = PAGE_COPY;
5472 protection_map[8] = PAGE_NONE;
5473 - protection_map[9] = PAGE_READONLY;
5474 - protection_map[10] = PAGE_SHARED;
5475 - protection_map[11] = PAGE_SHARED;
5476 + protection_map[9] = PAGE_READONLY_NOEXEC;
5477 + protection_map[10] = PAGE_SHARED_NOEXEC;
5478 + protection_map[11] = PAGE_SHARED_NOEXEC;
5479 protection_map[12] = PAGE_READONLY;
5480 protection_map[13] = PAGE_READONLY;
5481 protection_map[14] = PAGE_SHARED;
5482 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5483 index cbef74e..c38fead 100644
5484 --- a/arch/sparc/mm/srmmu.c
5485 +++ b/arch/sparc/mm/srmmu.c
5486 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5487 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5488 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5489 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5490 +
5491 +#ifdef CONFIG_PAX_PAGEEXEC
5492 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5493 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5494 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5495 +#endif
5496 +
5497 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5498 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5499
5500 diff --git a/arch/um/Makefile b/arch/um/Makefile
5501 index c0f712c..3a5c4c9 100644
5502 --- a/arch/um/Makefile
5503 +++ b/arch/um/Makefile
5504 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5505 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5506 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5507
5508 +ifdef CONSTIFY_PLUGIN
5509 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5510 +endif
5511 +
5512 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5513
5514 #This will adjust *FLAGS accordingly to the platform.
5515 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5516 index 6c03acd..a5e0215 100644
5517 --- a/arch/um/include/asm/kmap_types.h
5518 +++ b/arch/um/include/asm/kmap_types.h
5519 @@ -23,6 +23,7 @@ enum km_type {
5520 KM_IRQ1,
5521 KM_SOFTIRQ0,
5522 KM_SOFTIRQ1,
5523 + KM_CLEARPAGE,
5524 KM_TYPE_NR
5525 };
5526
5527 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5528 index 4cc9b6c..02e5029 100644
5529 --- a/arch/um/include/asm/page.h
5530 +++ b/arch/um/include/asm/page.h
5531 @@ -14,6 +14,9 @@
5532 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5533 #define PAGE_MASK (~(PAGE_SIZE-1))
5534
5535 +#define ktla_ktva(addr) (addr)
5536 +#define ktva_ktla(addr) (addr)
5537 +
5538 #ifndef __ASSEMBLY__
5539
5540 struct page;
5541 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5542 index 21c1ae7..4640aaa 100644
5543 --- a/arch/um/kernel/process.c
5544 +++ b/arch/um/kernel/process.c
5545 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5546 return 2;
5547 }
5548
5549 -/*
5550 - * Only x86 and x86_64 have an arch_align_stack().
5551 - * All other arches have "#define arch_align_stack(x) (x)"
5552 - * in their asm/system.h
5553 - * As this is included in UML from asm-um/system-generic.h,
5554 - * we can use it to behave as the subarch does.
5555 - */
5556 -#ifndef arch_align_stack
5557 -unsigned long arch_align_stack(unsigned long sp)
5558 -{
5559 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5560 - sp -= get_random_int() % 8192;
5561 - return sp & ~0xf;
5562 -}
5563 -#endif
5564 -
5565 unsigned long get_wchan(struct task_struct *p)
5566 {
5567 unsigned long stack_page, sp, ip;
5568 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
5569 index d1b93c4..ae1b7fd 100644
5570 --- a/arch/um/sys-i386/shared/sysdep/system.h
5571 +++ b/arch/um/sys-i386/shared/sysdep/system.h
5572 @@ -17,7 +17,7 @@
5573 # define AT_VECTOR_SIZE_ARCH 1
5574 #endif
5575
5576 -extern unsigned long arch_align_stack(unsigned long sp);
5577 +#define arch_align_stack(x) ((x) & ~0xfUL)
5578
5579 void default_idle(void);
5580
5581 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
5582 index 70ca357..728d1cc 100644
5583 --- a/arch/um/sys-i386/syscalls.c
5584 +++ b/arch/um/sys-i386/syscalls.c
5585 @@ -11,6 +11,21 @@
5586 #include "asm/uaccess.h"
5587 #include "asm/unistd.h"
5588
5589 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5590 +{
5591 + unsigned long pax_task_size = TASK_SIZE;
5592 +
5593 +#ifdef CONFIG_PAX_SEGMEXEC
5594 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5595 + pax_task_size = SEGMEXEC_TASK_SIZE;
5596 +#endif
5597 +
5598 + if (len > pax_task_size || addr > pax_task_size - len)
5599 + return -EINVAL;
5600 +
5601 + return 0;
5602 +}
5603 +
5604 /*
5605 * The prototype on i386 is:
5606 *
5607 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
5608 index d1b93c4..ae1b7fd 100644
5609 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
5610 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
5611 @@ -17,7 +17,7 @@
5612 # define AT_VECTOR_SIZE_ARCH 1
5613 #endif
5614
5615 -extern unsigned long arch_align_stack(unsigned long sp);
5616 +#define arch_align_stack(x) ((x) & ~0xfUL)
5617
5618 void default_idle(void);
5619
5620 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5621 index 6a47bb2..dc9a868 100644
5622 --- a/arch/x86/Kconfig
5623 +++ b/arch/x86/Kconfig
5624 @@ -236,7 +236,7 @@ config X86_HT
5625
5626 config X86_32_LAZY_GS
5627 def_bool y
5628 - depends on X86_32 && !CC_STACKPROTECTOR
5629 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5630
5631 config ARCH_HWEIGHT_CFLAGS
5632 string
5633 @@ -1019,7 +1019,7 @@ choice
5634
5635 config NOHIGHMEM
5636 bool "off"
5637 - depends on !X86_NUMAQ
5638 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5639 ---help---
5640 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5641 However, the address space of 32-bit x86 processors is only 4
5642 @@ -1056,7 +1056,7 @@ config NOHIGHMEM
5643
5644 config HIGHMEM4G
5645 bool "4GB"
5646 - depends on !X86_NUMAQ
5647 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5648 ---help---
5649 Select this if you have a 32-bit processor and between 1 and 4
5650 gigabytes of physical RAM.
5651 @@ -1110,7 +1110,7 @@ config PAGE_OFFSET
5652 hex
5653 default 0xB0000000 if VMSPLIT_3G_OPT
5654 default 0x80000000 if VMSPLIT_2G
5655 - default 0x78000000 if VMSPLIT_2G_OPT
5656 + default 0x70000000 if VMSPLIT_2G_OPT
5657 default 0x40000000 if VMSPLIT_1G
5658 default 0xC0000000
5659 depends on X86_32
5660 @@ -1484,6 +1484,7 @@ config SECCOMP
5661
5662 config CC_STACKPROTECTOR
5663 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5664 + depends on X86_64 || !PAX_MEMORY_UDEREF
5665 ---help---
5666 This option turns on the -fstack-protector GCC feature. This
5667 feature puts, at the beginning of functions, a canary value on
5668 @@ -1541,6 +1542,7 @@ config KEXEC_JUMP
5669 config PHYSICAL_START
5670 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5671 default "0x1000000"
5672 + range 0x400000 0x40000000
5673 ---help---
5674 This gives the physical address where the kernel is loaded.
5675
5676 @@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
5677 config PHYSICAL_ALIGN
5678 hex "Alignment value to which kernel should be aligned" if X86_32
5679 default "0x1000000"
5680 + range 0x400000 0x1000000 if PAX_KERNEXEC
5681 range 0x2000 0x1000000
5682 ---help---
5683 This value puts the alignment restrictions on physical address
5684 @@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
5685 Say N if you want to disable CPU hotplug.
5686
5687 config COMPAT_VDSO
5688 - def_bool y
5689 + def_bool n
5690 prompt "Compat VDSO support"
5691 depends on X86_32 || IA32_EMULATION
5692 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5693 ---help---
5694 Map the 32-bit VDSO to the predictable old-style address too.
5695
5696 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5697 index e3ca7e0..b30b28a 100644
5698 --- a/arch/x86/Kconfig.cpu
5699 +++ b/arch/x86/Kconfig.cpu
5700 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5701
5702 config X86_F00F_BUG
5703 def_bool y
5704 - depends on M586MMX || M586TSC || M586 || M486 || M386
5705 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5706
5707 config X86_INVD_BUG
5708 def_bool y
5709 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5710
5711 config X86_ALIGNMENT_16
5712 def_bool y
5713 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5714 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5715
5716 config X86_INTEL_USERCOPY
5717 def_bool y
5718 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5719 # generates cmov.
5720 config X86_CMOV
5721 def_bool y
5722 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5723 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5724
5725 config X86_MINIMUM_CPU_FAMILY
5726 int
5727 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5728 index c0f8a5c..6404f61 100644
5729 --- a/arch/x86/Kconfig.debug
5730 +++ b/arch/x86/Kconfig.debug
5731 @@ -81,7 +81,7 @@ config X86_PTDUMP
5732 config DEBUG_RODATA
5733 bool "Write protect kernel read-only data structures"
5734 default y
5735 - depends on DEBUG_KERNEL
5736 + depends on DEBUG_KERNEL && BROKEN
5737 ---help---
5738 Mark the kernel read-only data as write-protected in the pagetables,
5739 in order to catch accidental (and incorrect) writes to such const
5740 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5741
5742 config DEBUG_SET_MODULE_RONX
5743 bool "Set loadable kernel module data as NX and text as RO"
5744 - depends on MODULES
5745 + depends on MODULES && BROKEN
5746 ---help---
5747 This option helps catch unintended modifications to loadable
5748 kernel module's text and read-only data. It also prevents execution
5749 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5750 index b02e509..2631e48 100644
5751 --- a/arch/x86/Makefile
5752 +++ b/arch/x86/Makefile
5753 @@ -46,6 +46,7 @@ else
5754 UTS_MACHINE := x86_64
5755 CHECKFLAGS += -D__x86_64__ -m64
5756
5757 + biarch := $(call cc-option,-m64)
5758 KBUILD_AFLAGS += -m64
5759 KBUILD_CFLAGS += -m64
5760
5761 @@ -195,3 +196,12 @@ define archhelp
5762 echo ' FDARGS="..." arguments for the booted kernel'
5763 echo ' FDINITRD=file initrd for the booted kernel'
5764 endef
5765 +
5766 +define OLD_LD
5767 +
5768 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5769 +*** Please upgrade your binutils to 2.18 or newer
5770 +endef
5771 +
5772 +archprepare:
5773 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5774 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5775 index 95365a8..52f857b 100644
5776 --- a/arch/x86/boot/Makefile
5777 +++ b/arch/x86/boot/Makefile
5778 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5779 $(call cc-option, -fno-stack-protector) \
5780 $(call cc-option, -mpreferred-stack-boundary=2)
5781 KBUILD_CFLAGS += $(call cc-option, -m32)
5782 +ifdef CONSTIFY_PLUGIN
5783 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5784 +endif
5785 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5786 GCOV_PROFILE := n
5787
5788 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5789 index 878e4b9..20537ab 100644
5790 --- a/arch/x86/boot/bitops.h
5791 +++ b/arch/x86/boot/bitops.h
5792 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5793 u8 v;
5794 const u32 *p = (const u32 *)addr;
5795
5796 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5797 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5798 return v;
5799 }
5800
5801 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5802
5803 static inline void set_bit(int nr, void *addr)
5804 {
5805 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5806 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5807 }
5808
5809 #endif /* BOOT_BITOPS_H */
5810 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5811 index c7093bd..d4247ffe0 100644
5812 --- a/arch/x86/boot/boot.h
5813 +++ b/arch/x86/boot/boot.h
5814 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5815 static inline u16 ds(void)
5816 {
5817 u16 seg;
5818 - asm("movw %%ds,%0" : "=rm" (seg));
5819 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5820 return seg;
5821 }
5822
5823 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5824 static inline int memcmp(const void *s1, const void *s2, size_t len)
5825 {
5826 u8 diff;
5827 - asm("repe; cmpsb; setnz %0"
5828 + asm volatile("repe; cmpsb; setnz %0"
5829 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5830 return diff;
5831 }
5832 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5833 index 09664ef..edc5d03 100644
5834 --- a/arch/x86/boot/compressed/Makefile
5835 +++ b/arch/x86/boot/compressed/Makefile
5836 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5837 KBUILD_CFLAGS += $(cflags-y)
5838 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5839 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5840 +ifdef CONSTIFY_PLUGIN
5841 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5842 +endif
5843
5844 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5845 GCOV_PROFILE := n
5846 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5847 index 67a655a..b924059 100644
5848 --- a/arch/x86/boot/compressed/head_32.S
5849 +++ b/arch/x86/boot/compressed/head_32.S
5850 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5851 notl %eax
5852 andl %eax, %ebx
5853 #else
5854 - movl $LOAD_PHYSICAL_ADDR, %ebx
5855 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5856 #endif
5857
5858 /* Target address to relocate to for decompression */
5859 @@ -162,7 +162,7 @@ relocated:
5860 * and where it was actually loaded.
5861 */
5862 movl %ebp, %ebx
5863 - subl $LOAD_PHYSICAL_ADDR, %ebx
5864 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5865 jz 2f /* Nothing to be done if loaded at compiled addr. */
5866 /*
5867 * Process relocations.
5868 @@ -170,8 +170,7 @@ relocated:
5869
5870 1: subl $4, %edi
5871 movl (%edi), %ecx
5872 - testl %ecx, %ecx
5873 - jz 2f
5874 + jecxz 2f
5875 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5876 jmp 1b
5877 2:
5878 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5879 index 35af09d..99c9676 100644
5880 --- a/arch/x86/boot/compressed/head_64.S
5881 +++ b/arch/x86/boot/compressed/head_64.S
5882 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5883 notl %eax
5884 andl %eax, %ebx
5885 #else
5886 - movl $LOAD_PHYSICAL_ADDR, %ebx
5887 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5888 #endif
5889
5890 /* Target address to relocate to for decompression */
5891 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5892 notq %rax
5893 andq %rax, %rbp
5894 #else
5895 - movq $LOAD_PHYSICAL_ADDR, %rbp
5896 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5897 #endif
5898
5899 /* Target address to relocate to for decompression */
5900 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5901 index 3a19d04..7c1d55a 100644
5902 --- a/arch/x86/boot/compressed/misc.c
5903 +++ b/arch/x86/boot/compressed/misc.c
5904 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5905 case PT_LOAD:
5906 #ifdef CONFIG_RELOCATABLE
5907 dest = output;
5908 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5909 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5910 #else
5911 dest = (void *)(phdr->p_paddr);
5912 #endif
5913 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5914 error("Destination address too large");
5915 #endif
5916 #ifndef CONFIG_RELOCATABLE
5917 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5918 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5919 error("Wrong destination address");
5920 #endif
5921
5922 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5923 index 89bbf4e..869908e 100644
5924 --- a/arch/x86/boot/compressed/relocs.c
5925 +++ b/arch/x86/boot/compressed/relocs.c
5926 @@ -13,8 +13,11 @@
5927
5928 static void die(char *fmt, ...);
5929
5930 +#include "../../../../include/generated/autoconf.h"
5931 +
5932 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5933 static Elf32_Ehdr ehdr;
5934 +static Elf32_Phdr *phdr;
5935 static unsigned long reloc_count, reloc_idx;
5936 static unsigned long *relocs;
5937
5938 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5939 }
5940 }
5941
5942 +static void read_phdrs(FILE *fp)
5943 +{
5944 + unsigned int i;
5945 +
5946 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5947 + if (!phdr) {
5948 + die("Unable to allocate %d program headers\n",
5949 + ehdr.e_phnum);
5950 + }
5951 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5952 + die("Seek to %d failed: %s\n",
5953 + ehdr.e_phoff, strerror(errno));
5954 + }
5955 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5956 + die("Cannot read ELF program headers: %s\n",
5957 + strerror(errno));
5958 + }
5959 + for(i = 0; i < ehdr.e_phnum; i++) {
5960 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5961 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5962 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5963 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5964 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5965 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5966 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5967 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5968 + }
5969 +
5970 +}
5971 +
5972 static void read_shdrs(FILE *fp)
5973 {
5974 - int i;
5975 + unsigned int i;
5976 Elf32_Shdr shdr;
5977
5978 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5979 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5980
5981 static void read_strtabs(FILE *fp)
5982 {
5983 - int i;
5984 + unsigned int i;
5985 for (i = 0; i < ehdr.e_shnum; i++) {
5986 struct section *sec = &secs[i];
5987 if (sec->shdr.sh_type != SHT_STRTAB) {
5988 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5989
5990 static void read_symtabs(FILE *fp)
5991 {
5992 - int i,j;
5993 + unsigned int i,j;
5994 for (i = 0; i < ehdr.e_shnum; i++) {
5995 struct section *sec = &secs[i];
5996 if (sec->shdr.sh_type != SHT_SYMTAB) {
5997 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5998
5999 static void read_relocs(FILE *fp)
6000 {
6001 - int i,j;
6002 + unsigned int i,j;
6003 + uint32_t base;
6004 +
6005 for (i = 0; i < ehdr.e_shnum; i++) {
6006 struct section *sec = &secs[i];
6007 if (sec->shdr.sh_type != SHT_REL) {
6008 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
6009 die("Cannot read symbol table: %s\n",
6010 strerror(errno));
6011 }
6012 + base = 0;
6013 + for (j = 0; j < ehdr.e_phnum; j++) {
6014 + if (phdr[j].p_type != PT_LOAD )
6015 + continue;
6016 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6017 + continue;
6018 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6019 + break;
6020 + }
6021 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6022 Elf32_Rel *rel = &sec->reltab[j];
6023 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6024 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6025 rel->r_info = elf32_to_cpu(rel->r_info);
6026 }
6027 }
6028 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6029
6030 static void print_absolute_symbols(void)
6031 {
6032 - int i;
6033 + unsigned int i;
6034 printf("Absolute symbols\n");
6035 printf(" Num: Value Size Type Bind Visibility Name\n");
6036 for (i = 0; i < ehdr.e_shnum; i++) {
6037 struct section *sec = &secs[i];
6038 char *sym_strtab;
6039 Elf32_Sym *sh_symtab;
6040 - int j;
6041 + unsigned int j;
6042
6043 if (sec->shdr.sh_type != SHT_SYMTAB) {
6044 continue;
6045 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6046
6047 static void print_absolute_relocs(void)
6048 {
6049 - int i, printed = 0;
6050 + unsigned int i, printed = 0;
6051
6052 for (i = 0; i < ehdr.e_shnum; i++) {
6053 struct section *sec = &secs[i];
6054 struct section *sec_applies, *sec_symtab;
6055 char *sym_strtab;
6056 Elf32_Sym *sh_symtab;
6057 - int j;
6058 + unsigned int j;
6059 if (sec->shdr.sh_type != SHT_REL) {
6060 continue;
6061 }
6062 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6063
6064 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6065 {
6066 - int i;
6067 + unsigned int i;
6068 /* Walk through the relocations */
6069 for (i = 0; i < ehdr.e_shnum; i++) {
6070 char *sym_strtab;
6071 Elf32_Sym *sh_symtab;
6072 struct section *sec_applies, *sec_symtab;
6073 - int j;
6074 + unsigned int j;
6075 struct section *sec = &secs[i];
6076
6077 if (sec->shdr.sh_type != SHT_REL) {
6078 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6079 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6080 continue;
6081 }
6082 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6083 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6084 + continue;
6085 +
6086 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6087 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6088 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6089 + continue;
6090 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6091 + continue;
6092 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6093 + continue;
6094 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6095 + continue;
6096 +#endif
6097 +
6098 switch (r_type) {
6099 case R_386_NONE:
6100 case R_386_PC32:
6101 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6102
6103 static void emit_relocs(int as_text)
6104 {
6105 - int i;
6106 + unsigned int i;
6107 /* Count how many relocations I have and allocate space for them. */
6108 reloc_count = 0;
6109 walk_relocs(count_reloc);
6110 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6111 fname, strerror(errno));
6112 }
6113 read_ehdr(fp);
6114 + read_phdrs(fp);
6115 read_shdrs(fp);
6116 read_strtabs(fp);
6117 read_symtabs(fp);
6118 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6119 index 4d3ff03..e4972ff 100644
6120 --- a/arch/x86/boot/cpucheck.c
6121 +++ b/arch/x86/boot/cpucheck.c
6122 @@ -74,7 +74,7 @@ static int has_fpu(void)
6123 u16 fcw = -1, fsw = -1;
6124 u32 cr0;
6125
6126 - asm("movl %%cr0,%0" : "=r" (cr0));
6127 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6128 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6129 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6130 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6131 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6132 {
6133 u32 f0, f1;
6134
6135 - asm("pushfl ; "
6136 + asm volatile("pushfl ; "
6137 "pushfl ; "
6138 "popl %0 ; "
6139 "movl %0,%1 ; "
6140 @@ -115,7 +115,7 @@ static void get_flags(void)
6141 set_bit(X86_FEATURE_FPU, cpu.flags);
6142
6143 if (has_eflag(X86_EFLAGS_ID)) {
6144 - asm("cpuid"
6145 + asm volatile("cpuid"
6146 : "=a" (max_intel_level),
6147 "=b" (cpu_vendor[0]),
6148 "=d" (cpu_vendor[1]),
6149 @@ -124,7 +124,7 @@ static void get_flags(void)
6150
6151 if (max_intel_level >= 0x00000001 &&
6152 max_intel_level <= 0x0000ffff) {
6153 - asm("cpuid"
6154 + asm volatile("cpuid"
6155 : "=a" (tfms),
6156 "=c" (cpu.flags[4]),
6157 "=d" (cpu.flags[0])
6158 @@ -136,7 +136,7 @@ static void get_flags(void)
6159 cpu.model += ((tfms >> 16) & 0xf) << 4;
6160 }
6161
6162 - asm("cpuid"
6163 + asm volatile("cpuid"
6164 : "=a" (max_amd_level)
6165 : "a" (0x80000000)
6166 : "ebx", "ecx", "edx");
6167 @@ -144,7 +144,7 @@ static void get_flags(void)
6168 if (max_amd_level >= 0x80000001 &&
6169 max_amd_level <= 0x8000ffff) {
6170 u32 eax = 0x80000001;
6171 - asm("cpuid"
6172 + asm volatile("cpuid"
6173 : "+a" (eax),
6174 "=c" (cpu.flags[6]),
6175 "=d" (cpu.flags[1])
6176 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6177 u32 ecx = MSR_K7_HWCR;
6178 u32 eax, edx;
6179
6180 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6181 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6182 eax &= ~(1 << 15);
6183 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6184 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6185
6186 get_flags(); /* Make sure it really did something */
6187 err = check_flags();
6188 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6189 u32 ecx = MSR_VIA_FCR;
6190 u32 eax, edx;
6191
6192 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6193 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6194 eax |= (1<<1)|(1<<7);
6195 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6196 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6197
6198 set_bit(X86_FEATURE_CX8, cpu.flags);
6199 err = check_flags();
6200 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6201 u32 eax, edx;
6202 u32 level = 1;
6203
6204 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6205 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6206 - asm("cpuid"
6207 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6208 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6209 + asm volatile("cpuid"
6210 : "+a" (level), "=d" (cpu.flags[0])
6211 : : "ecx", "ebx");
6212 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6213 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6214
6215 err = check_flags();
6216 }
6217 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6218 index 93e689f..504ba09 100644
6219 --- a/arch/x86/boot/header.S
6220 +++ b/arch/x86/boot/header.S
6221 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6222 # single linked list of
6223 # struct setup_data
6224
6225 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6226 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6227
6228 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6229 #define VO_INIT_SIZE (VO__end - VO__text)
6230 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6231 index db75d07..8e6d0af 100644
6232 --- a/arch/x86/boot/memory.c
6233 +++ b/arch/x86/boot/memory.c
6234 @@ -19,7 +19,7 @@
6235
6236 static int detect_memory_e820(void)
6237 {
6238 - int count = 0;
6239 + unsigned int count = 0;
6240 struct biosregs ireg, oreg;
6241 struct e820entry *desc = boot_params.e820_map;
6242 static struct e820entry buf; /* static so it is zeroed */
6243 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6244 index 11e8c6e..fdbb1ed 100644
6245 --- a/arch/x86/boot/video-vesa.c
6246 +++ b/arch/x86/boot/video-vesa.c
6247 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6248
6249 boot_params.screen_info.vesapm_seg = oreg.es;
6250 boot_params.screen_info.vesapm_off = oreg.di;
6251 + boot_params.screen_info.vesapm_size = oreg.cx;
6252 }
6253
6254 /*
6255 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6256 index 43eda28..5ab5fdb 100644
6257 --- a/arch/x86/boot/video.c
6258 +++ b/arch/x86/boot/video.c
6259 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6260 static unsigned int get_entry(void)
6261 {
6262 char entry_buf[4];
6263 - int i, len = 0;
6264 + unsigned int i, len = 0;
6265 int key;
6266 unsigned int v;
6267
6268 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6269 index 5b577d5..3c1fed4 100644
6270 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6271 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6272 @@ -8,6 +8,8 @@
6273 * including this sentence is retained in full.
6274 */
6275
6276 +#include <asm/alternative-asm.h>
6277 +
6278 .extern crypto_ft_tab
6279 .extern crypto_it_tab
6280 .extern crypto_fl_tab
6281 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6282 je B192; \
6283 leaq 32(r9),r9;
6284
6285 +#define ret pax_force_retaddr 0, 1; ret
6286 +
6287 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6288 movq r1,r2; \
6289 movq r3,r4; \
6290 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6291 index be6d9e3..21fbbca 100644
6292 --- a/arch/x86/crypto/aesni-intel_asm.S
6293 +++ b/arch/x86/crypto/aesni-intel_asm.S
6294 @@ -31,6 +31,7 @@
6295
6296 #include <linux/linkage.h>
6297 #include <asm/inst.h>
6298 +#include <asm/alternative-asm.h>
6299
6300 #ifdef __x86_64__
6301 .data
6302 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6303 pop %r14
6304 pop %r13
6305 pop %r12
6306 + pax_force_retaddr 0, 1
6307 ret
6308 +ENDPROC(aesni_gcm_dec)
6309
6310
6311 /*****************************************************************************
6312 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6313 pop %r14
6314 pop %r13
6315 pop %r12
6316 + pax_force_retaddr 0, 1
6317 ret
6318 +ENDPROC(aesni_gcm_enc)
6319
6320 #endif
6321
6322 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6323 pxor %xmm1, %xmm0
6324 movaps %xmm0, (TKEYP)
6325 add $0x10, TKEYP
6326 + pax_force_retaddr_bts
6327 ret
6328
6329 .align 4
6330 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6331 shufps $0b01001110, %xmm2, %xmm1
6332 movaps %xmm1, 0x10(TKEYP)
6333 add $0x20, TKEYP
6334 + pax_force_retaddr_bts
6335 ret
6336
6337 .align 4
6338 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6339
6340 movaps %xmm0, (TKEYP)
6341 add $0x10, TKEYP
6342 + pax_force_retaddr_bts
6343 ret
6344
6345 .align 4
6346 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6347 pxor %xmm1, %xmm2
6348 movaps %xmm2, (TKEYP)
6349 add $0x10, TKEYP
6350 + pax_force_retaddr_bts
6351 ret
6352
6353 /*
6354 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6355 #ifndef __x86_64__
6356 popl KEYP
6357 #endif
6358 + pax_force_retaddr 0, 1
6359 ret
6360 +ENDPROC(aesni_set_key)
6361
6362 /*
6363 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6364 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6365 popl KLEN
6366 popl KEYP
6367 #endif
6368 + pax_force_retaddr 0, 1
6369 ret
6370 +ENDPROC(aesni_enc)
6371
6372 /*
6373 * _aesni_enc1: internal ABI
6374 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6375 AESENC KEY STATE
6376 movaps 0x70(TKEYP), KEY
6377 AESENCLAST KEY STATE
6378 + pax_force_retaddr_bts
6379 ret
6380
6381 /*
6382 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6383 AESENCLAST KEY STATE2
6384 AESENCLAST KEY STATE3
6385 AESENCLAST KEY STATE4
6386 + pax_force_retaddr_bts
6387 ret
6388
6389 /*
6390 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6391 popl KLEN
6392 popl KEYP
6393 #endif
6394 + pax_force_retaddr 0, 1
6395 ret
6396 +ENDPROC(aesni_dec)
6397
6398 /*
6399 * _aesni_dec1: internal ABI
6400 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6401 AESDEC KEY STATE
6402 movaps 0x70(TKEYP), KEY
6403 AESDECLAST KEY STATE
6404 + pax_force_retaddr_bts
6405 ret
6406
6407 /*
6408 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6409 AESDECLAST KEY STATE2
6410 AESDECLAST KEY STATE3
6411 AESDECLAST KEY STATE4
6412 + pax_force_retaddr_bts
6413 ret
6414
6415 /*
6416 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6417 popl KEYP
6418 popl LEN
6419 #endif
6420 + pax_force_retaddr 0, 1
6421 ret
6422 +ENDPROC(aesni_ecb_enc)
6423
6424 /*
6425 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6426 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6427 popl KEYP
6428 popl LEN
6429 #endif
6430 + pax_force_retaddr 0, 1
6431 ret
6432 +ENDPROC(aesni_ecb_dec)
6433
6434 /*
6435 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6436 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6437 popl LEN
6438 popl IVP
6439 #endif
6440 + pax_force_retaddr 0, 1
6441 ret
6442 +ENDPROC(aesni_cbc_enc)
6443
6444 /*
6445 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6446 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6447 popl LEN
6448 popl IVP
6449 #endif
6450 + pax_force_retaddr 0, 1
6451 ret
6452 +ENDPROC(aesni_cbc_dec)
6453
6454 #ifdef __x86_64__
6455 .align 16
6456 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6457 mov $1, TCTR_LOW
6458 MOVQ_R64_XMM TCTR_LOW INC
6459 MOVQ_R64_XMM CTR TCTR_LOW
6460 + pax_force_retaddr_bts
6461 ret
6462
6463 /*
6464 @@ -2552,6 +2580,7 @@ _aesni_inc:
6465 .Linc_low:
6466 movaps CTR, IV
6467 PSHUFB_XMM BSWAP_MASK IV
6468 + pax_force_retaddr_bts
6469 ret
6470
6471 /*
6472 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6473 .Lctr_enc_ret:
6474 movups IV, (IVP)
6475 .Lctr_enc_just_ret:
6476 + pax_force_retaddr 0, 1
6477 ret
6478 +ENDPROC(aesni_ctr_enc)
6479 #endif
6480 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6481 index 6214a9b..1f4fc9a 100644
6482 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6483 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6484 @@ -1,3 +1,5 @@
6485 +#include <asm/alternative-asm.h>
6486 +
6487 # enter ECRYPT_encrypt_bytes
6488 .text
6489 .p2align 5
6490 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6491 add %r11,%rsp
6492 mov %rdi,%rax
6493 mov %rsi,%rdx
6494 + pax_force_retaddr 0, 1
6495 ret
6496 # bytesatleast65:
6497 ._bytesatleast65:
6498 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6499 add %r11,%rsp
6500 mov %rdi,%rax
6501 mov %rsi,%rdx
6502 + pax_force_retaddr
6503 ret
6504 # enter ECRYPT_ivsetup
6505 .text
6506 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6507 add %r11,%rsp
6508 mov %rdi,%rax
6509 mov %rsi,%rdx
6510 + pax_force_retaddr
6511 ret
6512 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6513 index 573aa10..b73ad89 100644
6514 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6515 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6516 @@ -21,6 +21,7 @@
6517 .text
6518
6519 #include <asm/asm-offsets.h>
6520 +#include <asm/alternative-asm.h>
6521
6522 #define a_offset 0
6523 #define b_offset 4
6524 @@ -269,6 +270,7 @@ twofish_enc_blk:
6525
6526 popq R1
6527 movq $1,%rax
6528 + pax_force_retaddr 0, 1
6529 ret
6530
6531 twofish_dec_blk:
6532 @@ -321,4 +323,5 @@ twofish_dec_blk:
6533
6534 popq R1
6535 movq $1,%rax
6536 + pax_force_retaddr 0, 1
6537 ret
6538 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6539 index fd84387..0b4af7d 100644
6540 --- a/arch/x86/ia32/ia32_aout.c
6541 +++ b/arch/x86/ia32/ia32_aout.c
6542 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6543 unsigned long dump_start, dump_size;
6544 struct user32 dump;
6545
6546 + memset(&dump, 0, sizeof(dump));
6547 +
6548 fs = get_fs();
6549 set_fs(KERNEL_DS);
6550 has_dumped = 1;
6551 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6552 index 6557769..ef6ae89 100644
6553 --- a/arch/x86/ia32/ia32_signal.c
6554 +++ b/arch/x86/ia32/ia32_signal.c
6555 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6556 }
6557 seg = get_fs();
6558 set_fs(KERNEL_DS);
6559 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6560 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6561 set_fs(seg);
6562 if (ret >= 0 && uoss_ptr) {
6563 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6564 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6565 */
6566 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6567 size_t frame_size,
6568 - void **fpstate)
6569 + void __user **fpstate)
6570 {
6571 unsigned long sp;
6572
6573 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6574
6575 if (used_math()) {
6576 sp = sp - sig_xstate_ia32_size;
6577 - *fpstate = (struct _fpstate_ia32 *) sp;
6578 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6579 if (save_i387_xstate_ia32(*fpstate) < 0)
6580 return (void __user *) -1L;
6581 }
6582 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6583 sp -= frame_size;
6584 /* Align the stack pointer according to the i386 ABI,
6585 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6586 - sp = ((sp + 4) & -16ul) - 4;
6587 + sp = ((sp - 12) & -16ul) - 4;
6588 return (void __user *) sp;
6589 }
6590
6591 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6592 * These are actually not used anymore, but left because some
6593 * gdb versions depend on them as a marker.
6594 */
6595 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6596 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6597 } put_user_catch(err);
6598
6599 if (err)
6600 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6601 0xb8,
6602 __NR_ia32_rt_sigreturn,
6603 0x80cd,
6604 - 0,
6605 + 0
6606 };
6607
6608 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6609 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6610
6611 if (ka->sa.sa_flags & SA_RESTORER)
6612 restorer = ka->sa.sa_restorer;
6613 + else if (current->mm->context.vdso)
6614 + /* Return stub is in 32bit vsyscall page */
6615 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6616 else
6617 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6618 - rt_sigreturn);
6619 + restorer = &frame->retcode;
6620 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6621
6622 /*
6623 * Not actually used anymore, but left because some gdb
6624 * versions need it.
6625 */
6626 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6627 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6628 } put_user_catch(err);
6629
6630 if (err)
6631 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6632 index 54edb207..9335b5f 100644
6633 --- a/arch/x86/ia32/ia32entry.S
6634 +++ b/arch/x86/ia32/ia32entry.S
6635 @@ -13,7 +13,9 @@
6636 #include <asm/thread_info.h>
6637 #include <asm/segment.h>
6638 #include <asm/irqflags.h>
6639 +#include <asm/pgtable.h>
6640 #include <linux/linkage.h>
6641 +#include <asm/alternative-asm.h>
6642
6643 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6644 #include <linux/elf-em.h>
6645 @@ -95,6 +97,30 @@ ENTRY(native_irq_enable_sysexit)
6646 ENDPROC(native_irq_enable_sysexit)
6647 #endif
6648
6649 + .macro pax_enter_kernel_user
6650 + pax_set_fptr_mask
6651 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6652 + call pax_enter_kernel_user
6653 +#endif
6654 + .endm
6655 +
6656 + .macro pax_exit_kernel_user
6657 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6658 + call pax_exit_kernel_user
6659 +#endif
6660 +#ifdef CONFIG_PAX_RANDKSTACK
6661 + pushq %rax
6662 + call pax_randomize_kstack
6663 + popq %rax
6664 +#endif
6665 + .endm
6666 +
6667 +.macro pax_erase_kstack
6668 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6669 + call pax_erase_kstack
6670 +#endif
6671 +.endm
6672 +
6673 /*
6674 * 32bit SYSENTER instruction entry.
6675 *
6676 @@ -121,12 +147,6 @@ ENTRY(ia32_sysenter_target)
6677 CFI_REGISTER rsp,rbp
6678 SWAPGS_UNSAFE_STACK
6679 movq PER_CPU_VAR(kernel_stack), %rsp
6680 - addq $(KERNEL_STACK_OFFSET),%rsp
6681 - /*
6682 - * No need to follow this irqs on/off section: the syscall
6683 - * disabled irqs, here we enable it straight after entry:
6684 - */
6685 - ENABLE_INTERRUPTS(CLBR_NONE)
6686 movl %ebp,%ebp /* zero extension */
6687 pushq_cfi $__USER32_DS
6688 /*CFI_REL_OFFSET ss,0*/
6689 @@ -134,25 +154,38 @@ ENTRY(ia32_sysenter_target)
6690 CFI_REL_OFFSET rsp,0
6691 pushfq_cfi
6692 /*CFI_REL_OFFSET rflags,0*/
6693 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6694 - CFI_REGISTER rip,r10
6695 + GET_THREAD_INFO(%r11)
6696 + movl TI_sysenter_return(%r11), %r11d
6697 + CFI_REGISTER rip,r11
6698 pushq_cfi $__USER32_CS
6699 /*CFI_REL_OFFSET cs,0*/
6700 movl %eax, %eax
6701 - pushq_cfi %r10
6702 + pushq_cfi %r11
6703 CFI_REL_OFFSET rip,0
6704 pushq_cfi %rax
6705 cld
6706 SAVE_ARGS 0,1,0
6707 + pax_enter_kernel_user
6708 + /*
6709 + * No need to follow this irqs on/off section: the syscall
6710 + * disabled irqs, here we enable it straight after entry:
6711 + */
6712 + ENABLE_INTERRUPTS(CLBR_NONE)
6713 /* no need to do an access_ok check here because rbp has been
6714 32bit zero extended */
6715 +
6716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6717 + mov $PAX_USER_SHADOW_BASE,%r11
6718 + add %r11,%rbp
6719 +#endif
6720 +
6721 1: movl (%rbp),%ebp
6722 .section __ex_table,"a"
6723 .quad 1b,ia32_badarg
6724 .previous
6725 - GET_THREAD_INFO(%r10)
6726 - orl $TS_COMPAT,TI_status(%r10)
6727 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6728 + GET_THREAD_INFO(%r11)
6729 + orl $TS_COMPAT,TI_status(%r11)
6730 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6731 CFI_REMEMBER_STATE
6732 jnz sysenter_tracesys
6733 cmpq $(IA32_NR_syscalls-1),%rax
6734 @@ -162,13 +195,15 @@ sysenter_do_call:
6735 sysenter_dispatch:
6736 call *ia32_sys_call_table(,%rax,8)
6737 movq %rax,RAX-ARGOFFSET(%rsp)
6738 - GET_THREAD_INFO(%r10)
6739 + GET_THREAD_INFO(%r11)
6740 DISABLE_INTERRUPTS(CLBR_NONE)
6741 TRACE_IRQS_OFF
6742 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6743 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6744 jnz sysexit_audit
6745 sysexit_from_sys_call:
6746 - andl $~TS_COMPAT,TI_status(%r10)
6747 + pax_exit_kernel_user
6748 + pax_erase_kstack
6749 + andl $~TS_COMPAT,TI_status(%r11)
6750 /* clear IF, that popfq doesn't enable interrupts early */
6751 andl $~0x200,EFLAGS-R11(%rsp)
6752 movl RIP-R11(%rsp),%edx /* User %eip */
6753 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
6754 movl %eax,%esi /* 2nd arg: syscall number */
6755 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6756 call audit_syscall_entry
6757 +
6758 + pax_erase_kstack
6759 +
6760 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6761 cmpq $(IA32_NR_syscalls-1),%rax
6762 ja ia32_badsys
6763 @@ -205,7 +243,7 @@ sysexit_from_sys_call:
6764 .endm
6765
6766 .macro auditsys_exit exit
6767 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6768 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6769 jnz ia32_ret_from_sys_call
6770 TRACE_IRQS_ON
6771 sti
6772 @@ -215,12 +253,12 @@ sysexit_from_sys_call:
6773 movzbl %al,%edi /* zero-extend that into %edi */
6774 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6775 call audit_syscall_exit
6776 - GET_THREAD_INFO(%r10)
6777 + GET_THREAD_INFO(%r11)
6778 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6779 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6780 cli
6781 TRACE_IRQS_OFF
6782 - testl %edi,TI_flags(%r10)
6783 + testl %edi,TI_flags(%r11)
6784 jz \exit
6785 CLEAR_RREGS -ARGOFFSET
6786 jmp int_with_check
6787 @@ -238,7 +276,7 @@ sysexit_audit:
6788
6789 sysenter_tracesys:
6790 #ifdef CONFIG_AUDITSYSCALL
6791 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6792 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6793 jz sysenter_auditsys
6794 #endif
6795 SAVE_REST
6796 @@ -246,6 +284,9 @@ sysenter_tracesys:
6797 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6798 movq %rsp,%rdi /* &pt_regs -> arg1 */
6799 call syscall_trace_enter
6800 +
6801 + pax_erase_kstack
6802 +
6803 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6804 RESTORE_REST
6805 cmpq $(IA32_NR_syscalls-1),%rax
6806 @@ -277,19 +318,20 @@ ENDPROC(ia32_sysenter_target)
6807 ENTRY(ia32_cstar_target)
6808 CFI_STARTPROC32 simple
6809 CFI_SIGNAL_FRAME
6810 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6811 + CFI_DEF_CFA rsp,0
6812 CFI_REGISTER rip,rcx
6813 /*CFI_REGISTER rflags,r11*/
6814 SWAPGS_UNSAFE_STACK
6815 movl %esp,%r8d
6816 CFI_REGISTER rsp,r8
6817 movq PER_CPU_VAR(kernel_stack),%rsp
6818 + SAVE_ARGS 8*6,0,0
6819 + pax_enter_kernel_user
6820 /*
6821 * No need to follow this irqs on/off section: the syscall
6822 * disabled irqs and here we enable it straight after entry:
6823 */
6824 ENABLE_INTERRUPTS(CLBR_NONE)
6825 - SAVE_ARGS 8,0,0
6826 movl %eax,%eax /* zero extension */
6827 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6828 movq %rcx,RIP-ARGOFFSET(%rsp)
6829 @@ -305,13 +347,19 @@ ENTRY(ia32_cstar_target)
6830 /* no need to do an access_ok check here because r8 has been
6831 32bit zero extended */
6832 /* hardware stack frame is complete now */
6833 +
6834 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6835 + mov $PAX_USER_SHADOW_BASE,%r11
6836 + add %r11,%r8
6837 +#endif
6838 +
6839 1: movl (%r8),%r9d
6840 .section __ex_table,"a"
6841 .quad 1b,ia32_badarg
6842 .previous
6843 - GET_THREAD_INFO(%r10)
6844 - orl $TS_COMPAT,TI_status(%r10)
6845 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6846 + GET_THREAD_INFO(%r11)
6847 + orl $TS_COMPAT,TI_status(%r11)
6848 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6849 CFI_REMEMBER_STATE
6850 jnz cstar_tracesys
6851 cmpq $IA32_NR_syscalls-1,%rax
6852 @@ -321,13 +369,15 @@ cstar_do_call:
6853 cstar_dispatch:
6854 call *ia32_sys_call_table(,%rax,8)
6855 movq %rax,RAX-ARGOFFSET(%rsp)
6856 - GET_THREAD_INFO(%r10)
6857 + GET_THREAD_INFO(%r11)
6858 DISABLE_INTERRUPTS(CLBR_NONE)
6859 TRACE_IRQS_OFF
6860 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6861 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6862 jnz sysretl_audit
6863 sysretl_from_sys_call:
6864 - andl $~TS_COMPAT,TI_status(%r10)
6865 + pax_exit_kernel_user
6866 + pax_erase_kstack
6867 + andl $~TS_COMPAT,TI_status(%r11)
6868 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6869 movl RIP-ARGOFFSET(%rsp),%ecx
6870 CFI_REGISTER rip,rcx
6871 @@ -355,7 +405,7 @@ sysretl_audit:
6872
6873 cstar_tracesys:
6874 #ifdef CONFIG_AUDITSYSCALL
6875 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6876 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6877 jz cstar_auditsys
6878 #endif
6879 xchgl %r9d,%ebp
6880 @@ -364,6 +414,9 @@ cstar_tracesys:
6881 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6882 movq %rsp,%rdi /* &pt_regs -> arg1 */
6883 call syscall_trace_enter
6884 +
6885 + pax_erase_kstack
6886 +
6887 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6888 RESTORE_REST
6889 xchgl %ebp,%r9d
6890 @@ -409,20 +462,21 @@ ENTRY(ia32_syscall)
6891 CFI_REL_OFFSET rip,RIP-RIP
6892 PARAVIRT_ADJUST_EXCEPTION_FRAME
6893 SWAPGS
6894 - /*
6895 - * No need to follow this irqs on/off section: the syscall
6896 - * disabled irqs and here we enable it straight after entry:
6897 - */
6898 - ENABLE_INTERRUPTS(CLBR_NONE)
6899 movl %eax,%eax
6900 pushq_cfi %rax
6901 cld
6902 /* note the registers are not zero extended to the sf.
6903 this could be a problem. */
6904 SAVE_ARGS 0,1,0
6905 - GET_THREAD_INFO(%r10)
6906 - orl $TS_COMPAT,TI_status(%r10)
6907 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6908 + pax_enter_kernel_user
6909 + /*
6910 + * No need to follow this irqs on/off section: the syscall
6911 + * disabled irqs and here we enable it straight after entry:
6912 + */
6913 + ENABLE_INTERRUPTS(CLBR_NONE)
6914 + GET_THREAD_INFO(%r11)
6915 + orl $TS_COMPAT,TI_status(%r11)
6916 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6917 jnz ia32_tracesys
6918 cmpq $(IA32_NR_syscalls-1),%rax
6919 ja ia32_badsys
6920 @@ -441,6 +495,9 @@ ia32_tracesys:
6921 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6922 movq %rsp,%rdi /* &pt_regs -> arg1 */
6923 call syscall_trace_enter
6924 +
6925 + pax_erase_kstack
6926 +
6927 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6928 RESTORE_REST
6929 cmpq $(IA32_NR_syscalls-1),%rax
6930 @@ -455,6 +512,7 @@ ia32_badsys:
6931
6932 quiet_ni_syscall:
6933 movq $-ENOSYS,%rax
6934 + pax_force_retaddr
6935 ret
6936 CFI_ENDPROC
6937
6938 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6939 index f6f5c53..b358b28 100644
6940 --- a/arch/x86/ia32/sys_ia32.c
6941 +++ b/arch/x86/ia32/sys_ia32.c
6942 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6943 */
6944 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6945 {
6946 - typeof(ubuf->st_uid) uid = 0;
6947 - typeof(ubuf->st_gid) gid = 0;
6948 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
6949 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
6950 SET_UID(uid, stat->uid);
6951 SET_GID(gid, stat->gid);
6952 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6953 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
6954 }
6955 set_fs(KERNEL_DS);
6956 ret = sys_rt_sigprocmask(how,
6957 - set ? (sigset_t __user *)&s : NULL,
6958 - oset ? (sigset_t __user *)&s : NULL,
6959 + set ? (sigset_t __force_user *)&s : NULL,
6960 + oset ? (sigset_t __force_user *)&s : NULL,
6961 sigsetsize);
6962 set_fs(old_fs);
6963 if (ret)
6964 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
6965 return alarm_setitimer(seconds);
6966 }
6967
6968 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6969 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6970 int options)
6971 {
6972 return compat_sys_wait4(pid, stat_addr, options, NULL);
6973 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
6974 mm_segment_t old_fs = get_fs();
6975
6976 set_fs(KERNEL_DS);
6977 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6978 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6979 set_fs(old_fs);
6980 if (put_compat_timespec(&t, interval))
6981 return -EFAULT;
6982 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
6983 mm_segment_t old_fs = get_fs();
6984
6985 set_fs(KERNEL_DS);
6986 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6987 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6988 set_fs(old_fs);
6989 if (!ret) {
6990 switch (_NSIG_WORDS) {
6991 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
6992 if (copy_siginfo_from_user32(&info, uinfo))
6993 return -EFAULT;
6994 set_fs(KERNEL_DS);
6995 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6996 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6997 set_fs(old_fs);
6998 return ret;
6999 }
7000 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
7001 return -EFAULT;
7002
7003 set_fs(KERNEL_DS);
7004 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
7005 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
7006 count);
7007 set_fs(old_fs);
7008
7009 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
7010 index 091508b..0ee32ec 100644
7011 --- a/arch/x86/include/asm/alternative-asm.h
7012 +++ b/arch/x86/include/asm/alternative-asm.h
7013 @@ -15,6 +15,45 @@
7014 .endm
7015 #endif
7016
7017 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
7018 + .macro pax_force_retaddr_bts rip=0
7019 + btsq $63,\rip(%rsp)
7020 + .endm
7021 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7022 + .macro pax_force_retaddr rip=0, reload=0
7023 + btsq $63,\rip(%rsp)
7024 + .endm
7025 + .macro pax_force_fptr ptr
7026 + btsq $63,\ptr
7027 + .endm
7028 + .macro pax_set_fptr_mask
7029 + .endm
7030 +#endif
7031 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7032 + .macro pax_force_retaddr rip=0, reload=0
7033 + .if \reload
7034 + pax_set_fptr_mask
7035 + .endif
7036 + orq %r10,\rip(%rsp)
7037 + .endm
7038 + .macro pax_force_fptr ptr
7039 + orq %r10,\ptr
7040 + .endm
7041 + .macro pax_set_fptr_mask
7042 + movabs $0x8000000000000000,%r10
7043 + .endm
7044 +#endif
7045 +#else
7046 + .macro pax_force_retaddr rip=0, reload=0
7047 + .endm
7048 + .macro pax_force_fptr ptr
7049 + .endm
7050 + .macro pax_force_retaddr_bts rip=0
7051 + .endm
7052 + .macro pax_set_fptr_mask
7053 + .endm
7054 +#endif
7055 +
7056 .macro altinstruction_entry orig alt feature orig_len alt_len
7057 .long \orig - .
7058 .long \alt - .
7059 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7060 index 37ad100..7d47faa 100644
7061 --- a/arch/x86/include/asm/alternative.h
7062 +++ b/arch/x86/include/asm/alternative.h
7063 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7064 ".section .discard,\"aw\",@progbits\n" \
7065 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7066 ".previous\n" \
7067 - ".section .altinstr_replacement, \"ax\"\n" \
7068 + ".section .altinstr_replacement, \"a\"\n" \
7069 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7070 ".previous"
7071
7072 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7073 index 9b7273c..e9fcc24 100644
7074 --- a/arch/x86/include/asm/apic.h
7075 +++ b/arch/x86/include/asm/apic.h
7076 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7077
7078 #ifdef CONFIG_X86_LOCAL_APIC
7079
7080 -extern unsigned int apic_verbosity;
7081 +extern int apic_verbosity;
7082 extern int local_apic_timer_c2_ok;
7083
7084 extern int disable_apic;
7085 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7086 index 20370c6..a2eb9b0 100644
7087 --- a/arch/x86/include/asm/apm.h
7088 +++ b/arch/x86/include/asm/apm.h
7089 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7090 __asm__ __volatile__(APM_DO_ZERO_SEGS
7091 "pushl %%edi\n\t"
7092 "pushl %%ebp\n\t"
7093 - "lcall *%%cs:apm_bios_entry\n\t"
7094 + "lcall *%%ss:apm_bios_entry\n\t"
7095 "setc %%al\n\t"
7096 "popl %%ebp\n\t"
7097 "popl %%edi\n\t"
7098 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7099 __asm__ __volatile__(APM_DO_ZERO_SEGS
7100 "pushl %%edi\n\t"
7101 "pushl %%ebp\n\t"
7102 - "lcall *%%cs:apm_bios_entry\n\t"
7103 + "lcall *%%ss:apm_bios_entry\n\t"
7104 "setc %%bl\n\t"
7105 "popl %%ebp\n\t"
7106 "popl %%edi\n\t"
7107 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7108 index 10572e3..2618d91 100644
7109 --- a/arch/x86/include/asm/atomic.h
7110 +++ b/arch/x86/include/asm/atomic.h
7111 @@ -22,7 +22,18 @@
7112 */
7113 static inline int atomic_read(const atomic_t *v)
7114 {
7115 - return (*(volatile int *)&(v)->counter);
7116 + return (*(volatile const int *)&(v)->counter);
7117 +}
7118 +
7119 +/**
7120 + * atomic_read_unchecked - read atomic variable
7121 + * @v: pointer of type atomic_unchecked_t
7122 + *
7123 + * Atomically reads the value of @v.
7124 + */
7125 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7126 +{
7127 + return (*(volatile const int *)&(v)->counter);
7128 }
7129
7130 /**
7131 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7132 }
7133
7134 /**
7135 + * atomic_set_unchecked - set atomic variable
7136 + * @v: pointer of type atomic_unchecked_t
7137 + * @i: required value
7138 + *
7139 + * Atomically sets the value of @v to @i.
7140 + */
7141 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7142 +{
7143 + v->counter = i;
7144 +}
7145 +
7146 +/**
7147 * atomic_add - add integer to atomic variable
7148 * @i: integer value to add
7149 * @v: pointer of type atomic_t
7150 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7151 */
7152 static inline void atomic_add(int i, atomic_t *v)
7153 {
7154 - asm volatile(LOCK_PREFIX "addl %1,%0"
7155 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7156 +
7157 +#ifdef CONFIG_PAX_REFCOUNT
7158 + "jno 0f\n"
7159 + LOCK_PREFIX "subl %1,%0\n"
7160 + "int $4\n0:\n"
7161 + _ASM_EXTABLE(0b, 0b)
7162 +#endif
7163 +
7164 + : "+m" (v->counter)
7165 + : "ir" (i));
7166 +}
7167 +
7168 +/**
7169 + * atomic_add_unchecked - add integer to atomic variable
7170 + * @i: integer value to add
7171 + * @v: pointer of type atomic_unchecked_t
7172 + *
7173 + * Atomically adds @i to @v.
7174 + */
7175 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7176 +{
7177 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7178 : "+m" (v->counter)
7179 : "ir" (i));
7180 }
7181 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7182 */
7183 static inline void atomic_sub(int i, atomic_t *v)
7184 {
7185 - asm volatile(LOCK_PREFIX "subl %1,%0"
7186 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7187 +
7188 +#ifdef CONFIG_PAX_REFCOUNT
7189 + "jno 0f\n"
7190 + LOCK_PREFIX "addl %1,%0\n"
7191 + "int $4\n0:\n"
7192 + _ASM_EXTABLE(0b, 0b)
7193 +#endif
7194 +
7195 + : "+m" (v->counter)
7196 + : "ir" (i));
7197 +}
7198 +
7199 +/**
7200 + * atomic_sub_unchecked - subtract integer from atomic variable
7201 + * @i: integer value to subtract
7202 + * @v: pointer of type atomic_unchecked_t
7203 + *
7204 + * Atomically subtracts @i from @v.
7205 + */
7206 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7207 +{
7208 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7209 : "+m" (v->counter)
7210 : "ir" (i));
7211 }
7212 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7213 {
7214 unsigned char c;
7215
7216 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7217 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7218 +
7219 +#ifdef CONFIG_PAX_REFCOUNT
7220 + "jno 0f\n"
7221 + LOCK_PREFIX "addl %2,%0\n"
7222 + "int $4\n0:\n"
7223 + _ASM_EXTABLE(0b, 0b)
7224 +#endif
7225 +
7226 + "sete %1\n"
7227 : "+m" (v->counter), "=qm" (c)
7228 : "ir" (i) : "memory");
7229 return c;
7230 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7231 */
7232 static inline void atomic_inc(atomic_t *v)
7233 {
7234 - asm volatile(LOCK_PREFIX "incl %0"
7235 + asm volatile(LOCK_PREFIX "incl %0\n"
7236 +
7237 +#ifdef CONFIG_PAX_REFCOUNT
7238 + "jno 0f\n"
7239 + LOCK_PREFIX "decl %0\n"
7240 + "int $4\n0:\n"
7241 + _ASM_EXTABLE(0b, 0b)
7242 +#endif
7243 +
7244 + : "+m" (v->counter));
7245 +}
7246 +
7247 +/**
7248 + * atomic_inc_unchecked - increment atomic variable
7249 + * @v: pointer of type atomic_unchecked_t
7250 + *
7251 + * Atomically increments @v by 1.
7252 + */
7253 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7254 +{
7255 + asm volatile(LOCK_PREFIX "incl %0\n"
7256 : "+m" (v->counter));
7257 }
7258
7259 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7260 */
7261 static inline void atomic_dec(atomic_t *v)
7262 {
7263 - asm volatile(LOCK_PREFIX "decl %0"
7264 + asm volatile(LOCK_PREFIX "decl %0\n"
7265 +
7266 +#ifdef CONFIG_PAX_REFCOUNT
7267 + "jno 0f\n"
7268 + LOCK_PREFIX "incl %0\n"
7269 + "int $4\n0:\n"
7270 + _ASM_EXTABLE(0b, 0b)
7271 +#endif
7272 +
7273 + : "+m" (v->counter));
7274 +}
7275 +
7276 +/**
7277 + * atomic_dec_unchecked - decrement atomic variable
7278 + * @v: pointer of type atomic_unchecked_t
7279 + *
7280 + * Atomically decrements @v by 1.
7281 + */
7282 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7283 +{
7284 + asm volatile(LOCK_PREFIX "decl %0\n"
7285 : "+m" (v->counter));
7286 }
7287
7288 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7289 {
7290 unsigned char c;
7291
7292 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7293 + asm volatile(LOCK_PREFIX "decl %0\n"
7294 +
7295 +#ifdef CONFIG_PAX_REFCOUNT
7296 + "jno 0f\n"
7297 + LOCK_PREFIX "incl %0\n"
7298 + "int $4\n0:\n"
7299 + _ASM_EXTABLE(0b, 0b)
7300 +#endif
7301 +
7302 + "sete %1\n"
7303 : "+m" (v->counter), "=qm" (c)
7304 : : "memory");
7305 return c != 0;
7306 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7307 {
7308 unsigned char c;
7309
7310 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7311 + asm volatile(LOCK_PREFIX "incl %0\n"
7312 +
7313 +#ifdef CONFIG_PAX_REFCOUNT
7314 + "jno 0f\n"
7315 + LOCK_PREFIX "decl %0\n"
7316 + "int $4\n0:\n"
7317 + _ASM_EXTABLE(0b, 0b)
7318 +#endif
7319 +
7320 + "sete %1\n"
7321 + : "+m" (v->counter), "=qm" (c)
7322 + : : "memory");
7323 + return c != 0;
7324 +}
7325 +
7326 +/**
7327 + * atomic_inc_and_test_unchecked - increment and test
7328 + * @v: pointer of type atomic_unchecked_t
7329 + *
7330 + * Atomically increments @v by 1
7331 + * and returns true if the result is zero, or false for all
7332 + * other cases.
7333 + */
7334 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7335 +{
7336 + unsigned char c;
7337 +
7338 + asm volatile(LOCK_PREFIX "incl %0\n"
7339 + "sete %1\n"
7340 : "+m" (v->counter), "=qm" (c)
7341 : : "memory");
7342 return c != 0;
7343 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7344 {
7345 unsigned char c;
7346
7347 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7348 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7349 +
7350 +#ifdef CONFIG_PAX_REFCOUNT
7351 + "jno 0f\n"
7352 + LOCK_PREFIX "subl %2,%0\n"
7353 + "int $4\n0:\n"
7354 + _ASM_EXTABLE(0b, 0b)
7355 +#endif
7356 +
7357 + "sets %1\n"
7358 : "+m" (v->counter), "=qm" (c)
7359 : "ir" (i) : "memory");
7360 return c;
7361 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
7362 #endif
7363 /* Modern 486+ processor */
7364 __i = i;
7365 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7366 +
7367 +#ifdef CONFIG_PAX_REFCOUNT
7368 + "jno 0f\n"
7369 + "movl %0, %1\n"
7370 + "int $4\n0:\n"
7371 + _ASM_EXTABLE(0b, 0b)
7372 +#endif
7373 +
7374 + : "+r" (i), "+m" (v->counter)
7375 + : : "memory");
7376 + return i + __i;
7377 +
7378 +#ifdef CONFIG_M386
7379 +no_xadd: /* Legacy 386 processor */
7380 + local_irq_save(flags);
7381 + __i = atomic_read(v);
7382 + atomic_set(v, i + __i);
7383 + local_irq_restore(flags);
7384 + return i + __i;
7385 +#endif
7386 +}
7387 +
7388 +/**
7389 + * atomic_add_return_unchecked - add integer and return
7390 + * @v: pointer of type atomic_unchecked_t
7391 + * @i: integer value to add
7392 + *
7393 + * Atomically adds @i to @v and returns @i + @v
7394 + */
7395 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7396 +{
7397 + int __i;
7398 +#ifdef CONFIG_M386
7399 + unsigned long flags;
7400 + if (unlikely(boot_cpu_data.x86 <= 3))
7401 + goto no_xadd;
7402 +#endif
7403 + /* Modern 486+ processor */
7404 + __i = i;
7405 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7406 : "+r" (i), "+m" (v->counter)
7407 : : "memory");
7408 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7409 }
7410
7411 #define atomic_inc_return(v) (atomic_add_return(1, v))
7412 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7413 +{
7414 + return atomic_add_return_unchecked(1, v);
7415 +}
7416 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7417
7418 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7419 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7420 return cmpxchg(&v->counter, old, new);
7421 }
7422
7423 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7424 +{
7425 + return cmpxchg(&v->counter, old, new);
7426 +}
7427 +
7428 static inline int atomic_xchg(atomic_t *v, int new)
7429 {
7430 return xchg(&v->counter, new);
7431 }
7432
7433 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7434 +{
7435 + return xchg(&v->counter, new);
7436 +}
7437 +
7438 /**
7439 * __atomic_add_unless - add unless the number is already a given value
7440 * @v: pointer of type atomic_t
7441 @@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7442 */
7443 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7444 {
7445 - int c, old;
7446 + int c, old, new;
7447 c = atomic_read(v);
7448 for (;;) {
7449 - if (unlikely(c == (u)))
7450 + if (unlikely(c == u))
7451 break;
7452 - old = atomic_cmpxchg((v), c, c + (a));
7453 +
7454 + asm volatile("addl %2,%0\n"
7455 +
7456 +#ifdef CONFIG_PAX_REFCOUNT
7457 + "jno 0f\n"
7458 + "subl %2,%0\n"
7459 + "int $4\n0:\n"
7460 + _ASM_EXTABLE(0b, 0b)
7461 +#endif
7462 +
7463 + : "=r" (new)
7464 + : "0" (c), "ir" (a));
7465 +
7466 + old = atomic_cmpxchg(v, c, new);
7467 if (likely(old == c))
7468 break;
7469 c = old;
7470 @@ -244,6 +473,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7471 return c;
7472 }
7473
7474 +/**
7475 + * atomic_inc_not_zero_hint - increment if not null
7476 + * @v: pointer of type atomic_t
7477 + * @hint: probable value of the atomic before the increment
7478 + *
7479 + * This version of atomic_inc_not_zero() gives a hint of probable
7480 + * value of the atomic. This helps processor to not read the memory
7481 + * before doing the atomic read/modify/write cycle, lowering
7482 + * number of bus transactions on some arches.
7483 + *
7484 + * Returns: 0 if increment was not done, 1 otherwise.
7485 + */
7486 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7487 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7488 +{
7489 + int val, c = hint, new;
7490 +
7491 + /* sanity test, should be removed by compiler if hint is a constant */
7492 + if (!hint)
7493 + return __atomic_add_unless(v, 1, 0);
7494 +
7495 + do {
7496 + asm volatile("incl %0\n"
7497 +
7498 +#ifdef CONFIG_PAX_REFCOUNT
7499 + "jno 0f\n"
7500 + "decl %0\n"
7501 + "int $4\n0:\n"
7502 + _ASM_EXTABLE(0b, 0b)
7503 +#endif
7504 +
7505 + : "=r" (new)
7506 + : "0" (c));
7507 +
7508 + val = atomic_cmpxchg(v, c, new);
7509 + if (val == c)
7510 + return 1;
7511 + c = val;
7512 + } while (c);
7513 +
7514 + return 0;
7515 +}
7516
7517 /*
7518 * atomic_dec_if_positive - decrement by 1 if old value positive
7519 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7520 index 24098aa..1e37723 100644
7521 --- a/arch/x86/include/asm/atomic64_32.h
7522 +++ b/arch/x86/include/asm/atomic64_32.h
7523 @@ -12,6 +12,14 @@ typedef struct {
7524 u64 __aligned(8) counter;
7525 } atomic64_t;
7526
7527 +#ifdef CONFIG_PAX_REFCOUNT
7528 +typedef struct {
7529 + u64 __aligned(8) counter;
7530 +} atomic64_unchecked_t;
7531 +#else
7532 +typedef atomic64_t atomic64_unchecked_t;
7533 +#endif
7534 +
7535 #define ATOMIC64_INIT(val) { (val) }
7536
7537 #ifdef CONFIG_X86_CMPXCHG64
7538 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7539 }
7540
7541 /**
7542 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7543 + * @p: pointer to type atomic64_unchecked_t
7544 + * @o: expected value
7545 + * @n: new value
7546 + *
7547 + * Atomically sets @v to @n if it was equal to @o and returns
7548 + * the old value.
7549 + */
7550 +
7551 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7552 +{
7553 + return cmpxchg64(&v->counter, o, n);
7554 +}
7555 +
7556 +/**
7557 * atomic64_xchg - xchg atomic64 variable
7558 * @v: pointer to type atomic64_t
7559 * @n: value to assign
7560 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7561 }
7562
7563 /**
7564 + * atomic64_set_unchecked - set atomic64 variable
7565 + * @v: pointer to type atomic64_unchecked_t
7566 + * @n: value to assign
7567 + *
7568 + * Atomically sets the value of @v to @n.
7569 + */
7570 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7571 +{
7572 + unsigned high = (unsigned)(i >> 32);
7573 + unsigned low = (unsigned)i;
7574 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7575 + : "+b" (low), "+c" (high)
7576 + : "S" (v)
7577 + : "eax", "edx", "memory"
7578 + );
7579 +}
7580 +
7581 +/**
7582 * atomic64_read - read atomic64 variable
7583 * @v: pointer to type atomic64_t
7584 *
7585 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7586 }
7587
7588 /**
7589 + * atomic64_read_unchecked - read atomic64 variable
7590 + * @v: pointer to type atomic64_unchecked_t
7591 + *
7592 + * Atomically reads the value of @v and returns it.
7593 + */
7594 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7595 +{
7596 + long long r;
7597 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7598 + : "=A" (r), "+c" (v)
7599 + : : "memory"
7600 + );
7601 + return r;
7602 + }
7603 +
7604 +/**
7605 * atomic64_add_return - add and return
7606 * @i: integer value to add
7607 * @v: pointer to type atomic64_t
7608 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7609 return i;
7610 }
7611
7612 +/**
7613 + * atomic64_add_return_unchecked - add and return
7614 + * @i: integer value to add
7615 + * @v: pointer to type atomic64_unchecked_t
7616 + *
7617 + * Atomically adds @i to @v and returns @i + *@v
7618 + */
7619 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7620 +{
7621 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7622 + : "+A" (i), "+c" (v)
7623 + : : "memory"
7624 + );
7625 + return i;
7626 +}
7627 +
7628 /*
7629 * Other variants with different arithmetic operators:
7630 */
7631 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7632 return a;
7633 }
7634
7635 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7636 +{
7637 + long long a;
7638 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7639 + : "=A" (a)
7640 + : "S" (v)
7641 + : "memory", "ecx"
7642 + );
7643 + return a;
7644 +}
7645 +
7646 static inline long long atomic64_dec_return(atomic64_t *v)
7647 {
7648 long long a;
7649 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7650 }
7651
7652 /**
7653 + * atomic64_add_unchecked - add integer to atomic64 variable
7654 + * @i: integer value to add
7655 + * @v: pointer to type atomic64_unchecked_t
7656 + *
7657 + * Atomically adds @i to @v.
7658 + */
7659 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7660 +{
7661 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7662 + : "+A" (i), "+c" (v)
7663 + : : "memory"
7664 + );
7665 + return i;
7666 +}
7667 +
7668 +/**
7669 * atomic64_sub - subtract the atomic64 variable
7670 * @i: integer value to subtract
7671 * @v: pointer to type atomic64_t
7672 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7673 index 017594d..d3fcf72 100644
7674 --- a/arch/x86/include/asm/atomic64_64.h
7675 +++ b/arch/x86/include/asm/atomic64_64.h
7676 @@ -18,7 +18,19 @@
7677 */
7678 static inline long atomic64_read(const atomic64_t *v)
7679 {
7680 - return (*(volatile long *)&(v)->counter);
7681 + return (*(volatile const long *)&(v)->counter);
7682 +}
7683 +
7684 +/**
7685 + * atomic64_read_unchecked - read atomic64 variable
7686 + * @v: pointer of type atomic64_unchecked_t
7687 + *
7688 + * Atomically reads the value of @v.
7689 + * Doesn't imply a read memory barrier.
7690 + */
7691 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7692 +{
7693 + return (*(volatile const long *)&(v)->counter);
7694 }
7695
7696 /**
7697 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7698 }
7699
7700 /**
7701 + * atomic64_set_unchecked - set atomic64 variable
7702 + * @v: pointer to type atomic64_unchecked_t
7703 + * @i: required value
7704 + *
7705 + * Atomically sets the value of @v to @i.
7706 + */
7707 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7708 +{
7709 + v->counter = i;
7710 +}
7711 +
7712 +/**
7713 * atomic64_add - add integer to atomic64 variable
7714 * @i: integer value to add
7715 * @v: pointer to type atomic64_t
7716 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7717 */
7718 static inline void atomic64_add(long i, atomic64_t *v)
7719 {
7720 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7721 +
7722 +#ifdef CONFIG_PAX_REFCOUNT
7723 + "jno 0f\n"
7724 + LOCK_PREFIX "subq %1,%0\n"
7725 + "int $4\n0:\n"
7726 + _ASM_EXTABLE(0b, 0b)
7727 +#endif
7728 +
7729 + : "=m" (v->counter)
7730 + : "er" (i), "m" (v->counter));
7731 +}
7732 +
7733 +/**
7734 + * atomic64_add_unchecked - add integer to atomic64 variable
7735 + * @i: integer value to add
7736 + * @v: pointer to type atomic64_unchecked_t
7737 + *
7738 + * Atomically adds @i to @v.
7739 + */
7740 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7741 +{
7742 asm volatile(LOCK_PREFIX "addq %1,%0"
7743 : "=m" (v->counter)
7744 : "er" (i), "m" (v->counter));
7745 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7746 */
7747 static inline void atomic64_sub(long i, atomic64_t *v)
7748 {
7749 - asm volatile(LOCK_PREFIX "subq %1,%0"
7750 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7751 +
7752 +#ifdef CONFIG_PAX_REFCOUNT
7753 + "jno 0f\n"
7754 + LOCK_PREFIX "addq %1,%0\n"
7755 + "int $4\n0:\n"
7756 + _ASM_EXTABLE(0b, 0b)
7757 +#endif
7758 +
7759 + : "=m" (v->counter)
7760 + : "er" (i), "m" (v->counter));
7761 +}
7762 +
7763 +/**
7764 + * atomic64_sub_unchecked - subtract the atomic64 variable
7765 + * @i: integer value to subtract
7766 + * @v: pointer to type atomic64_unchecked_t
7767 + *
7768 + * Atomically subtracts @i from @v.
7769 + */
7770 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7771 +{
7772 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7773 : "=m" (v->counter)
7774 : "er" (i), "m" (v->counter));
7775 }
7776 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7777 {
7778 unsigned char c;
7779
7780 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7781 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7782 +
7783 +#ifdef CONFIG_PAX_REFCOUNT
7784 + "jno 0f\n"
7785 + LOCK_PREFIX "addq %2,%0\n"
7786 + "int $4\n0:\n"
7787 + _ASM_EXTABLE(0b, 0b)
7788 +#endif
7789 +
7790 + "sete %1\n"
7791 : "=m" (v->counter), "=qm" (c)
7792 : "er" (i), "m" (v->counter) : "memory");
7793 return c;
7794 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7795 */
7796 static inline void atomic64_inc(atomic64_t *v)
7797 {
7798 + asm volatile(LOCK_PREFIX "incq %0\n"
7799 +
7800 +#ifdef CONFIG_PAX_REFCOUNT
7801 + "jno 0f\n"
7802 + LOCK_PREFIX "decq %0\n"
7803 + "int $4\n0:\n"
7804 + _ASM_EXTABLE(0b, 0b)
7805 +#endif
7806 +
7807 + : "=m" (v->counter)
7808 + : "m" (v->counter));
7809 +}
7810 +
7811 +/**
7812 + * atomic64_inc_unchecked - increment atomic64 variable
7813 + * @v: pointer to type atomic64_unchecked_t
7814 + *
7815 + * Atomically increments @v by 1.
7816 + */
7817 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7818 +{
7819 asm volatile(LOCK_PREFIX "incq %0"
7820 : "=m" (v->counter)
7821 : "m" (v->counter));
7822 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7823 */
7824 static inline void atomic64_dec(atomic64_t *v)
7825 {
7826 - asm volatile(LOCK_PREFIX "decq %0"
7827 + asm volatile(LOCK_PREFIX "decq %0\n"
7828 +
7829 +#ifdef CONFIG_PAX_REFCOUNT
7830 + "jno 0f\n"
7831 + LOCK_PREFIX "incq %0\n"
7832 + "int $4\n0:\n"
7833 + _ASM_EXTABLE(0b, 0b)
7834 +#endif
7835 +
7836 + : "=m" (v->counter)
7837 + : "m" (v->counter));
7838 +}
7839 +
7840 +/**
7841 + * atomic64_dec_unchecked - decrement atomic64 variable
7842 + * @v: pointer to type atomic64_t
7843 + *
7844 + * Atomically decrements @v by 1.
7845 + */
7846 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7847 +{
7848 + asm volatile(LOCK_PREFIX "decq %0\n"
7849 : "=m" (v->counter)
7850 : "m" (v->counter));
7851 }
7852 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7853 {
7854 unsigned char c;
7855
7856 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7857 + asm volatile(LOCK_PREFIX "decq %0\n"
7858 +
7859 +#ifdef CONFIG_PAX_REFCOUNT
7860 + "jno 0f\n"
7861 + LOCK_PREFIX "incq %0\n"
7862 + "int $4\n0:\n"
7863 + _ASM_EXTABLE(0b, 0b)
7864 +#endif
7865 +
7866 + "sete %1\n"
7867 : "=m" (v->counter), "=qm" (c)
7868 : "m" (v->counter) : "memory");
7869 return c != 0;
7870 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7871 {
7872 unsigned char c;
7873
7874 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7875 + asm volatile(LOCK_PREFIX "incq %0\n"
7876 +
7877 +#ifdef CONFIG_PAX_REFCOUNT
7878 + "jno 0f\n"
7879 + LOCK_PREFIX "decq %0\n"
7880 + "int $4\n0:\n"
7881 + _ASM_EXTABLE(0b, 0b)
7882 +#endif
7883 +
7884 + "sete %1\n"
7885 : "=m" (v->counter), "=qm" (c)
7886 : "m" (v->counter) : "memory");
7887 return c != 0;
7888 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7889 {
7890 unsigned char c;
7891
7892 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7893 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7894 +
7895 +#ifdef CONFIG_PAX_REFCOUNT
7896 + "jno 0f\n"
7897 + LOCK_PREFIX "subq %2,%0\n"
7898 + "int $4\n0:\n"
7899 + _ASM_EXTABLE(0b, 0b)
7900 +#endif
7901 +
7902 + "sets %1\n"
7903 : "=m" (v->counter), "=qm" (c)
7904 : "er" (i), "m" (v->counter) : "memory");
7905 return c;
7906 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7907 static inline long atomic64_add_return(long i, atomic64_t *v)
7908 {
7909 long __i = i;
7910 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7911 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7912 +
7913 +#ifdef CONFIG_PAX_REFCOUNT
7914 + "jno 0f\n"
7915 + "movq %0, %1\n"
7916 + "int $4\n0:\n"
7917 + _ASM_EXTABLE(0b, 0b)
7918 +#endif
7919 +
7920 + : "+r" (i), "+m" (v->counter)
7921 + : : "memory");
7922 + return i + __i;
7923 +}
7924 +
7925 +/**
7926 + * atomic64_add_return_unchecked - add and return
7927 + * @i: integer value to add
7928 + * @v: pointer to type atomic64_unchecked_t
7929 + *
7930 + * Atomically adds @i to @v and returns @i + @v
7931 + */
7932 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7933 +{
7934 + long __i = i;
7935 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7936 : "+r" (i), "+m" (v->counter)
7937 : : "memory");
7938 return i + __i;
7939 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7940 }
7941
7942 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7943 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7944 +{
7945 + return atomic64_add_return_unchecked(1, v);
7946 +}
7947 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7948
7949 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7950 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7951 return cmpxchg(&v->counter, old, new);
7952 }
7953
7954 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7955 +{
7956 + return cmpxchg(&v->counter, old, new);
7957 +}
7958 +
7959 static inline long atomic64_xchg(atomic64_t *v, long new)
7960 {
7961 return xchg(&v->counter, new);
7962 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
7963 */
7964 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7965 {
7966 - long c, old;
7967 + long c, old, new;
7968 c = atomic64_read(v);
7969 for (;;) {
7970 - if (unlikely(c == (u)))
7971 + if (unlikely(c == u))
7972 break;
7973 - old = atomic64_cmpxchg((v), c, c + (a));
7974 +
7975 + asm volatile("add %2,%0\n"
7976 +
7977 +#ifdef CONFIG_PAX_REFCOUNT
7978 + "jno 0f\n"
7979 + "sub %2,%0\n"
7980 + "int $4\n0:\n"
7981 + _ASM_EXTABLE(0b, 0b)
7982 +#endif
7983 +
7984 + : "=r" (new)
7985 + : "0" (c), "ir" (a));
7986 +
7987 + old = atomic64_cmpxchg(v, c, new);
7988 if (likely(old == c))
7989 break;
7990 c = old;
7991 }
7992 - return c != (u);
7993 + return c != u;
7994 }
7995
7996 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7997 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
7998 index 1775d6e..b65017f 100644
7999 --- a/arch/x86/include/asm/bitops.h
8000 +++ b/arch/x86/include/asm/bitops.h
8001 @@ -38,7 +38,7 @@
8002 * a mask operation on a byte.
8003 */
8004 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8005 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8006 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8007 #define CONST_MASK(nr) (1 << ((nr) & 7))
8008
8009 /**
8010 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
8011 index 5e1a2ee..c9f9533 100644
8012 --- a/arch/x86/include/asm/boot.h
8013 +++ b/arch/x86/include/asm/boot.h
8014 @@ -11,10 +11,15 @@
8015 #include <asm/pgtable_types.h>
8016
8017 /* Physical address where kernel should be loaded. */
8018 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8019 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8020 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8021 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8022
8023 +#ifndef __ASSEMBLY__
8024 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8025 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8026 +#endif
8027 +
8028 /* Minimum kernel alignment, as a power of two */
8029 #ifdef CONFIG_X86_64
8030 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8031 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8032 index 48f99f1..d78ebf9 100644
8033 --- a/arch/x86/include/asm/cache.h
8034 +++ b/arch/x86/include/asm/cache.h
8035 @@ -5,12 +5,13 @@
8036
8037 /* L1 cache line size */
8038 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8039 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8040 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8041
8042 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8043 +#define __read_only __attribute__((__section__(".data..read_only")))
8044
8045 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8046 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8047 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8048
8049 #ifdef CONFIG_X86_VSMP
8050 #ifdef CONFIG_SMP
8051 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8052 index 4e12668..501d239 100644
8053 --- a/arch/x86/include/asm/cacheflush.h
8054 +++ b/arch/x86/include/asm/cacheflush.h
8055 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8056 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8057
8058 if (pg_flags == _PGMT_DEFAULT)
8059 - return -1;
8060 + return ~0UL;
8061 else if (pg_flags == _PGMT_WC)
8062 return _PAGE_CACHE_WC;
8063 else if (pg_flags == _PGMT_UC_MINUS)
8064 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8065 index 46fc474..b02b0f9 100644
8066 --- a/arch/x86/include/asm/checksum_32.h
8067 +++ b/arch/x86/include/asm/checksum_32.h
8068 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8069 int len, __wsum sum,
8070 int *src_err_ptr, int *dst_err_ptr);
8071
8072 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8073 + int len, __wsum sum,
8074 + int *src_err_ptr, int *dst_err_ptr);
8075 +
8076 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8077 + int len, __wsum sum,
8078 + int *src_err_ptr, int *dst_err_ptr);
8079 +
8080 /*
8081 * Note: when you get a NULL pointer exception here this means someone
8082 * passed in an incorrect kernel address to one of these functions.
8083 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8084 int *err_ptr)
8085 {
8086 might_sleep();
8087 - return csum_partial_copy_generic((__force void *)src, dst,
8088 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8089 len, sum, err_ptr, NULL);
8090 }
8091
8092 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8093 {
8094 might_sleep();
8095 if (access_ok(VERIFY_WRITE, dst, len))
8096 - return csum_partial_copy_generic(src, (__force void *)dst,
8097 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8098 len, sum, NULL, err_ptr);
8099
8100 if (len)
8101 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8102 index 88b23a4..d2e5f9f 100644
8103 --- a/arch/x86/include/asm/cpufeature.h
8104 +++ b/arch/x86/include/asm/cpufeature.h
8105 @@ -358,7 +358,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8106 ".section .discard,\"aw\",@progbits\n"
8107 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8108 ".previous\n"
8109 - ".section .altinstr_replacement,\"ax\"\n"
8110 + ".section .altinstr_replacement,\"a\"\n"
8111 "3: movb $1,%0\n"
8112 "4:\n"
8113 ".previous\n"
8114 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8115 index 41935fa..3b40db8 100644
8116 --- a/arch/x86/include/asm/desc.h
8117 +++ b/arch/x86/include/asm/desc.h
8118 @@ -4,6 +4,7 @@
8119 #include <asm/desc_defs.h>
8120 #include <asm/ldt.h>
8121 #include <asm/mmu.h>
8122 +#include <asm/pgtable.h>
8123
8124 #include <linux/smp.h>
8125
8126 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8127
8128 desc->type = (info->read_exec_only ^ 1) << 1;
8129 desc->type |= info->contents << 2;
8130 + desc->type |= info->seg_not_present ^ 1;
8131
8132 desc->s = 1;
8133 desc->dpl = 0x3;
8134 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8135 }
8136
8137 extern struct desc_ptr idt_descr;
8138 -extern gate_desc idt_table[];
8139 -
8140 -struct gdt_page {
8141 - struct desc_struct gdt[GDT_ENTRIES];
8142 -} __attribute__((aligned(PAGE_SIZE)));
8143 -
8144 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8145 +extern gate_desc idt_table[256];
8146
8147 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8148 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8149 {
8150 - return per_cpu(gdt_page, cpu).gdt;
8151 + return cpu_gdt_table[cpu];
8152 }
8153
8154 #ifdef CONFIG_X86_64
8155 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8156 unsigned long base, unsigned dpl, unsigned flags,
8157 unsigned short seg)
8158 {
8159 - gate->a = (seg << 16) | (base & 0xffff);
8160 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8161 + gate->gate.offset_low = base;
8162 + gate->gate.seg = seg;
8163 + gate->gate.reserved = 0;
8164 + gate->gate.type = type;
8165 + gate->gate.s = 0;
8166 + gate->gate.dpl = dpl;
8167 + gate->gate.p = 1;
8168 + gate->gate.offset_high = base >> 16;
8169 }
8170
8171 #endif
8172 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8173
8174 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8175 {
8176 + pax_open_kernel();
8177 memcpy(&idt[entry], gate, sizeof(*gate));
8178 + pax_close_kernel();
8179 }
8180
8181 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8182 {
8183 + pax_open_kernel();
8184 memcpy(&ldt[entry], desc, 8);
8185 + pax_close_kernel();
8186 }
8187
8188 static inline void
8189 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8190 default: size = sizeof(*gdt); break;
8191 }
8192
8193 + pax_open_kernel();
8194 memcpy(&gdt[entry], desc, size);
8195 + pax_close_kernel();
8196 }
8197
8198 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8199 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8200
8201 static inline void native_load_tr_desc(void)
8202 {
8203 + pax_open_kernel();
8204 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8205 + pax_close_kernel();
8206 }
8207
8208 static inline void native_load_gdt(const struct desc_ptr *dtr)
8209 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8210 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8211 unsigned int i;
8212
8213 + pax_open_kernel();
8214 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8215 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8216 + pax_close_kernel();
8217 }
8218
8219 #define _LDT_empty(info) \
8220 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8221 desc->limit = (limit >> 16) & 0xf;
8222 }
8223
8224 -static inline void _set_gate(int gate, unsigned type, void *addr,
8225 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8226 unsigned dpl, unsigned ist, unsigned seg)
8227 {
8228 gate_desc s;
8229 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8230 * Pentium F0 0F bugfix can have resulted in the mapped
8231 * IDT being write-protected.
8232 */
8233 -static inline void set_intr_gate(unsigned int n, void *addr)
8234 +static inline void set_intr_gate(unsigned int n, const void *addr)
8235 {
8236 BUG_ON((unsigned)n > 0xFF);
8237 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8238 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8239 /*
8240 * This routine sets up an interrupt gate at directory privilege level 3.
8241 */
8242 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8243 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8244 {
8245 BUG_ON((unsigned)n > 0xFF);
8246 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8247 }
8248
8249 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8250 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8251 {
8252 BUG_ON((unsigned)n > 0xFF);
8253 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8254 }
8255
8256 -static inline void set_trap_gate(unsigned int n, void *addr)
8257 +static inline void set_trap_gate(unsigned int n, const void *addr)
8258 {
8259 BUG_ON((unsigned)n > 0xFF);
8260 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8261 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8262 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8263 {
8264 BUG_ON((unsigned)n > 0xFF);
8265 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8266 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8267 }
8268
8269 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8270 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8271 {
8272 BUG_ON((unsigned)n > 0xFF);
8273 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8274 }
8275
8276 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8277 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8278 {
8279 BUG_ON((unsigned)n > 0xFF);
8280 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8281 }
8282
8283 +#ifdef CONFIG_X86_32
8284 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8285 +{
8286 + struct desc_struct d;
8287 +
8288 + if (likely(limit))
8289 + limit = (limit - 1UL) >> PAGE_SHIFT;
8290 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8291 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8292 +}
8293 +#endif
8294 +
8295 #endif /* _ASM_X86_DESC_H */
8296 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8297 index 278441f..b95a174 100644
8298 --- a/arch/x86/include/asm/desc_defs.h
8299 +++ b/arch/x86/include/asm/desc_defs.h
8300 @@ -31,6 +31,12 @@ struct desc_struct {
8301 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8302 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8303 };
8304 + struct {
8305 + u16 offset_low;
8306 + u16 seg;
8307 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8308 + unsigned offset_high: 16;
8309 + } gate;
8310 };
8311 } __attribute__((packed));
8312
8313 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8314 index 908b969..a1f4eb4 100644
8315 --- a/arch/x86/include/asm/e820.h
8316 +++ b/arch/x86/include/asm/e820.h
8317 @@ -69,7 +69,7 @@ struct e820map {
8318 #define ISA_START_ADDRESS 0xa0000
8319 #define ISA_END_ADDRESS 0x100000
8320
8321 -#define BIOS_BEGIN 0x000a0000
8322 +#define BIOS_BEGIN 0x000c0000
8323 #define BIOS_END 0x00100000
8324
8325 #define BIOS_ROM_BASE 0xffe00000
8326 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8327 index f2ad216..eb24c96 100644
8328 --- a/arch/x86/include/asm/elf.h
8329 +++ b/arch/x86/include/asm/elf.h
8330 @@ -237,7 +237,25 @@ extern int force_personality32;
8331 the loader. We need to make sure that it is out of the way of the program
8332 that it will "exec", and that there is sufficient room for the brk. */
8333
8334 +#ifdef CONFIG_PAX_SEGMEXEC
8335 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8336 +#else
8337 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8338 +#endif
8339 +
8340 +#ifdef CONFIG_PAX_ASLR
8341 +#ifdef CONFIG_X86_32
8342 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8343 +
8344 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8345 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8346 +#else
8347 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8348 +
8349 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8350 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8351 +#endif
8352 +#endif
8353
8354 /* This yields a mask that user programs can use to figure out what
8355 instruction set this CPU supports. This could be done in user space,
8356 @@ -290,9 +308,7 @@ do { \
8357
8358 #define ARCH_DLINFO \
8359 do { \
8360 - if (vdso_enabled) \
8361 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8362 - (unsigned long)current->mm->context.vdso); \
8363 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8364 } while (0)
8365
8366 #define AT_SYSINFO 32
8367 @@ -303,7 +319,7 @@ do { \
8368
8369 #endif /* !CONFIG_X86_32 */
8370
8371 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8372 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8373
8374 #define VDSO_ENTRY \
8375 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8376 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8377 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8378 #define compat_arch_setup_additional_pages syscall32_setup_pages
8379
8380 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8381 -#define arch_randomize_brk arch_randomize_brk
8382 -
8383 #endif /* _ASM_X86_ELF_H */
8384 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8385 index cc70c1c..d96d011 100644
8386 --- a/arch/x86/include/asm/emergency-restart.h
8387 +++ b/arch/x86/include/asm/emergency-restart.h
8388 @@ -15,6 +15,6 @@ enum reboot_type {
8389
8390 extern enum reboot_type reboot_type;
8391
8392 -extern void machine_emergency_restart(void);
8393 +extern void machine_emergency_restart(void) __noreturn;
8394
8395 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8396 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8397 index d09bb03..4ea4194 100644
8398 --- a/arch/x86/include/asm/futex.h
8399 +++ b/arch/x86/include/asm/futex.h
8400 @@ -12,16 +12,18 @@
8401 #include <asm/system.h>
8402
8403 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8404 + typecheck(u32 __user *, uaddr); \
8405 asm volatile("1:\t" insn "\n" \
8406 "2:\t.section .fixup,\"ax\"\n" \
8407 "3:\tmov\t%3, %1\n" \
8408 "\tjmp\t2b\n" \
8409 "\t.previous\n" \
8410 _ASM_EXTABLE(1b, 3b) \
8411 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8412 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8413 : "i" (-EFAULT), "0" (oparg), "1" (0))
8414
8415 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8416 + typecheck(u32 __user *, uaddr); \
8417 asm volatile("1:\tmovl %2, %0\n" \
8418 "\tmovl\t%0, %3\n" \
8419 "\t" insn "\n" \
8420 @@ -34,7 +36,7 @@
8421 _ASM_EXTABLE(1b, 4b) \
8422 _ASM_EXTABLE(2b, 4b) \
8423 : "=&a" (oldval), "=&r" (ret), \
8424 - "+m" (*uaddr), "=&r" (tem) \
8425 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8426 : "r" (oparg), "i" (-EFAULT), "1" (0))
8427
8428 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8429 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8430
8431 switch (op) {
8432 case FUTEX_OP_SET:
8433 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8434 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8435 break;
8436 case FUTEX_OP_ADD:
8437 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8438 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8439 uaddr, oparg);
8440 break;
8441 case FUTEX_OP_OR:
8442 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8443 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8444 return -EFAULT;
8445
8446 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8447 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8448 "2:\t.section .fixup, \"ax\"\n"
8449 "3:\tmov %3, %0\n"
8450 "\tjmp 2b\n"
8451 "\t.previous\n"
8452 _ASM_EXTABLE(1b, 3b)
8453 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8454 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8455 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8456 : "memory"
8457 );
8458 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8459 index 0919905..2cf38d6 100644
8460 --- a/arch/x86/include/asm/hw_irq.h
8461 +++ b/arch/x86/include/asm/hw_irq.h
8462 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8463 extern void enable_IO_APIC(void);
8464
8465 /* Statistics */
8466 -extern atomic_t irq_err_count;
8467 -extern atomic_t irq_mis_count;
8468 +extern atomic_unchecked_t irq_err_count;
8469 +extern atomic_unchecked_t irq_mis_count;
8470
8471 /* EISA */
8472 extern void eisa_set_level_irq(unsigned int irq);
8473 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8474 index c9e09ea..73888df 100644
8475 --- a/arch/x86/include/asm/i387.h
8476 +++ b/arch/x86/include/asm/i387.h
8477 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8478 {
8479 int err;
8480
8481 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8482 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8483 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8484 +#endif
8485 +
8486 /* See comment in fxsave() below. */
8487 #ifdef CONFIG_AS_FXSAVEQ
8488 asm volatile("1: fxrstorq %[fx]\n\t"
8489 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8490 {
8491 int err;
8492
8493 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8494 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8495 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8496 +#endif
8497 +
8498 /*
8499 * Clear the bytes not touched by the fxsave and reserved
8500 * for the SW usage.
8501 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8502 #endif /* CONFIG_X86_64 */
8503
8504 /* We need a safe address that is cheap to find and that is already
8505 - in L1 during context switch. The best choices are unfortunately
8506 - different for UP and SMP */
8507 -#ifdef CONFIG_SMP
8508 -#define safe_address (__per_cpu_offset[0])
8509 -#else
8510 -#define safe_address (kstat_cpu(0).cpustat.user)
8511 -#endif
8512 + in L1 during context switch. */
8513 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8514
8515 /*
8516 * These must be called with preempt disabled
8517 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8518 struct thread_info *me = current_thread_info();
8519 preempt_disable();
8520 if (me->status & TS_USEDFPU)
8521 - __save_init_fpu(me->task);
8522 + __save_init_fpu(current);
8523 else
8524 clts();
8525 }
8526 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8527 index d8e8eef..99f81ae 100644
8528 --- a/arch/x86/include/asm/io.h
8529 +++ b/arch/x86/include/asm/io.h
8530 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8531
8532 #include <linux/vmalloc.h>
8533
8534 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8535 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8536 +{
8537 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8538 +}
8539 +
8540 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8541 +{
8542 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8543 +}
8544 +
8545 /*
8546 * Convert a virtual cached pointer to an uncached pointer
8547 */
8548 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8549 index bba3cf8..06bc8da 100644
8550 --- a/arch/x86/include/asm/irqflags.h
8551 +++ b/arch/x86/include/asm/irqflags.h
8552 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8553 sti; \
8554 sysexit
8555
8556 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8557 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8558 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8559 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8560 +
8561 #else
8562 #define INTERRUPT_RETURN iret
8563 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8564 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8565 index 5478825..839e88c 100644
8566 --- a/arch/x86/include/asm/kprobes.h
8567 +++ b/arch/x86/include/asm/kprobes.h
8568 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8569 #define RELATIVEJUMP_SIZE 5
8570 #define RELATIVECALL_OPCODE 0xe8
8571 #define RELATIVE_ADDR_SIZE 4
8572 -#define MAX_STACK_SIZE 64
8573 -#define MIN_STACK_SIZE(ADDR) \
8574 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8575 - THREAD_SIZE - (unsigned long)(ADDR))) \
8576 - ? (MAX_STACK_SIZE) \
8577 - : (((unsigned long)current_thread_info()) + \
8578 - THREAD_SIZE - (unsigned long)(ADDR)))
8579 +#define MAX_STACK_SIZE 64UL
8580 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8581
8582 #define flush_insn_slot(p) do { } while (0)
8583
8584 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8585 index dd51c83..66cbfac 100644
8586 --- a/arch/x86/include/asm/kvm_host.h
8587 +++ b/arch/x86/include/asm/kvm_host.h
8588 @@ -456,7 +456,7 @@ struct kvm_arch {
8589 unsigned int n_requested_mmu_pages;
8590 unsigned int n_max_mmu_pages;
8591 unsigned int indirect_shadow_pages;
8592 - atomic_t invlpg_counter;
8593 + atomic_unchecked_t invlpg_counter;
8594 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8595 /*
8596 * Hash table of struct kvm_mmu_page.
8597 @@ -636,7 +636,7 @@ struct kvm_x86_ops {
8598 enum x86_intercept_stage stage);
8599
8600 const struct trace_print_flags *exit_reasons_str;
8601 -};
8602 +} __do_const;
8603
8604 struct kvm_arch_async_pf {
8605 u32 token;
8606 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8607 index 9cdae5d..300d20f 100644
8608 --- a/arch/x86/include/asm/local.h
8609 +++ b/arch/x86/include/asm/local.h
8610 @@ -18,26 +18,58 @@ typedef struct {
8611
8612 static inline void local_inc(local_t *l)
8613 {
8614 - asm volatile(_ASM_INC "%0"
8615 + asm volatile(_ASM_INC "%0\n"
8616 +
8617 +#ifdef CONFIG_PAX_REFCOUNT
8618 + "jno 0f\n"
8619 + _ASM_DEC "%0\n"
8620 + "int $4\n0:\n"
8621 + _ASM_EXTABLE(0b, 0b)
8622 +#endif
8623 +
8624 : "+m" (l->a.counter));
8625 }
8626
8627 static inline void local_dec(local_t *l)
8628 {
8629 - asm volatile(_ASM_DEC "%0"
8630 + asm volatile(_ASM_DEC "%0\n"
8631 +
8632 +#ifdef CONFIG_PAX_REFCOUNT
8633 + "jno 0f\n"
8634 + _ASM_INC "%0\n"
8635 + "int $4\n0:\n"
8636 + _ASM_EXTABLE(0b, 0b)
8637 +#endif
8638 +
8639 : "+m" (l->a.counter));
8640 }
8641
8642 static inline void local_add(long i, local_t *l)
8643 {
8644 - asm volatile(_ASM_ADD "%1,%0"
8645 + asm volatile(_ASM_ADD "%1,%0\n"
8646 +
8647 +#ifdef CONFIG_PAX_REFCOUNT
8648 + "jno 0f\n"
8649 + _ASM_SUB "%1,%0\n"
8650 + "int $4\n0:\n"
8651 + _ASM_EXTABLE(0b, 0b)
8652 +#endif
8653 +
8654 : "+m" (l->a.counter)
8655 : "ir" (i));
8656 }
8657
8658 static inline void local_sub(long i, local_t *l)
8659 {
8660 - asm volatile(_ASM_SUB "%1,%0"
8661 + asm volatile(_ASM_SUB "%1,%0\n"
8662 +
8663 +#ifdef CONFIG_PAX_REFCOUNT
8664 + "jno 0f\n"
8665 + _ASM_ADD "%1,%0\n"
8666 + "int $4\n0:\n"
8667 + _ASM_EXTABLE(0b, 0b)
8668 +#endif
8669 +
8670 : "+m" (l->a.counter)
8671 : "ir" (i));
8672 }
8673 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8674 {
8675 unsigned char c;
8676
8677 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8678 + asm volatile(_ASM_SUB "%2,%0\n"
8679 +
8680 +#ifdef CONFIG_PAX_REFCOUNT
8681 + "jno 0f\n"
8682 + _ASM_ADD "%2,%0\n"
8683 + "int $4\n0:\n"
8684 + _ASM_EXTABLE(0b, 0b)
8685 +#endif
8686 +
8687 + "sete %1\n"
8688 : "+m" (l->a.counter), "=qm" (c)
8689 : "ir" (i) : "memory");
8690 return c;
8691 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8692 {
8693 unsigned char c;
8694
8695 - asm volatile(_ASM_DEC "%0; sete %1"
8696 + asm volatile(_ASM_DEC "%0\n"
8697 +
8698 +#ifdef CONFIG_PAX_REFCOUNT
8699 + "jno 0f\n"
8700 + _ASM_INC "%0\n"
8701 + "int $4\n0:\n"
8702 + _ASM_EXTABLE(0b, 0b)
8703 +#endif
8704 +
8705 + "sete %1\n"
8706 : "+m" (l->a.counter), "=qm" (c)
8707 : : "memory");
8708 return c != 0;
8709 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8710 {
8711 unsigned char c;
8712
8713 - asm volatile(_ASM_INC "%0; sete %1"
8714 + asm volatile(_ASM_INC "%0\n"
8715 +
8716 +#ifdef CONFIG_PAX_REFCOUNT
8717 + "jno 0f\n"
8718 + _ASM_DEC "%0\n"
8719 + "int $4\n0:\n"
8720 + _ASM_EXTABLE(0b, 0b)
8721 +#endif
8722 +
8723 + "sete %1\n"
8724 : "+m" (l->a.counter), "=qm" (c)
8725 : : "memory");
8726 return c != 0;
8727 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8728 {
8729 unsigned char c;
8730
8731 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8732 + asm volatile(_ASM_ADD "%2,%0\n"
8733 +
8734 +#ifdef CONFIG_PAX_REFCOUNT
8735 + "jno 0f\n"
8736 + _ASM_SUB "%2,%0\n"
8737 + "int $4\n0:\n"
8738 + _ASM_EXTABLE(0b, 0b)
8739 +#endif
8740 +
8741 + "sets %1\n"
8742 : "+m" (l->a.counter), "=qm" (c)
8743 : "ir" (i) : "memory");
8744 return c;
8745 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8746 #endif
8747 /* Modern 486+ processor */
8748 __i = i;
8749 - asm volatile(_ASM_XADD "%0, %1;"
8750 + asm volatile(_ASM_XADD "%0, %1\n"
8751 +
8752 +#ifdef CONFIG_PAX_REFCOUNT
8753 + "jno 0f\n"
8754 + _ASM_MOV "%0,%1\n"
8755 + "int $4\n0:\n"
8756 + _ASM_EXTABLE(0b, 0b)
8757 +#endif
8758 +
8759 : "+r" (i), "+m" (l->a.counter)
8760 : : "memory");
8761 return i + __i;
8762 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8763 index 593e51d..fa69c9a 100644
8764 --- a/arch/x86/include/asm/mman.h
8765 +++ b/arch/x86/include/asm/mman.h
8766 @@ -5,4 +5,14 @@
8767
8768 #include <asm-generic/mman.h>
8769
8770 +#ifdef __KERNEL__
8771 +#ifndef __ASSEMBLY__
8772 +#ifdef CONFIG_X86_32
8773 +#define arch_mmap_check i386_mmap_check
8774 +int i386_mmap_check(unsigned long addr, unsigned long len,
8775 + unsigned long flags);
8776 +#endif
8777 +#endif
8778 +#endif
8779 +
8780 #endif /* _ASM_X86_MMAN_H */
8781 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8782 index 5f55e69..e20bfb1 100644
8783 --- a/arch/x86/include/asm/mmu.h
8784 +++ b/arch/x86/include/asm/mmu.h
8785 @@ -9,7 +9,7 @@
8786 * we put the segment information here.
8787 */
8788 typedef struct {
8789 - void *ldt;
8790 + struct desc_struct *ldt;
8791 int size;
8792
8793 #ifdef CONFIG_X86_64
8794 @@ -18,7 +18,19 @@ typedef struct {
8795 #endif
8796
8797 struct mutex lock;
8798 - void *vdso;
8799 + unsigned long vdso;
8800 +
8801 +#ifdef CONFIG_X86_32
8802 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8803 + unsigned long user_cs_base;
8804 + unsigned long user_cs_limit;
8805 +
8806 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8807 + cpumask_t cpu_user_cs_mask;
8808 +#endif
8809 +
8810 +#endif
8811 +#endif
8812 } mm_context_t;
8813
8814 #ifdef CONFIG_SMP
8815 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8816 index 6902152..399f3a2 100644
8817 --- a/arch/x86/include/asm/mmu_context.h
8818 +++ b/arch/x86/include/asm/mmu_context.h
8819 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8820
8821 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8822 {
8823 +
8824 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8825 + unsigned int i;
8826 + pgd_t *pgd;
8827 +
8828 + pax_open_kernel();
8829 + pgd = get_cpu_pgd(smp_processor_id());
8830 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8831 + set_pgd_batched(pgd+i, native_make_pgd(0));
8832 + pax_close_kernel();
8833 +#endif
8834 +
8835 #ifdef CONFIG_SMP
8836 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8837 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8838 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8839 struct task_struct *tsk)
8840 {
8841 unsigned cpu = smp_processor_id();
8842 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8843 + int tlbstate = TLBSTATE_OK;
8844 +#endif
8845
8846 if (likely(prev != next)) {
8847 #ifdef CONFIG_SMP
8848 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8849 + tlbstate = percpu_read(cpu_tlbstate.state);
8850 +#endif
8851 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8852 percpu_write(cpu_tlbstate.active_mm, next);
8853 #endif
8854 cpumask_set_cpu(cpu, mm_cpumask(next));
8855
8856 /* Re-load page tables */
8857 +#ifdef CONFIG_PAX_PER_CPU_PGD
8858 + pax_open_kernel();
8859 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8860 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8861 + pax_close_kernel();
8862 + load_cr3(get_cpu_pgd(cpu));
8863 +#else
8864 load_cr3(next->pgd);
8865 +#endif
8866
8867 /* stop flush ipis for the previous mm */
8868 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8869 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8870 */
8871 if (unlikely(prev->context.ldt != next->context.ldt))
8872 load_LDT_nolock(&next->context);
8873 - }
8874 +
8875 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8876 + if (!(__supported_pte_mask & _PAGE_NX)) {
8877 + smp_mb__before_clear_bit();
8878 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8879 + smp_mb__after_clear_bit();
8880 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8881 + }
8882 +#endif
8883 +
8884 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8885 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8886 + prev->context.user_cs_limit != next->context.user_cs_limit))
8887 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8888 #ifdef CONFIG_SMP
8889 + else if (unlikely(tlbstate != TLBSTATE_OK))
8890 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8891 +#endif
8892 +#endif
8893 +
8894 + }
8895 else {
8896 +
8897 +#ifdef CONFIG_PAX_PER_CPU_PGD
8898 + pax_open_kernel();
8899 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8900 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8901 + pax_close_kernel();
8902 + load_cr3(get_cpu_pgd(cpu));
8903 +#endif
8904 +
8905 +#ifdef CONFIG_SMP
8906 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8907 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8908
8909 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8910 * tlb flush IPI delivery. We must reload CR3
8911 * to make sure to use no freed page tables.
8912 */
8913 +
8914 +#ifndef CONFIG_PAX_PER_CPU_PGD
8915 load_cr3(next->pgd);
8916 +#endif
8917 +
8918 load_LDT_nolock(&next->context);
8919 +
8920 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8921 + if (!(__supported_pte_mask & _PAGE_NX))
8922 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8923 +#endif
8924 +
8925 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8926 +#ifdef CONFIG_PAX_PAGEEXEC
8927 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
8928 +#endif
8929 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8930 +#endif
8931 +
8932 }
8933 - }
8934 #endif
8935 + }
8936 }
8937
8938 #define activate_mm(prev, next) \
8939 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
8940 index 9eae775..c914fea 100644
8941 --- a/arch/x86/include/asm/module.h
8942 +++ b/arch/x86/include/asm/module.h
8943 @@ -5,6 +5,7 @@
8944
8945 #ifdef CONFIG_X86_64
8946 /* X86_64 does not define MODULE_PROC_FAMILY */
8947 +#define MODULE_PROC_FAMILY ""
8948 #elif defined CONFIG_M386
8949 #define MODULE_PROC_FAMILY "386 "
8950 #elif defined CONFIG_M486
8951 @@ -59,8 +60,20 @@
8952 #error unknown processor family
8953 #endif
8954
8955 -#ifdef CONFIG_X86_32
8956 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
8957 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8958 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
8959 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
8960 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
8961 +#else
8962 +#define MODULE_PAX_KERNEXEC ""
8963 #endif
8964
8965 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8966 +#define MODULE_PAX_UDEREF "UDEREF "
8967 +#else
8968 +#define MODULE_PAX_UDEREF ""
8969 +#endif
8970 +
8971 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
8972 +
8973 #endif /* _ASM_X86_MODULE_H */
8974 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
8975 index 7639dbf..e08a58c 100644
8976 --- a/arch/x86/include/asm/page_64_types.h
8977 +++ b/arch/x86/include/asm/page_64_types.h
8978 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8979
8980 /* duplicated to the one in bootmem.h */
8981 extern unsigned long max_pfn;
8982 -extern unsigned long phys_base;
8983 +extern const unsigned long phys_base;
8984
8985 extern unsigned long __phys_addr(unsigned long);
8986 #define __phys_reloc_hide(x) (x)
8987 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
8988 index a7d2db9..edb023e 100644
8989 --- a/arch/x86/include/asm/paravirt.h
8990 +++ b/arch/x86/include/asm/paravirt.h
8991 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
8992 val);
8993 }
8994
8995 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8996 +{
8997 + pgdval_t val = native_pgd_val(pgd);
8998 +
8999 + if (sizeof(pgdval_t) > sizeof(long))
9000 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
9001 + val, (u64)val >> 32);
9002 + else
9003 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
9004 + val);
9005 +}
9006 +
9007 static inline void pgd_clear(pgd_t *pgdp)
9008 {
9009 set_pgd(pgdp, __pgd(0));
9010 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
9011 pv_mmu_ops.set_fixmap(idx, phys, flags);
9012 }
9013
9014 +#ifdef CONFIG_PAX_KERNEXEC
9015 +static inline unsigned long pax_open_kernel(void)
9016 +{
9017 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9018 +}
9019 +
9020 +static inline unsigned long pax_close_kernel(void)
9021 +{
9022 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9023 +}
9024 +#else
9025 +static inline unsigned long pax_open_kernel(void) { return 0; }
9026 +static inline unsigned long pax_close_kernel(void) { return 0; }
9027 +#endif
9028 +
9029 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9030
9031 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9032 @@ -964,7 +991,7 @@ extern void default_banner(void);
9033
9034 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9035 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9036 -#define PARA_INDIRECT(addr) *%cs:addr
9037 +#define PARA_INDIRECT(addr) *%ss:addr
9038 #endif
9039
9040 #define INTERRUPT_RETURN \
9041 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9042 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9043 CLBR_NONE, \
9044 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9045 +
9046 +#define GET_CR0_INTO_RDI \
9047 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9048 + mov %rax,%rdi
9049 +
9050 +#define SET_RDI_INTO_CR0 \
9051 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9052 +
9053 +#define GET_CR3_INTO_RDI \
9054 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9055 + mov %rax,%rdi
9056 +
9057 +#define SET_RDI_INTO_CR3 \
9058 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9059 +
9060 #endif /* CONFIG_X86_32 */
9061
9062 #endif /* __ASSEMBLY__ */
9063 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9064 index 8e8b9a4..f07d725 100644
9065 --- a/arch/x86/include/asm/paravirt_types.h
9066 +++ b/arch/x86/include/asm/paravirt_types.h
9067 @@ -84,20 +84,20 @@ struct pv_init_ops {
9068 */
9069 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9070 unsigned long addr, unsigned len);
9071 -};
9072 +} __no_const;
9073
9074
9075 struct pv_lazy_ops {
9076 /* Set deferred update mode, used for batching operations. */
9077 void (*enter)(void);
9078 void (*leave)(void);
9079 -};
9080 +} __no_const;
9081
9082 struct pv_time_ops {
9083 unsigned long long (*sched_clock)(void);
9084 unsigned long long (*steal_clock)(int cpu);
9085 unsigned long (*get_tsc_khz)(void);
9086 -};
9087 +} __no_const;
9088
9089 struct pv_cpu_ops {
9090 /* hooks for various privileged instructions */
9091 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9092
9093 void (*start_context_switch)(struct task_struct *prev);
9094 void (*end_context_switch)(struct task_struct *next);
9095 -};
9096 +} __no_const;
9097
9098 struct pv_irq_ops {
9099 /*
9100 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9101 unsigned long start_eip,
9102 unsigned long start_esp);
9103 #endif
9104 -};
9105 +} __no_const;
9106
9107 struct pv_mmu_ops {
9108 unsigned long (*read_cr2)(void);
9109 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9110 struct paravirt_callee_save make_pud;
9111
9112 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9113 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9114 #endif /* PAGETABLE_LEVELS == 4 */
9115 #endif /* PAGETABLE_LEVELS >= 3 */
9116
9117 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9118 an mfn. We can tell which is which from the index. */
9119 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9120 phys_addr_t phys, pgprot_t flags);
9121 +
9122 +#ifdef CONFIG_PAX_KERNEXEC
9123 + unsigned long (*pax_open_kernel)(void);
9124 + unsigned long (*pax_close_kernel)(void);
9125 +#endif
9126 +
9127 };
9128
9129 struct arch_spinlock;
9130 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9131 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9132 int (*spin_trylock)(struct arch_spinlock *lock);
9133 void (*spin_unlock)(struct arch_spinlock *lock);
9134 -};
9135 +} __no_const;
9136
9137 /* This contains all the paravirt structures: we get a convenient
9138 * number for each function using the offset which we use to indicate
9139 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9140 index b4389a4..b7ff22c 100644
9141 --- a/arch/x86/include/asm/pgalloc.h
9142 +++ b/arch/x86/include/asm/pgalloc.h
9143 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9144 pmd_t *pmd, pte_t *pte)
9145 {
9146 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9147 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9148 +}
9149 +
9150 +static inline void pmd_populate_user(struct mm_struct *mm,
9151 + pmd_t *pmd, pte_t *pte)
9152 +{
9153 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9154 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9155 }
9156
9157 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9158 index 98391db..8f6984e 100644
9159 --- a/arch/x86/include/asm/pgtable-2level.h
9160 +++ b/arch/x86/include/asm/pgtable-2level.h
9161 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9162
9163 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9164 {
9165 + pax_open_kernel();
9166 *pmdp = pmd;
9167 + pax_close_kernel();
9168 }
9169
9170 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9171 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9172 index effff47..f9e4035 100644
9173 --- a/arch/x86/include/asm/pgtable-3level.h
9174 +++ b/arch/x86/include/asm/pgtable-3level.h
9175 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9176
9177 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9178 {
9179 + pax_open_kernel();
9180 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9181 + pax_close_kernel();
9182 }
9183
9184 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9185 {
9186 + pax_open_kernel();
9187 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9188 + pax_close_kernel();
9189 }
9190
9191 /*
9192 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9193 index 18601c8..3d716d1 100644
9194 --- a/arch/x86/include/asm/pgtable.h
9195 +++ b/arch/x86/include/asm/pgtable.h
9196 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9197
9198 #ifndef __PAGETABLE_PUD_FOLDED
9199 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9200 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9201 #define pgd_clear(pgd) native_pgd_clear(pgd)
9202 #endif
9203
9204 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9205
9206 #define arch_end_context_switch(prev) do {} while(0)
9207
9208 +#define pax_open_kernel() native_pax_open_kernel()
9209 +#define pax_close_kernel() native_pax_close_kernel()
9210 #endif /* CONFIG_PARAVIRT */
9211
9212 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9213 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9214 +
9215 +#ifdef CONFIG_PAX_KERNEXEC
9216 +static inline unsigned long native_pax_open_kernel(void)
9217 +{
9218 + unsigned long cr0;
9219 +
9220 + preempt_disable();
9221 + barrier();
9222 + cr0 = read_cr0() ^ X86_CR0_WP;
9223 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9224 + write_cr0(cr0);
9225 + return cr0 ^ X86_CR0_WP;
9226 +}
9227 +
9228 +static inline unsigned long native_pax_close_kernel(void)
9229 +{
9230 + unsigned long cr0;
9231 +
9232 + cr0 = read_cr0() ^ X86_CR0_WP;
9233 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9234 + write_cr0(cr0);
9235 + barrier();
9236 + preempt_enable_no_resched();
9237 + return cr0 ^ X86_CR0_WP;
9238 +}
9239 +#else
9240 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9241 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9242 +#endif
9243 +
9244 /*
9245 * The following only work if pte_present() is true.
9246 * Undefined behaviour if not..
9247 */
9248 +static inline int pte_user(pte_t pte)
9249 +{
9250 + return pte_val(pte) & _PAGE_USER;
9251 +}
9252 +
9253 static inline int pte_dirty(pte_t pte)
9254 {
9255 return pte_flags(pte) & _PAGE_DIRTY;
9256 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9257 return pte_clear_flags(pte, _PAGE_RW);
9258 }
9259
9260 +static inline pte_t pte_mkread(pte_t pte)
9261 +{
9262 + return __pte(pte_val(pte) | _PAGE_USER);
9263 +}
9264 +
9265 static inline pte_t pte_mkexec(pte_t pte)
9266 {
9267 - return pte_clear_flags(pte, _PAGE_NX);
9268 +#ifdef CONFIG_X86_PAE
9269 + if (__supported_pte_mask & _PAGE_NX)
9270 + return pte_clear_flags(pte, _PAGE_NX);
9271 + else
9272 +#endif
9273 + return pte_set_flags(pte, _PAGE_USER);
9274 +}
9275 +
9276 +static inline pte_t pte_exprotect(pte_t pte)
9277 +{
9278 +#ifdef CONFIG_X86_PAE
9279 + if (__supported_pte_mask & _PAGE_NX)
9280 + return pte_set_flags(pte, _PAGE_NX);
9281 + else
9282 +#endif
9283 + return pte_clear_flags(pte, _PAGE_USER);
9284 }
9285
9286 static inline pte_t pte_mkdirty(pte_t pte)
9287 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9288 #endif
9289
9290 #ifndef __ASSEMBLY__
9291 +
9292 +#ifdef CONFIG_PAX_PER_CPU_PGD
9293 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9294 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9295 +{
9296 + return cpu_pgd[cpu];
9297 +}
9298 +#endif
9299 +
9300 #include <linux/mm_types.h>
9301
9302 static inline int pte_none(pte_t pte)
9303 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9304
9305 static inline int pgd_bad(pgd_t pgd)
9306 {
9307 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9308 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9309 }
9310
9311 static inline int pgd_none(pgd_t pgd)
9312 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9313 * pgd_offset() returns a (pgd_t *)
9314 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9315 */
9316 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9317 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9318 +
9319 +#ifdef CONFIG_PAX_PER_CPU_PGD
9320 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9321 +#endif
9322 +
9323 /*
9324 * a shortcut which implies the use of the kernel's pgd, instead
9325 * of a process's
9326 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9327 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9328 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9329
9330 +#ifdef CONFIG_X86_32
9331 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9332 +#else
9333 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9334 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9335 +
9336 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9337 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9338 +#else
9339 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9340 +#endif
9341 +
9342 +#endif
9343 +
9344 #ifndef __ASSEMBLY__
9345
9346 extern int direct_gbpages;
9347 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9348 * dst and src can be on the same page, but the range must not overlap,
9349 * and must not cross a page boundary.
9350 */
9351 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9352 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9353 {
9354 - memcpy(dst, src, count * sizeof(pgd_t));
9355 + pax_open_kernel();
9356 + while (count--)
9357 + *dst++ = *src++;
9358 + pax_close_kernel();
9359 }
9360
9361 +#ifdef CONFIG_PAX_PER_CPU_PGD
9362 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9363 +#endif
9364 +
9365 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9366 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9367 +#else
9368 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9369 +#endif
9370
9371 #include <asm-generic/pgtable.h>
9372 #endif /* __ASSEMBLY__ */
9373 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9374 index 0c92113..34a77c6 100644
9375 --- a/arch/x86/include/asm/pgtable_32.h
9376 +++ b/arch/x86/include/asm/pgtable_32.h
9377 @@ -25,9 +25,6 @@
9378 struct mm_struct;
9379 struct vm_area_struct;
9380
9381 -extern pgd_t swapper_pg_dir[1024];
9382 -extern pgd_t initial_page_table[1024];
9383 -
9384 static inline void pgtable_cache_init(void) { }
9385 static inline void check_pgt_cache(void) { }
9386 void paging_init(void);
9387 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9388 # include <asm/pgtable-2level.h>
9389 #endif
9390
9391 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9392 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9393 +#ifdef CONFIG_X86_PAE
9394 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9395 +#endif
9396 +
9397 #if defined(CONFIG_HIGHPTE)
9398 #define pte_offset_map(dir, address) \
9399 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9400 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9401 /* Clear a kernel PTE and flush it from the TLB */
9402 #define kpte_clear_flush(ptep, vaddr) \
9403 do { \
9404 + pax_open_kernel(); \
9405 pte_clear(&init_mm, (vaddr), (ptep)); \
9406 + pax_close_kernel(); \
9407 __flush_tlb_one((vaddr)); \
9408 } while (0)
9409
9410 @@ -74,6 +79,9 @@ do { \
9411
9412 #endif /* !__ASSEMBLY__ */
9413
9414 +#define HAVE_ARCH_UNMAPPED_AREA
9415 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9416 +
9417 /*
9418 * kern_addr_valid() is (1) for FLATMEM and (0) for
9419 * SPARSEMEM and DISCONTIGMEM
9420 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9421 index ed5903b..c7fe163 100644
9422 --- a/arch/x86/include/asm/pgtable_32_types.h
9423 +++ b/arch/x86/include/asm/pgtable_32_types.h
9424 @@ -8,7 +8,7 @@
9425 */
9426 #ifdef CONFIG_X86_PAE
9427 # include <asm/pgtable-3level_types.h>
9428 -# define PMD_SIZE (1UL << PMD_SHIFT)
9429 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9430 # define PMD_MASK (~(PMD_SIZE - 1))
9431 #else
9432 # include <asm/pgtable-2level_types.h>
9433 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9434 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9435 #endif
9436
9437 +#ifdef CONFIG_PAX_KERNEXEC
9438 +#ifndef __ASSEMBLY__
9439 +extern unsigned char MODULES_EXEC_VADDR[];
9440 +extern unsigned char MODULES_EXEC_END[];
9441 +#endif
9442 +#include <asm/boot.h>
9443 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9444 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9445 +#else
9446 +#define ktla_ktva(addr) (addr)
9447 +#define ktva_ktla(addr) (addr)
9448 +#endif
9449 +
9450 #define MODULES_VADDR VMALLOC_START
9451 #define MODULES_END VMALLOC_END
9452 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9453 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9454 index 975f709..3a89693 100644
9455 --- a/arch/x86/include/asm/pgtable_64.h
9456 +++ b/arch/x86/include/asm/pgtable_64.h
9457 @@ -16,10 +16,13 @@
9458
9459 extern pud_t level3_kernel_pgt[512];
9460 extern pud_t level3_ident_pgt[512];
9461 +extern pud_t level3_vmalloc_pgt[512];
9462 +extern pud_t level3_vmemmap_pgt[512];
9463 +extern pud_t level2_vmemmap_pgt[512];
9464 extern pmd_t level2_kernel_pgt[512];
9465 extern pmd_t level2_fixmap_pgt[512];
9466 -extern pmd_t level2_ident_pgt[512];
9467 -extern pgd_t init_level4_pgt[];
9468 +extern pmd_t level2_ident_pgt[512*2];
9469 +extern pgd_t init_level4_pgt[512];
9470
9471 #define swapper_pg_dir init_level4_pgt
9472
9473 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9474
9475 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9476 {
9477 + pax_open_kernel();
9478 *pmdp = pmd;
9479 + pax_close_kernel();
9480 }
9481
9482 static inline void native_pmd_clear(pmd_t *pmd)
9483 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_t *pud)
9484
9485 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9486 {
9487 + pax_open_kernel();
9488 + *pgdp = pgd;
9489 + pax_close_kernel();
9490 +}
9491 +
9492 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9493 +{
9494 *pgdp = pgd;
9495 }
9496
9497 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9498 index 766ea16..5b96cb3 100644
9499 --- a/arch/x86/include/asm/pgtable_64_types.h
9500 +++ b/arch/x86/include/asm/pgtable_64_types.h
9501 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9502 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9503 #define MODULES_END _AC(0xffffffffff000000, UL)
9504 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9505 +#define MODULES_EXEC_VADDR MODULES_VADDR
9506 +#define MODULES_EXEC_END MODULES_END
9507 +
9508 +#define ktla_ktva(addr) (addr)
9509 +#define ktva_ktla(addr) (addr)
9510
9511 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9512 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9513 index 013286a..8b42f4f 100644
9514 --- a/arch/x86/include/asm/pgtable_types.h
9515 +++ b/arch/x86/include/asm/pgtable_types.h
9516 @@ -16,13 +16,12 @@
9517 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9518 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9519 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9520 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9521 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9522 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9523 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9524 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9525 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9526 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9527 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9528 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9529 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9530 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9531
9532 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9533 @@ -40,7 +39,6 @@
9534 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9535 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9536 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9537 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9538 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9539 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9540 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9541 @@ -57,8 +55,10 @@
9542
9543 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9544 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9545 -#else
9546 +#elif defined(CONFIG_KMEMCHECK)
9547 #define _PAGE_NX (_AT(pteval_t, 0))
9548 +#else
9549 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9550 #endif
9551
9552 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9553 @@ -96,6 +96,9 @@
9554 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9555 _PAGE_ACCESSED)
9556
9557 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9558 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9559 +
9560 #define __PAGE_KERNEL_EXEC \
9561 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9562 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9563 @@ -106,7 +109,7 @@
9564 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9565 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9566 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9567 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9568 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9569 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9570 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9571 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9572 @@ -168,8 +171,8 @@
9573 * bits are combined, this will alow user to access the high address mapped
9574 * VDSO in the presence of CONFIG_COMPAT_VDSO
9575 */
9576 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9577 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9578 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9579 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9580 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9581 #endif
9582
9583 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9584 {
9585 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9586 }
9587 +#endif
9588
9589 +#if PAGETABLE_LEVELS == 3
9590 +#include <asm-generic/pgtable-nopud.h>
9591 +#endif
9592 +
9593 +#if PAGETABLE_LEVELS == 2
9594 +#include <asm-generic/pgtable-nopmd.h>
9595 +#endif
9596 +
9597 +#ifndef __ASSEMBLY__
9598 #if PAGETABLE_LEVELS > 3
9599 typedef struct { pudval_t pud; } pud_t;
9600
9601 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9602 return pud.pud;
9603 }
9604 #else
9605 -#include <asm-generic/pgtable-nopud.h>
9606 -
9607 static inline pudval_t native_pud_val(pud_t pud)
9608 {
9609 return native_pgd_val(pud.pgd);
9610 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9611 return pmd.pmd;
9612 }
9613 #else
9614 -#include <asm-generic/pgtable-nopmd.h>
9615 -
9616 static inline pmdval_t native_pmd_val(pmd_t pmd)
9617 {
9618 return native_pgd_val(pmd.pud.pgd);
9619 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9620
9621 extern pteval_t __supported_pte_mask;
9622 extern void set_nx(void);
9623 -extern int nx_enabled;
9624
9625 #define pgprot_writecombine pgprot_writecombine
9626 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9627 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9628 index 0d1171c..36571a9 100644
9629 --- a/arch/x86/include/asm/processor.h
9630 +++ b/arch/x86/include/asm/processor.h
9631 @@ -266,7 +266,7 @@ struct tss_struct {
9632
9633 } ____cacheline_aligned;
9634
9635 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9636 +extern struct tss_struct init_tss[NR_CPUS];
9637
9638 /*
9639 * Save the original ist values for checking stack pointers during debugging
9640 @@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(const void *x)
9641 */
9642 #define TASK_SIZE PAGE_OFFSET
9643 #define TASK_SIZE_MAX TASK_SIZE
9644 +
9645 +#ifdef CONFIG_PAX_SEGMEXEC
9646 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9647 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9648 +#else
9649 #define STACK_TOP TASK_SIZE
9650 -#define STACK_TOP_MAX STACK_TOP
9651 +#endif
9652 +
9653 +#define STACK_TOP_MAX TASK_SIZE
9654
9655 #define INIT_THREAD { \
9656 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9657 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9658 .vm86_info = NULL, \
9659 .sysenter_cs = __KERNEL_CS, \
9660 .io_bitmap_ptr = NULL, \
9661 @@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(const void *x)
9662 */
9663 #define INIT_TSS { \
9664 .x86_tss = { \
9665 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9666 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9667 .ss0 = __KERNEL_DS, \
9668 .ss1 = __KERNEL_CS, \
9669 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9670 @@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(const void *x)
9671 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9672
9673 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9674 -#define KSTK_TOP(info) \
9675 -({ \
9676 - unsigned long *__ptr = (unsigned long *)(info); \
9677 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9678 -})
9679 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9680
9681 /*
9682 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9683 @@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9684 #define task_pt_regs(task) \
9685 ({ \
9686 struct pt_regs *__regs__; \
9687 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9688 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9689 __regs__ - 1; \
9690 })
9691
9692 @@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9693 /*
9694 * User space process size. 47bits minus one guard page.
9695 */
9696 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9697 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9698
9699 /* This decides where the kernel will search for a free chunk of vm
9700 * space during mmap's.
9701 */
9702 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9703 - 0xc0000000 : 0xFFFFe000)
9704 + 0xc0000000 : 0xFFFFf000)
9705
9706 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9707 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9708 @@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9709 #define STACK_TOP_MAX TASK_SIZE_MAX
9710
9711 #define INIT_THREAD { \
9712 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9713 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9714 }
9715
9716 #define INIT_TSS { \
9717 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9718 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9719 }
9720
9721 /*
9722 @@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9723 */
9724 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9725
9726 +#ifdef CONFIG_PAX_SEGMEXEC
9727 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9728 +#endif
9729 +
9730 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9731
9732 /* Get/set a process' ability to use the timestamp counter instruction */
9733 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9734 index 3566454..4bdfb8c 100644
9735 --- a/arch/x86/include/asm/ptrace.h
9736 +++ b/arch/x86/include/asm/ptrace.h
9737 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9738 }
9739
9740 /*
9741 - * user_mode_vm(regs) determines whether a register set came from user mode.
9742 + * user_mode(regs) determines whether a register set came from user mode.
9743 * This is true if V8086 mode was enabled OR if the register set was from
9744 * protected mode with RPL-3 CS value. This tricky test checks that with
9745 * one comparison. Many places in the kernel can bypass this full check
9746 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9747 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9748 + * be used.
9749 */
9750 -static inline int user_mode(struct pt_regs *regs)
9751 +static inline int user_mode_novm(struct pt_regs *regs)
9752 {
9753 #ifdef CONFIG_X86_32
9754 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9755 #else
9756 - return !!(regs->cs & 3);
9757 + return !!(regs->cs & SEGMENT_RPL_MASK);
9758 #endif
9759 }
9760
9761 -static inline int user_mode_vm(struct pt_regs *regs)
9762 +static inline int user_mode(struct pt_regs *regs)
9763 {
9764 #ifdef CONFIG_X86_32
9765 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9766 USER_RPL;
9767 #else
9768 - return user_mode(regs);
9769 + return user_mode_novm(regs);
9770 #endif
9771 }
9772
9773 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9774 #ifdef CONFIG_X86_64
9775 static inline bool user_64bit_mode(struct pt_regs *regs)
9776 {
9777 + unsigned long cs = regs->cs & 0xffff;
9778 #ifndef CONFIG_PARAVIRT
9779 /*
9780 * On non-paravirt systems, this is the only long mode CPL 3
9781 * selector. We do not allow long mode selectors in the LDT.
9782 */
9783 - return regs->cs == __USER_CS;
9784 + return cs == __USER_CS;
9785 #else
9786 /* Headers are too twisted for this to go in paravirt.h. */
9787 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9788 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9789 #endif
9790 }
9791 #endif
9792 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9793 index 3250e3d..20db631 100644
9794 --- a/arch/x86/include/asm/reboot.h
9795 +++ b/arch/x86/include/asm/reboot.h
9796 @@ -6,19 +6,19 @@
9797 struct pt_regs;
9798
9799 struct machine_ops {
9800 - void (*restart)(char *cmd);
9801 - void (*halt)(void);
9802 - void (*power_off)(void);
9803 + void (* __noreturn restart)(char *cmd);
9804 + void (* __noreturn halt)(void);
9805 + void (* __noreturn power_off)(void);
9806 void (*shutdown)(void);
9807 void (*crash_shutdown)(struct pt_regs *);
9808 - void (*emergency_restart)(void);
9809 -};
9810 + void (* __noreturn emergency_restart)(void);
9811 +} __no_const;
9812
9813 extern struct machine_ops machine_ops;
9814
9815 void native_machine_crash_shutdown(struct pt_regs *regs);
9816 void native_machine_shutdown(void);
9817 -void machine_real_restart(unsigned int type);
9818 +void machine_real_restart(unsigned int type) __noreturn;
9819 /* These must match dispatch_table in reboot_32.S */
9820 #define MRR_BIOS 0
9821 #define MRR_APM 1
9822 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9823 index df4cd32..27ae072 100644
9824 --- a/arch/x86/include/asm/rwsem.h
9825 +++ b/arch/x86/include/asm/rwsem.h
9826 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9827 {
9828 asm volatile("# beginning down_read\n\t"
9829 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9830 +
9831 +#ifdef CONFIG_PAX_REFCOUNT
9832 + "jno 0f\n"
9833 + LOCK_PREFIX _ASM_DEC "(%1)\n"
9834 + "int $4\n0:\n"
9835 + _ASM_EXTABLE(0b, 0b)
9836 +#endif
9837 +
9838 /* adds 0x00000001 */
9839 " jns 1f\n"
9840 " call call_rwsem_down_read_failed\n"
9841 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9842 "1:\n\t"
9843 " mov %1,%2\n\t"
9844 " add %3,%2\n\t"
9845 +
9846 +#ifdef CONFIG_PAX_REFCOUNT
9847 + "jno 0f\n"
9848 + "sub %3,%2\n"
9849 + "int $4\n0:\n"
9850 + _ASM_EXTABLE(0b, 0b)
9851 +#endif
9852 +
9853 " jle 2f\n\t"
9854 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9855 " jnz 1b\n\t"
9856 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9857 long tmp;
9858 asm volatile("# beginning down_write\n\t"
9859 LOCK_PREFIX " xadd %1,(%2)\n\t"
9860 +
9861 +#ifdef CONFIG_PAX_REFCOUNT
9862 + "jno 0f\n"
9863 + "mov %1,(%2)\n"
9864 + "int $4\n0:\n"
9865 + _ASM_EXTABLE(0b, 0b)
9866 +#endif
9867 +
9868 /* adds 0xffff0001, returns the old value */
9869 " test %1,%1\n\t"
9870 /* was the count 0 before? */
9871 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9872 long tmp;
9873 asm volatile("# beginning __up_read\n\t"
9874 LOCK_PREFIX " xadd %1,(%2)\n\t"
9875 +
9876 +#ifdef CONFIG_PAX_REFCOUNT
9877 + "jno 0f\n"
9878 + "mov %1,(%2)\n"
9879 + "int $4\n0:\n"
9880 + _ASM_EXTABLE(0b, 0b)
9881 +#endif
9882 +
9883 /* subtracts 1, returns the old value */
9884 " jns 1f\n\t"
9885 " call call_rwsem_wake\n" /* expects old value in %edx */
9886 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9887 long tmp;
9888 asm volatile("# beginning __up_write\n\t"
9889 LOCK_PREFIX " xadd %1,(%2)\n\t"
9890 +
9891 +#ifdef CONFIG_PAX_REFCOUNT
9892 + "jno 0f\n"
9893 + "mov %1,(%2)\n"
9894 + "int $4\n0:\n"
9895 + _ASM_EXTABLE(0b, 0b)
9896 +#endif
9897 +
9898 /* subtracts 0xffff0001, returns the old value */
9899 " jns 1f\n\t"
9900 " call call_rwsem_wake\n" /* expects old value in %edx */
9901 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9902 {
9903 asm volatile("# beginning __downgrade_write\n\t"
9904 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9905 +
9906 +#ifdef CONFIG_PAX_REFCOUNT
9907 + "jno 0f\n"
9908 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9909 + "int $4\n0:\n"
9910 + _ASM_EXTABLE(0b, 0b)
9911 +#endif
9912 +
9913 /*
9914 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9915 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9916 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9917 */
9918 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
9919 {
9920 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9921 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9922 +
9923 +#ifdef CONFIG_PAX_REFCOUNT
9924 + "jno 0f\n"
9925 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9926 + "int $4\n0:\n"
9927 + _ASM_EXTABLE(0b, 0b)
9928 +#endif
9929 +
9930 : "+m" (sem->count)
9931 : "er" (delta));
9932 }
9933 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
9934 {
9935 long tmp = delta;
9936
9937 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9938 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9939 +
9940 +#ifdef CONFIG_PAX_REFCOUNT
9941 + "jno 0f\n"
9942 + "mov %0,%1\n"
9943 + "int $4\n0:\n"
9944 + _ASM_EXTABLE(0b, 0b)
9945 +#endif
9946 +
9947 : "+r" (tmp), "+m" (sem->count)
9948 : : "memory");
9949
9950 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
9951 index 5e64171..f58957e 100644
9952 --- a/arch/x86/include/asm/segment.h
9953 +++ b/arch/x86/include/asm/segment.h
9954 @@ -64,10 +64,15 @@
9955 * 26 - ESPFIX small SS
9956 * 27 - per-cpu [ offset to per-cpu data area ]
9957 * 28 - stack_canary-20 [ for stack protector ]
9958 - * 29 - unused
9959 - * 30 - unused
9960 + * 29 - PCI BIOS CS
9961 + * 30 - PCI BIOS DS
9962 * 31 - TSS for double fault handler
9963 */
9964 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
9965 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
9966 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
9967 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
9968 +
9969 #define GDT_ENTRY_TLS_MIN 6
9970 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9971
9972 @@ -79,6 +84,8 @@
9973
9974 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
9975
9976 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9977 +
9978 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
9979
9980 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
9981 @@ -104,6 +111,12 @@
9982 #define __KERNEL_STACK_CANARY 0
9983 #endif
9984
9985 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
9986 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9987 +
9988 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
9989 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9990 +
9991 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9992
9993 /*
9994 @@ -141,7 +154,7 @@
9995 */
9996
9997 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9998 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9999 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10000
10001
10002 #else
10003 @@ -165,6 +178,8 @@
10004 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
10005 #define __USER32_DS __USER_DS
10006
10007 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10008 +
10009 #define GDT_ENTRY_TSS 8 /* needs two entries */
10010 #define GDT_ENTRY_LDT 10 /* needs two entries */
10011 #define GDT_ENTRY_TLS_MIN 12
10012 @@ -185,6 +200,7 @@
10013 #endif
10014
10015 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
10016 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
10017 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
10018 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10019 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10020 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10021 index 73b11bc..d4a3b63 100644
10022 --- a/arch/x86/include/asm/smp.h
10023 +++ b/arch/x86/include/asm/smp.h
10024 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10025 /* cpus sharing the last level cache: */
10026 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10027 DECLARE_PER_CPU(u16, cpu_llc_id);
10028 -DECLARE_PER_CPU(int, cpu_number);
10029 +DECLARE_PER_CPU(unsigned int, cpu_number);
10030
10031 static inline struct cpumask *cpu_sibling_mask(int cpu)
10032 {
10033 @@ -77,7 +77,7 @@ struct smp_ops {
10034
10035 void (*send_call_func_ipi)(const struct cpumask *mask);
10036 void (*send_call_func_single_ipi)(int cpu);
10037 -};
10038 +} __no_const;
10039
10040 /* Globals due to paravirt */
10041 extern void set_cpu_sibling_map(int cpu);
10042 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10043 extern int safe_smp_processor_id(void);
10044
10045 #elif defined(CONFIG_X86_64_SMP)
10046 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10047 -
10048 -#define stack_smp_processor_id() \
10049 -({ \
10050 - struct thread_info *ti; \
10051 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10052 - ti->cpu; \
10053 -})
10054 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10055 +#define stack_smp_processor_id() raw_smp_processor_id()
10056 #define safe_smp_processor_id() smp_processor_id()
10057
10058 #endif
10059 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10060 index ee67edf..49c796b 100644
10061 --- a/arch/x86/include/asm/spinlock.h
10062 +++ b/arch/x86/include/asm/spinlock.h
10063 @@ -248,6 +248,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10064 static inline void arch_read_lock(arch_rwlock_t *rw)
10065 {
10066 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10067 +
10068 +#ifdef CONFIG_PAX_REFCOUNT
10069 + "jno 0f\n"
10070 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10071 + "int $4\n0:\n"
10072 + _ASM_EXTABLE(0b, 0b)
10073 +#endif
10074 +
10075 "jns 1f\n"
10076 "call __read_lock_failed\n\t"
10077 "1:\n"
10078 @@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10079 static inline void arch_write_lock(arch_rwlock_t *rw)
10080 {
10081 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10082 +
10083 +#ifdef CONFIG_PAX_REFCOUNT
10084 + "jno 0f\n"
10085 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10086 + "int $4\n0:\n"
10087 + _ASM_EXTABLE(0b, 0b)
10088 +#endif
10089 +
10090 "jz 1f\n"
10091 "call __write_lock_failed\n\t"
10092 "1:\n"
10093 @@ -286,13 +302,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10094
10095 static inline void arch_read_unlock(arch_rwlock_t *rw)
10096 {
10097 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10098 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10099 +
10100 +#ifdef CONFIG_PAX_REFCOUNT
10101 + "jno 0f\n"
10102 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10103 + "int $4\n0:\n"
10104 + _ASM_EXTABLE(0b, 0b)
10105 +#endif
10106 +
10107 :"+m" (rw->lock) : : "memory");
10108 }
10109
10110 static inline void arch_write_unlock(arch_rwlock_t *rw)
10111 {
10112 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10113 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10114 +
10115 +#ifdef CONFIG_PAX_REFCOUNT
10116 + "jno 0f\n"
10117 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10118 + "int $4\n0:\n"
10119 + _ASM_EXTABLE(0b, 0b)
10120 +#endif
10121 +
10122 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10123 }
10124
10125 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10126 index 1575177..cb23f52 100644
10127 --- a/arch/x86/include/asm/stackprotector.h
10128 +++ b/arch/x86/include/asm/stackprotector.h
10129 @@ -48,7 +48,7 @@
10130 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10131 */
10132 #define GDT_STACK_CANARY_INIT \
10133 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10134 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10135
10136 /*
10137 * Initialize the stackprotector canary value.
10138 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10139
10140 static inline void load_stack_canary_segment(void)
10141 {
10142 -#ifdef CONFIG_X86_32
10143 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10144 asm volatile ("mov %0, %%gs" : : "r" (0));
10145 #endif
10146 }
10147 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10148 index 70bbe39..4ae2bd4 100644
10149 --- a/arch/x86/include/asm/stacktrace.h
10150 +++ b/arch/x86/include/asm/stacktrace.h
10151 @@ -11,28 +11,20 @@
10152
10153 extern int kstack_depth_to_print;
10154
10155 -struct thread_info;
10156 +struct task_struct;
10157 struct stacktrace_ops;
10158
10159 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10160 - unsigned long *stack,
10161 - unsigned long bp,
10162 - const struct stacktrace_ops *ops,
10163 - void *data,
10164 - unsigned long *end,
10165 - int *graph);
10166 -
10167 -extern unsigned long
10168 -print_context_stack(struct thread_info *tinfo,
10169 - unsigned long *stack, unsigned long bp,
10170 - const struct stacktrace_ops *ops, void *data,
10171 - unsigned long *end, int *graph);
10172 -
10173 -extern unsigned long
10174 -print_context_stack_bp(struct thread_info *tinfo,
10175 - unsigned long *stack, unsigned long bp,
10176 - const struct stacktrace_ops *ops, void *data,
10177 - unsigned long *end, int *graph);
10178 +typedef unsigned long walk_stack_t(struct task_struct *task,
10179 + void *stack_start,
10180 + unsigned long *stack,
10181 + unsigned long bp,
10182 + const struct stacktrace_ops *ops,
10183 + void *data,
10184 + unsigned long *end,
10185 + int *graph);
10186 +
10187 +extern walk_stack_t print_context_stack;
10188 +extern walk_stack_t print_context_stack_bp;
10189
10190 /* Generic stack tracer with callbacks */
10191
10192 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10193 void (*address)(void *data, unsigned long address, int reliable);
10194 /* On negative return stop dumping */
10195 int (*stack)(void *data, char *name);
10196 - walk_stack_t walk_stack;
10197 + walk_stack_t *walk_stack;
10198 };
10199
10200 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10201 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10202 index cb23852..2dde194 100644
10203 --- a/arch/x86/include/asm/sys_ia32.h
10204 +++ b/arch/x86/include/asm/sys_ia32.h
10205 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10206 compat_sigset_t __user *, unsigned int);
10207 asmlinkage long sys32_alarm(unsigned int);
10208
10209 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10210 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10211 asmlinkage long sys32_sysfs(int, u32, u32);
10212
10213 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10214 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10215 index c2ff2a1..4349184 100644
10216 --- a/arch/x86/include/asm/system.h
10217 +++ b/arch/x86/include/asm/system.h
10218 @@ -129,7 +129,7 @@ do { \
10219 "call __switch_to\n\t" \
10220 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10221 __switch_canary \
10222 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10223 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10224 "movq %%rax,%%rdi\n\t" \
10225 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10226 "jnz ret_from_fork\n\t" \
10227 @@ -140,7 +140,7 @@ do { \
10228 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10229 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10230 [_tif_fork] "i" (_TIF_FORK), \
10231 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10232 + [thread_info] "m" (current_tinfo), \
10233 [current_task] "m" (current_task) \
10234 __switch_canary_iparam \
10235 : "memory", "cc" __EXTRA_CLOBBER)
10236 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10237 {
10238 unsigned long __limit;
10239 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10240 - return __limit + 1;
10241 + return __limit;
10242 }
10243
10244 static inline void native_clts(void)
10245 @@ -397,12 +397,12 @@ void enable_hlt(void);
10246
10247 void cpu_idle_wait(void);
10248
10249 -extern unsigned long arch_align_stack(unsigned long sp);
10250 +#define arch_align_stack(x) ((x) & ~0xfUL)
10251 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10252
10253 void default_idle(void);
10254
10255 -void stop_this_cpu(void *dummy);
10256 +void stop_this_cpu(void *dummy) __noreturn;
10257
10258 /*
10259 * Force strict CPU ordering.
10260 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10261 index a1fe5c1..ee326d8 100644
10262 --- a/arch/x86/include/asm/thread_info.h
10263 +++ b/arch/x86/include/asm/thread_info.h
10264 @@ -10,6 +10,7 @@
10265 #include <linux/compiler.h>
10266 #include <asm/page.h>
10267 #include <asm/types.h>
10268 +#include <asm/percpu.h>
10269
10270 /*
10271 * low level task data that entry.S needs immediate access to
10272 @@ -24,7 +25,6 @@ struct exec_domain;
10273 #include <linux/atomic.h>
10274
10275 struct thread_info {
10276 - struct task_struct *task; /* main task structure */
10277 struct exec_domain *exec_domain; /* execution domain */
10278 __u32 flags; /* low level flags */
10279 __u32 status; /* thread synchronous flags */
10280 @@ -34,18 +34,12 @@ struct thread_info {
10281 mm_segment_t addr_limit;
10282 struct restart_block restart_block;
10283 void __user *sysenter_return;
10284 -#ifdef CONFIG_X86_32
10285 - unsigned long previous_esp; /* ESP of the previous stack in
10286 - case of nested (IRQ) stacks
10287 - */
10288 - __u8 supervisor_stack[0];
10289 -#endif
10290 + unsigned long lowest_stack;
10291 int uaccess_err;
10292 };
10293
10294 -#define INIT_THREAD_INFO(tsk) \
10295 +#define INIT_THREAD_INFO \
10296 { \
10297 - .task = &tsk, \
10298 .exec_domain = &default_exec_domain, \
10299 .flags = 0, \
10300 .cpu = 0, \
10301 @@ -56,7 +50,7 @@ struct thread_info {
10302 }, \
10303 }
10304
10305 -#define init_thread_info (init_thread_union.thread_info)
10306 +#define init_thread_info (init_thread_union.stack)
10307 #define init_stack (init_thread_union.stack)
10308
10309 #else /* !__ASSEMBLY__ */
10310 @@ -170,6 +164,23 @@ struct thread_info {
10311 ret; \
10312 })
10313
10314 +#ifdef __ASSEMBLY__
10315 +/* how to get the thread information struct from ASM */
10316 +#define GET_THREAD_INFO(reg) \
10317 + mov PER_CPU_VAR(current_tinfo), reg
10318 +
10319 +/* use this one if reg already contains %esp */
10320 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10321 +#else
10322 +/* how to get the thread information struct from C */
10323 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10324 +
10325 +static __always_inline struct thread_info *current_thread_info(void)
10326 +{
10327 + return percpu_read_stable(current_tinfo);
10328 +}
10329 +#endif
10330 +
10331 #ifdef CONFIG_X86_32
10332
10333 #define STACK_WARN (THREAD_SIZE/8)
10334 @@ -180,35 +191,13 @@ struct thread_info {
10335 */
10336 #ifndef __ASSEMBLY__
10337
10338 -
10339 /* how to get the current stack pointer from C */
10340 register unsigned long current_stack_pointer asm("esp") __used;
10341
10342 -/* how to get the thread information struct from C */
10343 -static inline struct thread_info *current_thread_info(void)
10344 -{
10345 - return (struct thread_info *)
10346 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10347 -}
10348 -
10349 -#else /* !__ASSEMBLY__ */
10350 -
10351 -/* how to get the thread information struct from ASM */
10352 -#define GET_THREAD_INFO(reg) \
10353 - movl $-THREAD_SIZE, reg; \
10354 - andl %esp, reg
10355 -
10356 -/* use this one if reg already contains %esp */
10357 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10358 - andl $-THREAD_SIZE, reg
10359 -
10360 #endif
10361
10362 #else /* X86_32 */
10363
10364 -#include <asm/percpu.h>
10365 -#define KERNEL_STACK_OFFSET (5*8)
10366 -
10367 /*
10368 * macros/functions for gaining access to the thread information structure
10369 * preempt_count needs to be 1 initially, until the scheduler is functional.
10370 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10371 #ifndef __ASSEMBLY__
10372 DECLARE_PER_CPU(unsigned long, kernel_stack);
10373
10374 -static inline struct thread_info *current_thread_info(void)
10375 -{
10376 - struct thread_info *ti;
10377 - ti = (void *)(percpu_read_stable(kernel_stack) +
10378 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10379 - return ti;
10380 -}
10381 -
10382 -#else /* !__ASSEMBLY__ */
10383 -
10384 -/* how to get the thread information struct from ASM */
10385 -#define GET_THREAD_INFO(reg) \
10386 - movq PER_CPU_VAR(kernel_stack),reg ; \
10387 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10388 -
10389 +/* how to get the current stack pointer from C */
10390 +register unsigned long current_stack_pointer asm("rsp") __used;
10391 #endif
10392
10393 #endif /* !X86_32 */
10394 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10395 extern void free_thread_info(struct thread_info *ti);
10396 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10397 #define arch_task_cache_init arch_task_cache_init
10398 +
10399 +#define __HAVE_THREAD_FUNCTIONS
10400 +#define task_thread_info(task) (&(task)->tinfo)
10401 +#define task_stack_page(task) ((task)->stack)
10402 +#define setup_thread_stack(p, org) do {} while (0)
10403 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10404 +
10405 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10406 +extern struct task_struct *alloc_task_struct_node(int node);
10407 +extern void free_task_struct(struct task_struct *);
10408 +
10409 #endif
10410 #endif /* _ASM_X86_THREAD_INFO_H */
10411 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10412 index 36361bf..324f262 100644
10413 --- a/arch/x86/include/asm/uaccess.h
10414 +++ b/arch/x86/include/asm/uaccess.h
10415 @@ -7,12 +7,15 @@
10416 #include <linux/compiler.h>
10417 #include <linux/thread_info.h>
10418 #include <linux/string.h>
10419 +#include <linux/sched.h>
10420 #include <asm/asm.h>
10421 #include <asm/page.h>
10422
10423 #define VERIFY_READ 0
10424 #define VERIFY_WRITE 1
10425
10426 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10427 +
10428 /*
10429 * The fs value determines whether argument validity checking should be
10430 * performed or not. If get_fs() == USER_DS, checking is performed, with
10431 @@ -28,7 +31,12 @@
10432
10433 #define get_ds() (KERNEL_DS)
10434 #define get_fs() (current_thread_info()->addr_limit)
10435 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10436 +void __set_fs(mm_segment_t x);
10437 +void set_fs(mm_segment_t x);
10438 +#else
10439 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10440 +#endif
10441
10442 #define segment_eq(a, b) ((a).seg == (b).seg)
10443
10444 @@ -76,7 +84,33 @@
10445 * checks that the pointer is in the user space range - after calling
10446 * this function, memory access functions may still return -EFAULT.
10447 */
10448 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10449 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10450 +#define access_ok(type, addr, size) \
10451 +({ \
10452 + long __size = size; \
10453 + unsigned long __addr = (unsigned long)addr; \
10454 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10455 + unsigned long __end_ao = __addr + __size - 1; \
10456 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10457 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10458 + while(__addr_ao <= __end_ao) { \
10459 + char __c_ao; \
10460 + __addr_ao += PAGE_SIZE; \
10461 + if (__size > PAGE_SIZE) \
10462 + cond_resched(); \
10463 + if (__get_user(__c_ao, (char __user *)__addr)) \
10464 + break; \
10465 + if (type != VERIFY_WRITE) { \
10466 + __addr = __addr_ao; \
10467 + continue; \
10468 + } \
10469 + if (__put_user(__c_ao, (char __user *)__addr)) \
10470 + break; \
10471 + __addr = __addr_ao; \
10472 + } \
10473 + } \
10474 + __ret_ao; \
10475 +})
10476
10477 /*
10478 * The exception table consists of pairs of addresses: the first is the
10479 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10480 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10481 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10482
10483 -
10484 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10485 +#define __copyuser_seg "gs;"
10486 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10487 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10488 +#else
10489 +#define __copyuser_seg
10490 +#define __COPYUSER_SET_ES
10491 +#define __COPYUSER_RESTORE_ES
10492 +#endif
10493
10494 #ifdef CONFIG_X86_32
10495 #define __put_user_asm_u64(x, addr, err, errret) \
10496 - asm volatile("1: movl %%eax,0(%2)\n" \
10497 - "2: movl %%edx,4(%2)\n" \
10498 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10499 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10500 "3:\n" \
10501 ".section .fixup,\"ax\"\n" \
10502 "4: movl %3,%0\n" \
10503 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10504 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10505
10506 #define __put_user_asm_ex_u64(x, addr) \
10507 - asm volatile("1: movl %%eax,0(%1)\n" \
10508 - "2: movl %%edx,4(%1)\n" \
10509 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10510 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10511 "3:\n" \
10512 _ASM_EXTABLE(1b, 2b - 1b) \
10513 _ASM_EXTABLE(2b, 3b - 2b) \
10514 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10515 __typeof__(*(ptr)) __pu_val; \
10516 __chk_user_ptr(ptr); \
10517 might_fault(); \
10518 - __pu_val = x; \
10519 + __pu_val = (x); \
10520 switch (sizeof(*(ptr))) { \
10521 case 1: \
10522 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10523 @@ -373,7 +415,7 @@ do { \
10524 } while (0)
10525
10526 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10527 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10528 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10529 "2:\n" \
10530 ".section .fixup,\"ax\"\n" \
10531 "3: mov %3,%0\n" \
10532 @@ -381,7 +423,7 @@ do { \
10533 " jmp 2b\n" \
10534 ".previous\n" \
10535 _ASM_EXTABLE(1b, 3b) \
10536 - : "=r" (err), ltype(x) \
10537 + : "=r" (err), ltype (x) \
10538 : "m" (__m(addr)), "i" (errret), "0" (err))
10539
10540 #define __get_user_size_ex(x, ptr, size) \
10541 @@ -406,7 +448,7 @@ do { \
10542 } while (0)
10543
10544 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10545 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10546 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10547 "2:\n" \
10548 _ASM_EXTABLE(1b, 2b - 1b) \
10549 : ltype(x) : "m" (__m(addr)))
10550 @@ -423,13 +465,24 @@ do { \
10551 int __gu_err; \
10552 unsigned long __gu_val; \
10553 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10554 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10555 + (x) = (__typeof__(*(ptr)))__gu_val; \
10556 __gu_err; \
10557 })
10558
10559 /* FIXME: this hack is definitely wrong -AK */
10560 struct __large_struct { unsigned long buf[100]; };
10561 -#define __m(x) (*(struct __large_struct __user *)(x))
10562 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10563 +#define ____m(x) \
10564 +({ \
10565 + unsigned long ____x = (unsigned long)(x); \
10566 + if (____x < PAX_USER_SHADOW_BASE) \
10567 + ____x += PAX_USER_SHADOW_BASE; \
10568 + (void __user *)____x; \
10569 +})
10570 +#else
10571 +#define ____m(x) (x)
10572 +#endif
10573 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10574
10575 /*
10576 * Tell gcc we read from memory instead of writing: this is because
10577 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10578 * aliasing issues.
10579 */
10580 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10581 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10582 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10583 "2:\n" \
10584 ".section .fixup,\"ax\"\n" \
10585 "3: mov %3,%0\n" \
10586 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10587 ".previous\n" \
10588 _ASM_EXTABLE(1b, 3b) \
10589 : "=r"(err) \
10590 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10591 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10592
10593 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10594 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10595 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10596 "2:\n" \
10597 _ASM_EXTABLE(1b, 2b - 1b) \
10598 : : ltype(x), "m" (__m(addr)))
10599 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10600 * On error, the variable @x is set to zero.
10601 */
10602
10603 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10604 +#define __get_user(x, ptr) get_user((x), (ptr))
10605 +#else
10606 #define __get_user(x, ptr) \
10607 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10608 +#endif
10609
10610 /**
10611 * __put_user: - Write a simple value into user space, with less checking.
10612 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10613 * Returns zero on success, or -EFAULT on error.
10614 */
10615
10616 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10617 +#define __put_user(x, ptr) put_user((x), (ptr))
10618 +#else
10619 #define __put_user(x, ptr) \
10620 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10621 +#endif
10622
10623 #define __get_user_unaligned __get_user
10624 #define __put_user_unaligned __put_user
10625 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10626 #define get_user_ex(x, ptr) do { \
10627 unsigned long __gue_val; \
10628 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10629 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10630 + (x) = (__typeof__(*(ptr)))__gue_val; \
10631 } while (0)
10632
10633 #ifdef CONFIG_X86_WP_WORKS_OK
10634 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10635 index 566e803..89f1e60 100644
10636 --- a/arch/x86/include/asm/uaccess_32.h
10637 +++ b/arch/x86/include/asm/uaccess_32.h
10638 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10639 static __always_inline unsigned long __must_check
10640 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10641 {
10642 + pax_track_stack();
10643 +
10644 + if ((long)n < 0)
10645 + return n;
10646 +
10647 if (__builtin_constant_p(n)) {
10648 unsigned long ret;
10649
10650 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10651 return ret;
10652 }
10653 }
10654 + if (!__builtin_constant_p(n))
10655 + check_object_size(from, n, true);
10656 return __copy_to_user_ll(to, from, n);
10657 }
10658
10659 @@ -82,12 +89,16 @@ static __always_inline unsigned long __must_check
10660 __copy_to_user(void __user *to, const void *from, unsigned long n)
10661 {
10662 might_fault();
10663 +
10664 return __copy_to_user_inatomic(to, from, n);
10665 }
10666
10667 static __always_inline unsigned long
10668 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10669 {
10670 + if ((long)n < 0)
10671 + return n;
10672 +
10673 /* Avoid zeroing the tail if the copy fails..
10674 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10675 * but as the zeroing behaviour is only significant when n is not
10676 @@ -137,6 +148,12 @@ static __always_inline unsigned long
10677 __copy_from_user(void *to, const void __user *from, unsigned long n)
10678 {
10679 might_fault();
10680 +
10681 + pax_track_stack();
10682 +
10683 + if ((long)n < 0)
10684 + return n;
10685 +
10686 if (__builtin_constant_p(n)) {
10687 unsigned long ret;
10688
10689 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10690 return ret;
10691 }
10692 }
10693 + if (!__builtin_constant_p(n))
10694 + check_object_size(to, n, false);
10695 return __copy_from_user_ll(to, from, n);
10696 }
10697
10698 @@ -159,6 +178,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10699 const void __user *from, unsigned long n)
10700 {
10701 might_fault();
10702 +
10703 + if ((long)n < 0)
10704 + return n;
10705 +
10706 if (__builtin_constant_p(n)) {
10707 unsigned long ret;
10708
10709 @@ -181,15 +204,19 @@ static __always_inline unsigned long
10710 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10711 unsigned long n)
10712 {
10713 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10714 -}
10715 + if ((long)n < 0)
10716 + return n;
10717
10718 -unsigned long __must_check copy_to_user(void __user *to,
10719 - const void *from, unsigned long n);
10720 -unsigned long __must_check _copy_from_user(void *to,
10721 - const void __user *from,
10722 - unsigned long n);
10723 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10724 +}
10725
10726 +extern void copy_to_user_overflow(void)
10727 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10728 + __compiletime_error("copy_to_user() buffer size is not provably correct")
10729 +#else
10730 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
10731 +#endif
10732 +;
10733
10734 extern void copy_from_user_overflow(void)
10735 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10736 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void)
10737 #endif
10738 ;
10739
10740 -static inline unsigned long __must_check copy_from_user(void *to,
10741 - const void __user *from,
10742 - unsigned long n)
10743 +/**
10744 + * copy_to_user: - Copy a block of data into user space.
10745 + * @to: Destination address, in user space.
10746 + * @from: Source address, in kernel space.
10747 + * @n: Number of bytes to copy.
10748 + *
10749 + * Context: User context only. This function may sleep.
10750 + *
10751 + * Copy data from kernel space to user space.
10752 + *
10753 + * Returns number of bytes that could not be copied.
10754 + * On success, this will be zero.
10755 + */
10756 +static inline unsigned long __must_check
10757 +copy_to_user(void __user *to, const void *from, unsigned long n)
10758 +{
10759 + int sz = __compiletime_object_size(from);
10760 +
10761 + if (unlikely(sz != -1 && sz < n))
10762 + copy_to_user_overflow();
10763 + else if (access_ok(VERIFY_WRITE, to, n))
10764 + n = __copy_to_user(to, from, n);
10765 + return n;
10766 +}
10767 +
10768 +/**
10769 + * copy_from_user: - Copy a block of data from user space.
10770 + * @to: Destination address, in kernel space.
10771 + * @from: Source address, in user space.
10772 + * @n: Number of bytes to copy.
10773 + *
10774 + * Context: User context only. This function may sleep.
10775 + *
10776 + * Copy data from user space to kernel space.
10777 + *
10778 + * Returns number of bytes that could not be copied.
10779 + * On success, this will be zero.
10780 + *
10781 + * If some data could not be copied, this function will pad the copied
10782 + * data to the requested size using zero bytes.
10783 + */
10784 +static inline unsigned long __must_check
10785 +copy_from_user(void *to, const void __user *from, unsigned long n)
10786 {
10787 int sz = __compiletime_object_size(to);
10788
10789 - if (likely(sz == -1 || sz >= n))
10790 - n = _copy_from_user(to, from, n);
10791 - else
10792 + if (unlikely(sz != -1 && sz < n))
10793 copy_from_user_overflow();
10794 -
10795 + else if (access_ok(VERIFY_READ, from, n))
10796 + n = __copy_from_user(to, from, n);
10797 + else if ((long)n > 0) {
10798 + if (!__builtin_constant_p(n))
10799 + check_object_size(to, n, false);
10800 + memset(to, 0, n);
10801 + }
10802 return n;
10803 }
10804
10805 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10806 index 1c66d30..d407072 100644
10807 --- a/arch/x86/include/asm/uaccess_64.h
10808 +++ b/arch/x86/include/asm/uaccess_64.h
10809 @@ -10,6 +10,9 @@
10810 #include <asm/alternative.h>
10811 #include <asm/cpufeature.h>
10812 #include <asm/page.h>
10813 +#include <asm/pgtable.h>
10814 +
10815 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10816
10817 /*
10818 * Copy To/From Userspace
10819 @@ -17,12 +20,12 @@
10820
10821 /* Handles exceptions in both to and from, but doesn't do access_ok */
10822 __must_check unsigned long
10823 -copy_user_generic_string(void *to, const void *from, unsigned len);
10824 +copy_user_generic_string(void *to, const void *from, unsigned long len);
10825 __must_check unsigned long
10826 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
10827 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len);
10828
10829 static __always_inline __must_check unsigned long
10830 -copy_user_generic(void *to, const void *from, unsigned len)
10831 +copy_user_generic(void *to, const void *from, unsigned long len)
10832 {
10833 unsigned ret;
10834
10835 @@ -36,138 +39,226 @@ copy_user_generic(void *to, const void *from, unsigned len)
10836 return ret;
10837 }
10838
10839 +static __always_inline __must_check unsigned long
10840 +__copy_to_user(void __user *to, const void *from, unsigned long len);
10841 +static __always_inline __must_check unsigned long
10842 +__copy_from_user(void *to, const void __user *from, unsigned long len);
10843 __must_check unsigned long
10844 -_copy_to_user(void __user *to, const void *from, unsigned len);
10845 -__must_check unsigned long
10846 -_copy_from_user(void *to, const void __user *from, unsigned len);
10847 -__must_check unsigned long
10848 -copy_in_user(void __user *to, const void __user *from, unsigned len);
10849 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
10850
10851 static inline unsigned long __must_check copy_from_user(void *to,
10852 const void __user *from,
10853 unsigned long n)
10854 {
10855 - int sz = __compiletime_object_size(to);
10856 -
10857 might_fault();
10858 - if (likely(sz == -1 || sz >= n))
10859 - n = _copy_from_user(to, from, n);
10860 -#ifdef CONFIG_DEBUG_VM
10861 - else
10862 - WARN(1, "Buffer overflow detected!\n");
10863 -#endif
10864 +
10865 + if (access_ok(VERIFY_READ, from, n))
10866 + n = __copy_from_user(to, from, n);
10867 + else if (n < INT_MAX) {
10868 + if (!__builtin_constant_p(n))
10869 + check_object_size(to, n, false);
10870 + memset(to, 0, n);
10871 + }
10872 return n;
10873 }
10874
10875 static __always_inline __must_check
10876 -int copy_to_user(void __user *dst, const void *src, unsigned size)
10877 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
10878 {
10879 might_fault();
10880
10881 - return _copy_to_user(dst, src, size);
10882 + if (access_ok(VERIFY_WRITE, dst, size))
10883 + size = __copy_to_user(dst, src, size);
10884 + return size;
10885 }
10886
10887 static __always_inline __must_check
10888 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10889 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
10890 {
10891 - int ret = 0;
10892 + int sz = __compiletime_object_size(dst);
10893 + unsigned ret = 0;
10894
10895 might_fault();
10896 - if (!__builtin_constant_p(size))
10897 - return copy_user_generic(dst, (__force void *)src, size);
10898 +
10899 + pax_track_stack();
10900 +
10901 + if (size > INT_MAX)
10902 + return size;
10903 +
10904 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10905 + if (!__access_ok(VERIFY_READ, src, size))
10906 + return size;
10907 +#endif
10908 +
10909 + if (unlikely(sz != -1 && sz < size)) {
10910 +#ifdef CONFIG_DEBUG_VM
10911 + WARN(1, "Buffer overflow detected!\n");
10912 +#endif
10913 + return size;
10914 + }
10915 +
10916 + if (!__builtin_constant_p(size)) {
10917 + check_object_size(dst, size, false);
10918 +
10919 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10920 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10921 + src += PAX_USER_SHADOW_BASE;
10922 +#endif
10923 +
10924 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
10925 + }
10926 switch (size) {
10927 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10928 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10929 ret, "b", "b", "=q", 1);
10930 return ret;
10931 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10932 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10933 ret, "w", "w", "=r", 2);
10934 return ret;
10935 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10936 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10937 ret, "l", "k", "=r", 4);
10938 return ret;
10939 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10940 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10941 ret, "q", "", "=r", 8);
10942 return ret;
10943 case 10:
10944 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10945 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10946 ret, "q", "", "=r", 10);
10947 if (unlikely(ret))
10948 return ret;
10949 __get_user_asm(*(u16 *)(8 + (char *)dst),
10950 - (u16 __user *)(8 + (char __user *)src),
10951 + (const u16 __user *)(8 + (const char __user *)src),
10952 ret, "w", "w", "=r", 2);
10953 return ret;
10954 case 16:
10955 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10956 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10957 ret, "q", "", "=r", 16);
10958 if (unlikely(ret))
10959 return ret;
10960 __get_user_asm(*(u64 *)(8 + (char *)dst),
10961 - (u64 __user *)(8 + (char __user *)src),
10962 + (const u64 __user *)(8 + (const char __user *)src),
10963 ret, "q", "", "=r", 8);
10964 return ret;
10965 default:
10966 - return copy_user_generic(dst, (__force void *)src, size);
10967 +
10968 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10969 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10970 + src += PAX_USER_SHADOW_BASE;
10971 +#endif
10972 +
10973 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
10974 }
10975 }
10976
10977 static __always_inline __must_check
10978 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10979 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
10980 {
10981 - int ret = 0;
10982 + int sz = __compiletime_object_size(src);
10983 + unsigned ret = 0;
10984
10985 might_fault();
10986 - if (!__builtin_constant_p(size))
10987 - return copy_user_generic((__force void *)dst, src, size);
10988 +
10989 + pax_track_stack();
10990 +
10991 + if (size > INT_MAX)
10992 + return size;
10993 +
10994 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10995 + if (!__access_ok(VERIFY_WRITE, dst, size))
10996 + return size;
10997 +#endif
10998 +
10999 + if (unlikely(sz != -1 && sz < size)) {
11000 +#ifdef CONFIG_DEBUG_VM
11001 + WARN(1, "Buffer overflow detected!\n");
11002 +#endif
11003 + return size;
11004 + }
11005 +
11006 + if (!__builtin_constant_p(size)) {
11007 + check_object_size(src, size, true);
11008 +
11009 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11010 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11011 + dst += PAX_USER_SHADOW_BASE;
11012 +#endif
11013 +
11014 + return copy_user_generic((__force_kernel void *)dst, src, size);
11015 + }
11016 switch (size) {
11017 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
11018 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
11019 ret, "b", "b", "iq", 1);
11020 return ret;
11021 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
11022 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
11023 ret, "w", "w", "ir", 2);
11024 return ret;
11025 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
11026 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
11027 ret, "l", "k", "ir", 4);
11028 return ret;
11029 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
11030 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11031 ret, "q", "", "er", 8);
11032 return ret;
11033 case 10:
11034 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11035 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11036 ret, "q", "", "er", 10);
11037 if (unlikely(ret))
11038 return ret;
11039 asm("":::"memory");
11040 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11041 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11042 ret, "w", "w", "ir", 2);
11043 return ret;
11044 case 16:
11045 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11046 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11047 ret, "q", "", "er", 16);
11048 if (unlikely(ret))
11049 return ret;
11050 asm("":::"memory");
11051 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11052 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11053 ret, "q", "", "er", 8);
11054 return ret;
11055 default:
11056 - return copy_user_generic((__force void *)dst, src, size);
11057 +
11058 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11059 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11060 + dst += PAX_USER_SHADOW_BASE;
11061 +#endif
11062 +
11063 + return copy_user_generic((__force_kernel void *)dst, src, size);
11064 }
11065 }
11066
11067 static __always_inline __must_check
11068 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11069 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
11070 {
11071 - int ret = 0;
11072 + unsigned ret = 0;
11073
11074 might_fault();
11075 - if (!__builtin_constant_p(size))
11076 - return copy_user_generic((__force void *)dst,
11077 - (__force void *)src, size);
11078 +
11079 + if (size > INT_MAX)
11080 + return size;
11081 +
11082 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11083 + if (!__access_ok(VERIFY_READ, src, size))
11084 + return size;
11085 + if (!__access_ok(VERIFY_WRITE, dst, size))
11086 + return size;
11087 +#endif
11088 +
11089 + if (!__builtin_constant_p(size)) {
11090 +
11091 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11092 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11093 + src += PAX_USER_SHADOW_BASE;
11094 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11095 + dst += PAX_USER_SHADOW_BASE;
11096 +#endif
11097 +
11098 + return copy_user_generic((__force_kernel void *)dst,
11099 + (__force_kernel const void *)src, size);
11100 + }
11101 switch (size) {
11102 case 1: {
11103 u8 tmp;
11104 - __get_user_asm(tmp, (u8 __user *)src,
11105 + __get_user_asm(tmp, (const u8 __user *)src,
11106 ret, "b", "b", "=q", 1);
11107 if (likely(!ret))
11108 __put_user_asm(tmp, (u8 __user *)dst,
11109 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11110 }
11111 case 2: {
11112 u16 tmp;
11113 - __get_user_asm(tmp, (u16 __user *)src,
11114 + __get_user_asm(tmp, (const u16 __user *)src,
11115 ret, "w", "w", "=r", 2);
11116 if (likely(!ret))
11117 __put_user_asm(tmp, (u16 __user *)dst,
11118 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11119
11120 case 4: {
11121 u32 tmp;
11122 - __get_user_asm(tmp, (u32 __user *)src,
11123 + __get_user_asm(tmp, (const u32 __user *)src,
11124 ret, "l", "k", "=r", 4);
11125 if (likely(!ret))
11126 __put_user_asm(tmp, (u32 __user *)dst,
11127 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11128 }
11129 case 8: {
11130 u64 tmp;
11131 - __get_user_asm(tmp, (u64 __user *)src,
11132 + __get_user_asm(tmp, (const u64 __user *)src,
11133 ret, "q", "", "=r", 8);
11134 if (likely(!ret))
11135 __put_user_asm(tmp, (u64 __user *)dst,
11136 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11137 return ret;
11138 }
11139 default:
11140 - return copy_user_generic((__force void *)dst,
11141 - (__force void *)src, size);
11142 +
11143 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11144 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11145 + src += PAX_USER_SHADOW_BASE;
11146 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11147 + dst += PAX_USER_SHADOW_BASE;
11148 +#endif
11149 +
11150 + return copy_user_generic((__force_kernel void *)dst,
11151 + (__force_kernel const void *)src, size);
11152 }
11153 }
11154
11155 @@ -219,35 +318,74 @@ __must_check unsigned long clear_user(void __user *mem, unsigned long len);
11156 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11157
11158 static __must_check __always_inline int
11159 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11160 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
11161 {
11162 - return copy_user_generic(dst, (__force const void *)src, size);
11163 + pax_track_stack();
11164 +
11165 + if (size > INT_MAX)
11166 + return size;
11167 +
11168 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11169 + if (!__access_ok(VERIFY_READ, src, size))
11170 + return size;
11171 +
11172 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11173 + src += PAX_USER_SHADOW_BASE;
11174 +#endif
11175 +
11176 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11177 }
11178
11179 -static __must_check __always_inline int
11180 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11181 +static __must_check __always_inline unsigned long
11182 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
11183 {
11184 - return copy_user_generic((__force void *)dst, src, size);
11185 + if (size > INT_MAX)
11186 + return size;
11187 +
11188 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11189 + if (!__access_ok(VERIFY_WRITE, dst, size))
11190 + return size;
11191 +
11192 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11193 + dst += PAX_USER_SHADOW_BASE;
11194 +#endif
11195 +
11196 + return copy_user_generic((__force_kernel void *)dst, src, size);
11197 }
11198
11199 -extern long __copy_user_nocache(void *dst, const void __user *src,
11200 - unsigned size, int zerorest);
11201 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11202 + unsigned long size, int zerorest);
11203
11204 -static inline int
11205 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11206 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
11207 {
11208 might_sleep();
11209 +
11210 + if (size > INT_MAX)
11211 + return size;
11212 +
11213 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11214 + if (!__access_ok(VERIFY_READ, src, size))
11215 + return size;
11216 +#endif
11217 +
11218 return __copy_user_nocache(dst, src, size, 1);
11219 }
11220
11221 -static inline int
11222 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11223 - unsigned size)
11224 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11225 + unsigned long size)
11226 {
11227 + if (size > INT_MAX)
11228 + return size;
11229 +
11230 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11231 + if (!__access_ok(VERIFY_READ, src, size))
11232 + return size;
11233 +#endif
11234 +
11235 return __copy_user_nocache(dst, src, size, 0);
11236 }
11237
11238 -unsigned long
11239 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11240 +extern unsigned long
11241 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
11242
11243 #endif /* _ASM_X86_UACCESS_64_H */
11244 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11245 index bb05228..d763d5b 100644
11246 --- a/arch/x86/include/asm/vdso.h
11247 +++ b/arch/x86/include/asm/vdso.h
11248 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11249 #define VDSO32_SYMBOL(base, name) \
11250 ({ \
11251 extern const char VDSO32_##name[]; \
11252 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11253 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11254 })
11255 #endif
11256
11257 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11258 index d3d8590..d296b5f 100644
11259 --- a/arch/x86/include/asm/x86_init.h
11260 +++ b/arch/x86/include/asm/x86_init.h
11261 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11262 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11263 void (*find_smp_config)(void);
11264 void (*get_smp_config)(unsigned int early);
11265 -};
11266 +} __no_const;
11267
11268 /**
11269 * struct x86_init_resources - platform specific resource related ops
11270 @@ -42,7 +42,7 @@ struct x86_init_resources {
11271 void (*probe_roms)(void);
11272 void (*reserve_resources)(void);
11273 char *(*memory_setup)(void);
11274 -};
11275 +} __no_const;
11276
11277 /**
11278 * struct x86_init_irqs - platform specific interrupt setup
11279 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11280 void (*pre_vector_init)(void);
11281 void (*intr_init)(void);
11282 void (*trap_init)(void);
11283 -};
11284 +} __no_const;
11285
11286 /**
11287 * struct x86_init_oem - oem platform specific customizing functions
11288 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11289 struct x86_init_oem {
11290 void (*arch_setup)(void);
11291 void (*banner)(void);
11292 -};
11293 +} __no_const;
11294
11295 /**
11296 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11297 @@ -76,7 +76,7 @@ struct x86_init_oem {
11298 */
11299 struct x86_init_mapping {
11300 void (*pagetable_reserve)(u64 start, u64 end);
11301 -};
11302 +} __no_const;
11303
11304 /**
11305 * struct x86_init_paging - platform specific paging functions
11306 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11307 struct x86_init_paging {
11308 void (*pagetable_setup_start)(pgd_t *base);
11309 void (*pagetable_setup_done)(pgd_t *base);
11310 -};
11311 +} __no_const;
11312
11313 /**
11314 * struct x86_init_timers - platform specific timer setup
11315 @@ -101,7 +101,7 @@ struct x86_init_timers {
11316 void (*tsc_pre_init)(void);
11317 void (*timer_init)(void);
11318 void (*wallclock_init)(void);
11319 -};
11320 +} __no_const;
11321
11322 /**
11323 * struct x86_init_iommu - platform specific iommu setup
11324 @@ -109,7 +109,7 @@ struct x86_init_timers {
11325 */
11326 struct x86_init_iommu {
11327 int (*iommu_init)(void);
11328 -};
11329 +} __no_const;
11330
11331 /**
11332 * struct x86_init_pci - platform specific pci init functions
11333 @@ -123,7 +123,7 @@ struct x86_init_pci {
11334 int (*init)(void);
11335 void (*init_irq)(void);
11336 void (*fixup_irqs)(void);
11337 -};
11338 +} __no_const;
11339
11340 /**
11341 * struct x86_init_ops - functions for platform specific setup
11342 @@ -139,7 +139,7 @@ struct x86_init_ops {
11343 struct x86_init_timers timers;
11344 struct x86_init_iommu iommu;
11345 struct x86_init_pci pci;
11346 -};
11347 +} __no_const;
11348
11349 /**
11350 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11351 @@ -147,7 +147,7 @@ struct x86_init_ops {
11352 */
11353 struct x86_cpuinit_ops {
11354 void (*setup_percpu_clockev)(void);
11355 -};
11356 +} __no_const;
11357
11358 /**
11359 * struct x86_platform_ops - platform specific runtime functions
11360 @@ -166,7 +166,7 @@ struct x86_platform_ops {
11361 bool (*is_untracked_pat_range)(u64 start, u64 end);
11362 void (*nmi_init)(void);
11363 int (*i8042_detect)(void);
11364 -};
11365 +} __no_const;
11366
11367 struct pci_dev;
11368
11369 @@ -174,7 +174,7 @@ struct x86_msi_ops {
11370 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11371 void (*teardown_msi_irq)(unsigned int irq);
11372 void (*teardown_msi_irqs)(struct pci_dev *dev);
11373 -};
11374 +} __no_const;
11375
11376 extern struct x86_init_ops x86_init;
11377 extern struct x86_cpuinit_ops x86_cpuinit;
11378 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11379 index c6ce245..ffbdab7 100644
11380 --- a/arch/x86/include/asm/xsave.h
11381 +++ b/arch/x86/include/asm/xsave.h
11382 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11383 {
11384 int err;
11385
11386 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11387 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11388 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11389 +#endif
11390 +
11391 /*
11392 * Clear the xsave header first, so that reserved fields are
11393 * initialized to zero.
11394 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11395 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11396 {
11397 int err;
11398 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11399 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11400 u32 lmask = mask;
11401 u32 hmask = mask >> 32;
11402
11403 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11404 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11405 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11406 +#endif
11407 +
11408 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11409 "2:\n"
11410 ".section .fixup,\"ax\"\n"
11411 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11412 index 6a564ac..9b1340c 100644
11413 --- a/arch/x86/kernel/acpi/realmode/Makefile
11414 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11415 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11416 $(call cc-option, -fno-stack-protector) \
11417 $(call cc-option, -mpreferred-stack-boundary=2)
11418 KBUILD_CFLAGS += $(call cc-option, -m32)
11419 +ifdef CONSTIFY_PLUGIN
11420 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11421 +endif
11422 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11423 GCOV_PROFILE := n
11424
11425 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11426 index b4fd836..4358fe3 100644
11427 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11428 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11429 @@ -108,6 +108,9 @@ wakeup_code:
11430 /* Do any other stuff... */
11431
11432 #ifndef CONFIG_64BIT
11433 + /* Recheck NX bit overrides (64bit path does this in trampoline) */
11434 + call verify_cpu
11435 +
11436 /* This could also be done in C code... */
11437 movl pmode_cr3, %eax
11438 movl %eax, %cr3
11439 @@ -131,6 +134,7 @@ wakeup_code:
11440 movl pmode_cr0, %eax
11441 movl %eax, %cr0
11442 jmp pmode_return
11443 +# include "../../verify_cpu.S"
11444 #else
11445 pushw $0
11446 pushw trampoline_segment
11447 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11448 index 103b6ab..2004d0a 100644
11449 --- a/arch/x86/kernel/acpi/sleep.c
11450 +++ b/arch/x86/kernel/acpi/sleep.c
11451 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11452 header->trampoline_segment = trampoline_address() >> 4;
11453 #ifdef CONFIG_SMP
11454 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11455 +
11456 + pax_open_kernel();
11457 early_gdt_descr.address =
11458 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11459 + pax_close_kernel();
11460 +
11461 initial_gs = per_cpu_offset(smp_processor_id());
11462 #endif
11463 initial_code = (unsigned long)wakeup_long64;
11464 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11465 index 13ab720..95d5442 100644
11466 --- a/arch/x86/kernel/acpi/wakeup_32.S
11467 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11468 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11469 # and restore the stack ... but you need gdt for this to work
11470 movl saved_context_esp, %esp
11471
11472 - movl %cs:saved_magic, %eax
11473 - cmpl $0x12345678, %eax
11474 + cmpl $0x12345678, saved_magic
11475 jne bogus_magic
11476
11477 # jump to place where we left off
11478 - movl saved_eip, %eax
11479 - jmp *%eax
11480 + jmp *(saved_eip)
11481
11482 bogus_magic:
11483 jmp bogus_magic
11484 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11485 index c638228..16dfa8d 100644
11486 --- a/arch/x86/kernel/alternative.c
11487 +++ b/arch/x86/kernel/alternative.c
11488 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11489 */
11490 for (a = start; a < end; a++) {
11491 instr = (u8 *)&a->instr_offset + a->instr_offset;
11492 +
11493 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11494 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11495 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11496 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11497 +#endif
11498 +
11499 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11500 BUG_ON(a->replacementlen > a->instrlen);
11501 BUG_ON(a->instrlen > sizeof(insnbuf));
11502 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11503 for (poff = start; poff < end; poff++) {
11504 u8 *ptr = (u8 *)poff + *poff;
11505
11506 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11507 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11508 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11509 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11510 +#endif
11511 +
11512 if (!*poff || ptr < text || ptr >= text_end)
11513 continue;
11514 /* turn DS segment override prefix into lock prefix */
11515 - if (*ptr == 0x3e)
11516 + if (*ktla_ktva(ptr) == 0x3e)
11517 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11518 };
11519 mutex_unlock(&text_mutex);
11520 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11521 for (poff = start; poff < end; poff++) {
11522 u8 *ptr = (u8 *)poff + *poff;
11523
11524 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11525 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11526 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11527 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11528 +#endif
11529 +
11530 if (!*poff || ptr < text || ptr >= text_end)
11531 continue;
11532 /* turn lock prefix into DS segment override prefix */
11533 - if (*ptr == 0xf0)
11534 + if (*ktla_ktva(ptr) == 0xf0)
11535 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11536 };
11537 mutex_unlock(&text_mutex);
11538 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11539
11540 BUG_ON(p->len > MAX_PATCH_LEN);
11541 /* prep the buffer with the original instructions */
11542 - memcpy(insnbuf, p->instr, p->len);
11543 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11544 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11545 (unsigned long)p->instr, p->len);
11546
11547 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11548 if (smp_alt_once)
11549 free_init_pages("SMP alternatives",
11550 (unsigned long)__smp_locks,
11551 - (unsigned long)__smp_locks_end);
11552 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11553
11554 restart_nmi();
11555 }
11556 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11557 * instructions. And on the local CPU you need to be protected again NMI or MCE
11558 * handlers seeing an inconsistent instruction while you patch.
11559 */
11560 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11561 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11562 size_t len)
11563 {
11564 unsigned long flags;
11565 local_irq_save(flags);
11566 - memcpy(addr, opcode, len);
11567 +
11568 + pax_open_kernel();
11569 + memcpy(ktla_ktva(addr), opcode, len);
11570 sync_core();
11571 + pax_close_kernel();
11572 +
11573 local_irq_restore(flags);
11574 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11575 that causes hangs on some VIA CPUs. */
11576 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11577 */
11578 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11579 {
11580 - unsigned long flags;
11581 - char *vaddr;
11582 + unsigned char *vaddr = ktla_ktva(addr);
11583 struct page *pages[2];
11584 - int i;
11585 + size_t i;
11586
11587 if (!core_kernel_text((unsigned long)addr)) {
11588 - pages[0] = vmalloc_to_page(addr);
11589 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11590 + pages[0] = vmalloc_to_page(vaddr);
11591 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11592 } else {
11593 - pages[0] = virt_to_page(addr);
11594 + pages[0] = virt_to_page(vaddr);
11595 WARN_ON(!PageReserved(pages[0]));
11596 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11597 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11598 }
11599 BUG_ON(!pages[0]);
11600 - local_irq_save(flags);
11601 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11602 - if (pages[1])
11603 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11604 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11605 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11606 - clear_fixmap(FIX_TEXT_POKE0);
11607 - if (pages[1])
11608 - clear_fixmap(FIX_TEXT_POKE1);
11609 - local_flush_tlb();
11610 - sync_core();
11611 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11612 - that causes hangs on some VIA CPUs. */
11613 + text_poke_early(addr, opcode, len);
11614 for (i = 0; i < len; i++)
11615 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11616 - local_irq_restore(flags);
11617 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11618 return addr;
11619 }
11620
11621 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11622 index 52fa563..5de9d9c 100644
11623 --- a/arch/x86/kernel/apic/apic.c
11624 +++ b/arch/x86/kernel/apic/apic.c
11625 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11626 /*
11627 * Debug level, exported for io_apic.c
11628 */
11629 -unsigned int apic_verbosity;
11630 +int apic_verbosity;
11631
11632 int pic_mode;
11633
11634 @@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11635 apic_write(APIC_ESR, 0);
11636 v1 = apic_read(APIC_ESR);
11637 ack_APIC_irq();
11638 - atomic_inc(&irq_err_count);
11639 + atomic_inc_unchecked(&irq_err_count);
11640
11641 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11642 smp_processor_id(), v0 , v1);
11643 @@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(void)
11644 u16 *bios_cpu_apicid;
11645 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11646
11647 + pax_track_stack();
11648 +
11649 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11650 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11651
11652 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11653 index 8eb863e..32e6934 100644
11654 --- a/arch/x86/kernel/apic/io_apic.c
11655 +++ b/arch/x86/kernel/apic/io_apic.c
11656 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11657 }
11658 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11659
11660 -void lock_vector_lock(void)
11661 +void lock_vector_lock(void) __acquires(vector_lock)
11662 {
11663 /* Used to the online set of cpus does not change
11664 * during assign_irq_vector.
11665 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
11666 raw_spin_lock(&vector_lock);
11667 }
11668
11669 -void unlock_vector_lock(void)
11670 +void unlock_vector_lock(void) __releases(vector_lock)
11671 {
11672 raw_spin_unlock(&vector_lock);
11673 }
11674 @@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_data *data)
11675 ack_APIC_irq();
11676 }
11677
11678 -atomic_t irq_mis_count;
11679 +atomic_unchecked_t irq_mis_count;
11680
11681 /*
11682 * IO-APIC versions below 0x20 don't support EOI register.
11683 @@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_data *data)
11684 * at the cpu.
11685 */
11686 if (!(v & (1 << (i & 0x1f)))) {
11687 - atomic_inc(&irq_mis_count);
11688 + atomic_inc_unchecked(&irq_mis_count);
11689
11690 eoi_ioapic_irq(irq, cfg);
11691 }
11692 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11693 index 0371c48..54cdf63 100644
11694 --- a/arch/x86/kernel/apm_32.c
11695 +++ b/arch/x86/kernel/apm_32.c
11696 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
11697 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11698 * even though they are called in protected mode.
11699 */
11700 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11701 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11702 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11703
11704 static const char driver_version[] = "1.16ac"; /* no spaces */
11705 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
11706 BUG_ON(cpu != 0);
11707 gdt = get_cpu_gdt_table(cpu);
11708 save_desc_40 = gdt[0x40 / 8];
11709 +
11710 + pax_open_kernel();
11711 gdt[0x40 / 8] = bad_bios_desc;
11712 + pax_close_kernel();
11713
11714 apm_irq_save(flags);
11715 APM_DO_SAVE_SEGS;
11716 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
11717 &call->esi);
11718 APM_DO_RESTORE_SEGS;
11719 apm_irq_restore(flags);
11720 +
11721 + pax_open_kernel();
11722 gdt[0x40 / 8] = save_desc_40;
11723 + pax_close_kernel();
11724 +
11725 put_cpu();
11726
11727 return call->eax & 0xff;
11728 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void *_call)
11729 BUG_ON(cpu != 0);
11730 gdt = get_cpu_gdt_table(cpu);
11731 save_desc_40 = gdt[0x40 / 8];
11732 +
11733 + pax_open_kernel();
11734 gdt[0x40 / 8] = bad_bios_desc;
11735 + pax_close_kernel();
11736
11737 apm_irq_save(flags);
11738 APM_DO_SAVE_SEGS;
11739 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void *_call)
11740 &call->eax);
11741 APM_DO_RESTORE_SEGS;
11742 apm_irq_restore(flags);
11743 +
11744 + pax_open_kernel();
11745 gdt[0x40 / 8] = save_desc_40;
11746 + pax_close_kernel();
11747 +
11748 put_cpu();
11749 return error;
11750 }
11751 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
11752 * code to that CPU.
11753 */
11754 gdt = get_cpu_gdt_table(0);
11755 +
11756 + pax_open_kernel();
11757 set_desc_base(&gdt[APM_CS >> 3],
11758 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11759 set_desc_base(&gdt[APM_CS_16 >> 3],
11760 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11761 set_desc_base(&gdt[APM_DS >> 3],
11762 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11763 + pax_close_kernel();
11764
11765 proc_create("apm", 0, NULL, &apm_file_ops);
11766
11767 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11768 index 4f13faf..87db5d2 100644
11769 --- a/arch/x86/kernel/asm-offsets.c
11770 +++ b/arch/x86/kernel/asm-offsets.c
11771 @@ -33,6 +33,8 @@ void common(void) {
11772 OFFSET(TI_status, thread_info, status);
11773 OFFSET(TI_addr_limit, thread_info, addr_limit);
11774 OFFSET(TI_preempt_count, thread_info, preempt_count);
11775 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11776 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11777
11778 BLANK();
11779 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11780 @@ -53,8 +55,26 @@ void common(void) {
11781 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11782 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11783 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11784 +
11785 +#ifdef CONFIG_PAX_KERNEXEC
11786 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11787 +#endif
11788 +
11789 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11790 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11791 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11792 +#ifdef CONFIG_X86_64
11793 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11794 +#endif
11795 #endif
11796
11797 +#endif
11798 +
11799 + BLANK();
11800 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11801 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11802 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11803 +
11804 #ifdef CONFIG_XEN
11805 BLANK();
11806 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11807 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11808 index e72a119..6e2955d 100644
11809 --- a/arch/x86/kernel/asm-offsets_64.c
11810 +++ b/arch/x86/kernel/asm-offsets_64.c
11811 @@ -69,6 +69,7 @@ int main(void)
11812 BLANK();
11813 #undef ENTRY
11814
11815 + DEFINE(TSS_size, sizeof(struct tss_struct));
11816 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11817 BLANK();
11818
11819 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11820 index 6042981..e638266 100644
11821 --- a/arch/x86/kernel/cpu/Makefile
11822 +++ b/arch/x86/kernel/cpu/Makefile
11823 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11824 CFLAGS_REMOVE_perf_event.o = -pg
11825 endif
11826
11827 -# Make sure load_percpu_segment has no stackprotector
11828 -nostackp := $(call cc-option, -fno-stack-protector)
11829 -CFLAGS_common.o := $(nostackp)
11830 -
11831 obj-y := intel_cacheinfo.o scattered.o topology.o
11832 obj-y += proc.o capflags.o powerflags.o common.o
11833 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11834 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11835 index b13ed39..603286c 100644
11836 --- a/arch/x86/kernel/cpu/amd.c
11837 +++ b/arch/x86/kernel/cpu/amd.c
11838 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11839 unsigned int size)
11840 {
11841 /* AMD errata T13 (order #21922) */
11842 - if ((c->x86 == 6)) {
11843 + if (c->x86 == 6) {
11844 /* Duron Rev A0 */
11845 if (c->x86_model == 3 && c->x86_mask == 0)
11846 size = 64;
11847 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11848 index 6218439..0f1addc 100644
11849 --- a/arch/x86/kernel/cpu/common.c
11850 +++ b/arch/x86/kernel/cpu/common.c
11851 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11852
11853 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11854
11855 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11856 -#ifdef CONFIG_X86_64
11857 - /*
11858 - * We need valid kernel segments for data and code in long mode too
11859 - * IRET will check the segment types kkeil 2000/10/28
11860 - * Also sysret mandates a special GDT layout
11861 - *
11862 - * TLS descriptors are currently at a different place compared to i386.
11863 - * Hopefully nobody expects them at a fixed place (Wine?)
11864 - */
11865 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11866 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11867 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11868 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11869 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11870 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11871 -#else
11872 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11873 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11874 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11875 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11876 - /*
11877 - * Segments used for calling PnP BIOS have byte granularity.
11878 - * They code segments and data segments have fixed 64k limits,
11879 - * the transfer segment sizes are set at run time.
11880 - */
11881 - /* 32-bit code */
11882 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11883 - /* 16-bit code */
11884 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11885 - /* 16-bit data */
11886 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11887 - /* 16-bit data */
11888 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11889 - /* 16-bit data */
11890 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11891 - /*
11892 - * The APM segments have byte granularity and their bases
11893 - * are set at run time. All have 64k limits.
11894 - */
11895 - /* 32-bit code */
11896 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11897 - /* 16-bit code */
11898 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11899 - /* data */
11900 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11901 -
11902 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11903 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11904 - GDT_STACK_CANARY_INIT
11905 -#endif
11906 -} };
11907 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11908 -
11909 static int __init x86_xsave_setup(char *s)
11910 {
11911 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11912 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
11913 {
11914 struct desc_ptr gdt_descr;
11915
11916 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11917 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11918 gdt_descr.size = GDT_SIZE - 1;
11919 load_gdt(&gdt_descr);
11920 /* Reload the per-cpu base */
11921 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
11922 /* Filter out anything that depends on CPUID levels we don't have */
11923 filter_cpuid_features(c, true);
11924
11925 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11926 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11927 +#endif
11928 +
11929 /* If the model name is still unset, do table lookup. */
11930 if (!c->x86_model_id[0]) {
11931 const char *p;
11932 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(char *arg)
11933 }
11934 __setup("clearcpuid=", setup_disablecpuid);
11935
11936 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11937 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11938 +
11939 #ifdef CONFIG_X86_64
11940 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11941
11942 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
11943 EXPORT_PER_CPU_SYMBOL(current_task);
11944
11945 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11946 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11947 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11948 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11949
11950 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11951 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
11952 {
11953 memset(regs, 0, sizeof(struct pt_regs));
11954 regs->fs = __KERNEL_PERCPU;
11955 - regs->gs = __KERNEL_STACK_CANARY;
11956 + savesegment(gs, regs->gs);
11957
11958 return regs;
11959 }
11960 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
11961 int i;
11962
11963 cpu = stack_smp_processor_id();
11964 - t = &per_cpu(init_tss, cpu);
11965 + t = init_tss + cpu;
11966 oist = &per_cpu(orig_ist, cpu);
11967
11968 #ifdef CONFIG_NUMA
11969 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
11970 switch_to_new_gdt(cpu);
11971 loadsegment(fs, 0);
11972
11973 - load_idt((const struct desc_ptr *)&idt_descr);
11974 + load_idt(&idt_descr);
11975
11976 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11977 syscall_init();
11978 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
11979 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11980 barrier();
11981
11982 - x86_configure_nx();
11983 if (cpu != 0)
11984 enable_x2apic();
11985
11986 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
11987 {
11988 int cpu = smp_processor_id();
11989 struct task_struct *curr = current;
11990 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11991 + struct tss_struct *t = init_tss + cpu;
11992 struct thread_struct *thread = &curr->thread;
11993
11994 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11995 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
11996 index ed6086e..a1dcf29 100644
11997 --- a/arch/x86/kernel/cpu/intel.c
11998 +++ b/arch/x86/kernel/cpu/intel.c
11999 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug(void)
12000 * Update the IDT descriptor and reload the IDT so that
12001 * it uses the read-only mapped virtual address.
12002 */
12003 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12004 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12005 load_idt(&idt_descr);
12006 }
12007 #endif
12008 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12009 index 0ed633c..82cef2a 100644
12010 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
12011 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
12012 @@ -215,7 +215,9 @@ static int inject_init(void)
12013 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
12014 return -ENOMEM;
12015 printk(KERN_INFO "Machine check injector initialized\n");
12016 - mce_chrdev_ops.write = mce_write;
12017 + pax_open_kernel();
12018 + *(void **)&mce_chrdev_ops.write = mce_write;
12019 + pax_close_kernel();
12020 register_die_notifier(&mce_raise_nb);
12021 return 0;
12022 }
12023 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
12024 index 08363b0..ee26113 100644
12025 --- a/arch/x86/kernel/cpu/mcheck/mce.c
12026 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
12027 @@ -42,6 +42,7 @@
12028 #include <asm/processor.h>
12029 #include <asm/mce.h>
12030 #include <asm/msr.h>
12031 +#include <asm/local.h>
12032
12033 #include "mce-internal.h"
12034
12035 @@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
12036 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12037 m->cs, m->ip);
12038
12039 - if (m->cs == __KERNEL_CS)
12040 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12041 print_symbol("{%s}", m->ip);
12042 pr_cont("\n");
12043 }
12044 @@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
12045
12046 #define PANIC_TIMEOUT 5 /* 5 seconds */
12047
12048 -static atomic_t mce_paniced;
12049 +static atomic_unchecked_t mce_paniced;
12050
12051 static int fake_panic;
12052 -static atomic_t mce_fake_paniced;
12053 +static atomic_unchecked_t mce_fake_paniced;
12054
12055 /* Panic in progress. Enable interrupts and wait for final IPI */
12056 static void wait_for_panic(void)
12057 @@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12058 /*
12059 * Make sure only one CPU runs in machine check panic
12060 */
12061 - if (atomic_inc_return(&mce_paniced) > 1)
12062 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12063 wait_for_panic();
12064 barrier();
12065
12066 @@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12067 console_verbose();
12068 } else {
12069 /* Don't log too much for fake panic */
12070 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12071 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12072 return;
12073 }
12074 /* First print corrected ones that are still unlogged */
12075 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12076 * might have been modified by someone else.
12077 */
12078 rmb();
12079 - if (atomic_read(&mce_paniced))
12080 + if (atomic_read_unchecked(&mce_paniced))
12081 wait_for_panic();
12082 if (!monarch_timeout)
12083 goto out;
12084 @@ -1392,7 +1393,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12085 }
12086
12087 /* Call the installed machine check handler for this CPU setup. */
12088 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12089 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12090 unexpected_machine_check;
12091
12092 /*
12093 @@ -1415,7 +1416,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12094 return;
12095 }
12096
12097 + pax_open_kernel();
12098 machine_check_vector = do_machine_check;
12099 + pax_close_kernel();
12100
12101 __mcheck_cpu_init_generic();
12102 __mcheck_cpu_init_vendor(c);
12103 @@ -1429,7 +1432,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12104 */
12105
12106 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12107 -static int mce_chrdev_open_count; /* #times opened */
12108 +static local_t mce_chrdev_open_count; /* #times opened */
12109 static int mce_chrdev_open_exclu; /* already open exclusive? */
12110
12111 static int mce_chrdev_open(struct inode *inode, struct file *file)
12112 @@ -1437,7 +1440,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12113 spin_lock(&mce_chrdev_state_lock);
12114
12115 if (mce_chrdev_open_exclu ||
12116 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12117 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12118 spin_unlock(&mce_chrdev_state_lock);
12119
12120 return -EBUSY;
12121 @@ -1445,7 +1448,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12122
12123 if (file->f_flags & O_EXCL)
12124 mce_chrdev_open_exclu = 1;
12125 - mce_chrdev_open_count++;
12126 + local_inc(&mce_chrdev_open_count);
12127
12128 spin_unlock(&mce_chrdev_state_lock);
12129
12130 @@ -1456,7 +1459,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12131 {
12132 spin_lock(&mce_chrdev_state_lock);
12133
12134 - mce_chrdev_open_count--;
12135 + local_dec(&mce_chrdev_open_count);
12136 mce_chrdev_open_exclu = 0;
12137
12138 spin_unlock(&mce_chrdev_state_lock);
12139 @@ -2147,7 +2150,7 @@ struct dentry *mce_get_debugfs_dir(void)
12140 static void mce_reset(void)
12141 {
12142 cpu_missing = 0;
12143 - atomic_set(&mce_fake_paniced, 0);
12144 + atomic_set_unchecked(&mce_fake_paniced, 0);
12145 atomic_set(&mce_executing, 0);
12146 atomic_set(&mce_callin, 0);
12147 atomic_set(&global_nwo, 0);
12148 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12149 index 5c0e653..1e82c7c 100644
12150 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12151 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12152 @@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12153 if (!cpu_has(c, X86_FEATURE_MCE))
12154 return;
12155
12156 + pax_open_kernel();
12157 machine_check_vector = pentium_machine_check;
12158 + pax_close_kernel();
12159 /* Make sure the vector pointer is visible before we enable MCEs: */
12160 wmb();
12161
12162 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12163 index 54060f5..e6ba93d 100644
12164 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12165 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12166 @@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12167 {
12168 u32 lo, hi;
12169
12170 + pax_open_kernel();
12171 machine_check_vector = winchip_machine_check;
12172 + pax_close_kernel();
12173 /* Make sure the vector pointer is visible before we enable MCEs: */
12174 wmb();
12175
12176 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12177 index 6b96110..0da73eb 100644
12178 --- a/arch/x86/kernel/cpu/mtrr/main.c
12179 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12180 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12181 u64 size_or_mask, size_and_mask;
12182 static bool mtrr_aps_delayed_init;
12183
12184 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12185 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12186
12187 const struct mtrr_ops *mtrr_if;
12188
12189 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12190 index df5e41f..816c719 100644
12191 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12192 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12193 @@ -25,7 +25,7 @@ struct mtrr_ops {
12194 int (*validate_add_page)(unsigned long base, unsigned long size,
12195 unsigned int type);
12196 int (*have_wrcomb)(void);
12197 -};
12198 +} __do_const;
12199
12200 extern int generic_get_free_region(unsigned long base, unsigned long size,
12201 int replace_reg);
12202 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12203 index cfa62ec..9250dd7 100644
12204 --- a/arch/x86/kernel/cpu/perf_event.c
12205 +++ b/arch/x86/kernel/cpu/perf_event.c
12206 @@ -795,6 +795,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
12207 int i, j, w, wmax, num = 0;
12208 struct hw_perf_event *hwc;
12209
12210 + pax_track_stack();
12211 +
12212 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
12213
12214 for (i = 0; i < n; i++) {
12215 @@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12216 break;
12217
12218 perf_callchain_store(entry, frame.return_address);
12219 - fp = frame.next_frame;
12220 + fp = (const void __force_user *)frame.next_frame;
12221 }
12222 }
12223
12224 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12225 index 764c7c2..c5d9c7b 100644
12226 --- a/arch/x86/kernel/crash.c
12227 +++ b/arch/x86/kernel/crash.c
12228 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
12229 regs = args->regs;
12230
12231 #ifdef CONFIG_X86_32
12232 - if (!user_mode_vm(regs)) {
12233 + if (!user_mode(regs)) {
12234 crash_fixup_ss_esp(&fixed_regs, regs);
12235 regs = &fixed_regs;
12236 }
12237 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12238 index 37250fe..bf2ec74 100644
12239 --- a/arch/x86/kernel/doublefault_32.c
12240 +++ b/arch/x86/kernel/doublefault_32.c
12241 @@ -11,7 +11,7 @@
12242
12243 #define DOUBLEFAULT_STACKSIZE (1024)
12244 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12245 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12246 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12247
12248 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12249
12250 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12251 unsigned long gdt, tss;
12252
12253 store_gdt(&gdt_desc);
12254 - gdt = gdt_desc.address;
12255 + gdt = (unsigned long)gdt_desc.address;
12256
12257 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12258
12259 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12260 /* 0x2 bit is always set */
12261 .flags = X86_EFLAGS_SF | 0x2,
12262 .sp = STACK_START,
12263 - .es = __USER_DS,
12264 + .es = __KERNEL_DS,
12265 .cs = __KERNEL_CS,
12266 .ss = __KERNEL_DS,
12267 - .ds = __USER_DS,
12268 + .ds = __KERNEL_DS,
12269 .fs = __KERNEL_PERCPU,
12270
12271 .__cr3 = __pa_nodebug(swapper_pg_dir),
12272 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12273 index 1aae78f..aab3a3d 100644
12274 --- a/arch/x86/kernel/dumpstack.c
12275 +++ b/arch/x86/kernel/dumpstack.c
12276 @@ -2,6 +2,9 @@
12277 * Copyright (C) 1991, 1992 Linus Torvalds
12278 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12279 */
12280 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12281 +#define __INCLUDED_BY_HIDESYM 1
12282 +#endif
12283 #include <linux/kallsyms.h>
12284 #include <linux/kprobes.h>
12285 #include <linux/uaccess.h>
12286 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12287 static void
12288 print_ftrace_graph_addr(unsigned long addr, void *data,
12289 const struct stacktrace_ops *ops,
12290 - struct thread_info *tinfo, int *graph)
12291 + struct task_struct *task, int *graph)
12292 {
12293 - struct task_struct *task = tinfo->task;
12294 unsigned long ret_addr;
12295 int index = task->curr_ret_stack;
12296
12297 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12298 static inline void
12299 print_ftrace_graph_addr(unsigned long addr, void *data,
12300 const struct stacktrace_ops *ops,
12301 - struct thread_info *tinfo, int *graph)
12302 + struct task_struct *task, int *graph)
12303 { }
12304 #endif
12305
12306 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12307 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12308 */
12309
12310 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12311 - void *p, unsigned int size, void *end)
12312 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12313 {
12314 - void *t = tinfo;
12315 if (end) {
12316 if (p < end && p >= (end-THREAD_SIZE))
12317 return 1;
12318 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12319 }
12320
12321 unsigned long
12322 -print_context_stack(struct thread_info *tinfo,
12323 +print_context_stack(struct task_struct *task, void *stack_start,
12324 unsigned long *stack, unsigned long bp,
12325 const struct stacktrace_ops *ops, void *data,
12326 unsigned long *end, int *graph)
12327 {
12328 struct stack_frame *frame = (struct stack_frame *)bp;
12329
12330 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12331 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12332 unsigned long addr;
12333
12334 addr = *stack;
12335 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12336 } else {
12337 ops->address(data, addr, 0);
12338 }
12339 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12340 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12341 }
12342 stack++;
12343 }
12344 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12345 EXPORT_SYMBOL_GPL(print_context_stack);
12346
12347 unsigned long
12348 -print_context_stack_bp(struct thread_info *tinfo,
12349 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12350 unsigned long *stack, unsigned long bp,
12351 const struct stacktrace_ops *ops, void *data,
12352 unsigned long *end, int *graph)
12353 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12354 struct stack_frame *frame = (struct stack_frame *)bp;
12355 unsigned long *ret_addr = &frame->return_address;
12356
12357 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12358 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12359 unsigned long addr = *ret_addr;
12360
12361 if (!__kernel_text_address(addr))
12362 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12363 ops->address(data, addr, 1);
12364 frame = frame->next_frame;
12365 ret_addr = &frame->return_address;
12366 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12367 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12368 }
12369
12370 return (unsigned long)frame;
12371 @@ -186,7 +186,7 @@ void dump_stack(void)
12372
12373 bp = stack_frame(current, NULL);
12374 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12375 - current->pid, current->comm, print_tainted(),
12376 + task_pid_nr(current), current->comm, print_tainted(),
12377 init_utsname()->release,
12378 (int)strcspn(init_utsname()->version, " "),
12379 init_utsname()->version);
12380 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12381 }
12382 EXPORT_SYMBOL_GPL(oops_begin);
12383
12384 +extern void gr_handle_kernel_exploit(void);
12385 +
12386 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12387 {
12388 if (regs && kexec_should_crash(current))
12389 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12390 panic("Fatal exception in interrupt");
12391 if (panic_on_oops)
12392 panic("Fatal exception");
12393 - do_exit(signr);
12394 +
12395 + gr_handle_kernel_exploit();
12396 +
12397 + do_group_exit(signr);
12398 }
12399
12400 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12401 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12402
12403 show_registers(regs);
12404 #ifdef CONFIG_X86_32
12405 - if (user_mode_vm(regs)) {
12406 + if (user_mode(regs)) {
12407 sp = regs->sp;
12408 ss = regs->ss & 0xffff;
12409 } else {
12410 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12411 unsigned long flags = oops_begin();
12412 int sig = SIGSEGV;
12413
12414 - if (!user_mode_vm(regs))
12415 + if (!user_mode(regs))
12416 report_bug(regs->ip, regs);
12417
12418 if (__die(str, regs, err))
12419 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12420 index 3b97a80..667ce7a 100644
12421 --- a/arch/x86/kernel/dumpstack_32.c
12422 +++ b/arch/x86/kernel/dumpstack_32.c
12423 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12424 bp = stack_frame(task, regs);
12425
12426 for (;;) {
12427 - struct thread_info *context;
12428 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12429
12430 - context = (struct thread_info *)
12431 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12432 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12433 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12434
12435 - stack = (unsigned long *)context->previous_esp;
12436 - if (!stack)
12437 + if (stack_start == task_stack_page(task))
12438 break;
12439 + stack = *(unsigned long **)stack_start;
12440 if (ops->stack(data, "IRQ") < 0)
12441 break;
12442 touch_nmi_watchdog();
12443 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12444 * When in-kernel, we also print out the stack and code at the
12445 * time of the fault..
12446 */
12447 - if (!user_mode_vm(regs)) {
12448 + if (!user_mode(regs)) {
12449 unsigned int code_prologue = code_bytes * 43 / 64;
12450 unsigned int code_len = code_bytes;
12451 unsigned char c;
12452 u8 *ip;
12453 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12454
12455 printk(KERN_EMERG "Stack:\n");
12456 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12457
12458 printk(KERN_EMERG "Code: ");
12459
12460 - ip = (u8 *)regs->ip - code_prologue;
12461 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12462 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12463 /* try starting at IP */
12464 - ip = (u8 *)regs->ip;
12465 + ip = (u8 *)regs->ip + cs_base;
12466 code_len = code_len - code_prologue + 1;
12467 }
12468 for (i = 0; i < code_len; i++, ip++) {
12469 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12470 printk(" Bad EIP value.");
12471 break;
12472 }
12473 - if (ip == (u8 *)regs->ip)
12474 + if (ip == (u8 *)regs->ip + cs_base)
12475 printk("<%02x> ", c);
12476 else
12477 printk("%02x ", c);
12478 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12479 {
12480 unsigned short ud2;
12481
12482 + ip = ktla_ktva(ip);
12483 if (ip < PAGE_OFFSET)
12484 return 0;
12485 if (probe_kernel_address((unsigned short *)ip, ud2))
12486 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12487
12488 return ud2 == 0x0b0f;
12489 }
12490 +
12491 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12492 +void pax_check_alloca(unsigned long size)
12493 +{
12494 + unsigned long sp = (unsigned long)&sp, stack_left;
12495 +
12496 + /* all kernel stacks are of the same size */
12497 + stack_left = sp & (THREAD_SIZE - 1);
12498 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12499 +}
12500 +EXPORT_SYMBOL(pax_check_alloca);
12501 +#endif
12502 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12503 index 19853ad..508ca79 100644
12504 --- a/arch/x86/kernel/dumpstack_64.c
12505 +++ b/arch/x86/kernel/dumpstack_64.c
12506 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12507 unsigned long *irq_stack_end =
12508 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12509 unsigned used = 0;
12510 - struct thread_info *tinfo;
12511 int graph = 0;
12512 unsigned long dummy;
12513 + void *stack_start;
12514
12515 if (!task)
12516 task = current;
12517 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12518 * current stack address. If the stacks consist of nested
12519 * exceptions
12520 */
12521 - tinfo = task_thread_info(task);
12522 for (;;) {
12523 char *id;
12524 unsigned long *estack_end;
12525 +
12526 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12527 &used, &id);
12528
12529 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12530 if (ops->stack(data, id) < 0)
12531 break;
12532
12533 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12534 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12535 data, estack_end, &graph);
12536 ops->stack(data, "<EOE>");
12537 /*
12538 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12539 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12540 if (ops->stack(data, "IRQ") < 0)
12541 break;
12542 - bp = ops->walk_stack(tinfo, stack, bp,
12543 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12544 ops, data, irq_stack_end, &graph);
12545 /*
12546 * We link to the next stack (which would be
12547 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12548 /*
12549 * This handles the process stack:
12550 */
12551 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12552 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12553 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12554 put_cpu();
12555 }
12556 EXPORT_SYMBOL(dump_trace);
12557 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12558
12559 return ud2 == 0x0b0f;
12560 }
12561 +
12562 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12563 +void pax_check_alloca(unsigned long size)
12564 +{
12565 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12566 + unsigned cpu, used;
12567 + char *id;
12568 +
12569 + /* check the process stack first */
12570 + stack_start = (unsigned long)task_stack_page(current);
12571 + stack_end = stack_start + THREAD_SIZE;
12572 + if (likely(stack_start <= sp && sp < stack_end)) {
12573 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12574 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12575 + return;
12576 + }
12577 +
12578 + cpu = get_cpu();
12579 +
12580 + /* check the irq stacks */
12581 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12582 + stack_start = stack_end - IRQ_STACK_SIZE;
12583 + if (stack_start <= sp && sp < stack_end) {
12584 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12585 + put_cpu();
12586 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12587 + return;
12588 + }
12589 +
12590 + /* check the exception stacks */
12591 + used = 0;
12592 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12593 + stack_start = stack_end - EXCEPTION_STKSZ;
12594 + if (stack_end && stack_start <= sp && sp < stack_end) {
12595 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12596 + put_cpu();
12597 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12598 + return;
12599 + }
12600 +
12601 + put_cpu();
12602 +
12603 + /* unknown stack */
12604 + BUG();
12605 +}
12606 +EXPORT_SYMBOL(pax_check_alloca);
12607 +#endif
12608 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12609 index cd28a35..2601699 100644
12610 --- a/arch/x86/kernel/early_printk.c
12611 +++ b/arch/x86/kernel/early_printk.c
12612 @@ -7,6 +7,7 @@
12613 #include <linux/pci_regs.h>
12614 #include <linux/pci_ids.h>
12615 #include <linux/errno.h>
12616 +#include <linux/sched.h>
12617 #include <asm/io.h>
12618 #include <asm/processor.h>
12619 #include <asm/fcntl.h>
12620 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char *fmt, ...)
12621 int n;
12622 va_list ap;
12623
12624 + pax_track_stack();
12625 +
12626 va_start(ap, fmt);
12627 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12628 early_console->write(early_console, buf, n);
12629 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12630 index f3f6f53..0841b66 100644
12631 --- a/arch/x86/kernel/entry_32.S
12632 +++ b/arch/x86/kernel/entry_32.S
12633 @@ -186,13 +186,146 @@
12634 /*CFI_REL_OFFSET gs, PT_GS*/
12635 .endm
12636 .macro SET_KERNEL_GS reg
12637 +
12638 +#ifdef CONFIG_CC_STACKPROTECTOR
12639 movl $(__KERNEL_STACK_CANARY), \reg
12640 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12641 + movl $(__USER_DS), \reg
12642 +#else
12643 + xorl \reg, \reg
12644 +#endif
12645 +
12646 movl \reg, %gs
12647 .endm
12648
12649 #endif /* CONFIG_X86_32_LAZY_GS */
12650
12651 -.macro SAVE_ALL
12652 +.macro pax_enter_kernel
12653 +#ifdef CONFIG_PAX_KERNEXEC
12654 + call pax_enter_kernel
12655 +#endif
12656 +.endm
12657 +
12658 +.macro pax_exit_kernel
12659 +#ifdef CONFIG_PAX_KERNEXEC
12660 + call pax_exit_kernel
12661 +#endif
12662 +.endm
12663 +
12664 +#ifdef CONFIG_PAX_KERNEXEC
12665 +ENTRY(pax_enter_kernel)
12666 +#ifdef CONFIG_PARAVIRT
12667 + pushl %eax
12668 + pushl %ecx
12669 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12670 + mov %eax, %esi
12671 +#else
12672 + mov %cr0, %esi
12673 +#endif
12674 + bts $16, %esi
12675 + jnc 1f
12676 + mov %cs, %esi
12677 + cmp $__KERNEL_CS, %esi
12678 + jz 3f
12679 + ljmp $__KERNEL_CS, $3f
12680 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12681 +2:
12682 +#ifdef CONFIG_PARAVIRT
12683 + mov %esi, %eax
12684 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12685 +#else
12686 + mov %esi, %cr0
12687 +#endif
12688 +3:
12689 +#ifdef CONFIG_PARAVIRT
12690 + popl %ecx
12691 + popl %eax
12692 +#endif
12693 + ret
12694 +ENDPROC(pax_enter_kernel)
12695 +
12696 +ENTRY(pax_exit_kernel)
12697 +#ifdef CONFIG_PARAVIRT
12698 + pushl %eax
12699 + pushl %ecx
12700 +#endif
12701 + mov %cs, %esi
12702 + cmp $__KERNEXEC_KERNEL_CS, %esi
12703 + jnz 2f
12704 +#ifdef CONFIG_PARAVIRT
12705 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12706 + mov %eax, %esi
12707 +#else
12708 + mov %cr0, %esi
12709 +#endif
12710 + btr $16, %esi
12711 + ljmp $__KERNEL_CS, $1f
12712 +1:
12713 +#ifdef CONFIG_PARAVIRT
12714 + mov %esi, %eax
12715 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12716 +#else
12717 + mov %esi, %cr0
12718 +#endif
12719 +2:
12720 +#ifdef CONFIG_PARAVIRT
12721 + popl %ecx
12722 + popl %eax
12723 +#endif
12724 + ret
12725 +ENDPROC(pax_exit_kernel)
12726 +#endif
12727 +
12728 +.macro pax_erase_kstack
12729 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12730 + call pax_erase_kstack
12731 +#endif
12732 +.endm
12733 +
12734 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12735 +/*
12736 + * ebp: thread_info
12737 + * ecx, edx: can be clobbered
12738 + */
12739 +ENTRY(pax_erase_kstack)
12740 + pushl %edi
12741 + pushl %eax
12742 +
12743 + mov TI_lowest_stack(%ebp), %edi
12744 + mov $-0xBEEF, %eax
12745 + std
12746 +
12747 +1: mov %edi, %ecx
12748 + and $THREAD_SIZE_asm - 1, %ecx
12749 + shr $2, %ecx
12750 + repne scasl
12751 + jecxz 2f
12752 +
12753 + cmp $2*16, %ecx
12754 + jc 2f
12755 +
12756 + mov $2*16, %ecx
12757 + repe scasl
12758 + jecxz 2f
12759 + jne 1b
12760 +
12761 +2: cld
12762 + mov %esp, %ecx
12763 + sub %edi, %ecx
12764 + shr $2, %ecx
12765 + rep stosl
12766 +
12767 + mov TI_task_thread_sp0(%ebp), %edi
12768 + sub $128, %edi
12769 + mov %edi, TI_lowest_stack(%ebp)
12770 +
12771 + popl %eax
12772 + popl %edi
12773 + ret
12774 +ENDPROC(pax_erase_kstack)
12775 +#endif
12776 +
12777 +.macro __SAVE_ALL _DS
12778 cld
12779 PUSH_GS
12780 pushl_cfi %fs
12781 @@ -215,7 +348,7 @@
12782 CFI_REL_OFFSET ecx, 0
12783 pushl_cfi %ebx
12784 CFI_REL_OFFSET ebx, 0
12785 - movl $(__USER_DS), %edx
12786 + movl $\_DS, %edx
12787 movl %edx, %ds
12788 movl %edx, %es
12789 movl $(__KERNEL_PERCPU), %edx
12790 @@ -223,6 +356,15 @@
12791 SET_KERNEL_GS %edx
12792 .endm
12793
12794 +.macro SAVE_ALL
12795 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12796 + __SAVE_ALL __KERNEL_DS
12797 + pax_enter_kernel
12798 +#else
12799 + __SAVE_ALL __USER_DS
12800 +#endif
12801 +.endm
12802 +
12803 .macro RESTORE_INT_REGS
12804 popl_cfi %ebx
12805 CFI_RESTORE ebx
12806 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12807 popfl_cfi
12808 jmp syscall_exit
12809 CFI_ENDPROC
12810 -END(ret_from_fork)
12811 +ENDPROC(ret_from_fork)
12812
12813 /*
12814 * Interrupt exit functions should be protected against kprobes
12815 @@ -333,7 +475,15 @@ check_userspace:
12816 movb PT_CS(%esp), %al
12817 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12818 cmpl $USER_RPL, %eax
12819 +
12820 +#ifdef CONFIG_PAX_KERNEXEC
12821 + jae resume_userspace
12822 +
12823 + PAX_EXIT_KERNEL
12824 + jmp resume_kernel
12825 +#else
12826 jb resume_kernel # not returning to v8086 or userspace
12827 +#endif
12828
12829 ENTRY(resume_userspace)
12830 LOCKDEP_SYS_EXIT
12831 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12832 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12833 # int/exception return?
12834 jne work_pending
12835 - jmp restore_all
12836 -END(ret_from_exception)
12837 + jmp restore_all_pax
12838 +ENDPROC(ret_from_exception)
12839
12840 #ifdef CONFIG_PREEMPT
12841 ENTRY(resume_kernel)
12842 @@ -361,7 +511,7 @@ need_resched:
12843 jz restore_all
12844 call preempt_schedule_irq
12845 jmp need_resched
12846 -END(resume_kernel)
12847 +ENDPROC(resume_kernel)
12848 #endif
12849 CFI_ENDPROC
12850 /*
12851 @@ -395,23 +545,34 @@ sysenter_past_esp:
12852 /*CFI_REL_OFFSET cs, 0*/
12853 /*
12854 * Push current_thread_info()->sysenter_return to the stack.
12855 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12856 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12857 */
12858 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12859 + pushl_cfi $0
12860 CFI_REL_OFFSET eip, 0
12861
12862 pushl_cfi %eax
12863 SAVE_ALL
12864 + GET_THREAD_INFO(%ebp)
12865 + movl TI_sysenter_return(%ebp),%ebp
12866 + movl %ebp,PT_EIP(%esp)
12867 ENABLE_INTERRUPTS(CLBR_NONE)
12868
12869 /*
12870 * Load the potential sixth argument from user stack.
12871 * Careful about security.
12872 */
12873 + movl PT_OLDESP(%esp),%ebp
12874 +
12875 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12876 + mov PT_OLDSS(%esp),%ds
12877 +1: movl %ds:(%ebp),%ebp
12878 + push %ss
12879 + pop %ds
12880 +#else
12881 cmpl $__PAGE_OFFSET-3,%ebp
12882 jae syscall_fault
12883 1: movl (%ebp),%ebp
12884 +#endif
12885 +
12886 movl %ebp,PT_EBP(%esp)
12887 .section __ex_table,"a"
12888 .align 4
12889 @@ -434,12 +595,24 @@ sysenter_do_call:
12890 testl $_TIF_ALLWORK_MASK, %ecx
12891 jne sysexit_audit
12892 sysenter_exit:
12893 +
12894 +#ifdef CONFIG_PAX_RANDKSTACK
12895 + pushl_cfi %eax
12896 + movl %esp, %eax
12897 + call pax_randomize_kstack
12898 + popl_cfi %eax
12899 +#endif
12900 +
12901 + pax_erase_kstack
12902 +
12903 /* if something modifies registers it must also disable sysexit */
12904 movl PT_EIP(%esp), %edx
12905 movl PT_OLDESP(%esp), %ecx
12906 xorl %ebp,%ebp
12907 TRACE_IRQS_ON
12908 1: mov PT_FS(%esp), %fs
12909 +2: mov PT_DS(%esp), %ds
12910 +3: mov PT_ES(%esp), %es
12911 PTGS_TO_GS
12912 ENABLE_INTERRUPTS_SYSEXIT
12913
12914 @@ -456,6 +629,9 @@ sysenter_audit:
12915 movl %eax,%edx /* 2nd arg: syscall number */
12916 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12917 call audit_syscall_entry
12918 +
12919 + pax_erase_kstack
12920 +
12921 pushl_cfi %ebx
12922 movl PT_EAX(%esp),%eax /* reload syscall number */
12923 jmp sysenter_do_call
12924 @@ -482,11 +658,17 @@ sysexit_audit:
12925
12926 CFI_ENDPROC
12927 .pushsection .fixup,"ax"
12928 -2: movl $0,PT_FS(%esp)
12929 +4: movl $0,PT_FS(%esp)
12930 + jmp 1b
12931 +5: movl $0,PT_DS(%esp)
12932 + jmp 1b
12933 +6: movl $0,PT_ES(%esp)
12934 jmp 1b
12935 .section __ex_table,"a"
12936 .align 4
12937 - .long 1b,2b
12938 + .long 1b,4b
12939 + .long 2b,5b
12940 + .long 3b,6b
12941 .popsection
12942 PTGS_TO_GS_EX
12943 ENDPROC(ia32_sysenter_target)
12944 @@ -519,6 +701,15 @@ syscall_exit:
12945 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12946 jne syscall_exit_work
12947
12948 +restore_all_pax:
12949 +
12950 +#ifdef CONFIG_PAX_RANDKSTACK
12951 + movl %esp, %eax
12952 + call pax_randomize_kstack
12953 +#endif
12954 +
12955 + pax_erase_kstack
12956 +
12957 restore_all:
12958 TRACE_IRQS_IRET
12959 restore_all_notrace:
12960 @@ -578,14 +769,34 @@ ldt_ss:
12961 * compensating for the offset by changing to the ESPFIX segment with
12962 * a base address that matches for the difference.
12963 */
12964 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
12965 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
12966 mov %esp, %edx /* load kernel esp */
12967 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12968 mov %dx, %ax /* eax: new kernel esp */
12969 sub %eax, %edx /* offset (low word is 0) */
12970 +#ifdef CONFIG_SMP
12971 + movl PER_CPU_VAR(cpu_number), %ebx
12972 + shll $PAGE_SHIFT_asm, %ebx
12973 + addl $cpu_gdt_table, %ebx
12974 +#else
12975 + movl $cpu_gdt_table, %ebx
12976 +#endif
12977 shr $16, %edx
12978 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
12979 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
12980 +
12981 +#ifdef CONFIG_PAX_KERNEXEC
12982 + mov %cr0, %esi
12983 + btr $16, %esi
12984 + mov %esi, %cr0
12985 +#endif
12986 +
12987 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
12988 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
12989 +
12990 +#ifdef CONFIG_PAX_KERNEXEC
12991 + bts $16, %esi
12992 + mov %esi, %cr0
12993 +#endif
12994 +
12995 pushl_cfi $__ESPFIX_SS
12996 pushl_cfi %eax /* new kernel esp */
12997 /* Disable interrupts, but do not irqtrace this section: we
12998 @@ -614,34 +825,28 @@ work_resched:
12999 movl TI_flags(%ebp), %ecx
13000 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13001 # than syscall tracing?
13002 - jz restore_all
13003 + jz restore_all_pax
13004 testb $_TIF_NEED_RESCHED, %cl
13005 jnz work_resched
13006
13007 work_notifysig: # deal with pending signals and
13008 # notify-resume requests
13009 + movl %esp, %eax
13010 #ifdef CONFIG_VM86
13011 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13012 - movl %esp, %eax
13013 - jne work_notifysig_v86 # returning to kernel-space or
13014 + jz 1f # returning to kernel-space or
13015 # vm86-space
13016 - xorl %edx, %edx
13017 - call do_notify_resume
13018 - jmp resume_userspace_sig
13019
13020 - ALIGN
13021 -work_notifysig_v86:
13022 pushl_cfi %ecx # save ti_flags for do_notify_resume
13023 call save_v86_state # %eax contains pt_regs pointer
13024 popl_cfi %ecx
13025 movl %eax, %esp
13026 -#else
13027 - movl %esp, %eax
13028 +1:
13029 #endif
13030 xorl %edx, %edx
13031 call do_notify_resume
13032 jmp resume_userspace_sig
13033 -END(work_pending)
13034 +ENDPROC(work_pending)
13035
13036 # perform syscall exit tracing
13037 ALIGN
13038 @@ -649,11 +854,14 @@ syscall_trace_entry:
13039 movl $-ENOSYS,PT_EAX(%esp)
13040 movl %esp, %eax
13041 call syscall_trace_enter
13042 +
13043 + pax_erase_kstack
13044 +
13045 /* What it returned is what we'll actually use. */
13046 cmpl $(nr_syscalls), %eax
13047 jnae syscall_call
13048 jmp syscall_exit
13049 -END(syscall_trace_entry)
13050 +ENDPROC(syscall_trace_entry)
13051
13052 # perform syscall exit tracing
13053 ALIGN
13054 @@ -666,20 +874,24 @@ syscall_exit_work:
13055 movl %esp, %eax
13056 call syscall_trace_leave
13057 jmp resume_userspace
13058 -END(syscall_exit_work)
13059 +ENDPROC(syscall_exit_work)
13060 CFI_ENDPROC
13061
13062 RING0_INT_FRAME # can't unwind into user space anyway
13063 syscall_fault:
13064 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13065 + push %ss
13066 + pop %ds
13067 +#endif
13068 GET_THREAD_INFO(%ebp)
13069 movl $-EFAULT,PT_EAX(%esp)
13070 jmp resume_userspace
13071 -END(syscall_fault)
13072 +ENDPROC(syscall_fault)
13073
13074 syscall_badsys:
13075 movl $-ENOSYS,PT_EAX(%esp)
13076 jmp resume_userspace
13077 -END(syscall_badsys)
13078 +ENDPROC(syscall_badsys)
13079 CFI_ENDPROC
13080 /*
13081 * End of kprobes section
13082 @@ -753,6 +965,36 @@ ptregs_clone:
13083 CFI_ENDPROC
13084 ENDPROC(ptregs_clone)
13085
13086 + ALIGN;
13087 +ENTRY(kernel_execve)
13088 + CFI_STARTPROC
13089 + pushl_cfi %ebp
13090 + sub $PT_OLDSS+4,%esp
13091 + pushl_cfi %edi
13092 + pushl_cfi %ecx
13093 + pushl_cfi %eax
13094 + lea 3*4(%esp),%edi
13095 + mov $PT_OLDSS/4+1,%ecx
13096 + xorl %eax,%eax
13097 + rep stosl
13098 + popl_cfi %eax
13099 + popl_cfi %ecx
13100 + popl_cfi %edi
13101 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13102 + pushl_cfi %esp
13103 + call sys_execve
13104 + add $4,%esp
13105 + CFI_ADJUST_CFA_OFFSET -4
13106 + GET_THREAD_INFO(%ebp)
13107 + test %eax,%eax
13108 + jz syscall_exit
13109 + add $PT_OLDSS+4,%esp
13110 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13111 + popl_cfi %ebp
13112 + ret
13113 + CFI_ENDPROC
13114 +ENDPROC(kernel_execve)
13115 +
13116 .macro FIXUP_ESPFIX_STACK
13117 /*
13118 * Switch back for ESPFIX stack to the normal zerobased stack
13119 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13120 * normal stack and adjusts ESP with the matching offset.
13121 */
13122 /* fixup the stack */
13123 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13124 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13125 +#ifdef CONFIG_SMP
13126 + movl PER_CPU_VAR(cpu_number), %ebx
13127 + shll $PAGE_SHIFT_asm, %ebx
13128 + addl $cpu_gdt_table, %ebx
13129 +#else
13130 + movl $cpu_gdt_table, %ebx
13131 +#endif
13132 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13133 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13134 shl $16, %eax
13135 addl %esp, %eax /* the adjusted stack pointer */
13136 pushl_cfi $__KERNEL_DS
13137 @@ -816,7 +1065,7 @@ vector=vector+1
13138 .endr
13139 2: jmp common_interrupt
13140 .endr
13141 -END(irq_entries_start)
13142 +ENDPROC(irq_entries_start)
13143
13144 .previous
13145 END(interrupt)
13146 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13147 pushl_cfi $do_coprocessor_error
13148 jmp error_code
13149 CFI_ENDPROC
13150 -END(coprocessor_error)
13151 +ENDPROC(coprocessor_error)
13152
13153 ENTRY(simd_coprocessor_error)
13154 RING0_INT_FRAME
13155 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13156 #endif
13157 jmp error_code
13158 CFI_ENDPROC
13159 -END(simd_coprocessor_error)
13160 +ENDPROC(simd_coprocessor_error)
13161
13162 ENTRY(device_not_available)
13163 RING0_INT_FRAME
13164 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13165 pushl_cfi $do_device_not_available
13166 jmp error_code
13167 CFI_ENDPROC
13168 -END(device_not_available)
13169 +ENDPROC(device_not_available)
13170
13171 #ifdef CONFIG_PARAVIRT
13172 ENTRY(native_iret)
13173 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13174 .align 4
13175 .long native_iret, iret_exc
13176 .previous
13177 -END(native_iret)
13178 +ENDPROC(native_iret)
13179
13180 ENTRY(native_irq_enable_sysexit)
13181 sti
13182 sysexit
13183 -END(native_irq_enable_sysexit)
13184 +ENDPROC(native_irq_enable_sysexit)
13185 #endif
13186
13187 ENTRY(overflow)
13188 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13189 pushl_cfi $do_overflow
13190 jmp error_code
13191 CFI_ENDPROC
13192 -END(overflow)
13193 +ENDPROC(overflow)
13194
13195 ENTRY(bounds)
13196 RING0_INT_FRAME
13197 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13198 pushl_cfi $do_bounds
13199 jmp error_code
13200 CFI_ENDPROC
13201 -END(bounds)
13202 +ENDPROC(bounds)
13203
13204 ENTRY(invalid_op)
13205 RING0_INT_FRAME
13206 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13207 pushl_cfi $do_invalid_op
13208 jmp error_code
13209 CFI_ENDPROC
13210 -END(invalid_op)
13211 +ENDPROC(invalid_op)
13212
13213 ENTRY(coprocessor_segment_overrun)
13214 RING0_INT_FRAME
13215 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13216 pushl_cfi $do_coprocessor_segment_overrun
13217 jmp error_code
13218 CFI_ENDPROC
13219 -END(coprocessor_segment_overrun)
13220 +ENDPROC(coprocessor_segment_overrun)
13221
13222 ENTRY(invalid_TSS)
13223 RING0_EC_FRAME
13224 pushl_cfi $do_invalid_TSS
13225 jmp error_code
13226 CFI_ENDPROC
13227 -END(invalid_TSS)
13228 +ENDPROC(invalid_TSS)
13229
13230 ENTRY(segment_not_present)
13231 RING0_EC_FRAME
13232 pushl_cfi $do_segment_not_present
13233 jmp error_code
13234 CFI_ENDPROC
13235 -END(segment_not_present)
13236 +ENDPROC(segment_not_present)
13237
13238 ENTRY(stack_segment)
13239 RING0_EC_FRAME
13240 pushl_cfi $do_stack_segment
13241 jmp error_code
13242 CFI_ENDPROC
13243 -END(stack_segment)
13244 +ENDPROC(stack_segment)
13245
13246 ENTRY(alignment_check)
13247 RING0_EC_FRAME
13248 pushl_cfi $do_alignment_check
13249 jmp error_code
13250 CFI_ENDPROC
13251 -END(alignment_check)
13252 +ENDPROC(alignment_check)
13253
13254 ENTRY(divide_error)
13255 RING0_INT_FRAME
13256 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13257 pushl_cfi $do_divide_error
13258 jmp error_code
13259 CFI_ENDPROC
13260 -END(divide_error)
13261 +ENDPROC(divide_error)
13262
13263 #ifdef CONFIG_X86_MCE
13264 ENTRY(machine_check)
13265 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13266 pushl_cfi machine_check_vector
13267 jmp error_code
13268 CFI_ENDPROC
13269 -END(machine_check)
13270 +ENDPROC(machine_check)
13271 #endif
13272
13273 ENTRY(spurious_interrupt_bug)
13274 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13275 pushl_cfi $do_spurious_interrupt_bug
13276 jmp error_code
13277 CFI_ENDPROC
13278 -END(spurious_interrupt_bug)
13279 +ENDPROC(spurious_interrupt_bug)
13280 /*
13281 * End of kprobes section
13282 */
13283 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13284
13285 ENTRY(mcount)
13286 ret
13287 -END(mcount)
13288 +ENDPROC(mcount)
13289
13290 ENTRY(ftrace_caller)
13291 cmpl $0, function_trace_stop
13292 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13293 .globl ftrace_stub
13294 ftrace_stub:
13295 ret
13296 -END(ftrace_caller)
13297 +ENDPROC(ftrace_caller)
13298
13299 #else /* ! CONFIG_DYNAMIC_FTRACE */
13300
13301 @@ -1174,7 +1423,7 @@ trace:
13302 popl %ecx
13303 popl %eax
13304 jmp ftrace_stub
13305 -END(mcount)
13306 +ENDPROC(mcount)
13307 #endif /* CONFIG_DYNAMIC_FTRACE */
13308 #endif /* CONFIG_FUNCTION_TRACER */
13309
13310 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13311 popl %ecx
13312 popl %eax
13313 ret
13314 -END(ftrace_graph_caller)
13315 +ENDPROC(ftrace_graph_caller)
13316
13317 .globl return_to_handler
13318 return_to_handler:
13319 @@ -1209,7 +1458,6 @@ return_to_handler:
13320 jmp *%ecx
13321 #endif
13322
13323 -.section .rodata,"a"
13324 #include "syscall_table_32.S"
13325
13326 syscall_table_size=(.-sys_call_table)
13327 @@ -1255,15 +1503,18 @@ error_code:
13328 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13329 REG_TO_PTGS %ecx
13330 SET_KERNEL_GS %ecx
13331 - movl $(__USER_DS), %ecx
13332 + movl $(__KERNEL_DS), %ecx
13333 movl %ecx, %ds
13334 movl %ecx, %es
13335 +
13336 + pax_enter_kernel
13337 +
13338 TRACE_IRQS_OFF
13339 movl %esp,%eax # pt_regs pointer
13340 call *%edi
13341 jmp ret_from_exception
13342 CFI_ENDPROC
13343 -END(page_fault)
13344 +ENDPROC(page_fault)
13345
13346 /*
13347 * Debug traps and NMI can happen at the one SYSENTER instruction
13348 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13349 call do_debug
13350 jmp ret_from_exception
13351 CFI_ENDPROC
13352 -END(debug)
13353 +ENDPROC(debug)
13354
13355 /*
13356 * NMI is doubly nasty. It can happen _while_ we're handling
13357 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13358 xorl %edx,%edx # zero error code
13359 movl %esp,%eax # pt_regs pointer
13360 call do_nmi
13361 +
13362 + pax_exit_kernel
13363 +
13364 jmp restore_all_notrace
13365 CFI_ENDPROC
13366
13367 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13368 FIXUP_ESPFIX_STACK # %eax == %esp
13369 xorl %edx,%edx # zero error code
13370 call do_nmi
13371 +
13372 + pax_exit_kernel
13373 +
13374 RESTORE_REGS
13375 lss 12+4(%esp), %esp # back to espfix stack
13376 CFI_ADJUST_CFA_OFFSET -24
13377 jmp irq_return
13378 CFI_ENDPROC
13379 -END(nmi)
13380 +ENDPROC(nmi)
13381
13382 ENTRY(int3)
13383 RING0_INT_FRAME
13384 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13385 call do_int3
13386 jmp ret_from_exception
13387 CFI_ENDPROC
13388 -END(int3)
13389 +ENDPROC(int3)
13390
13391 ENTRY(general_protection)
13392 RING0_EC_FRAME
13393 pushl_cfi $do_general_protection
13394 jmp error_code
13395 CFI_ENDPROC
13396 -END(general_protection)
13397 +ENDPROC(general_protection)
13398
13399 #ifdef CONFIG_KVM_GUEST
13400 ENTRY(async_page_fault)
13401 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13402 pushl_cfi $do_async_page_fault
13403 jmp error_code
13404 CFI_ENDPROC
13405 -END(async_page_fault)
13406 +ENDPROC(async_page_fault)
13407 #endif
13408
13409 /*
13410 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13411 index 6419bb0..00440bf 100644
13412 --- a/arch/x86/kernel/entry_64.S
13413 +++ b/arch/x86/kernel/entry_64.S
13414 @@ -55,6 +55,8 @@
13415 #include <asm/paravirt.h>
13416 #include <asm/ftrace.h>
13417 #include <asm/percpu.h>
13418 +#include <asm/pgtable.h>
13419 +#include <asm/alternative-asm.h>
13420
13421 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13422 #include <linux/elf-em.h>
13423 @@ -68,8 +70,9 @@
13424 #ifdef CONFIG_FUNCTION_TRACER
13425 #ifdef CONFIG_DYNAMIC_FTRACE
13426 ENTRY(mcount)
13427 + pax_force_retaddr
13428 retq
13429 -END(mcount)
13430 +ENDPROC(mcount)
13431
13432 ENTRY(ftrace_caller)
13433 cmpl $0, function_trace_stop
13434 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13435 #endif
13436
13437 GLOBAL(ftrace_stub)
13438 + pax_force_retaddr
13439 retq
13440 -END(ftrace_caller)
13441 +ENDPROC(ftrace_caller)
13442
13443 #else /* ! CONFIG_DYNAMIC_FTRACE */
13444 ENTRY(mcount)
13445 @@ -112,6 +116,7 @@ ENTRY(mcount)
13446 #endif
13447
13448 GLOBAL(ftrace_stub)
13449 + pax_force_retaddr
13450 retq
13451
13452 trace:
13453 @@ -121,12 +126,13 @@ trace:
13454 movq 8(%rbp), %rsi
13455 subq $MCOUNT_INSN_SIZE, %rdi
13456
13457 + pax_force_fptr ftrace_trace_function
13458 call *ftrace_trace_function
13459
13460 MCOUNT_RESTORE_FRAME
13461
13462 jmp ftrace_stub
13463 -END(mcount)
13464 +ENDPROC(mcount)
13465 #endif /* CONFIG_DYNAMIC_FTRACE */
13466 #endif /* CONFIG_FUNCTION_TRACER */
13467
13468 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13469
13470 MCOUNT_RESTORE_FRAME
13471
13472 + pax_force_retaddr
13473 retq
13474 -END(ftrace_graph_caller)
13475 +ENDPROC(ftrace_graph_caller)
13476
13477 GLOBAL(return_to_handler)
13478 subq $24, %rsp
13479 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13480 movq 8(%rsp), %rdx
13481 movq (%rsp), %rax
13482 addq $24, %rsp
13483 + pax_force_fptr %rdi
13484 jmp *%rdi
13485 #endif
13486
13487 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13488 ENDPROC(native_usergs_sysret64)
13489 #endif /* CONFIG_PARAVIRT */
13490
13491 + .macro ljmpq sel, off
13492 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13493 + .byte 0x48; ljmp *1234f(%rip)
13494 + .pushsection .rodata
13495 + .align 16
13496 + 1234: .quad \off; .word \sel
13497 + .popsection
13498 +#else
13499 + pushq $\sel
13500 + pushq $\off
13501 + lretq
13502 +#endif
13503 + .endm
13504 +
13505 + .macro pax_enter_kernel
13506 + pax_set_fptr_mask
13507 +#ifdef CONFIG_PAX_KERNEXEC
13508 + call pax_enter_kernel
13509 +#endif
13510 + .endm
13511 +
13512 + .macro pax_exit_kernel
13513 +#ifdef CONFIG_PAX_KERNEXEC
13514 + call pax_exit_kernel
13515 +#endif
13516 + .endm
13517 +
13518 +#ifdef CONFIG_PAX_KERNEXEC
13519 +ENTRY(pax_enter_kernel)
13520 + pushq %rdi
13521 +
13522 +#ifdef CONFIG_PARAVIRT
13523 + PV_SAVE_REGS(CLBR_RDI)
13524 +#endif
13525 +
13526 + GET_CR0_INTO_RDI
13527 + bts $16,%rdi
13528 + jnc 3f
13529 + mov %cs,%edi
13530 + cmp $__KERNEL_CS,%edi
13531 + jnz 2f
13532 +1:
13533 +
13534 +#ifdef CONFIG_PARAVIRT
13535 + PV_RESTORE_REGS(CLBR_RDI)
13536 +#endif
13537 +
13538 + popq %rdi
13539 + pax_force_retaddr
13540 + retq
13541 +
13542 +2: ljmpq __KERNEL_CS,1f
13543 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13544 +4: SET_RDI_INTO_CR0
13545 + jmp 1b
13546 +ENDPROC(pax_enter_kernel)
13547 +
13548 +ENTRY(pax_exit_kernel)
13549 + pushq %rdi
13550 +
13551 +#ifdef CONFIG_PARAVIRT
13552 + PV_SAVE_REGS(CLBR_RDI)
13553 +#endif
13554 +
13555 + mov %cs,%rdi
13556 + cmp $__KERNEXEC_KERNEL_CS,%edi
13557 + jz 2f
13558 +1:
13559 +
13560 +#ifdef CONFIG_PARAVIRT
13561 + PV_RESTORE_REGS(CLBR_RDI);
13562 +#endif
13563 +
13564 + popq %rdi
13565 + pax_force_retaddr
13566 + retq
13567 +
13568 +2: GET_CR0_INTO_RDI
13569 + btr $16,%rdi
13570 + ljmpq __KERNEL_CS,3f
13571 +3: SET_RDI_INTO_CR0
13572 + jmp 1b
13573 +#ifdef CONFIG_PARAVIRT
13574 + PV_RESTORE_REGS(CLBR_RDI);
13575 +#endif
13576 +
13577 + popq %rdi
13578 + pax_force_retaddr
13579 + retq
13580 +ENDPROC(pax_exit_kernel)
13581 +#endif
13582 +
13583 + .macro pax_enter_kernel_user
13584 + pax_set_fptr_mask
13585 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13586 + call pax_enter_kernel_user
13587 +#endif
13588 + .endm
13589 +
13590 + .macro pax_exit_kernel_user
13591 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13592 + call pax_exit_kernel_user
13593 +#endif
13594 +#ifdef CONFIG_PAX_RANDKSTACK
13595 + push %rax
13596 + call pax_randomize_kstack
13597 + pop %rax
13598 +#endif
13599 + .endm
13600 +
13601 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13602 +ENTRY(pax_enter_kernel_user)
13603 + pushq %rdi
13604 + pushq %rbx
13605 +
13606 +#ifdef CONFIG_PARAVIRT
13607 + PV_SAVE_REGS(CLBR_RDI)
13608 +#endif
13609 +
13610 + GET_CR3_INTO_RDI
13611 + mov %rdi,%rbx
13612 + add $__START_KERNEL_map,%rbx
13613 + sub phys_base(%rip),%rbx
13614 +
13615 +#ifdef CONFIG_PARAVIRT
13616 + pushq %rdi
13617 + cmpl $0, pv_info+PARAVIRT_enabled
13618 + jz 1f
13619 + i = 0
13620 + .rept USER_PGD_PTRS
13621 + mov i*8(%rbx),%rsi
13622 + mov $0,%sil
13623 + lea i*8(%rbx),%rdi
13624 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13625 + i = i + 1
13626 + .endr
13627 + jmp 2f
13628 +1:
13629 +#endif
13630 +
13631 + i = 0
13632 + .rept USER_PGD_PTRS
13633 + movb $0,i*8(%rbx)
13634 + i = i + 1
13635 + .endr
13636 +
13637 +#ifdef CONFIG_PARAVIRT
13638 +2: popq %rdi
13639 +#endif
13640 + SET_RDI_INTO_CR3
13641 +
13642 +#ifdef CONFIG_PAX_KERNEXEC
13643 + GET_CR0_INTO_RDI
13644 + bts $16,%rdi
13645 + SET_RDI_INTO_CR0
13646 +#endif
13647 +
13648 +#ifdef CONFIG_PARAVIRT
13649 + PV_RESTORE_REGS(CLBR_RDI)
13650 +#endif
13651 +
13652 + popq %rbx
13653 + popq %rdi
13654 + pax_force_retaddr
13655 + retq
13656 +ENDPROC(pax_enter_kernel_user)
13657 +
13658 +ENTRY(pax_exit_kernel_user)
13659 + push %rdi
13660 +
13661 +#ifdef CONFIG_PARAVIRT
13662 + pushq %rbx
13663 + PV_SAVE_REGS(CLBR_RDI)
13664 +#endif
13665 +
13666 +#ifdef CONFIG_PAX_KERNEXEC
13667 + GET_CR0_INTO_RDI
13668 + btr $16,%rdi
13669 + SET_RDI_INTO_CR0
13670 +#endif
13671 +
13672 + GET_CR3_INTO_RDI
13673 + add $__START_KERNEL_map,%rdi
13674 + sub phys_base(%rip),%rdi
13675 +
13676 +#ifdef CONFIG_PARAVIRT
13677 + cmpl $0, pv_info+PARAVIRT_enabled
13678 + jz 1f
13679 + mov %rdi,%rbx
13680 + i = 0
13681 + .rept USER_PGD_PTRS
13682 + mov i*8(%rbx),%rsi
13683 + mov $0x67,%sil
13684 + lea i*8(%rbx),%rdi
13685 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13686 + i = i + 1
13687 + .endr
13688 + jmp 2f
13689 +1:
13690 +#endif
13691 +
13692 + i = 0
13693 + .rept USER_PGD_PTRS
13694 + movb $0x67,i*8(%rdi)
13695 + i = i + 1
13696 + .endr
13697 +
13698 +#ifdef CONFIG_PARAVIRT
13699 +2: PV_RESTORE_REGS(CLBR_RDI)
13700 + popq %rbx
13701 +#endif
13702 +
13703 + popq %rdi
13704 + pax_force_retaddr
13705 + retq
13706 +ENDPROC(pax_exit_kernel_user)
13707 +#endif
13708 +
13709 +.macro pax_erase_kstack
13710 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13711 + call pax_erase_kstack
13712 +#endif
13713 +.endm
13714 +
13715 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13716 +/*
13717 + * r11: thread_info
13718 + * rcx, rdx: can be clobbered
13719 + */
13720 +ENTRY(pax_erase_kstack)
13721 + pushq %rdi
13722 + pushq %rax
13723 + pushq %r11
13724 +
13725 + GET_THREAD_INFO(%r11)
13726 + mov TI_lowest_stack(%r11), %rdi
13727 + mov $-0xBEEF, %rax
13728 + std
13729 +
13730 +1: mov %edi, %ecx
13731 + and $THREAD_SIZE_asm - 1, %ecx
13732 + shr $3, %ecx
13733 + repne scasq
13734 + jecxz 2f
13735 +
13736 + cmp $2*8, %ecx
13737 + jc 2f
13738 +
13739 + mov $2*8, %ecx
13740 + repe scasq
13741 + jecxz 2f
13742 + jne 1b
13743 +
13744 +2: cld
13745 + mov %esp, %ecx
13746 + sub %edi, %ecx
13747 +
13748 + cmp $THREAD_SIZE_asm, %rcx
13749 + jb 3f
13750 + ud2
13751 +3:
13752 +
13753 + shr $3, %ecx
13754 + rep stosq
13755 +
13756 + mov TI_task_thread_sp0(%r11), %rdi
13757 + sub $256, %rdi
13758 + mov %rdi, TI_lowest_stack(%r11)
13759 +
13760 + popq %r11
13761 + popq %rax
13762 + popq %rdi
13763 + pax_force_retaddr
13764 + ret
13765 +ENDPROC(pax_erase_kstack)
13766 +#endif
13767
13768 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13769 #ifdef CONFIG_TRACE_IRQFLAGS
13770 @@ -231,8 +515,8 @@ ENDPROC(native_usergs_sysret64)
13771 .endm
13772
13773 .macro UNFAKE_STACK_FRAME
13774 - addq $8*6, %rsp
13775 - CFI_ADJUST_CFA_OFFSET -(6*8)
13776 + addq $8*6 + ARG_SKIP, %rsp
13777 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
13778 .endm
13779
13780 /*
13781 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13782 movq %rsp, %rsi
13783
13784 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13785 - testl $3, CS(%rdi)
13786 + testb $3, CS(%rdi)
13787 je 1f
13788 SWAPGS
13789 /*
13790 @@ -350,9 +634,10 @@ ENTRY(save_rest)
13791 movq_cfi r15, R15+16
13792 movq %r11, 8(%rsp) /* return address */
13793 FIXUP_TOP_OF_STACK %r11, 16
13794 + pax_force_retaddr
13795 ret
13796 CFI_ENDPROC
13797 -END(save_rest)
13798 +ENDPROC(save_rest)
13799
13800 /* save complete stack frame */
13801 .pushsection .kprobes.text, "ax"
13802 @@ -381,9 +666,10 @@ ENTRY(save_paranoid)
13803 js 1f /* negative -> in kernel */
13804 SWAPGS
13805 xorl %ebx,%ebx
13806 -1: ret
13807 +1: pax_force_retaddr_bts
13808 + ret
13809 CFI_ENDPROC
13810 -END(save_paranoid)
13811 +ENDPROC(save_paranoid)
13812 .popsection
13813
13814 /*
13815 @@ -405,7 +691,7 @@ ENTRY(ret_from_fork)
13816
13817 RESTORE_REST
13818
13819 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13820 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13821 je int_ret_from_sys_call
13822
13823 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13824 @@ -415,7 +701,7 @@ ENTRY(ret_from_fork)
13825 jmp ret_from_sys_call # go to the SYSRET fastpath
13826
13827 CFI_ENDPROC
13828 -END(ret_from_fork)
13829 +ENDPROC(ret_from_fork)
13830
13831 /*
13832 * System call entry. Up to 6 arguments in registers are supported.
13833 @@ -451,7 +737,7 @@ END(ret_from_fork)
13834 ENTRY(system_call)
13835 CFI_STARTPROC simple
13836 CFI_SIGNAL_FRAME
13837 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13838 + CFI_DEF_CFA rsp,0
13839 CFI_REGISTER rip,rcx
13840 /*CFI_REGISTER rflags,r11*/
13841 SWAPGS_UNSAFE_STACK
13842 @@ -464,12 +750,13 @@ ENTRY(system_call_after_swapgs)
13843
13844 movq %rsp,PER_CPU_VAR(old_rsp)
13845 movq PER_CPU_VAR(kernel_stack),%rsp
13846 + SAVE_ARGS 8*6,0
13847 + pax_enter_kernel_user
13848 /*
13849 * No need to follow this irqs off/on section - it's straight
13850 * and short:
13851 */
13852 ENABLE_INTERRUPTS(CLBR_NONE)
13853 - SAVE_ARGS 8,0
13854 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13855 movq %rcx,RIP-ARGOFFSET(%rsp)
13856 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13857 @@ -479,7 +766,7 @@ ENTRY(system_call_after_swapgs)
13858 system_call_fastpath:
13859 cmpq $__NR_syscall_max,%rax
13860 ja badsys
13861 - movq %r10,%rcx
13862 + movq R10-ARGOFFSET(%rsp),%rcx
13863 call *sys_call_table(,%rax,8) # XXX: rip relative
13864 movq %rax,RAX-ARGOFFSET(%rsp)
13865 /*
13866 @@ -498,6 +785,8 @@ sysret_check:
13867 andl %edi,%edx
13868 jnz sysret_careful
13869 CFI_REMEMBER_STATE
13870 + pax_exit_kernel_user
13871 + pax_erase_kstack
13872 /*
13873 * sysretq will re-enable interrupts:
13874 */
13875 @@ -549,14 +838,18 @@ badsys:
13876 * jump back to the normal fast path.
13877 */
13878 auditsys:
13879 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
13880 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13881 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13882 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13883 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13884 movq %rax,%rsi /* 2nd arg: syscall number */
13885 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13886 call audit_syscall_entry
13887 +
13888 + pax_erase_kstack
13889 +
13890 LOAD_ARGS 0 /* reload call-clobbered registers */
13891 + pax_set_fptr_mask
13892 jmp system_call_fastpath
13893
13894 /*
13895 @@ -586,16 +879,20 @@ tracesys:
13896 FIXUP_TOP_OF_STACK %rdi
13897 movq %rsp,%rdi
13898 call syscall_trace_enter
13899 +
13900 + pax_erase_kstack
13901 +
13902 /*
13903 * Reload arg registers from stack in case ptrace changed them.
13904 * We don't reload %rax because syscall_trace_enter() returned
13905 * the value it wants us to use in the table lookup.
13906 */
13907 LOAD_ARGS ARGOFFSET, 1
13908 + pax_set_fptr_mask
13909 RESTORE_REST
13910 cmpq $__NR_syscall_max,%rax
13911 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13912 - movq %r10,%rcx /* fixup for C */
13913 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13914 call *sys_call_table(,%rax,8)
13915 movq %rax,RAX-ARGOFFSET(%rsp)
13916 /* Use IRET because user could have changed frame */
13917 @@ -607,7 +904,7 @@ tracesys:
13918 GLOBAL(int_ret_from_sys_call)
13919 DISABLE_INTERRUPTS(CLBR_NONE)
13920 TRACE_IRQS_OFF
13921 - testl $3,CS-ARGOFFSET(%rsp)
13922 + testb $3,CS-ARGOFFSET(%rsp)
13923 je retint_restore_args
13924 movl $_TIF_ALLWORK_MASK,%edi
13925 /* edi: mask to check */
13926 @@ -664,7 +961,7 @@ int_restore_rest:
13927 TRACE_IRQS_OFF
13928 jmp int_with_check
13929 CFI_ENDPROC
13930 -END(system_call)
13931 +ENDPROC(system_call)
13932
13933 /*
13934 * Certain special system calls that need to save a complete full stack frame.
13935 @@ -680,7 +977,7 @@ ENTRY(\label)
13936 call \func
13937 jmp ptregscall_common
13938 CFI_ENDPROC
13939 -END(\label)
13940 +ENDPROC(\label)
13941 .endm
13942
13943 PTREGSCALL stub_clone, sys_clone, %r8
13944 @@ -698,9 +995,10 @@ ENTRY(ptregscall_common)
13945 movq_cfi_restore R12+8, r12
13946 movq_cfi_restore RBP+8, rbp
13947 movq_cfi_restore RBX+8, rbx
13948 + pax_force_retaddr
13949 ret $REST_SKIP /* pop extended registers */
13950 CFI_ENDPROC
13951 -END(ptregscall_common)
13952 +ENDPROC(ptregscall_common)
13953
13954 ENTRY(stub_execve)
13955 CFI_STARTPROC
13956 @@ -715,7 +1013,7 @@ ENTRY(stub_execve)
13957 RESTORE_REST
13958 jmp int_ret_from_sys_call
13959 CFI_ENDPROC
13960 -END(stub_execve)
13961 +ENDPROC(stub_execve)
13962
13963 /*
13964 * sigreturn is special because it needs to restore all registers on return.
13965 @@ -733,7 +1031,7 @@ ENTRY(stub_rt_sigreturn)
13966 RESTORE_REST
13967 jmp int_ret_from_sys_call
13968 CFI_ENDPROC
13969 -END(stub_rt_sigreturn)
13970 +ENDPROC(stub_rt_sigreturn)
13971
13972 /*
13973 * Build the entry stubs and pointer table with some assembler magic.
13974 @@ -768,7 +1066,7 @@ vector=vector+1
13975 2: jmp common_interrupt
13976 .endr
13977 CFI_ENDPROC
13978 -END(irq_entries_start)
13979 +ENDPROC(irq_entries_start)
13980
13981 .previous
13982 END(interrupt)
13983 @@ -789,6 +1087,16 @@ END(interrupt)
13984 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
13985 SAVE_ARGS_IRQ
13986 PARTIAL_FRAME 0
13987 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13988 + testb $3, CS(%rdi)
13989 + jnz 1f
13990 + pax_enter_kernel
13991 + jmp 2f
13992 +1: pax_enter_kernel_user
13993 +2:
13994 +#else
13995 + pax_enter_kernel
13996 +#endif
13997 call \func
13998 .endm
13999
14000 @@ -820,7 +1128,7 @@ ret_from_intr:
14001
14002 exit_intr:
14003 GET_THREAD_INFO(%rcx)
14004 - testl $3,CS-ARGOFFSET(%rsp)
14005 + testb $3,CS-ARGOFFSET(%rsp)
14006 je retint_kernel
14007
14008 /* Interrupt came from user space */
14009 @@ -842,12 +1150,16 @@ retint_swapgs: /* return to user-space */
14010 * The iretq could re-enable interrupts:
14011 */
14012 DISABLE_INTERRUPTS(CLBR_ANY)
14013 + pax_exit_kernel_user
14014 + pax_erase_kstack
14015 TRACE_IRQS_IRETQ
14016 SWAPGS
14017 jmp restore_args
14018
14019 retint_restore_args: /* return to kernel space */
14020 DISABLE_INTERRUPTS(CLBR_ANY)
14021 + pax_exit_kernel
14022 + pax_force_retaddr RIP-ARGOFFSET
14023 /*
14024 * The iretq could re-enable interrupts:
14025 */
14026 @@ -936,7 +1248,7 @@ ENTRY(retint_kernel)
14027 #endif
14028
14029 CFI_ENDPROC
14030 -END(common_interrupt)
14031 +ENDPROC(common_interrupt)
14032 /*
14033 * End of kprobes section
14034 */
14035 @@ -952,7 +1264,7 @@ ENTRY(\sym)
14036 interrupt \do_sym
14037 jmp ret_from_intr
14038 CFI_ENDPROC
14039 -END(\sym)
14040 +ENDPROC(\sym)
14041 .endm
14042
14043 #ifdef CONFIG_SMP
14044 @@ -1017,12 +1329,22 @@ ENTRY(\sym)
14045 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14046 call error_entry
14047 DEFAULT_FRAME 0
14048 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14049 + testb $3, CS(%rsp)
14050 + jnz 1f
14051 + pax_enter_kernel
14052 + jmp 2f
14053 +1: pax_enter_kernel_user
14054 +2:
14055 +#else
14056 + pax_enter_kernel
14057 +#endif
14058 movq %rsp,%rdi /* pt_regs pointer */
14059 xorl %esi,%esi /* no error code */
14060 call \do_sym
14061 jmp error_exit /* %ebx: no swapgs flag */
14062 CFI_ENDPROC
14063 -END(\sym)
14064 +ENDPROC(\sym)
14065 .endm
14066
14067 .macro paranoidzeroentry sym do_sym
14068 @@ -1034,15 +1356,25 @@ ENTRY(\sym)
14069 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14070 call save_paranoid
14071 TRACE_IRQS_OFF
14072 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14073 + testb $3, CS(%rsp)
14074 + jnz 1f
14075 + pax_enter_kernel
14076 + jmp 2f
14077 +1: pax_enter_kernel_user
14078 +2:
14079 +#else
14080 + pax_enter_kernel
14081 +#endif
14082 movq %rsp,%rdi /* pt_regs pointer */
14083 xorl %esi,%esi /* no error code */
14084 call \do_sym
14085 jmp paranoid_exit /* %ebx: no swapgs flag */
14086 CFI_ENDPROC
14087 -END(\sym)
14088 +ENDPROC(\sym)
14089 .endm
14090
14091 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14092 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14093 .macro paranoidzeroentry_ist sym do_sym ist
14094 ENTRY(\sym)
14095 INTR_FRAME
14096 @@ -1052,14 +1384,30 @@ ENTRY(\sym)
14097 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14098 call save_paranoid
14099 TRACE_IRQS_OFF
14100 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14101 + testb $3, CS(%rsp)
14102 + jnz 1f
14103 + pax_enter_kernel
14104 + jmp 2f
14105 +1: pax_enter_kernel_user
14106 +2:
14107 +#else
14108 + pax_enter_kernel
14109 +#endif
14110 movq %rsp,%rdi /* pt_regs pointer */
14111 xorl %esi,%esi /* no error code */
14112 +#ifdef CONFIG_SMP
14113 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14114 + lea init_tss(%r12), %r12
14115 +#else
14116 + lea init_tss(%rip), %r12
14117 +#endif
14118 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14119 call \do_sym
14120 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14121 jmp paranoid_exit /* %ebx: no swapgs flag */
14122 CFI_ENDPROC
14123 -END(\sym)
14124 +ENDPROC(\sym)
14125 .endm
14126
14127 .macro errorentry sym do_sym
14128 @@ -1070,13 +1418,23 @@ ENTRY(\sym)
14129 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14130 call error_entry
14131 DEFAULT_FRAME 0
14132 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14133 + testb $3, CS(%rsp)
14134 + jnz 1f
14135 + pax_enter_kernel
14136 + jmp 2f
14137 +1: pax_enter_kernel_user
14138 +2:
14139 +#else
14140 + pax_enter_kernel
14141 +#endif
14142 movq %rsp,%rdi /* pt_regs pointer */
14143 movq ORIG_RAX(%rsp),%rsi /* get error code */
14144 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14145 call \do_sym
14146 jmp error_exit /* %ebx: no swapgs flag */
14147 CFI_ENDPROC
14148 -END(\sym)
14149 +ENDPROC(\sym)
14150 .endm
14151
14152 /* error code is on the stack already */
14153 @@ -1089,13 +1447,23 @@ ENTRY(\sym)
14154 call save_paranoid
14155 DEFAULT_FRAME 0
14156 TRACE_IRQS_OFF
14157 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14158 + testb $3, CS(%rsp)
14159 + jnz 1f
14160 + pax_enter_kernel
14161 + jmp 2f
14162 +1: pax_enter_kernel_user
14163 +2:
14164 +#else
14165 + pax_enter_kernel
14166 +#endif
14167 movq %rsp,%rdi /* pt_regs pointer */
14168 movq ORIG_RAX(%rsp),%rsi /* get error code */
14169 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14170 call \do_sym
14171 jmp paranoid_exit /* %ebx: no swapgs flag */
14172 CFI_ENDPROC
14173 -END(\sym)
14174 +ENDPROC(\sym)
14175 .endm
14176
14177 zeroentry divide_error do_divide_error
14178 @@ -1125,9 +1493,10 @@ gs_change:
14179 2: mfence /* workaround */
14180 SWAPGS
14181 popfq_cfi
14182 + pax_force_retaddr
14183 ret
14184 CFI_ENDPROC
14185 -END(native_load_gs_index)
14186 +ENDPROC(native_load_gs_index)
14187
14188 .section __ex_table,"a"
14189 .align 8
14190 @@ -1149,13 +1518,14 @@ ENTRY(kernel_thread_helper)
14191 * Here we are in the child and the registers are set as they were
14192 * at kernel_thread() invocation in the parent.
14193 */
14194 + pax_force_fptr %rsi
14195 call *%rsi
14196 # exit
14197 mov %eax, %edi
14198 call do_exit
14199 ud2 # padding for call trace
14200 CFI_ENDPROC
14201 -END(kernel_thread_helper)
14202 +ENDPROC(kernel_thread_helper)
14203
14204 /*
14205 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14206 @@ -1182,11 +1552,11 @@ ENTRY(kernel_execve)
14207 RESTORE_REST
14208 testq %rax,%rax
14209 je int_ret_from_sys_call
14210 - RESTORE_ARGS
14211 UNFAKE_STACK_FRAME
14212 + pax_force_retaddr
14213 ret
14214 CFI_ENDPROC
14215 -END(kernel_execve)
14216 +ENDPROC(kernel_execve)
14217
14218 /* Call softirq on interrupt stack. Interrupts are off. */
14219 ENTRY(call_softirq)
14220 @@ -1204,9 +1574,10 @@ ENTRY(call_softirq)
14221 CFI_DEF_CFA_REGISTER rsp
14222 CFI_ADJUST_CFA_OFFSET -8
14223 decl PER_CPU_VAR(irq_count)
14224 + pax_force_retaddr
14225 ret
14226 CFI_ENDPROC
14227 -END(call_softirq)
14228 +ENDPROC(call_softirq)
14229
14230 #ifdef CONFIG_XEN
14231 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14232 @@ -1244,7 +1615,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14233 decl PER_CPU_VAR(irq_count)
14234 jmp error_exit
14235 CFI_ENDPROC
14236 -END(xen_do_hypervisor_callback)
14237 +ENDPROC(xen_do_hypervisor_callback)
14238
14239 /*
14240 * Hypervisor uses this for application faults while it executes.
14241 @@ -1303,7 +1674,7 @@ ENTRY(xen_failsafe_callback)
14242 SAVE_ALL
14243 jmp error_exit
14244 CFI_ENDPROC
14245 -END(xen_failsafe_callback)
14246 +ENDPROC(xen_failsafe_callback)
14247
14248 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14249 xen_hvm_callback_vector xen_evtchn_do_upcall
14250 @@ -1352,16 +1723,31 @@ ENTRY(paranoid_exit)
14251 TRACE_IRQS_OFF
14252 testl %ebx,%ebx /* swapgs needed? */
14253 jnz paranoid_restore
14254 - testl $3,CS(%rsp)
14255 + testb $3,CS(%rsp)
14256 jnz paranoid_userspace
14257 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14258 + pax_exit_kernel
14259 + TRACE_IRQS_IRETQ 0
14260 + SWAPGS_UNSAFE_STACK
14261 + RESTORE_ALL 8
14262 + pax_force_retaddr_bts
14263 + jmp irq_return
14264 +#endif
14265 paranoid_swapgs:
14266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14267 + pax_exit_kernel_user
14268 +#else
14269 + pax_exit_kernel
14270 +#endif
14271 TRACE_IRQS_IRETQ 0
14272 SWAPGS_UNSAFE_STACK
14273 RESTORE_ALL 8
14274 jmp irq_return
14275 paranoid_restore:
14276 + pax_exit_kernel
14277 TRACE_IRQS_IRETQ 0
14278 RESTORE_ALL 8
14279 + pax_force_retaddr_bts
14280 jmp irq_return
14281 paranoid_userspace:
14282 GET_THREAD_INFO(%rcx)
14283 @@ -1390,7 +1776,7 @@ paranoid_schedule:
14284 TRACE_IRQS_OFF
14285 jmp paranoid_userspace
14286 CFI_ENDPROC
14287 -END(paranoid_exit)
14288 +ENDPROC(paranoid_exit)
14289
14290 /*
14291 * Exception entry point. This expects an error code/orig_rax on the stack.
14292 @@ -1417,12 +1803,13 @@ ENTRY(error_entry)
14293 movq_cfi r14, R14+8
14294 movq_cfi r15, R15+8
14295 xorl %ebx,%ebx
14296 - testl $3,CS+8(%rsp)
14297 + testb $3,CS+8(%rsp)
14298 je error_kernelspace
14299 error_swapgs:
14300 SWAPGS
14301 error_sti:
14302 TRACE_IRQS_OFF
14303 + pax_force_retaddr_bts
14304 ret
14305
14306 /*
14307 @@ -1449,7 +1836,7 @@ bstep_iret:
14308 movq %rcx,RIP+8(%rsp)
14309 jmp error_swapgs
14310 CFI_ENDPROC
14311 -END(error_entry)
14312 +ENDPROC(error_entry)
14313
14314
14315 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14316 @@ -1469,7 +1856,7 @@ ENTRY(error_exit)
14317 jnz retint_careful
14318 jmp retint_swapgs
14319 CFI_ENDPROC
14320 -END(error_exit)
14321 +ENDPROC(error_exit)
14322
14323
14324 /* runs on exception stack */
14325 @@ -1481,6 +1868,16 @@ ENTRY(nmi)
14326 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14327 call save_paranoid
14328 DEFAULT_FRAME 0
14329 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14330 + testb $3, CS(%rsp)
14331 + jnz 1f
14332 + pax_enter_kernel
14333 + jmp 2f
14334 +1: pax_enter_kernel_user
14335 +2:
14336 +#else
14337 + pax_enter_kernel
14338 +#endif
14339 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14340 movq %rsp,%rdi
14341 movq $-1,%rsi
14342 @@ -1491,12 +1888,28 @@ ENTRY(nmi)
14343 DISABLE_INTERRUPTS(CLBR_NONE)
14344 testl %ebx,%ebx /* swapgs needed? */
14345 jnz nmi_restore
14346 - testl $3,CS(%rsp)
14347 + testb $3,CS(%rsp)
14348 jnz nmi_userspace
14349 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14350 + pax_exit_kernel
14351 + SWAPGS_UNSAFE_STACK
14352 + RESTORE_ALL 8
14353 + pax_force_retaddr_bts
14354 + jmp irq_return
14355 +#endif
14356 nmi_swapgs:
14357 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14358 + pax_exit_kernel_user
14359 +#else
14360 + pax_exit_kernel
14361 +#endif
14362 SWAPGS_UNSAFE_STACK
14363 + RESTORE_ALL 8
14364 + jmp irq_return
14365 nmi_restore:
14366 + pax_exit_kernel
14367 RESTORE_ALL 8
14368 + pax_force_retaddr_bts
14369 jmp irq_return
14370 nmi_userspace:
14371 GET_THREAD_INFO(%rcx)
14372 @@ -1525,14 +1938,14 @@ nmi_schedule:
14373 jmp paranoid_exit
14374 CFI_ENDPROC
14375 #endif
14376 -END(nmi)
14377 +ENDPROC(nmi)
14378
14379 ENTRY(ignore_sysret)
14380 CFI_STARTPROC
14381 mov $-ENOSYS,%eax
14382 sysret
14383 CFI_ENDPROC
14384 -END(ignore_sysret)
14385 +ENDPROC(ignore_sysret)
14386
14387 /*
14388 * End of kprobes section
14389 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14390 index c9a281f..ce2f317 100644
14391 --- a/arch/x86/kernel/ftrace.c
14392 +++ b/arch/x86/kernel/ftrace.c
14393 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14394 static const void *mod_code_newcode; /* holds the text to write to the IP */
14395
14396 static unsigned nmi_wait_count;
14397 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14398 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14399
14400 int ftrace_arch_read_dyn_info(char *buf, int size)
14401 {
14402 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14403
14404 r = snprintf(buf, size, "%u %u",
14405 nmi_wait_count,
14406 - atomic_read(&nmi_update_count));
14407 + atomic_read_unchecked(&nmi_update_count));
14408 return r;
14409 }
14410
14411 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14412
14413 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14414 smp_rmb();
14415 + pax_open_kernel();
14416 ftrace_mod_code();
14417 - atomic_inc(&nmi_update_count);
14418 + pax_close_kernel();
14419 + atomic_inc_unchecked(&nmi_update_count);
14420 }
14421 /* Must have previous changes seen before executions */
14422 smp_mb();
14423 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14424 {
14425 unsigned char replaced[MCOUNT_INSN_SIZE];
14426
14427 + ip = ktla_ktva(ip);
14428 +
14429 /*
14430 * Note: Due to modules and __init, code can
14431 * disappear and change, we need to protect against faulting
14432 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14433 unsigned char old[MCOUNT_INSN_SIZE], *new;
14434 int ret;
14435
14436 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14437 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14438 new = ftrace_call_replace(ip, (unsigned long)func);
14439 ret = ftrace_modify_code(ip, old, new);
14440
14441 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14442 {
14443 unsigned char code[MCOUNT_INSN_SIZE];
14444
14445 + ip = ktla_ktva(ip);
14446 +
14447 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14448 return -EFAULT;
14449
14450 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14451 index 3bb0850..55a56f4 100644
14452 --- a/arch/x86/kernel/head32.c
14453 +++ b/arch/x86/kernel/head32.c
14454 @@ -19,6 +19,7 @@
14455 #include <asm/io_apic.h>
14456 #include <asm/bios_ebda.h>
14457 #include <asm/tlbflush.h>
14458 +#include <asm/boot.h>
14459
14460 static void __init i386_default_early_setup(void)
14461 {
14462 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14463 {
14464 memblock_init();
14465
14466 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14467 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14468
14469 #ifdef CONFIG_BLK_DEV_INITRD
14470 /* Reserve INITRD */
14471 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14472 index ce0be7c..c41476e 100644
14473 --- a/arch/x86/kernel/head_32.S
14474 +++ b/arch/x86/kernel/head_32.S
14475 @@ -25,6 +25,12 @@
14476 /* Physical address */
14477 #define pa(X) ((X) - __PAGE_OFFSET)
14478
14479 +#ifdef CONFIG_PAX_KERNEXEC
14480 +#define ta(X) (X)
14481 +#else
14482 +#define ta(X) ((X) - __PAGE_OFFSET)
14483 +#endif
14484 +
14485 /*
14486 * References to members of the new_cpu_data structure.
14487 */
14488 @@ -54,11 +60,7 @@
14489 * and small than max_low_pfn, otherwise will waste some page table entries
14490 */
14491
14492 -#if PTRS_PER_PMD > 1
14493 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14494 -#else
14495 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14496 -#endif
14497 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14498
14499 /* Number of possible pages in the lowmem region */
14500 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14501 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14502 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14503
14504 /*
14505 + * Real beginning of normal "text" segment
14506 + */
14507 +ENTRY(stext)
14508 +ENTRY(_stext)
14509 +
14510 +/*
14511 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14512 * %esi points to the real-mode code as a 32-bit pointer.
14513 * CS and DS must be 4 GB flat segments, but we don't depend on
14514 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14515 * can.
14516 */
14517 __HEAD
14518 +
14519 +#ifdef CONFIG_PAX_KERNEXEC
14520 + jmp startup_32
14521 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14522 +.fill PAGE_SIZE-5,1,0xcc
14523 +#endif
14524 +
14525 ENTRY(startup_32)
14526 movl pa(stack_start),%ecx
14527
14528 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14529 2:
14530 leal -__PAGE_OFFSET(%ecx),%esp
14531
14532 +#ifdef CONFIG_SMP
14533 + movl $pa(cpu_gdt_table),%edi
14534 + movl $__per_cpu_load,%eax
14535 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14536 + rorl $16,%eax
14537 + movb %al,__KERNEL_PERCPU + 4(%edi)
14538 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14539 + movl $__per_cpu_end - 1,%eax
14540 + subl $__per_cpu_start,%eax
14541 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14542 +#endif
14543 +
14544 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14545 + movl $NR_CPUS,%ecx
14546 + movl $pa(cpu_gdt_table),%edi
14547 +1:
14548 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14549 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14550 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14551 + addl $PAGE_SIZE_asm,%edi
14552 + loop 1b
14553 +#endif
14554 +
14555 +#ifdef CONFIG_PAX_KERNEXEC
14556 + movl $pa(boot_gdt),%edi
14557 + movl $__LOAD_PHYSICAL_ADDR,%eax
14558 + movw %ax,__BOOT_CS + 2(%edi)
14559 + rorl $16,%eax
14560 + movb %al,__BOOT_CS + 4(%edi)
14561 + movb %ah,__BOOT_CS + 7(%edi)
14562 + rorl $16,%eax
14563 +
14564 + ljmp $(__BOOT_CS),$1f
14565 +1:
14566 +
14567 + movl $NR_CPUS,%ecx
14568 + movl $pa(cpu_gdt_table),%edi
14569 + addl $__PAGE_OFFSET,%eax
14570 +1:
14571 + movw %ax,__KERNEL_CS + 2(%edi)
14572 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14573 + rorl $16,%eax
14574 + movb %al,__KERNEL_CS + 4(%edi)
14575 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14576 + movb %ah,__KERNEL_CS + 7(%edi)
14577 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14578 + rorl $16,%eax
14579 + addl $PAGE_SIZE_asm,%edi
14580 + loop 1b
14581 +#endif
14582 +
14583 /*
14584 * Clear BSS first so that there are no surprises...
14585 */
14586 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14587 movl %eax, pa(max_pfn_mapped)
14588
14589 /* Do early initialization of the fixmap area */
14590 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14591 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14592 +#ifdef CONFIG_COMPAT_VDSO
14593 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14594 +#else
14595 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14596 +#endif
14597 #else /* Not PAE */
14598
14599 page_pde_offset = (__PAGE_OFFSET >> 20);
14600 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14601 movl %eax, pa(max_pfn_mapped)
14602
14603 /* Do early initialization of the fixmap area */
14604 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14605 - movl %eax,pa(initial_page_table+0xffc)
14606 +#ifdef CONFIG_COMPAT_VDSO
14607 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14608 +#else
14609 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14610 +#endif
14611 #endif
14612
14613 #ifdef CONFIG_PARAVIRT
14614 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14615 cmpl $num_subarch_entries, %eax
14616 jae bad_subarch
14617
14618 - movl pa(subarch_entries)(,%eax,4), %eax
14619 - subl $__PAGE_OFFSET, %eax
14620 - jmp *%eax
14621 + jmp *pa(subarch_entries)(,%eax,4)
14622
14623 bad_subarch:
14624 WEAK(lguest_entry)
14625 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14626 __INITDATA
14627
14628 subarch_entries:
14629 - .long default_entry /* normal x86/PC */
14630 - .long lguest_entry /* lguest hypervisor */
14631 - .long xen_entry /* Xen hypervisor */
14632 - .long default_entry /* Moorestown MID */
14633 + .long ta(default_entry) /* normal x86/PC */
14634 + .long ta(lguest_entry) /* lguest hypervisor */
14635 + .long ta(xen_entry) /* Xen hypervisor */
14636 + .long ta(default_entry) /* Moorestown MID */
14637 num_subarch_entries = (. - subarch_entries) / 4
14638 .previous
14639 #else
14640 @@ -312,6 +382,7 @@ default_entry:
14641 orl %edx,%eax
14642 movl %eax,%cr4
14643
14644 +#ifdef CONFIG_X86_PAE
14645 testb $X86_CR4_PAE, %al # check if PAE is enabled
14646 jz 6f
14647
14648 @@ -340,6 +411,9 @@ default_entry:
14649 /* Make changes effective */
14650 wrmsr
14651
14652 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14653 +#endif
14654 +
14655 6:
14656
14657 /*
14658 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14659 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14660 movl %eax,%ss # after changing gdt.
14661
14662 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14663 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14664 movl %eax,%ds
14665 movl %eax,%es
14666
14667 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14668 */
14669 cmpb $0,ready
14670 jne 1f
14671 - movl $gdt_page,%eax
14672 + movl $cpu_gdt_table,%eax
14673 movl $stack_canary,%ecx
14674 +#ifdef CONFIG_SMP
14675 + addl $__per_cpu_load,%ecx
14676 +#endif
14677 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14678 shrl $16, %ecx
14679 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14680 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14681 1:
14682 -#endif
14683 movl $(__KERNEL_STACK_CANARY),%eax
14684 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14685 + movl $(__USER_DS),%eax
14686 +#else
14687 + xorl %eax,%eax
14688 +#endif
14689 movl %eax,%gs
14690
14691 xorl %eax,%eax # Clear LDT
14692 @@ -558,22 +639,22 @@ early_page_fault:
14693 jmp early_fault
14694
14695 early_fault:
14696 - cld
14697 #ifdef CONFIG_PRINTK
14698 + cmpl $1,%ss:early_recursion_flag
14699 + je hlt_loop
14700 + incl %ss:early_recursion_flag
14701 + cld
14702 pusha
14703 movl $(__KERNEL_DS),%eax
14704 movl %eax,%ds
14705 movl %eax,%es
14706 - cmpl $2,early_recursion_flag
14707 - je hlt_loop
14708 - incl early_recursion_flag
14709 movl %cr2,%eax
14710 pushl %eax
14711 pushl %edx /* trapno */
14712 pushl $fault_msg
14713 call printk
14714 +; call dump_stack
14715 #endif
14716 - call dump_stack
14717 hlt_loop:
14718 hlt
14719 jmp hlt_loop
14720 @@ -581,8 +662,11 @@ hlt_loop:
14721 /* This is the default interrupt "handler" :-) */
14722 ALIGN
14723 ignore_int:
14724 - cld
14725 #ifdef CONFIG_PRINTK
14726 + cmpl $2,%ss:early_recursion_flag
14727 + je hlt_loop
14728 + incl %ss:early_recursion_flag
14729 + cld
14730 pushl %eax
14731 pushl %ecx
14732 pushl %edx
14733 @@ -591,9 +675,6 @@ ignore_int:
14734 movl $(__KERNEL_DS),%eax
14735 movl %eax,%ds
14736 movl %eax,%es
14737 - cmpl $2,early_recursion_flag
14738 - je hlt_loop
14739 - incl early_recursion_flag
14740 pushl 16(%esp)
14741 pushl 24(%esp)
14742 pushl 32(%esp)
14743 @@ -622,29 +703,43 @@ ENTRY(initial_code)
14744 /*
14745 * BSS section
14746 */
14747 -__PAGE_ALIGNED_BSS
14748 - .align PAGE_SIZE
14749 #ifdef CONFIG_X86_PAE
14750 +.section .initial_pg_pmd,"a",@progbits
14751 initial_pg_pmd:
14752 .fill 1024*KPMDS,4,0
14753 #else
14754 +.section .initial_page_table,"a",@progbits
14755 ENTRY(initial_page_table)
14756 .fill 1024,4,0
14757 #endif
14758 +.section .initial_pg_fixmap,"a",@progbits
14759 initial_pg_fixmap:
14760 .fill 1024,4,0
14761 +.section .empty_zero_page,"a",@progbits
14762 ENTRY(empty_zero_page)
14763 .fill 4096,1,0
14764 +.section .swapper_pg_dir,"a",@progbits
14765 ENTRY(swapper_pg_dir)
14766 +#ifdef CONFIG_X86_PAE
14767 + .fill 4,8,0
14768 +#else
14769 .fill 1024,4,0
14770 +#endif
14771 +
14772 +/*
14773 + * The IDT has to be page-aligned to simplify the Pentium
14774 + * F0 0F bug workaround.. We have a special link segment
14775 + * for this.
14776 + */
14777 +.section .idt,"a",@progbits
14778 +ENTRY(idt_table)
14779 + .fill 256,8,0
14780
14781 /*
14782 * This starts the data section.
14783 */
14784 #ifdef CONFIG_X86_PAE
14785 -__PAGE_ALIGNED_DATA
14786 - /* Page-aligned for the benefit of paravirt? */
14787 - .align PAGE_SIZE
14788 +.section .initial_page_table,"a",@progbits
14789 ENTRY(initial_page_table)
14790 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14791 # if KPMDS == 3
14792 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14793 # error "Kernel PMDs should be 1, 2 or 3"
14794 # endif
14795 .align PAGE_SIZE /* needs to be page-sized too */
14796 +
14797 +#ifdef CONFIG_PAX_PER_CPU_PGD
14798 +ENTRY(cpu_pgd)
14799 + .rept NR_CPUS
14800 + .fill 4,8,0
14801 + .endr
14802 +#endif
14803 +
14804 #endif
14805
14806 .data
14807 .balign 4
14808 ENTRY(stack_start)
14809 - .long init_thread_union+THREAD_SIZE
14810 + .long init_thread_union+THREAD_SIZE-8
14811
14812 +ready: .byte 0
14813 +
14814 +.section .rodata,"a",@progbits
14815 early_recursion_flag:
14816 .long 0
14817
14818 -ready: .byte 0
14819 -
14820 int_msg:
14821 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14822
14823 @@ -707,7 +811,7 @@ fault_msg:
14824 .word 0 # 32 bit align gdt_desc.address
14825 boot_gdt_descr:
14826 .word __BOOT_DS+7
14827 - .long boot_gdt - __PAGE_OFFSET
14828 + .long pa(boot_gdt)
14829
14830 .word 0 # 32-bit align idt_desc.address
14831 idt_descr:
14832 @@ -718,7 +822,7 @@ idt_descr:
14833 .word 0 # 32 bit align gdt_desc.address
14834 ENTRY(early_gdt_descr)
14835 .word GDT_ENTRIES*8-1
14836 - .long gdt_page /* Overwritten for secondary CPUs */
14837 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14838
14839 /*
14840 * The boot_gdt must mirror the equivalent in setup.S and is
14841 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14842 .align L1_CACHE_BYTES
14843 ENTRY(boot_gdt)
14844 .fill GDT_ENTRY_BOOT_CS,8,0
14845 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14846 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14847 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14848 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14849 +
14850 + .align PAGE_SIZE_asm
14851 +ENTRY(cpu_gdt_table)
14852 + .rept NR_CPUS
14853 + .quad 0x0000000000000000 /* NULL descriptor */
14854 + .quad 0x0000000000000000 /* 0x0b reserved */
14855 + .quad 0x0000000000000000 /* 0x13 reserved */
14856 + .quad 0x0000000000000000 /* 0x1b reserved */
14857 +
14858 +#ifdef CONFIG_PAX_KERNEXEC
14859 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14860 +#else
14861 + .quad 0x0000000000000000 /* 0x20 unused */
14862 +#endif
14863 +
14864 + .quad 0x0000000000000000 /* 0x28 unused */
14865 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14866 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14867 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14868 + .quad 0x0000000000000000 /* 0x4b reserved */
14869 + .quad 0x0000000000000000 /* 0x53 reserved */
14870 + .quad 0x0000000000000000 /* 0x5b reserved */
14871 +
14872 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14873 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14874 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14875 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14876 +
14877 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14878 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14879 +
14880 + /*
14881 + * Segments used for calling PnP BIOS have byte granularity.
14882 + * The code segments and data segments have fixed 64k limits,
14883 + * the transfer segment sizes are set at run time.
14884 + */
14885 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14886 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14887 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14888 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14889 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14890 +
14891 + /*
14892 + * The APM segments have byte granularity and their bases
14893 + * are set at run time. All have 64k limits.
14894 + */
14895 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14896 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14897 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14898 +
14899 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14900 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14901 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14902 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14903 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14904 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14905 +
14906 + /* Be sure this is zeroed to avoid false validations in Xen */
14907 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14908 + .endr
14909 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14910 index e11e394..9aebc5d 100644
14911 --- a/arch/x86/kernel/head_64.S
14912 +++ b/arch/x86/kernel/head_64.S
14913 @@ -19,6 +19,8 @@
14914 #include <asm/cache.h>
14915 #include <asm/processor-flags.h>
14916 #include <asm/percpu.h>
14917 +#include <asm/cpufeature.h>
14918 +#include <asm/alternative-asm.h>
14919
14920 #ifdef CONFIG_PARAVIRT
14921 #include <asm/asm-offsets.h>
14922 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
14923 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14924 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14925 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14926 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14927 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14928 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
14929 +L3_VMALLOC_END = pud_index(VMALLOC_END)
14930 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14931 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14932
14933 .text
14934 __HEAD
14935 @@ -85,35 +93,23 @@ startup_64:
14936 */
14937 addq %rbp, init_level4_pgt + 0(%rip)
14938 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14939 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14940 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
14941 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14942 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14943
14944 addq %rbp, level3_ident_pgt + 0(%rip)
14945 +#ifndef CONFIG_XEN
14946 + addq %rbp, level3_ident_pgt + 8(%rip)
14947 +#endif
14948
14949 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14950 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14951 -
14952 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14953 -
14954 - /* Add an Identity mapping if I am above 1G */
14955 - leaq _text(%rip), %rdi
14956 - andq $PMD_PAGE_MASK, %rdi
14957 -
14958 - movq %rdi, %rax
14959 - shrq $PUD_SHIFT, %rax
14960 - andq $(PTRS_PER_PUD - 1), %rax
14961 - jz ident_complete
14962 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14963
14964 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14965 - leaq level3_ident_pgt(%rip), %rbx
14966 - movq %rdx, 0(%rbx, %rax, 8)
14967 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14968 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14969
14970 - movq %rdi, %rax
14971 - shrq $PMD_SHIFT, %rax
14972 - andq $(PTRS_PER_PMD - 1), %rax
14973 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14974 - leaq level2_spare_pgt(%rip), %rbx
14975 - movq %rdx, 0(%rbx, %rax, 8)
14976 -ident_complete:
14977 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14978 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14979
14980 /*
14981 * Fixup the kernel text+data virtual addresses. Note that
14982 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
14983 * after the boot processor executes this code.
14984 */
14985
14986 - /* Enable PAE mode and PGE */
14987 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14988 + /* Enable PAE mode and PSE/PGE */
14989 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14990 movq %rax, %cr4
14991
14992 /* Setup early boot stage 4 level pagetables. */
14993 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
14994 movl $MSR_EFER, %ecx
14995 rdmsr
14996 btsl $_EFER_SCE, %eax /* Enable System Call */
14997 - btl $20,%edi /* No Execute supported? */
14998 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14999 jnc 1f
15000 btsl $_EFER_NX, %eax
15001 + leaq init_level4_pgt(%rip), %rdi
15002 +#ifndef CONFIG_EFI
15003 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
15004 +#endif
15005 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
15006 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
15007 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
15008 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
15009 1: wrmsr /* Make changes effective */
15010
15011 /* Setup cr0 */
15012 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
15013 * jump. In addition we need to ensure %cs is set so we make this
15014 * a far return.
15015 */
15016 + pax_set_fptr_mask
15017 movq initial_code(%rip),%rax
15018 pushq $0 # fake return address to stop unwinder
15019 pushq $__KERNEL_CS # set correct cs
15020 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
15021 bad_address:
15022 jmp bad_address
15023
15024 - .section ".init.text","ax"
15025 + __INIT
15026 #ifdef CONFIG_EARLY_PRINTK
15027 .globl early_idt_handlers
15028 early_idt_handlers:
15029 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
15030 #endif /* EARLY_PRINTK */
15031 1: hlt
15032 jmp 1b
15033 + .previous
15034
15035 #ifdef CONFIG_EARLY_PRINTK
15036 + __INITDATA
15037 early_recursion_flag:
15038 .long 0
15039 + .previous
15040
15041 + .section .rodata,"a",@progbits
15042 early_idt_msg:
15043 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
15044 early_idt_ripmsg:
15045 .asciz "RIP %s\n"
15046 -#endif /* CONFIG_EARLY_PRINTK */
15047 .previous
15048 +#endif /* CONFIG_EARLY_PRINTK */
15049
15050 + .section .rodata,"a",@progbits
15051 #define NEXT_PAGE(name) \
15052 .balign PAGE_SIZE; \
15053 ENTRY(name)
15054 @@ -338,7 +348,6 @@ ENTRY(name)
15055 i = i + 1 ; \
15056 .endr
15057
15058 - .data
15059 /*
15060 * This default setting generates an ident mapping at address 0x100000
15061 * and a mapping for the kernel that precisely maps virtual address
15062 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
15063 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15064 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15065 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15066 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15067 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
15068 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
15069 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
15070 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15071 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15072 .org init_level4_pgt + L4_START_KERNEL*8, 0
15073 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15074 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15075
15076 +#ifdef CONFIG_PAX_PER_CPU_PGD
15077 +NEXT_PAGE(cpu_pgd)
15078 + .rept NR_CPUS
15079 + .fill 512,8,0
15080 + .endr
15081 +#endif
15082 +
15083 NEXT_PAGE(level3_ident_pgt)
15084 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15085 +#ifdef CONFIG_XEN
15086 .fill 511,8,0
15087 +#else
15088 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15089 + .fill 510,8,0
15090 +#endif
15091 +
15092 +NEXT_PAGE(level3_vmalloc_start_pgt)
15093 + .fill 512,8,0
15094 +
15095 +NEXT_PAGE(level3_vmalloc_end_pgt)
15096 + .fill 512,8,0
15097 +
15098 +NEXT_PAGE(level3_vmemmap_pgt)
15099 + .fill L3_VMEMMAP_START,8,0
15100 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15101
15102 NEXT_PAGE(level3_kernel_pgt)
15103 .fill L3_START_KERNEL,8,0
15104 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
15105 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15106 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15107
15108 +NEXT_PAGE(level2_vmemmap_pgt)
15109 + .fill 512,8,0
15110 +
15111 NEXT_PAGE(level2_fixmap_pgt)
15112 - .fill 506,8,0
15113 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15114 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15115 - .fill 5,8,0
15116 + .fill 507,8,0
15117 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15118 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15119 + .fill 4,8,0
15120
15121 -NEXT_PAGE(level1_fixmap_pgt)
15122 +NEXT_PAGE(level1_vsyscall_pgt)
15123 .fill 512,8,0
15124
15125 -NEXT_PAGE(level2_ident_pgt)
15126 - /* Since I easily can, map the first 1G.
15127 + /* Since I easily can, map the first 2G.
15128 * Don't set NX because code runs from these pages.
15129 */
15130 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15131 +NEXT_PAGE(level2_ident_pgt)
15132 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15133
15134 NEXT_PAGE(level2_kernel_pgt)
15135 /*
15136 @@ -389,33 +429,55 @@ NEXT_PAGE(level2_kernel_pgt)
15137 * If you want to increase this then increase MODULES_VADDR
15138 * too.)
15139 */
15140 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15141 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15142 -
15143 -NEXT_PAGE(level2_spare_pgt)
15144 - .fill 512, 8, 0
15145 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15146
15147 #undef PMDS
15148 #undef NEXT_PAGE
15149
15150 - .data
15151 + .align PAGE_SIZE
15152 +ENTRY(cpu_gdt_table)
15153 + .rept NR_CPUS
15154 + .quad 0x0000000000000000 /* NULL descriptor */
15155 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15156 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15157 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15158 + .quad 0x00cffb000000ffff /* __USER32_CS */
15159 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15160 + .quad 0x00affb000000ffff /* __USER_CS */
15161 +
15162 +#ifdef CONFIG_PAX_KERNEXEC
15163 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15164 +#else
15165 + .quad 0x0 /* unused */
15166 +#endif
15167 +
15168 + .quad 0,0 /* TSS */
15169 + .quad 0,0 /* LDT */
15170 + .quad 0,0,0 /* three TLS descriptors */
15171 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15172 + /* asm/segment.h:GDT_ENTRIES must match this */
15173 +
15174 + /* zero the remaining page */
15175 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15176 + .endr
15177 +
15178 .align 16
15179 .globl early_gdt_descr
15180 early_gdt_descr:
15181 .word GDT_ENTRIES*8-1
15182 early_gdt_descr_base:
15183 - .quad INIT_PER_CPU_VAR(gdt_page)
15184 + .quad cpu_gdt_table
15185
15186 ENTRY(phys_base)
15187 /* This must match the first entry in level2_kernel_pgt */
15188 .quad 0x0000000000000000
15189
15190 #include "../../x86/xen/xen-head.S"
15191 -
15192 - .section .bss, "aw", @nobits
15193 +
15194 + .section .rodata,"a",@progbits
15195 .align L1_CACHE_BYTES
15196 ENTRY(idt_table)
15197 - .skip IDT_ENTRIES * 16
15198 + .fill 512,8,0
15199
15200 __PAGE_ALIGNED_BSS
15201 .align PAGE_SIZE
15202 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15203 index 9c3bd4a..e1d9b35 100644
15204 --- a/arch/x86/kernel/i386_ksyms_32.c
15205 +++ b/arch/x86/kernel/i386_ksyms_32.c
15206 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15207 EXPORT_SYMBOL(cmpxchg8b_emu);
15208 #endif
15209
15210 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15211 +
15212 /* Networking helper routines. */
15213 EXPORT_SYMBOL(csum_partial_copy_generic);
15214 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15215 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15216
15217 EXPORT_SYMBOL(__get_user_1);
15218 EXPORT_SYMBOL(__get_user_2);
15219 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15220
15221 EXPORT_SYMBOL(csum_partial);
15222 EXPORT_SYMBOL(empty_zero_page);
15223 +
15224 +#ifdef CONFIG_PAX_KERNEXEC
15225 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15226 +#endif
15227 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15228 index 6104852..6114160 100644
15229 --- a/arch/x86/kernel/i8259.c
15230 +++ b/arch/x86/kernel/i8259.c
15231 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15232 "spurious 8259A interrupt: IRQ%d.\n", irq);
15233 spurious_irq_mask |= irqmask;
15234 }
15235 - atomic_inc(&irq_err_count);
15236 + atomic_inc_unchecked(&irq_err_count);
15237 /*
15238 * Theoretically we do not have to handle this IRQ,
15239 * but in Linux this does not cause problems and is
15240 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15241 index 43e9ccf..44ccf6f 100644
15242 --- a/arch/x86/kernel/init_task.c
15243 +++ b/arch/x86/kernel/init_task.c
15244 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15245 * way process stacks are handled. This is done by having a special
15246 * "init_task" linker map entry..
15247 */
15248 -union thread_union init_thread_union __init_task_data =
15249 - { INIT_THREAD_INFO(init_task) };
15250 +union thread_union init_thread_union __init_task_data;
15251
15252 /*
15253 * Initial task structure.
15254 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15255 * section. Since TSS's are completely CPU-local, we want them
15256 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15257 */
15258 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15259 -
15260 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15261 +EXPORT_SYMBOL(init_tss);
15262 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15263 index 8c96897..be66bfa 100644
15264 --- a/arch/x86/kernel/ioport.c
15265 +++ b/arch/x86/kernel/ioport.c
15266 @@ -6,6 +6,7 @@
15267 #include <linux/sched.h>
15268 #include <linux/kernel.h>
15269 #include <linux/capability.h>
15270 +#include <linux/security.h>
15271 #include <linux/errno.h>
15272 #include <linux/types.h>
15273 #include <linux/ioport.h>
15274 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15275
15276 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15277 return -EINVAL;
15278 +#ifdef CONFIG_GRKERNSEC_IO
15279 + if (turn_on && grsec_disable_privio) {
15280 + gr_handle_ioperm();
15281 + return -EPERM;
15282 + }
15283 +#endif
15284 if (turn_on && !capable(CAP_SYS_RAWIO))
15285 return -EPERM;
15286
15287 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15288 * because the ->io_bitmap_max value must match the bitmap
15289 * contents:
15290 */
15291 - tss = &per_cpu(init_tss, get_cpu());
15292 + tss = init_tss + get_cpu();
15293
15294 if (turn_on)
15295 bitmap_clear(t->io_bitmap_ptr, from, num);
15296 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15297 return -EINVAL;
15298 /* Trying to gain more privileges? */
15299 if (level > old) {
15300 +#ifdef CONFIG_GRKERNSEC_IO
15301 + if (grsec_disable_privio) {
15302 + gr_handle_iopl();
15303 + return -EPERM;
15304 + }
15305 +#endif
15306 if (!capable(CAP_SYS_RAWIO))
15307 return -EPERM;
15308 }
15309 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15310 index 6c0802e..bea25ae 100644
15311 --- a/arch/x86/kernel/irq.c
15312 +++ b/arch/x86/kernel/irq.c
15313 @@ -17,7 +17,7 @@
15314 #include <asm/mce.h>
15315 #include <asm/hw_irq.h>
15316
15317 -atomic_t irq_err_count;
15318 +atomic_unchecked_t irq_err_count;
15319
15320 /* Function pointer for generic interrupt vector handling */
15321 void (*x86_platform_ipi_callback)(void) = NULL;
15322 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15323 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15324 seq_printf(p, " Machine check polls\n");
15325 #endif
15326 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15327 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15328 #if defined(CONFIG_X86_IO_APIC)
15329 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15330 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15331 #endif
15332 return 0;
15333 }
15334 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15335
15336 u64 arch_irq_stat(void)
15337 {
15338 - u64 sum = atomic_read(&irq_err_count);
15339 + u64 sum = atomic_read_unchecked(&irq_err_count);
15340
15341 #ifdef CONFIG_X86_IO_APIC
15342 - sum += atomic_read(&irq_mis_count);
15343 + sum += atomic_read_unchecked(&irq_mis_count);
15344 #endif
15345 return sum;
15346 }
15347 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15348 index 7209070..cbcd71a 100644
15349 --- a/arch/x86/kernel/irq_32.c
15350 +++ b/arch/x86/kernel/irq_32.c
15351 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15352 __asm__ __volatile__("andl %%esp,%0" :
15353 "=r" (sp) : "0" (THREAD_SIZE - 1));
15354
15355 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15356 + return sp < STACK_WARN;
15357 }
15358
15359 static void print_stack_overflow(void)
15360 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15361 * per-CPU IRQ handling contexts (thread information and stack)
15362 */
15363 union irq_ctx {
15364 - struct thread_info tinfo;
15365 - u32 stack[THREAD_SIZE/sizeof(u32)];
15366 + unsigned long previous_esp;
15367 + u32 stack[THREAD_SIZE/sizeof(u32)];
15368 } __attribute__((aligned(THREAD_SIZE)));
15369
15370 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15371 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15372 static inline int
15373 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15374 {
15375 - union irq_ctx *curctx, *irqctx;
15376 + union irq_ctx *irqctx;
15377 u32 *isp, arg1, arg2;
15378
15379 - curctx = (union irq_ctx *) current_thread_info();
15380 irqctx = __this_cpu_read(hardirq_ctx);
15381
15382 /*
15383 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15384 * handler) we can't do that and just have to keep using the
15385 * current stack (which is the irq stack already after all)
15386 */
15387 - if (unlikely(curctx == irqctx))
15388 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15389 return 0;
15390
15391 /* build the stack frame on the IRQ stack */
15392 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15393 - irqctx->tinfo.task = curctx->tinfo.task;
15394 - irqctx->tinfo.previous_esp = current_stack_pointer;
15395 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15396 + irqctx->previous_esp = current_stack_pointer;
15397
15398 - /*
15399 - * Copy the softirq bits in preempt_count so that the
15400 - * softirq checks work in the hardirq context.
15401 - */
15402 - irqctx->tinfo.preempt_count =
15403 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15404 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15405 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15406 + __set_fs(MAKE_MM_SEG(0));
15407 +#endif
15408
15409 if (unlikely(overflow))
15410 call_on_stack(print_stack_overflow, isp);
15411 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15412 : "0" (irq), "1" (desc), "2" (isp),
15413 "D" (desc->handle_irq)
15414 : "memory", "cc", "ecx");
15415 +
15416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15417 + __set_fs(current_thread_info()->addr_limit);
15418 +#endif
15419 +
15420 return 1;
15421 }
15422
15423 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15424 */
15425 void __cpuinit irq_ctx_init(int cpu)
15426 {
15427 - union irq_ctx *irqctx;
15428 -
15429 if (per_cpu(hardirq_ctx, cpu))
15430 return;
15431
15432 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15433 - THREAD_FLAGS,
15434 - THREAD_ORDER));
15435 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15436 - irqctx->tinfo.cpu = cpu;
15437 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15438 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15439 -
15440 - per_cpu(hardirq_ctx, cpu) = irqctx;
15441 -
15442 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15443 - THREAD_FLAGS,
15444 - THREAD_ORDER));
15445 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15446 - irqctx->tinfo.cpu = cpu;
15447 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15448 -
15449 - per_cpu(softirq_ctx, cpu) = irqctx;
15450 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15451 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15452
15453 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15454 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15455 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15456 asmlinkage void do_softirq(void)
15457 {
15458 unsigned long flags;
15459 - struct thread_info *curctx;
15460 union irq_ctx *irqctx;
15461 u32 *isp;
15462
15463 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15464 local_irq_save(flags);
15465
15466 if (local_softirq_pending()) {
15467 - curctx = current_thread_info();
15468 irqctx = __this_cpu_read(softirq_ctx);
15469 - irqctx->tinfo.task = curctx->task;
15470 - irqctx->tinfo.previous_esp = current_stack_pointer;
15471 + irqctx->previous_esp = current_stack_pointer;
15472
15473 /* build the stack frame on the softirq stack */
15474 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15475 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15476 +
15477 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15478 + __set_fs(MAKE_MM_SEG(0));
15479 +#endif
15480
15481 call_on_stack(__do_softirq, isp);
15482 +
15483 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15484 + __set_fs(current_thread_info()->addr_limit);
15485 +#endif
15486 +
15487 /*
15488 * Shouldn't happen, we returned above if in_interrupt():
15489 */
15490 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15491 index 00354d4..187ae44 100644
15492 --- a/arch/x86/kernel/kgdb.c
15493 +++ b/arch/x86/kernel/kgdb.c
15494 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15495 #ifdef CONFIG_X86_32
15496 switch (regno) {
15497 case GDB_SS:
15498 - if (!user_mode_vm(regs))
15499 + if (!user_mode(regs))
15500 *(unsigned long *)mem = __KERNEL_DS;
15501 break;
15502 case GDB_SP:
15503 - if (!user_mode_vm(regs))
15504 + if (!user_mode(regs))
15505 *(unsigned long *)mem = kernel_stack_pointer(regs);
15506 break;
15507 case GDB_GS:
15508 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15509 case 'k':
15510 /* clear the trace bit */
15511 linux_regs->flags &= ~X86_EFLAGS_TF;
15512 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15513 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15514
15515 /* set the trace bit if we're stepping */
15516 if (remcomInBuffer[0] == 's') {
15517 linux_regs->flags |= X86_EFLAGS_TF;
15518 - atomic_set(&kgdb_cpu_doing_single_step,
15519 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15520 raw_smp_processor_id());
15521 }
15522
15523 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15524 return NOTIFY_DONE;
15525
15526 case DIE_DEBUG:
15527 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15528 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15529 if (user_mode(regs))
15530 return single_step_cont(regs, args);
15531 break;
15532 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15533 index 794bc95..c6e29e9 100644
15534 --- a/arch/x86/kernel/kprobes.c
15535 +++ b/arch/x86/kernel/kprobes.c
15536 @@ -117,8 +117,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15537 } __attribute__((packed)) *insn;
15538
15539 insn = (struct __arch_relative_insn *)from;
15540 +
15541 + pax_open_kernel();
15542 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15543 insn->op = op;
15544 + pax_close_kernel();
15545 }
15546
15547 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15548 @@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15549 kprobe_opcode_t opcode;
15550 kprobe_opcode_t *orig_opcodes = opcodes;
15551
15552 - if (search_exception_tables((unsigned long)opcodes))
15553 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15554 return 0; /* Page fault may occur on this address. */
15555
15556 retry:
15557 @@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15558 }
15559 }
15560 insn_get_length(&insn);
15561 + pax_open_kernel();
15562 memcpy(dest, insn.kaddr, insn.length);
15563 + pax_close_kernel();
15564
15565 #ifdef CONFIG_X86_64
15566 if (insn_rip_relative(&insn)) {
15567 @@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15568 (u8 *) dest;
15569 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15570 disp = (u8 *) dest + insn_offset_displacement(&insn);
15571 + pax_open_kernel();
15572 *(s32 *) disp = (s32) newdisp;
15573 + pax_close_kernel();
15574 }
15575 #endif
15576 return insn.length;
15577 @@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15578 */
15579 __copy_instruction(p->ainsn.insn, p->addr, 0);
15580
15581 - if (can_boost(p->addr))
15582 + if (can_boost(ktla_ktva(p->addr)))
15583 p->ainsn.boostable = 0;
15584 else
15585 p->ainsn.boostable = -1;
15586
15587 - p->opcode = *p->addr;
15588 + p->opcode = *(ktla_ktva(p->addr));
15589 }
15590
15591 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15592 @@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15593 * nor set current_kprobe, because it doesn't use single
15594 * stepping.
15595 */
15596 - regs->ip = (unsigned long)p->ainsn.insn;
15597 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15598 preempt_enable_no_resched();
15599 return;
15600 }
15601 @@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15602 if (p->opcode == BREAKPOINT_INSTRUCTION)
15603 regs->ip = (unsigned long)p->addr;
15604 else
15605 - regs->ip = (unsigned long)p->ainsn.insn;
15606 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15607 }
15608
15609 /*
15610 @@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15611 setup_singlestep(p, regs, kcb, 0);
15612 return 1;
15613 }
15614 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15615 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15616 /*
15617 * The breakpoint instruction was removed right
15618 * after we hit it. Another cpu has removed
15619 @@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15620 " movq %rax, 152(%rsp)\n"
15621 RESTORE_REGS_STRING
15622 " popfq\n"
15623 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15624 + " btsq $63,(%rsp)\n"
15625 +#endif
15626 #else
15627 " pushf\n"
15628 SAVE_REGS_STRING
15629 @@ -819,7 +829,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15630 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15631 {
15632 unsigned long *tos = stack_addr(regs);
15633 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15634 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15635 unsigned long orig_ip = (unsigned long)p->addr;
15636 kprobe_opcode_t *insn = p->ainsn.insn;
15637
15638 @@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15639 struct die_args *args = data;
15640 int ret = NOTIFY_DONE;
15641
15642 - if (args->regs && user_mode_vm(args->regs))
15643 + if (args->regs && user_mode(args->regs))
15644 return ret;
15645
15646 switch (val) {
15647 @@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15648 * Verify if the address gap is in 2GB range, because this uses
15649 * a relative jump.
15650 */
15651 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15652 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15653 if (abs(rel) > 0x7fffffff)
15654 return -ERANGE;
15655
15656 @@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15657 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15658
15659 /* Set probe function call */
15660 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15661 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15662
15663 /* Set returning jmp instruction at the tail of out-of-line buffer */
15664 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15665 - (u8 *)op->kp.addr + op->optinsn.size);
15666 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15667
15668 flush_icache_range((unsigned long) buf,
15669 (unsigned long) buf + TMPL_END_IDX +
15670 @@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15671 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15672
15673 /* Backup instructions which will be replaced by jump address */
15674 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15675 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15676 RELATIVE_ADDR_SIZE);
15677
15678 insn_buf[0] = RELATIVEJUMP_OPCODE;
15679 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15680 index a9c2116..a52d4fc 100644
15681 --- a/arch/x86/kernel/kvm.c
15682 +++ b/arch/x86/kernel/kvm.c
15683 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15684 pv_mmu_ops.set_pud = kvm_set_pud;
15685 #if PAGETABLE_LEVELS == 4
15686 pv_mmu_ops.set_pgd = kvm_set_pgd;
15687 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15688 #endif
15689 #endif
15690 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15691 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15692 index ea69726..604d066 100644
15693 --- a/arch/x86/kernel/ldt.c
15694 +++ b/arch/x86/kernel/ldt.c
15695 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15696 if (reload) {
15697 #ifdef CONFIG_SMP
15698 preempt_disable();
15699 - load_LDT(pc);
15700 + load_LDT_nolock(pc);
15701 if (!cpumask_equal(mm_cpumask(current->mm),
15702 cpumask_of(smp_processor_id())))
15703 smp_call_function(flush_ldt, current->mm, 1);
15704 preempt_enable();
15705 #else
15706 - load_LDT(pc);
15707 + load_LDT_nolock(pc);
15708 #endif
15709 }
15710 if (oldsize) {
15711 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15712 return err;
15713
15714 for (i = 0; i < old->size; i++)
15715 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15716 + write_ldt_entry(new->ldt, i, old->ldt + i);
15717 return 0;
15718 }
15719
15720 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15721 retval = copy_ldt(&mm->context, &old_mm->context);
15722 mutex_unlock(&old_mm->context.lock);
15723 }
15724 +
15725 + if (tsk == current) {
15726 + mm->context.vdso = 0;
15727 +
15728 +#ifdef CONFIG_X86_32
15729 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15730 + mm->context.user_cs_base = 0UL;
15731 + mm->context.user_cs_limit = ~0UL;
15732 +
15733 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15734 + cpus_clear(mm->context.cpu_user_cs_mask);
15735 +#endif
15736 +
15737 +#endif
15738 +#endif
15739 +
15740 + }
15741 +
15742 return retval;
15743 }
15744
15745 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15746 }
15747 }
15748
15749 +#ifdef CONFIG_PAX_SEGMEXEC
15750 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15751 + error = -EINVAL;
15752 + goto out_unlock;
15753 + }
15754 +#endif
15755 +
15756 fill_ldt(&ldt, &ldt_info);
15757 if (oldmode)
15758 ldt.avl = 0;
15759 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15760 index a3fa43b..8966f4c 100644
15761 --- a/arch/x86/kernel/machine_kexec_32.c
15762 +++ b/arch/x86/kernel/machine_kexec_32.c
15763 @@ -27,7 +27,7 @@
15764 #include <asm/cacheflush.h>
15765 #include <asm/debugreg.h>
15766
15767 -static void set_idt(void *newidt, __u16 limit)
15768 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15769 {
15770 struct desc_ptr curidt;
15771
15772 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15773 }
15774
15775
15776 -static void set_gdt(void *newgdt, __u16 limit)
15777 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15778 {
15779 struct desc_ptr curgdt;
15780
15781 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15782 }
15783
15784 control_page = page_address(image->control_code_page);
15785 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15786 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15787
15788 relocate_kernel_ptr = control_page;
15789 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15790 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15791 index 1a1b606..5c89b55 100644
15792 --- a/arch/x86/kernel/microcode_intel.c
15793 +++ b/arch/x86/kernel/microcode_intel.c
15794 @@ -440,13 +440,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15795
15796 static int get_ucode_user(void *to, const void *from, size_t n)
15797 {
15798 - return copy_from_user(to, from, n);
15799 + return copy_from_user(to, (const void __force_user *)from, n);
15800 }
15801
15802 static enum ucode_state
15803 request_microcode_user(int cpu, const void __user *buf, size_t size)
15804 {
15805 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15806 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15807 }
15808
15809 static void microcode_fini_cpu(int cpu)
15810 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15811 index 925179f..85bec6c 100644
15812 --- a/arch/x86/kernel/module.c
15813 +++ b/arch/x86/kernel/module.c
15814 @@ -36,15 +36,60 @@
15815 #define DEBUGP(fmt...)
15816 #endif
15817
15818 -void *module_alloc(unsigned long size)
15819 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15820 {
15821 if (PAGE_ALIGN(size) > MODULES_LEN)
15822 return NULL;
15823 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15824 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15825 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15826 -1, __builtin_return_address(0));
15827 }
15828
15829 +void *module_alloc(unsigned long size)
15830 +{
15831 +
15832 +#ifdef CONFIG_PAX_KERNEXEC
15833 + return __module_alloc(size, PAGE_KERNEL);
15834 +#else
15835 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15836 +#endif
15837 +
15838 +}
15839 +
15840 +#ifdef CONFIG_PAX_KERNEXEC
15841 +#ifdef CONFIG_X86_32
15842 +void *module_alloc_exec(unsigned long size)
15843 +{
15844 + struct vm_struct *area;
15845 +
15846 + if (size == 0)
15847 + return NULL;
15848 +
15849 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15850 + return area ? area->addr : NULL;
15851 +}
15852 +EXPORT_SYMBOL(module_alloc_exec);
15853 +
15854 +void module_free_exec(struct module *mod, void *module_region)
15855 +{
15856 + vunmap(module_region);
15857 +}
15858 +EXPORT_SYMBOL(module_free_exec);
15859 +#else
15860 +void module_free_exec(struct module *mod, void *module_region)
15861 +{
15862 + module_free(mod, module_region);
15863 +}
15864 +EXPORT_SYMBOL(module_free_exec);
15865 +
15866 +void *module_alloc_exec(unsigned long size)
15867 +{
15868 + return __module_alloc(size, PAGE_KERNEL_RX);
15869 +}
15870 +EXPORT_SYMBOL(module_alloc_exec);
15871 +#endif
15872 +#endif
15873 +
15874 #ifdef CONFIG_X86_32
15875 int apply_relocate(Elf32_Shdr *sechdrs,
15876 const char *strtab,
15877 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15878 unsigned int i;
15879 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15880 Elf32_Sym *sym;
15881 - uint32_t *location;
15882 + uint32_t *plocation, location;
15883
15884 DEBUGP("Applying relocate section %u to %u\n", relsec,
15885 sechdrs[relsec].sh_info);
15886 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15887 /* This is where to make the change */
15888 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15889 - + rel[i].r_offset;
15890 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15891 + location = (uint32_t)plocation;
15892 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15893 + plocation = ktla_ktva((void *)plocation);
15894 /* This is the symbol it is referring to. Note that all
15895 undefined symbols have been resolved. */
15896 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15897 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15898 switch (ELF32_R_TYPE(rel[i].r_info)) {
15899 case R_386_32:
15900 /* We add the value into the location given */
15901 - *location += sym->st_value;
15902 + pax_open_kernel();
15903 + *plocation += sym->st_value;
15904 + pax_close_kernel();
15905 break;
15906 case R_386_PC32:
15907 /* Add the value, subtract its postition */
15908 - *location += sym->st_value - (uint32_t)location;
15909 + pax_open_kernel();
15910 + *plocation += sym->st_value - location;
15911 + pax_close_kernel();
15912 break;
15913 default:
15914 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15915 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
15916 case R_X86_64_NONE:
15917 break;
15918 case R_X86_64_64:
15919 + pax_open_kernel();
15920 *(u64 *)loc = val;
15921 + pax_close_kernel();
15922 break;
15923 case R_X86_64_32:
15924 + pax_open_kernel();
15925 *(u32 *)loc = val;
15926 + pax_close_kernel();
15927 if (val != *(u32 *)loc)
15928 goto overflow;
15929 break;
15930 case R_X86_64_32S:
15931 + pax_open_kernel();
15932 *(s32 *)loc = val;
15933 + pax_close_kernel();
15934 if ((s64)val != *(s32 *)loc)
15935 goto overflow;
15936 break;
15937 case R_X86_64_PC32:
15938 val -= (u64)loc;
15939 + pax_open_kernel();
15940 *(u32 *)loc = val;
15941 + pax_close_kernel();
15942 +
15943 #if 0
15944 if ((s64)val != *(s32 *)loc)
15945 goto overflow;
15946 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
15947 index 676b8c7..870ba04 100644
15948 --- a/arch/x86/kernel/paravirt-spinlocks.c
15949 +++ b/arch/x86/kernel/paravirt-spinlocks.c
15950 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
15951 arch_spin_lock(lock);
15952 }
15953
15954 -struct pv_lock_ops pv_lock_ops = {
15955 +struct pv_lock_ops pv_lock_ops __read_only = {
15956 #ifdef CONFIG_SMP
15957 .spin_is_locked = __ticket_spin_is_locked,
15958 .spin_is_contended = __ticket_spin_is_contended,
15959 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
15960 index d90272e..2d54e8e 100644
15961 --- a/arch/x86/kernel/paravirt.c
15962 +++ b/arch/x86/kernel/paravirt.c
15963 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15964 {
15965 return x;
15966 }
15967 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15968 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15969 +#endif
15970
15971 void __init default_banner(void)
15972 {
15973 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 type)
15974 .pv_lock_ops = pv_lock_ops,
15975 #endif
15976 };
15977 +
15978 + pax_track_stack();
15979 +
15980 return *((void **)&tmpl + type);
15981 }
15982
15983 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
15984 if (opfunc == NULL)
15985 /* If there's no function, patch it with a ud2a (BUG) */
15986 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15987 - else if (opfunc == _paravirt_nop)
15988 + else if (opfunc == (void *)_paravirt_nop)
15989 /* If the operation is a nop, then nop the callsite */
15990 ret = paravirt_patch_nop();
15991
15992 /* identity functions just return their single argument */
15993 - else if (opfunc == _paravirt_ident_32)
15994 + else if (opfunc == (void *)_paravirt_ident_32)
15995 ret = paravirt_patch_ident_32(insnbuf, len);
15996 - else if (opfunc == _paravirt_ident_64)
15997 + else if (opfunc == (void *)_paravirt_ident_64)
15998 ret = paravirt_patch_ident_64(insnbuf, len);
15999 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
16000 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
16001 + ret = paravirt_patch_ident_64(insnbuf, len);
16002 +#endif
16003
16004 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
16005 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
16006 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
16007 if (insn_len > len || start == NULL)
16008 insn_len = len;
16009 else
16010 - memcpy(insnbuf, start, insn_len);
16011 + memcpy(insnbuf, ktla_ktva(start), insn_len);
16012
16013 return insn_len;
16014 }
16015 @@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
16016 preempt_enable();
16017 }
16018
16019 -struct pv_info pv_info = {
16020 +struct pv_info pv_info __read_only = {
16021 .name = "bare hardware",
16022 .paravirt_enabled = 0,
16023 .kernel_rpl = 0,
16024 @@ -313,16 +323,16 @@ struct pv_info pv_info = {
16025 #endif
16026 };
16027
16028 -struct pv_init_ops pv_init_ops = {
16029 +struct pv_init_ops pv_init_ops __read_only = {
16030 .patch = native_patch,
16031 };
16032
16033 -struct pv_time_ops pv_time_ops = {
16034 +struct pv_time_ops pv_time_ops __read_only = {
16035 .sched_clock = native_sched_clock,
16036 .steal_clock = native_steal_clock,
16037 };
16038
16039 -struct pv_irq_ops pv_irq_ops = {
16040 +struct pv_irq_ops pv_irq_ops __read_only = {
16041 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
16042 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
16043 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
16044 @@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
16045 #endif
16046 };
16047
16048 -struct pv_cpu_ops pv_cpu_ops = {
16049 +struct pv_cpu_ops pv_cpu_ops __read_only = {
16050 .cpuid = native_cpuid,
16051 .get_debugreg = native_get_debugreg,
16052 .set_debugreg = native_set_debugreg,
16053 @@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
16054 .end_context_switch = paravirt_nop,
16055 };
16056
16057 -struct pv_apic_ops pv_apic_ops = {
16058 +struct pv_apic_ops pv_apic_ops __read_only = {
16059 #ifdef CONFIG_X86_LOCAL_APIC
16060 .startup_ipi_hook = paravirt_nop,
16061 #endif
16062 };
16063
16064 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16065 +#ifdef CONFIG_X86_32
16066 +#ifdef CONFIG_X86_PAE
16067 +/* 64-bit pagetable entries */
16068 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16069 +#else
16070 /* 32-bit pagetable entries */
16071 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16072 +#endif
16073 #else
16074 /* 64-bit pagetable entries */
16075 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16076 #endif
16077
16078 -struct pv_mmu_ops pv_mmu_ops = {
16079 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16080
16081 .read_cr2 = native_read_cr2,
16082 .write_cr2 = native_write_cr2,
16083 @@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16084 .make_pud = PTE_IDENT,
16085
16086 .set_pgd = native_set_pgd,
16087 + .set_pgd_batched = native_set_pgd_batched,
16088 #endif
16089 #endif /* PAGETABLE_LEVELS >= 3 */
16090
16091 @@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16092 },
16093
16094 .set_fixmap = native_set_fixmap,
16095 +
16096 +#ifdef CONFIG_PAX_KERNEXEC
16097 + .pax_open_kernel = native_pax_open_kernel,
16098 + .pax_close_kernel = native_pax_close_kernel,
16099 +#endif
16100 +
16101 };
16102
16103 EXPORT_SYMBOL_GPL(pv_time_ops);
16104 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16105 index 35ccf75..67e7d4d 100644
16106 --- a/arch/x86/kernel/pci-iommu_table.c
16107 +++ b/arch/x86/kernel/pci-iommu_table.c
16108 @@ -2,7 +2,7 @@
16109 #include <asm/iommu_table.h>
16110 #include <linux/string.h>
16111 #include <linux/kallsyms.h>
16112 -
16113 +#include <linux/sched.h>
16114
16115 #define DEBUG 1
16116
16117 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
16118 {
16119 struct iommu_table_entry *p, *q, *x;
16120
16121 + pax_track_stack();
16122 +
16123 /* Simple cyclic dependency checker. */
16124 for (p = start; p < finish; p++) {
16125 q = find_dependents_of(start, finish, p);
16126 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16127 index e7e3b01..43c5af3 100644
16128 --- a/arch/x86/kernel/process.c
16129 +++ b/arch/x86/kernel/process.c
16130 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16131
16132 void free_thread_info(struct thread_info *ti)
16133 {
16134 - free_thread_xstate(ti->task);
16135 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16136 }
16137
16138 +static struct kmem_cache *task_struct_cachep;
16139 +
16140 void arch_task_cache_init(void)
16141 {
16142 - task_xstate_cachep =
16143 - kmem_cache_create("task_xstate", xstate_size,
16144 + /* create a slab on which task_structs can be allocated */
16145 + task_struct_cachep =
16146 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16147 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16148 +
16149 + task_xstate_cachep =
16150 + kmem_cache_create("task_xstate", xstate_size,
16151 __alignof__(union thread_xstate),
16152 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16153 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16154 +}
16155 +
16156 +struct task_struct *alloc_task_struct_node(int node)
16157 +{
16158 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16159 +}
16160 +
16161 +void free_task_struct(struct task_struct *task)
16162 +{
16163 + free_thread_xstate(task);
16164 + kmem_cache_free(task_struct_cachep, task);
16165 }
16166
16167 /*
16168 @@ -70,7 +87,7 @@ void exit_thread(void)
16169 unsigned long *bp = t->io_bitmap_ptr;
16170
16171 if (bp) {
16172 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16173 + struct tss_struct *tss = init_tss + get_cpu();
16174
16175 t->io_bitmap_ptr = NULL;
16176 clear_thread_flag(TIF_IO_BITMAP);
16177 @@ -106,7 +123,7 @@ void show_regs_common(void)
16178
16179 printk(KERN_CONT "\n");
16180 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16181 - current->pid, current->comm, print_tainted(),
16182 + task_pid_nr(current), current->comm, print_tainted(),
16183 init_utsname()->release,
16184 (int)strcspn(init_utsname()->version, " "),
16185 init_utsname()->version);
16186 @@ -120,6 +137,9 @@ void flush_thread(void)
16187 {
16188 struct task_struct *tsk = current;
16189
16190 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16191 + loadsegment(gs, 0);
16192 +#endif
16193 flush_ptrace_hw_breakpoint(tsk);
16194 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16195 /*
16196 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16197 regs.di = (unsigned long) arg;
16198
16199 #ifdef CONFIG_X86_32
16200 - regs.ds = __USER_DS;
16201 - regs.es = __USER_DS;
16202 + regs.ds = __KERNEL_DS;
16203 + regs.es = __KERNEL_DS;
16204 regs.fs = __KERNEL_PERCPU;
16205 - regs.gs = __KERNEL_STACK_CANARY;
16206 + savesegment(gs, regs.gs);
16207 #else
16208 regs.ss = __KERNEL_DS;
16209 #endif
16210 @@ -403,7 +423,7 @@ void default_idle(void)
16211 EXPORT_SYMBOL(default_idle);
16212 #endif
16213
16214 -void stop_this_cpu(void *dummy)
16215 +__noreturn void stop_this_cpu(void *dummy)
16216 {
16217 local_irq_disable();
16218 /*
16219 @@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
16220 }
16221 early_param("idle", idle_setup);
16222
16223 -unsigned long arch_align_stack(unsigned long sp)
16224 +#ifdef CONFIG_PAX_RANDKSTACK
16225 +void pax_randomize_kstack(struct pt_regs *regs)
16226 {
16227 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16228 - sp -= get_random_int() % 8192;
16229 - return sp & ~0xf;
16230 -}
16231 + struct thread_struct *thread = &current->thread;
16232 + unsigned long time;
16233
16234 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16235 -{
16236 - unsigned long range_end = mm->brk + 0x02000000;
16237 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16238 -}
16239 + if (!randomize_va_space)
16240 + return;
16241 +
16242 + if (v8086_mode(regs))
16243 + return;
16244
16245 + rdtscl(time);
16246 +
16247 + /* P4 seems to return a 0 LSB, ignore it */
16248 +#ifdef CONFIG_MPENTIUM4
16249 + time &= 0x3EUL;
16250 + time <<= 2;
16251 +#elif defined(CONFIG_X86_64)
16252 + time &= 0xFUL;
16253 + time <<= 4;
16254 +#else
16255 + time &= 0x1FUL;
16256 + time <<= 3;
16257 +#endif
16258 +
16259 + thread->sp0 ^= time;
16260 + load_sp0(init_tss + smp_processor_id(), thread);
16261 +
16262 +#ifdef CONFIG_X86_64
16263 + percpu_write(kernel_stack, thread->sp0);
16264 +#endif
16265 +}
16266 +#endif
16267 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16268 index 7a3b651..5a946f6 100644
16269 --- a/arch/x86/kernel/process_32.c
16270 +++ b/arch/x86/kernel/process_32.c
16271 @@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16272 unsigned long thread_saved_pc(struct task_struct *tsk)
16273 {
16274 return ((unsigned long *)tsk->thread.sp)[3];
16275 +//XXX return tsk->thread.eip;
16276 }
16277
16278 #ifndef CONFIG_SMP
16279 @@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, int all)
16280 unsigned long sp;
16281 unsigned short ss, gs;
16282
16283 - if (user_mode_vm(regs)) {
16284 + if (user_mode(regs)) {
16285 sp = regs->sp;
16286 ss = regs->ss & 0xffff;
16287 - gs = get_user_gs(regs);
16288 } else {
16289 sp = kernel_stack_pointer(regs);
16290 savesegment(ss, ss);
16291 - savesegment(gs, gs);
16292 }
16293 + gs = get_user_gs(regs);
16294
16295 show_regs_common();
16296
16297 @@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16298 struct task_struct *tsk;
16299 int err;
16300
16301 - childregs = task_pt_regs(p);
16302 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16303 *childregs = *regs;
16304 childregs->ax = 0;
16305 childregs->sp = sp;
16306
16307 p->thread.sp = (unsigned long) childregs;
16308 p->thread.sp0 = (unsigned long) (childregs+1);
16309 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16310
16311 p->thread.ip = (unsigned long) ret_from_fork;
16312
16313 @@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16314 struct thread_struct *prev = &prev_p->thread,
16315 *next = &next_p->thread;
16316 int cpu = smp_processor_id();
16317 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16318 + struct tss_struct *tss = init_tss + cpu;
16319 bool preload_fpu;
16320
16321 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16322 @@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16323 */
16324 lazy_save_gs(prev->gs);
16325
16326 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16327 + __set_fs(task_thread_info(next_p)->addr_limit);
16328 +#endif
16329 +
16330 /*
16331 * Load the per-thread Thread-Local Storage descriptor.
16332 */
16333 @@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16334 */
16335 arch_end_context_switch(next_p);
16336
16337 + percpu_write(current_task, next_p);
16338 + percpu_write(current_tinfo, &next_p->tinfo);
16339 +
16340 if (preload_fpu)
16341 __math_state_restore();
16342
16343 @@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16344 if (prev->gs | next->gs)
16345 lazy_load_gs(next->gs);
16346
16347 - percpu_write(current_task, next_p);
16348 -
16349 return prev_p;
16350 }
16351
16352 @@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_struct *p)
16353 } while (count++ < 16);
16354 return 0;
16355 }
16356 -
16357 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16358 index f693e44..3c979b2 100644
16359 --- a/arch/x86/kernel/process_64.c
16360 +++ b/arch/x86/kernel/process_64.c
16361 @@ -88,7 +88,7 @@ static void __exit_idle(void)
16362 void exit_idle(void)
16363 {
16364 /* idle loop has pid 0 */
16365 - if (current->pid)
16366 + if (task_pid_nr(current))
16367 return;
16368 __exit_idle();
16369 }
16370 @@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16371 struct pt_regs *childregs;
16372 struct task_struct *me = current;
16373
16374 - childregs = ((struct pt_regs *)
16375 - (THREAD_SIZE + task_stack_page(p))) - 1;
16376 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16377 *childregs = *regs;
16378
16379 childregs->ax = 0;
16380 @@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16381 p->thread.sp = (unsigned long) childregs;
16382 p->thread.sp0 = (unsigned long) (childregs+1);
16383 p->thread.usersp = me->thread.usersp;
16384 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16385
16386 set_tsk_thread_flag(p, TIF_FORK);
16387
16388 @@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16389 struct thread_struct *prev = &prev_p->thread;
16390 struct thread_struct *next = &next_p->thread;
16391 int cpu = smp_processor_id();
16392 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16393 + struct tss_struct *tss = init_tss + cpu;
16394 unsigned fsindex, gsindex;
16395 bool preload_fpu;
16396
16397 @@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16398 prev->usersp = percpu_read(old_rsp);
16399 percpu_write(old_rsp, next->usersp);
16400 percpu_write(current_task, next_p);
16401 + percpu_write(current_tinfo, &next_p->tinfo);
16402
16403 - percpu_write(kernel_stack,
16404 - (unsigned long)task_stack_page(next_p) +
16405 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16406 + percpu_write(kernel_stack, next->sp0);
16407
16408 /*
16409 * Now maybe reload the debug registers and handle I/O bitmaps
16410 @@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_struct *p)
16411 if (!p || p == current || p->state == TASK_RUNNING)
16412 return 0;
16413 stack = (unsigned long)task_stack_page(p);
16414 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16415 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16416 return 0;
16417 fp = *(u64 *)(p->thread.sp);
16418 do {
16419 - if (fp < (unsigned long)stack ||
16420 - fp >= (unsigned long)stack+THREAD_SIZE)
16421 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16422 return 0;
16423 ip = *(u64 *)(fp+8);
16424 if (!in_sched_functions(ip))
16425 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16426 index 8252879..d3219e0 100644
16427 --- a/arch/x86/kernel/ptrace.c
16428 +++ b/arch/x86/kernel/ptrace.c
16429 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16430 unsigned long addr, unsigned long data)
16431 {
16432 int ret;
16433 - unsigned long __user *datap = (unsigned long __user *)data;
16434 + unsigned long __user *datap = (__force unsigned long __user *)data;
16435
16436 switch (request) {
16437 /* read the word at location addr in the USER area. */
16438 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16439 if ((int) addr < 0)
16440 return -EIO;
16441 ret = do_get_thread_area(child, addr,
16442 - (struct user_desc __user *)data);
16443 + (__force struct user_desc __user *) data);
16444 break;
16445
16446 case PTRACE_SET_THREAD_AREA:
16447 if ((int) addr < 0)
16448 return -EIO;
16449 ret = do_set_thread_area(child, addr,
16450 - (struct user_desc __user *)data, 0);
16451 + (__force struct user_desc __user *) data, 0);
16452 break;
16453 #endif
16454
16455 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16456 memset(info, 0, sizeof(*info));
16457 info->si_signo = SIGTRAP;
16458 info->si_code = si_code;
16459 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16460 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16461 }
16462
16463 void user_single_step_siginfo(struct task_struct *tsk,
16464 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16465 index 42eb330..139955c 100644
16466 --- a/arch/x86/kernel/pvclock.c
16467 +++ b/arch/x86/kernel/pvclock.c
16468 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16469 return pv_tsc_khz;
16470 }
16471
16472 -static atomic64_t last_value = ATOMIC64_INIT(0);
16473 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16474
16475 void pvclock_resume(void)
16476 {
16477 - atomic64_set(&last_value, 0);
16478 + atomic64_set_unchecked(&last_value, 0);
16479 }
16480
16481 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16482 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16483 * updating at the same time, and one of them could be slightly behind,
16484 * making the assumption that last_value always go forward fail to hold.
16485 */
16486 - last = atomic64_read(&last_value);
16487 + last = atomic64_read_unchecked(&last_value);
16488 do {
16489 if (ret < last)
16490 return last;
16491 - last = atomic64_cmpxchg(&last_value, last, ret);
16492 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16493 } while (unlikely(last != ret));
16494
16495 return ret;
16496 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16497 index 9242436..753954d 100644
16498 --- a/arch/x86/kernel/reboot.c
16499 +++ b/arch/x86/kernel/reboot.c
16500 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16501 EXPORT_SYMBOL(pm_power_off);
16502
16503 static const struct desc_ptr no_idt = {};
16504 -static int reboot_mode;
16505 +static unsigned short reboot_mode;
16506 enum reboot_type reboot_type = BOOT_ACPI;
16507 int reboot_force;
16508
16509 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
16510 extern const unsigned char machine_real_restart_asm[];
16511 extern const u64 machine_real_restart_gdt[3];
16512
16513 -void machine_real_restart(unsigned int type)
16514 +__noreturn void machine_real_restart(unsigned int type)
16515 {
16516 void *restart_va;
16517 unsigned long restart_pa;
16518 - void (*restart_lowmem)(unsigned int);
16519 + void (* __noreturn restart_lowmem)(unsigned int);
16520 u64 *lowmem_gdt;
16521
16522 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16523 + struct desc_struct *gdt;
16524 +#endif
16525 +
16526 local_irq_disable();
16527
16528 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16529 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int type)
16530 boot)". This seems like a fairly standard thing that gets set by
16531 REBOOT.COM programs, and the previous reset routine did this
16532 too. */
16533 - *((unsigned short *)0x472) = reboot_mode;
16534 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16535
16536 /* Patch the GDT in the low memory trampoline */
16537 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16538
16539 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16540 restart_pa = virt_to_phys(restart_va);
16541 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16542 + restart_lowmem = (void *)restart_pa;
16543
16544 /* GDT[0]: GDT self-pointer */
16545 lowmem_gdt[0] =
16546 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int type)
16547 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16548
16549 /* Jump to the identity-mapped low memory code */
16550 +
16551 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16552 + gdt = get_cpu_gdt_table(smp_processor_id());
16553 + pax_open_kernel();
16554 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16555 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16556 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16557 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16558 +#endif
16559 +#ifdef CONFIG_PAX_KERNEXEC
16560 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16561 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16562 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16563 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16564 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16565 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16566 +#endif
16567 + pax_close_kernel();
16568 +#endif
16569 +
16570 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16571 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16572 + unreachable();
16573 +#else
16574 restart_lowmem(type);
16575 +#endif
16576 +
16577 }
16578 #ifdef CONFIG_APM_MODULE
16579 EXPORT_SYMBOL(machine_real_restart);
16580 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16581 * try to force a triple fault and then cycle between hitting the keyboard
16582 * controller and doing that
16583 */
16584 -static void native_machine_emergency_restart(void)
16585 +__noreturn static void native_machine_emergency_restart(void)
16586 {
16587 int i;
16588 int attempt = 0;
16589 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
16590 #endif
16591 }
16592
16593 -static void __machine_emergency_restart(int emergency)
16594 +static __noreturn void __machine_emergency_restart(int emergency)
16595 {
16596 reboot_emergency = emergency;
16597 machine_ops.emergency_restart();
16598 }
16599
16600 -static void native_machine_restart(char *__unused)
16601 +static __noreturn void native_machine_restart(char *__unused)
16602 {
16603 printk("machine restart\n");
16604
16605 @@ -662,7 +692,7 @@ static void native_machine_restart(char *__unused)
16606 __machine_emergency_restart(0);
16607 }
16608
16609 -static void native_machine_halt(void)
16610 +static __noreturn void native_machine_halt(void)
16611 {
16612 /* stop other cpus and apics */
16613 machine_shutdown();
16614 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
16615 stop_this_cpu(NULL);
16616 }
16617
16618 -static void native_machine_power_off(void)
16619 +__noreturn static void native_machine_power_off(void)
16620 {
16621 if (pm_power_off) {
16622 if (!reboot_force)
16623 @@ -682,6 +712,7 @@ static void native_machine_power_off(void)
16624 }
16625 /* a fallback in case there is no PM info available */
16626 tboot_shutdown(TB_SHUTDOWN_HALT);
16627 + unreachable();
16628 }
16629
16630 struct machine_ops machine_ops = {
16631 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16632 index 7a6f3b3..bed145d7 100644
16633 --- a/arch/x86/kernel/relocate_kernel_64.S
16634 +++ b/arch/x86/kernel/relocate_kernel_64.S
16635 @@ -11,6 +11,7 @@
16636 #include <asm/kexec.h>
16637 #include <asm/processor-flags.h>
16638 #include <asm/pgtable_types.h>
16639 +#include <asm/alternative-asm.h>
16640
16641 /*
16642 * Must be relocatable PIC code callable as a C function
16643 @@ -160,13 +161,14 @@ identity_mapped:
16644 xorq %rbp, %rbp
16645 xorq %r8, %r8
16646 xorq %r9, %r9
16647 - xorq %r10, %r9
16648 + xorq %r10, %r10
16649 xorq %r11, %r11
16650 xorq %r12, %r12
16651 xorq %r13, %r13
16652 xorq %r14, %r14
16653 xorq %r15, %r15
16654
16655 + pax_force_retaddr 0, 1
16656 ret
16657
16658 1:
16659 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16660 index afaf384..1a101fe 100644
16661 --- a/arch/x86/kernel/setup.c
16662 +++ b/arch/x86/kernel/setup.c
16663 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16664
16665 switch (data->type) {
16666 case SETUP_E820_EXT:
16667 - parse_e820_ext(data);
16668 + parse_e820_ext((struct setup_data __force_kernel *)data);
16669 break;
16670 case SETUP_DTB:
16671 add_dtb(pa_data);
16672 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16673 * area (640->1Mb) as ram even though it is not.
16674 * take them out.
16675 */
16676 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16677 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16678 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16679 }
16680
16681 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16682
16683 if (!boot_params.hdr.root_flags)
16684 root_mountflags &= ~MS_RDONLY;
16685 - init_mm.start_code = (unsigned long) _text;
16686 - init_mm.end_code = (unsigned long) _etext;
16687 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16688 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16689 init_mm.end_data = (unsigned long) _edata;
16690 init_mm.brk = _brk_end;
16691
16692 - code_resource.start = virt_to_phys(_text);
16693 - code_resource.end = virt_to_phys(_etext)-1;
16694 - data_resource.start = virt_to_phys(_etext);
16695 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16696 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16697 + data_resource.start = virt_to_phys(_sdata);
16698 data_resource.end = virt_to_phys(_edata)-1;
16699 bss_resource.start = virt_to_phys(&__bss_start);
16700 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16701 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16702 index 71f4727..16dc9f7 100644
16703 --- a/arch/x86/kernel/setup_percpu.c
16704 +++ b/arch/x86/kernel/setup_percpu.c
16705 @@ -21,19 +21,17 @@
16706 #include <asm/cpu.h>
16707 #include <asm/stackprotector.h>
16708
16709 -DEFINE_PER_CPU(int, cpu_number);
16710 +#ifdef CONFIG_SMP
16711 +DEFINE_PER_CPU(unsigned int, cpu_number);
16712 EXPORT_PER_CPU_SYMBOL(cpu_number);
16713 +#endif
16714
16715 -#ifdef CONFIG_X86_64
16716 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16717 -#else
16718 -#define BOOT_PERCPU_OFFSET 0
16719 -#endif
16720
16721 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16722 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16723
16724 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16725 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16726 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16727 };
16728 EXPORT_SYMBOL(__per_cpu_offset);
16729 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16730 {
16731 #ifdef CONFIG_X86_32
16732 struct desc_struct gdt;
16733 + unsigned long base = per_cpu_offset(cpu);
16734
16735 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16736 - 0x2 | DESCTYPE_S, 0x8);
16737 - gdt.s = 1;
16738 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16739 + 0x83 | DESCTYPE_S, 0xC);
16740 write_gdt_entry(get_cpu_gdt_table(cpu),
16741 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16742 #endif
16743 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16744 /* alrighty, percpu areas up and running */
16745 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16746 for_each_possible_cpu(cpu) {
16747 +#ifdef CONFIG_CC_STACKPROTECTOR
16748 +#ifdef CONFIG_X86_32
16749 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16750 +#endif
16751 +#endif
16752 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16753 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16754 per_cpu(cpu_number, cpu) = cpu;
16755 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16756 */
16757 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16758 #endif
16759 +#ifdef CONFIG_CC_STACKPROTECTOR
16760 +#ifdef CONFIG_X86_32
16761 + if (!cpu)
16762 + per_cpu(stack_canary.canary, cpu) = canary;
16763 +#endif
16764 +#endif
16765 /*
16766 * Up to this point, the boot CPU has been using .init.data
16767 * area. Reload any changed state for the boot CPU.
16768 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16769 index 54ddaeb2..a6aa4d2 100644
16770 --- a/arch/x86/kernel/signal.c
16771 +++ b/arch/x86/kernel/signal.c
16772 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16773 * Align the stack pointer according to the i386 ABI,
16774 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16775 */
16776 - sp = ((sp + 4) & -16ul) - 4;
16777 + sp = ((sp - 12) & -16ul) - 4;
16778 #else /* !CONFIG_X86_32 */
16779 sp = round_down(sp, 16) - 8;
16780 #endif
16781 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16782 * Return an always-bogus address instead so we will die with SIGSEGV.
16783 */
16784 if (onsigstack && !likely(on_sig_stack(sp)))
16785 - return (void __user *)-1L;
16786 + return (__force void __user *)-1L;
16787
16788 /* save i387 state */
16789 if (used_math() && save_i387_xstate(*fpstate) < 0)
16790 - return (void __user *)-1L;
16791 + return (__force void __user *)-1L;
16792
16793 return (void __user *)sp;
16794 }
16795 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16796 }
16797
16798 if (current->mm->context.vdso)
16799 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16800 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16801 else
16802 - restorer = &frame->retcode;
16803 + restorer = (void __user *)&frame->retcode;
16804 if (ka->sa.sa_flags & SA_RESTORER)
16805 restorer = ka->sa.sa_restorer;
16806
16807 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16808 * reasons and because gdb uses it as a signature to notice
16809 * signal handler stack frames.
16810 */
16811 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16812 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16813
16814 if (err)
16815 return -EFAULT;
16816 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16817 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16818
16819 /* Set up to return from userspace. */
16820 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16821 + if (current->mm->context.vdso)
16822 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16823 + else
16824 + restorer = (void __user *)&frame->retcode;
16825 if (ka->sa.sa_flags & SA_RESTORER)
16826 restorer = ka->sa.sa_restorer;
16827 put_user_ex(restorer, &frame->pretcode);
16828 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16829 * reasons and because gdb uses it as a signature to notice
16830 * signal handler stack frames.
16831 */
16832 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16833 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16834 } put_user_catch(err);
16835
16836 if (err)
16837 @@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *regs)
16838 siginfo_t info;
16839 int signr;
16840
16841 + pax_track_stack();
16842 +
16843 /*
16844 * We want the common case to go fast, which is why we may in certain
16845 * cases get here from kernel mode. Just return without doing anything
16846 @@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *regs)
16847 * X86_32: vm86 regs switched out by assembly code before reaching
16848 * here, so testing against kernel CS suffices.
16849 */
16850 - if (!user_mode(regs))
16851 + if (!user_mode_novm(regs))
16852 return;
16853
16854 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16855 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16856 index 9f548cb..caf76f7 100644
16857 --- a/arch/x86/kernel/smpboot.c
16858 +++ b/arch/x86/kernel/smpboot.c
16859 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16860 set_idle_for_cpu(cpu, c_idle.idle);
16861 do_rest:
16862 per_cpu(current_task, cpu) = c_idle.idle;
16863 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16864 #ifdef CONFIG_X86_32
16865 /* Stack for startup_32 can be just as for start_secondary onwards */
16866 irq_ctx_init(cpu);
16867 #else
16868 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16869 initial_gs = per_cpu_offset(cpu);
16870 - per_cpu(kernel_stack, cpu) =
16871 - (unsigned long)task_stack_page(c_idle.idle) -
16872 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16873 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16874 #endif
16875 +
16876 + pax_open_kernel();
16877 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16878 + pax_close_kernel();
16879 +
16880 initial_code = (unsigned long)start_secondary;
16881 stack_start = c_idle.idle->thread.sp;
16882
16883 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16884
16885 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16886
16887 +#ifdef CONFIG_PAX_PER_CPU_PGD
16888 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16889 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16890 + KERNEL_PGD_PTRS);
16891 +#endif
16892 +
16893 err = do_boot_cpu(apicid, cpu);
16894 if (err) {
16895 pr_debug("do_boot_cpu failed %d\n", err);
16896 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16897 index c346d11..d43b163 100644
16898 --- a/arch/x86/kernel/step.c
16899 +++ b/arch/x86/kernel/step.c
16900 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16901 struct desc_struct *desc;
16902 unsigned long base;
16903
16904 - seg &= ~7UL;
16905 + seg >>= 3;
16906
16907 mutex_lock(&child->mm->context.lock);
16908 - if (unlikely((seg >> 3) >= child->mm->context.size))
16909 + if (unlikely(seg >= child->mm->context.size))
16910 addr = -1L; /* bogus selector, access would fault */
16911 else {
16912 desc = child->mm->context.ldt + seg;
16913 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16914 addr += base;
16915 }
16916 mutex_unlock(&child->mm->context.lock);
16917 - }
16918 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16919 + addr = ktla_ktva(addr);
16920
16921 return addr;
16922 }
16923 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
16924 unsigned char opcode[15];
16925 unsigned long addr = convert_ip_to_linear(child, regs);
16926
16927 + if (addr == -EINVAL)
16928 + return 0;
16929 +
16930 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16931 for (i = 0; i < copied; i++) {
16932 switch (opcode[i]) {
16933 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
16934 index 0b0cb5f..db6b9ed 100644
16935 --- a/arch/x86/kernel/sys_i386_32.c
16936 +++ b/arch/x86/kernel/sys_i386_32.c
16937 @@ -24,17 +24,224 @@
16938
16939 #include <asm/syscalls.h>
16940
16941 -/*
16942 - * Do a system call from kernel instead of calling sys_execve so we
16943 - * end up with proper pt_regs.
16944 - */
16945 -int kernel_execve(const char *filename,
16946 - const char *const argv[],
16947 - const char *const envp[])
16948 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16949 +{
16950 + unsigned long pax_task_size = TASK_SIZE;
16951 +
16952 +#ifdef CONFIG_PAX_SEGMEXEC
16953 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16954 + pax_task_size = SEGMEXEC_TASK_SIZE;
16955 +#endif
16956 +
16957 + if (len > pax_task_size || addr > pax_task_size - len)
16958 + return -EINVAL;
16959 +
16960 + return 0;
16961 +}
16962 +
16963 +unsigned long
16964 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16965 + unsigned long len, unsigned long pgoff, unsigned long flags)
16966 +{
16967 + struct mm_struct *mm = current->mm;
16968 + struct vm_area_struct *vma;
16969 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16970 +
16971 +#ifdef CONFIG_PAX_SEGMEXEC
16972 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16973 + pax_task_size = SEGMEXEC_TASK_SIZE;
16974 +#endif
16975 +
16976 + pax_task_size -= PAGE_SIZE;
16977 +
16978 + if (len > pax_task_size)
16979 + return -ENOMEM;
16980 +
16981 + if (flags & MAP_FIXED)
16982 + return addr;
16983 +
16984 +#ifdef CONFIG_PAX_RANDMMAP
16985 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16986 +#endif
16987 +
16988 + if (addr) {
16989 + addr = PAGE_ALIGN(addr);
16990 + if (pax_task_size - len >= addr) {
16991 + vma = find_vma(mm, addr);
16992 + if (check_heap_stack_gap(vma, addr, len))
16993 + return addr;
16994 + }
16995 + }
16996 + if (len > mm->cached_hole_size) {
16997 + start_addr = addr = mm->free_area_cache;
16998 + } else {
16999 + start_addr = addr = mm->mmap_base;
17000 + mm->cached_hole_size = 0;
17001 + }
17002 +
17003 +#ifdef CONFIG_PAX_PAGEEXEC
17004 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
17005 + start_addr = 0x00110000UL;
17006 +
17007 +#ifdef CONFIG_PAX_RANDMMAP
17008 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17009 + start_addr += mm->delta_mmap & 0x03FFF000UL;
17010 +#endif
17011 +
17012 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
17013 + start_addr = addr = mm->mmap_base;
17014 + else
17015 + addr = start_addr;
17016 + }
17017 +#endif
17018 +
17019 +full_search:
17020 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
17021 + /* At this point: (!vma || addr < vma->vm_end). */
17022 + if (pax_task_size - len < addr) {
17023 + /*
17024 + * Start a new search - just in case we missed
17025 + * some holes.
17026 + */
17027 + if (start_addr != mm->mmap_base) {
17028 + start_addr = addr = mm->mmap_base;
17029 + mm->cached_hole_size = 0;
17030 + goto full_search;
17031 + }
17032 + return -ENOMEM;
17033 + }
17034 + if (check_heap_stack_gap(vma, addr, len))
17035 + break;
17036 + if (addr + mm->cached_hole_size < vma->vm_start)
17037 + mm->cached_hole_size = vma->vm_start - addr;
17038 + addr = vma->vm_end;
17039 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
17040 + start_addr = addr = mm->mmap_base;
17041 + mm->cached_hole_size = 0;
17042 + goto full_search;
17043 + }
17044 + }
17045 +
17046 + /*
17047 + * Remember the place where we stopped the search:
17048 + */
17049 + mm->free_area_cache = addr + len;
17050 + return addr;
17051 +}
17052 +
17053 +unsigned long
17054 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17055 + const unsigned long len, const unsigned long pgoff,
17056 + const unsigned long flags)
17057 {
17058 - long __res;
17059 - asm volatile ("int $0x80"
17060 - : "=a" (__res)
17061 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
17062 - return __res;
17063 + struct vm_area_struct *vma;
17064 + struct mm_struct *mm = current->mm;
17065 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17066 +
17067 +#ifdef CONFIG_PAX_SEGMEXEC
17068 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17069 + pax_task_size = SEGMEXEC_TASK_SIZE;
17070 +#endif
17071 +
17072 + pax_task_size -= PAGE_SIZE;
17073 +
17074 + /* requested length too big for entire address space */
17075 + if (len > pax_task_size)
17076 + return -ENOMEM;
17077 +
17078 + if (flags & MAP_FIXED)
17079 + return addr;
17080 +
17081 +#ifdef CONFIG_PAX_PAGEEXEC
17082 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17083 + goto bottomup;
17084 +#endif
17085 +
17086 +#ifdef CONFIG_PAX_RANDMMAP
17087 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17088 +#endif
17089 +
17090 + /* requesting a specific address */
17091 + if (addr) {
17092 + addr = PAGE_ALIGN(addr);
17093 + if (pax_task_size - len >= addr) {
17094 + vma = find_vma(mm, addr);
17095 + if (check_heap_stack_gap(vma, addr, len))
17096 + return addr;
17097 + }
17098 + }
17099 +
17100 + /* check if free_area_cache is useful for us */
17101 + if (len <= mm->cached_hole_size) {
17102 + mm->cached_hole_size = 0;
17103 + mm->free_area_cache = mm->mmap_base;
17104 + }
17105 +
17106 + /* either no address requested or can't fit in requested address hole */
17107 + addr = mm->free_area_cache;
17108 +
17109 + /* make sure it can fit in the remaining address space */
17110 + if (addr > len) {
17111 + vma = find_vma(mm, addr-len);
17112 + if (check_heap_stack_gap(vma, addr - len, len))
17113 + /* remember the address as a hint for next time */
17114 + return (mm->free_area_cache = addr-len);
17115 + }
17116 +
17117 + if (mm->mmap_base < len)
17118 + goto bottomup;
17119 +
17120 + addr = mm->mmap_base-len;
17121 +
17122 + do {
17123 + /*
17124 + * Lookup failure means no vma is above this address,
17125 + * else if new region fits below vma->vm_start,
17126 + * return with success:
17127 + */
17128 + vma = find_vma(mm, addr);
17129 + if (check_heap_stack_gap(vma, addr, len))
17130 + /* remember the address as a hint for next time */
17131 + return (mm->free_area_cache = addr);
17132 +
17133 + /* remember the largest hole we saw so far */
17134 + if (addr + mm->cached_hole_size < vma->vm_start)
17135 + mm->cached_hole_size = vma->vm_start - addr;
17136 +
17137 + /* try just below the current vma->vm_start */
17138 + addr = skip_heap_stack_gap(vma, len);
17139 + } while (!IS_ERR_VALUE(addr));
17140 +
17141 +bottomup:
17142 + /*
17143 + * A failed mmap() very likely causes application failure,
17144 + * so fall back to the bottom-up function here. This scenario
17145 + * can happen with large stack limits and large mmap()
17146 + * allocations.
17147 + */
17148 +
17149 +#ifdef CONFIG_PAX_SEGMEXEC
17150 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17151 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17152 + else
17153 +#endif
17154 +
17155 + mm->mmap_base = TASK_UNMAPPED_BASE;
17156 +
17157 +#ifdef CONFIG_PAX_RANDMMAP
17158 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17159 + mm->mmap_base += mm->delta_mmap;
17160 +#endif
17161 +
17162 + mm->free_area_cache = mm->mmap_base;
17163 + mm->cached_hole_size = ~0UL;
17164 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17165 + /*
17166 + * Restore the topdown base:
17167 + */
17168 + mm->mmap_base = base;
17169 + mm->free_area_cache = base;
17170 + mm->cached_hole_size = ~0UL;
17171 +
17172 + return addr;
17173 }
17174 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17175 index ff14a50..35626c3 100644
17176 --- a/arch/x86/kernel/sys_x86_64.c
17177 +++ b/arch/x86/kernel/sys_x86_64.c
17178 @@ -32,8 +32,8 @@ out:
17179 return error;
17180 }
17181
17182 -static void find_start_end(unsigned long flags, unsigned long *begin,
17183 - unsigned long *end)
17184 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17185 + unsigned long *begin, unsigned long *end)
17186 {
17187 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17188 unsigned long new_begin;
17189 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17190 *begin = new_begin;
17191 }
17192 } else {
17193 - *begin = TASK_UNMAPPED_BASE;
17194 + *begin = mm->mmap_base;
17195 *end = TASK_SIZE;
17196 }
17197 }
17198 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17199 if (flags & MAP_FIXED)
17200 return addr;
17201
17202 - find_start_end(flags, &begin, &end);
17203 + find_start_end(mm, flags, &begin, &end);
17204
17205 if (len > end)
17206 return -ENOMEM;
17207
17208 +#ifdef CONFIG_PAX_RANDMMAP
17209 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17210 +#endif
17211 +
17212 if (addr) {
17213 addr = PAGE_ALIGN(addr);
17214 vma = find_vma(mm, addr);
17215 - if (end - len >= addr &&
17216 - (!vma || addr + len <= vma->vm_start))
17217 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17218 return addr;
17219 }
17220 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17221 @@ -106,7 +109,7 @@ full_search:
17222 }
17223 return -ENOMEM;
17224 }
17225 - if (!vma || addr + len <= vma->vm_start) {
17226 + if (check_heap_stack_gap(vma, addr, len)) {
17227 /*
17228 * Remember the place where we stopped the search:
17229 */
17230 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17231 {
17232 struct vm_area_struct *vma;
17233 struct mm_struct *mm = current->mm;
17234 - unsigned long addr = addr0;
17235 + unsigned long base = mm->mmap_base, addr = addr0;
17236
17237 /* requested length too big for entire address space */
17238 if (len > TASK_SIZE)
17239 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17240 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17241 goto bottomup;
17242
17243 +#ifdef CONFIG_PAX_RANDMMAP
17244 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17245 +#endif
17246 +
17247 /* requesting a specific address */
17248 if (addr) {
17249 addr = PAGE_ALIGN(addr);
17250 - vma = find_vma(mm, addr);
17251 - if (TASK_SIZE - len >= addr &&
17252 - (!vma || addr + len <= vma->vm_start))
17253 - return addr;
17254 + if (TASK_SIZE - len >= addr) {
17255 + vma = find_vma(mm, addr);
17256 + if (check_heap_stack_gap(vma, addr, len))
17257 + return addr;
17258 + }
17259 }
17260
17261 /* check if free_area_cache is useful for us */
17262 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17263 /* make sure it can fit in the remaining address space */
17264 if (addr > len) {
17265 vma = find_vma(mm, addr-len);
17266 - if (!vma || addr <= vma->vm_start)
17267 + if (check_heap_stack_gap(vma, addr - len, len))
17268 /* remember the address as a hint for next time */
17269 return mm->free_area_cache = addr-len;
17270 }
17271 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17272 * return with success:
17273 */
17274 vma = find_vma(mm, addr);
17275 - if (!vma || addr+len <= vma->vm_start)
17276 + if (check_heap_stack_gap(vma, addr, len))
17277 /* remember the address as a hint for next time */
17278 return mm->free_area_cache = addr;
17279
17280 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17281 mm->cached_hole_size = vma->vm_start - addr;
17282
17283 /* try just below the current vma->vm_start */
17284 - addr = vma->vm_start-len;
17285 - } while (len < vma->vm_start);
17286 + addr = skip_heap_stack_gap(vma, len);
17287 + } while (!IS_ERR_VALUE(addr));
17288
17289 bottomup:
17290 /*
17291 @@ -198,13 +206,21 @@ bottomup:
17292 * can happen with large stack limits and large mmap()
17293 * allocations.
17294 */
17295 + mm->mmap_base = TASK_UNMAPPED_BASE;
17296 +
17297 +#ifdef CONFIG_PAX_RANDMMAP
17298 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17299 + mm->mmap_base += mm->delta_mmap;
17300 +#endif
17301 +
17302 + mm->free_area_cache = mm->mmap_base;
17303 mm->cached_hole_size = ~0UL;
17304 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17305 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17306 /*
17307 * Restore the topdown base:
17308 */
17309 - mm->free_area_cache = mm->mmap_base;
17310 + mm->mmap_base = base;
17311 + mm->free_area_cache = base;
17312 mm->cached_hole_size = ~0UL;
17313
17314 return addr;
17315 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17316 index bc19be3..0f5fbf7 100644
17317 --- a/arch/x86/kernel/syscall_table_32.S
17318 +++ b/arch/x86/kernel/syscall_table_32.S
17319 @@ -1,3 +1,4 @@
17320 +.section .rodata,"a",@progbits
17321 ENTRY(sys_call_table)
17322 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17323 .long sys_exit
17324 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17325 index e07a2fc..db0369d 100644
17326 --- a/arch/x86/kernel/tboot.c
17327 +++ b/arch/x86/kernel/tboot.c
17328 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
17329
17330 void tboot_shutdown(u32 shutdown_type)
17331 {
17332 - void (*shutdown)(void);
17333 + void (* __noreturn shutdown)(void);
17334
17335 if (!tboot_enabled())
17336 return;
17337 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
17338
17339 switch_to_tboot_pt();
17340
17341 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17342 + shutdown = (void *)tboot->shutdown_entry;
17343 shutdown();
17344
17345 /* should not reach here */
17346 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17347 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17348 }
17349
17350 -static atomic_t ap_wfs_count;
17351 +static atomic_unchecked_t ap_wfs_count;
17352
17353 static int tboot_wait_for_aps(int num_aps)
17354 {
17355 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17356 {
17357 switch (action) {
17358 case CPU_DYING:
17359 - atomic_inc(&ap_wfs_count);
17360 + atomic_inc_unchecked(&ap_wfs_count);
17361 if (num_online_cpus() == 1)
17362 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17363 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17364 return NOTIFY_BAD;
17365 break;
17366 }
17367 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
17368
17369 tboot_create_trampoline();
17370
17371 - atomic_set(&ap_wfs_count, 0);
17372 + atomic_set_unchecked(&ap_wfs_count, 0);
17373 register_hotcpu_notifier(&tboot_cpu_notifier);
17374 return 0;
17375 }
17376 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17377 index 5a64d05..804587b 100644
17378 --- a/arch/x86/kernel/time.c
17379 +++ b/arch/x86/kernel/time.c
17380 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17381 {
17382 unsigned long pc = instruction_pointer(regs);
17383
17384 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17385 + if (!user_mode(regs) && in_lock_functions(pc)) {
17386 #ifdef CONFIG_FRAME_POINTER
17387 - return *(unsigned long *)(regs->bp + sizeof(long));
17388 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17389 #else
17390 unsigned long *sp =
17391 (unsigned long *)kernel_stack_pointer(regs);
17392 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17393 * or above a saved flags. Eflags has bits 22-31 zero,
17394 * kernel addresses don't.
17395 */
17396 +
17397 +#ifdef CONFIG_PAX_KERNEXEC
17398 + return ktla_ktva(sp[0]);
17399 +#else
17400 if (sp[0] >> 22)
17401 return sp[0];
17402 if (sp[1] >> 22)
17403 return sp[1];
17404 #endif
17405 +
17406 +#endif
17407 }
17408 return pc;
17409 }
17410 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17411 index 6bb7b85..dd853e1 100644
17412 --- a/arch/x86/kernel/tls.c
17413 +++ b/arch/x86/kernel/tls.c
17414 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17415 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17416 return -EINVAL;
17417
17418 +#ifdef CONFIG_PAX_SEGMEXEC
17419 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17420 + return -EINVAL;
17421 +#endif
17422 +
17423 set_tls_desc(p, idx, &info, 1);
17424
17425 return 0;
17426 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17427 index 451c0a7..e57f551 100644
17428 --- a/arch/x86/kernel/trampoline_32.S
17429 +++ b/arch/x86/kernel/trampoline_32.S
17430 @@ -32,6 +32,12 @@
17431 #include <asm/segment.h>
17432 #include <asm/page_types.h>
17433
17434 +#ifdef CONFIG_PAX_KERNEXEC
17435 +#define ta(X) (X)
17436 +#else
17437 +#define ta(X) ((X) - __PAGE_OFFSET)
17438 +#endif
17439 +
17440 #ifdef CONFIG_SMP
17441
17442 .section ".x86_trampoline","a"
17443 @@ -62,7 +68,7 @@ r_base = .
17444 inc %ax # protected mode (PE) bit
17445 lmsw %ax # into protected mode
17446 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17447 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17448 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17449
17450 # These need to be in the same 64K segment as the above;
17451 # hence we don't use the boot_gdt_descr defined in head.S
17452 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17453 index 09ff517..df19fbff 100644
17454 --- a/arch/x86/kernel/trampoline_64.S
17455 +++ b/arch/x86/kernel/trampoline_64.S
17456 @@ -90,7 +90,7 @@ startup_32:
17457 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17458 movl %eax, %ds
17459
17460 - movl $X86_CR4_PAE, %eax
17461 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17462 movl %eax, %cr4 # Enable PAE mode
17463
17464 # Setup trampoline 4 level pagetables
17465 @@ -138,7 +138,7 @@ tidt:
17466 # so the kernel can live anywhere
17467 .balign 4
17468 tgdt:
17469 - .short tgdt_end - tgdt # gdt limit
17470 + .short tgdt_end - tgdt - 1 # gdt limit
17471 .long tgdt - r_base
17472 .short 0
17473 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17474 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17475 index 6913369..7e7dff6 100644
17476 --- a/arch/x86/kernel/traps.c
17477 +++ b/arch/x86/kernel/traps.c
17478 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17479
17480 /* Do we ignore FPU interrupts ? */
17481 char ignore_fpu_irq;
17482 -
17483 -/*
17484 - * The IDT has to be page-aligned to simplify the Pentium
17485 - * F0 0F bug workaround.
17486 - */
17487 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17488 #endif
17489
17490 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17491 @@ -117,13 +111,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17492 }
17493
17494 static void __kprobes
17495 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17496 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17497 long error_code, siginfo_t *info)
17498 {
17499 struct task_struct *tsk = current;
17500
17501 #ifdef CONFIG_X86_32
17502 - if (regs->flags & X86_VM_MASK) {
17503 + if (v8086_mode(regs)) {
17504 /*
17505 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17506 * On nmi (interrupt 2), do_trap should not be called.
17507 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17508 }
17509 #endif
17510
17511 - if (!user_mode(regs))
17512 + if (!user_mode_novm(regs))
17513 goto kernel_trap;
17514
17515 #ifdef CONFIG_X86_32
17516 @@ -157,7 +151,7 @@ trap_signal:
17517 printk_ratelimit()) {
17518 printk(KERN_INFO
17519 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17520 - tsk->comm, tsk->pid, str,
17521 + tsk->comm, task_pid_nr(tsk), str,
17522 regs->ip, regs->sp, error_code);
17523 print_vma_addr(" in ", regs->ip);
17524 printk("\n");
17525 @@ -174,8 +168,20 @@ kernel_trap:
17526 if (!fixup_exception(regs)) {
17527 tsk->thread.error_code = error_code;
17528 tsk->thread.trap_no = trapnr;
17529 +
17530 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17531 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17532 + str = "PAX: suspicious stack segment fault";
17533 +#endif
17534 +
17535 die(str, regs, error_code);
17536 }
17537 +
17538 +#ifdef CONFIG_PAX_REFCOUNT
17539 + if (trapnr == 4)
17540 + pax_report_refcount_overflow(regs);
17541 +#endif
17542 +
17543 return;
17544
17545 #ifdef CONFIG_X86_32
17546 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17547 conditional_sti(regs);
17548
17549 #ifdef CONFIG_X86_32
17550 - if (regs->flags & X86_VM_MASK)
17551 + if (v8086_mode(regs))
17552 goto gp_in_vm86;
17553 #endif
17554
17555 tsk = current;
17556 - if (!user_mode(regs))
17557 + if (!user_mode_novm(regs))
17558 goto gp_in_kernel;
17559
17560 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17561 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17562 + struct mm_struct *mm = tsk->mm;
17563 + unsigned long limit;
17564 +
17565 + down_write(&mm->mmap_sem);
17566 + limit = mm->context.user_cs_limit;
17567 + if (limit < TASK_SIZE) {
17568 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17569 + up_write(&mm->mmap_sem);
17570 + return;
17571 + }
17572 + up_write(&mm->mmap_sem);
17573 + }
17574 +#endif
17575 +
17576 tsk->thread.error_code = error_code;
17577 tsk->thread.trap_no = 13;
17578
17579 @@ -304,6 +326,13 @@ gp_in_kernel:
17580 if (notify_die(DIE_GPF, "general protection fault", regs,
17581 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17582 return;
17583 +
17584 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17585 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17586 + die("PAX: suspicious general protection fault", regs, error_code);
17587 + else
17588 +#endif
17589 +
17590 die("general protection fault", regs, error_code);
17591 }
17592
17593 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
17594 dotraplinkage notrace __kprobes void
17595 do_nmi(struct pt_regs *regs, long error_code)
17596 {
17597 +
17598 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17599 + if (!user_mode(regs)) {
17600 + unsigned long cs = regs->cs & 0xFFFF;
17601 + unsigned long ip = ktva_ktla(regs->ip);
17602 +
17603 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17604 + regs->ip = ip;
17605 + }
17606 +#endif
17607 +
17608 nmi_enter();
17609
17610 inc_irq_stat(__nmi_count);
17611 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17612 /* It's safe to allow irq's after DR6 has been saved */
17613 preempt_conditional_sti(regs);
17614
17615 - if (regs->flags & X86_VM_MASK) {
17616 + if (v8086_mode(regs)) {
17617 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17618 error_code, 1);
17619 preempt_conditional_cli(regs);
17620 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17621 * We already checked v86 mode above, so we can check for kernel mode
17622 * by just checking the CPL of CS.
17623 */
17624 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17625 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17626 tsk->thread.debugreg6 &= ~DR_STEP;
17627 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17628 regs->flags &= ~X86_EFLAGS_TF;
17629 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17630 return;
17631 conditional_sti(regs);
17632
17633 - if (!user_mode_vm(regs))
17634 + if (!user_mode(regs))
17635 {
17636 if (!fixup_exception(regs)) {
17637 task->thread.error_code = error_code;
17638 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17639 void __math_state_restore(void)
17640 {
17641 struct thread_info *thread = current_thread_info();
17642 - struct task_struct *tsk = thread->task;
17643 + struct task_struct *tsk = current;
17644
17645 /*
17646 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17647 @@ -750,8 +790,7 @@ void __math_state_restore(void)
17648 */
17649 asmlinkage void math_state_restore(void)
17650 {
17651 - struct thread_info *thread = current_thread_info();
17652 - struct task_struct *tsk = thread->task;
17653 + struct task_struct *tsk = current;
17654
17655 if (!tsk_used_math(tsk)) {
17656 local_irq_enable();
17657 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17658 index b9242ba..50c5edd 100644
17659 --- a/arch/x86/kernel/verify_cpu.S
17660 +++ b/arch/x86/kernel/verify_cpu.S
17661 @@ -20,6 +20,7 @@
17662 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17663 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17664 * arch/x86/kernel/head_32.S: processor startup
17665 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17666 *
17667 * verify_cpu, returns the status of longmode and SSE in register %eax.
17668 * 0: Success 1: Failure
17669 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17670 index 863f875..4307295 100644
17671 --- a/arch/x86/kernel/vm86_32.c
17672 +++ b/arch/x86/kernel/vm86_32.c
17673 @@ -41,6 +41,7 @@
17674 #include <linux/ptrace.h>
17675 #include <linux/audit.h>
17676 #include <linux/stddef.h>
17677 +#include <linux/grsecurity.h>
17678
17679 #include <asm/uaccess.h>
17680 #include <asm/io.h>
17681 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17682 do_exit(SIGSEGV);
17683 }
17684
17685 - tss = &per_cpu(init_tss, get_cpu());
17686 + tss = init_tss + get_cpu();
17687 current->thread.sp0 = current->thread.saved_sp0;
17688 current->thread.sysenter_cs = __KERNEL_CS;
17689 load_sp0(tss, &current->thread);
17690 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17691 struct task_struct *tsk;
17692 int tmp, ret = -EPERM;
17693
17694 +#ifdef CONFIG_GRKERNSEC_VM86
17695 + if (!capable(CAP_SYS_RAWIO)) {
17696 + gr_handle_vm86();
17697 + goto out;
17698 + }
17699 +#endif
17700 +
17701 tsk = current;
17702 if (tsk->thread.saved_sp0)
17703 goto out;
17704 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17705 int tmp, ret;
17706 struct vm86plus_struct __user *v86;
17707
17708 +#ifdef CONFIG_GRKERNSEC_VM86
17709 + if (!capable(CAP_SYS_RAWIO)) {
17710 + gr_handle_vm86();
17711 + ret = -EPERM;
17712 + goto out;
17713 + }
17714 +#endif
17715 +
17716 tsk = current;
17717 switch (cmd) {
17718 case VM86_REQUEST_IRQ:
17719 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17720 tsk->thread.saved_fs = info->regs32->fs;
17721 tsk->thread.saved_gs = get_user_gs(info->regs32);
17722
17723 - tss = &per_cpu(init_tss, get_cpu());
17724 + tss = init_tss + get_cpu();
17725 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17726 if (cpu_has_sep)
17727 tsk->thread.sysenter_cs = 0;
17728 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17729 goto cannot_handle;
17730 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17731 goto cannot_handle;
17732 - intr_ptr = (unsigned long __user *) (i << 2);
17733 + intr_ptr = (__force unsigned long __user *) (i << 2);
17734 if (get_user(segoffs, intr_ptr))
17735 goto cannot_handle;
17736 if ((segoffs >> 16) == BIOSSEG)
17737 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17738 index 0f703f1..9e15f64 100644
17739 --- a/arch/x86/kernel/vmlinux.lds.S
17740 +++ b/arch/x86/kernel/vmlinux.lds.S
17741 @@ -26,6 +26,13 @@
17742 #include <asm/page_types.h>
17743 #include <asm/cache.h>
17744 #include <asm/boot.h>
17745 +#include <asm/segment.h>
17746 +
17747 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17748 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17749 +#else
17750 +#define __KERNEL_TEXT_OFFSET 0
17751 +#endif
17752
17753 #undef i386 /* in case the preprocessor is a 32bit one */
17754
17755 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17756
17757 PHDRS {
17758 text PT_LOAD FLAGS(5); /* R_E */
17759 +#ifdef CONFIG_X86_32
17760 + module PT_LOAD FLAGS(5); /* R_E */
17761 +#endif
17762 +#ifdef CONFIG_XEN
17763 + rodata PT_LOAD FLAGS(5); /* R_E */
17764 +#else
17765 + rodata PT_LOAD FLAGS(4); /* R__ */
17766 +#endif
17767 data PT_LOAD FLAGS(6); /* RW_ */
17768 -#ifdef CONFIG_X86_64
17769 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17770 #ifdef CONFIG_SMP
17771 percpu PT_LOAD FLAGS(6); /* RW_ */
17772 #endif
17773 + text.init PT_LOAD FLAGS(5); /* R_E */
17774 + text.exit PT_LOAD FLAGS(5); /* R_E */
17775 init PT_LOAD FLAGS(7); /* RWE */
17776 -#endif
17777 note PT_NOTE FLAGS(0); /* ___ */
17778 }
17779
17780 SECTIONS
17781 {
17782 #ifdef CONFIG_X86_32
17783 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17784 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17785 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17786 #else
17787 - . = __START_KERNEL;
17788 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17789 + . = __START_KERNEL;
17790 #endif
17791
17792 /* Text and read-only data */
17793 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17794 - _text = .;
17795 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17796 /* bootstrapping code */
17797 +#ifdef CONFIG_X86_32
17798 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17799 +#else
17800 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17801 +#endif
17802 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17803 + _text = .;
17804 HEAD_TEXT
17805 #ifdef CONFIG_X86_32
17806 . = ALIGN(PAGE_SIZE);
17807 @@ -108,13 +128,47 @@ SECTIONS
17808 IRQENTRY_TEXT
17809 *(.fixup)
17810 *(.gnu.warning)
17811 - /* End of text section */
17812 - _etext = .;
17813 } :text = 0x9090
17814
17815 - NOTES :text :note
17816 + . += __KERNEL_TEXT_OFFSET;
17817
17818 - EXCEPTION_TABLE(16) :text = 0x9090
17819 +#ifdef CONFIG_X86_32
17820 + . = ALIGN(PAGE_SIZE);
17821 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17822 +
17823 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17824 + MODULES_EXEC_VADDR = .;
17825 + BYTE(0)
17826 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17827 + . = ALIGN(HPAGE_SIZE);
17828 + MODULES_EXEC_END = . - 1;
17829 +#endif
17830 +
17831 + } :module
17832 +#endif
17833 +
17834 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17835 + /* End of text section */
17836 + _etext = . - __KERNEL_TEXT_OFFSET;
17837 + }
17838 +
17839 +#ifdef CONFIG_X86_32
17840 + . = ALIGN(PAGE_SIZE);
17841 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17842 + *(.idt)
17843 + . = ALIGN(PAGE_SIZE);
17844 + *(.empty_zero_page)
17845 + *(.initial_pg_fixmap)
17846 + *(.initial_pg_pmd)
17847 + *(.initial_page_table)
17848 + *(.swapper_pg_dir)
17849 + } :rodata
17850 +#endif
17851 +
17852 + . = ALIGN(PAGE_SIZE);
17853 + NOTES :rodata :note
17854 +
17855 + EXCEPTION_TABLE(16) :rodata
17856
17857 #if defined(CONFIG_DEBUG_RODATA)
17858 /* .text should occupy whole number of pages */
17859 @@ -126,16 +180,20 @@ SECTIONS
17860
17861 /* Data */
17862 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17863 +
17864 +#ifdef CONFIG_PAX_KERNEXEC
17865 + . = ALIGN(HPAGE_SIZE);
17866 +#else
17867 + . = ALIGN(PAGE_SIZE);
17868 +#endif
17869 +
17870 /* Start of data section */
17871 _sdata = .;
17872
17873 /* init_task */
17874 INIT_TASK_DATA(THREAD_SIZE)
17875
17876 -#ifdef CONFIG_X86_32
17877 - /* 32 bit has nosave before _edata */
17878 NOSAVE_DATA
17879 -#endif
17880
17881 PAGE_ALIGNED_DATA(PAGE_SIZE)
17882
17883 @@ -176,12 +234,19 @@ SECTIONS
17884 #endif /* CONFIG_X86_64 */
17885
17886 /* Init code and data - will be freed after init */
17887 - . = ALIGN(PAGE_SIZE);
17888 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17889 + BYTE(0)
17890 +
17891 +#ifdef CONFIG_PAX_KERNEXEC
17892 + . = ALIGN(HPAGE_SIZE);
17893 +#else
17894 + . = ALIGN(PAGE_SIZE);
17895 +#endif
17896 +
17897 __init_begin = .; /* paired with __init_end */
17898 - }
17899 + } :init.begin
17900
17901 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17902 +#ifdef CONFIG_SMP
17903 /*
17904 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17905 * output PHDR, so the next output section - .init.text - should
17906 @@ -190,12 +255,27 @@ SECTIONS
17907 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17908 #endif
17909
17910 - INIT_TEXT_SECTION(PAGE_SIZE)
17911 -#ifdef CONFIG_X86_64
17912 - :init
17913 -#endif
17914 + . = ALIGN(PAGE_SIZE);
17915 + init_begin = .;
17916 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17917 + VMLINUX_SYMBOL(_sinittext) = .;
17918 + INIT_TEXT
17919 + VMLINUX_SYMBOL(_einittext) = .;
17920 + . = ALIGN(PAGE_SIZE);
17921 + } :text.init
17922
17923 - INIT_DATA_SECTION(16)
17924 + /*
17925 + * .exit.text is discard at runtime, not link time, to deal with
17926 + * references from .altinstructions and .eh_frame
17927 + */
17928 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17929 + EXIT_TEXT
17930 + . = ALIGN(16);
17931 + } :text.exit
17932 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17933 +
17934 + . = ALIGN(PAGE_SIZE);
17935 + INIT_DATA_SECTION(16) :init
17936
17937 /*
17938 * Code and data for a variety of lowlevel trampolines, to be
17939 @@ -269,19 +349,12 @@ SECTIONS
17940 }
17941
17942 . = ALIGN(8);
17943 - /*
17944 - * .exit.text is discard at runtime, not link time, to deal with
17945 - * references from .altinstructions and .eh_frame
17946 - */
17947 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17948 - EXIT_TEXT
17949 - }
17950
17951 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17952 EXIT_DATA
17953 }
17954
17955 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17956 +#ifndef CONFIG_SMP
17957 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
17958 #endif
17959
17960 @@ -300,16 +373,10 @@ SECTIONS
17961 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
17962 __smp_locks = .;
17963 *(.smp_locks)
17964 - . = ALIGN(PAGE_SIZE);
17965 __smp_locks_end = .;
17966 + . = ALIGN(PAGE_SIZE);
17967 }
17968
17969 -#ifdef CONFIG_X86_64
17970 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17971 - NOSAVE_DATA
17972 - }
17973 -#endif
17974 -
17975 /* BSS */
17976 . = ALIGN(PAGE_SIZE);
17977 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17978 @@ -325,6 +392,7 @@ SECTIONS
17979 __brk_base = .;
17980 . += 64 * 1024; /* 64k alignment slop space */
17981 *(.brk_reservation) /* areas brk users have reserved */
17982 + . = ALIGN(HPAGE_SIZE);
17983 __brk_limit = .;
17984 }
17985
17986 @@ -351,13 +419,12 @@ SECTIONS
17987 * for the boot processor.
17988 */
17989 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
17990 -INIT_PER_CPU(gdt_page);
17991 INIT_PER_CPU(irq_stack_union);
17992
17993 /*
17994 * Build-time check on the image size:
17995 */
17996 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17997 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17998 "kernel image bigger than KERNEL_IMAGE_SIZE");
17999
18000 #ifdef CONFIG_SMP
18001 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
18002 index b56c65de..561a55b 100644
18003 --- a/arch/x86/kernel/vsyscall_64.c
18004 +++ b/arch/x86/kernel/vsyscall_64.c
18005 @@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
18006 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
18007 };
18008
18009 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
18010 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
18011
18012 static int __init vsyscall_setup(char *str)
18013 {
18014 if (str) {
18015 if (!strcmp("emulate", str))
18016 vsyscall_mode = EMULATE;
18017 - else if (!strcmp("native", str))
18018 - vsyscall_mode = NATIVE;
18019 else if (!strcmp("none", str))
18020 vsyscall_mode = NONE;
18021 else
18022 @@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18023
18024 tsk = current;
18025 if (seccomp_mode(&tsk->seccomp))
18026 - do_exit(SIGKILL);
18027 + do_group_exit(SIGKILL);
18028
18029 switch (vsyscall_nr) {
18030 case 0:
18031 @@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
18032 return true;
18033
18034 sigsegv:
18035 - force_sig(SIGSEGV, current);
18036 - return true;
18037 + do_group_exit(SIGKILL);
18038 }
18039
18040 /*
18041 @@ -273,10 +270,7 @@ void __init map_vsyscall(void)
18042 extern char __vvar_page;
18043 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
18044
18045 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
18046 - vsyscall_mode == NATIVE
18047 - ? PAGE_KERNEL_VSYSCALL
18048 - : PAGE_KERNEL_VVAR);
18049 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
18050 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
18051 (unsigned long)VSYSCALL_START);
18052
18053 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
18054 index 9796c2f..f686fbf 100644
18055 --- a/arch/x86/kernel/x8664_ksyms_64.c
18056 +++ b/arch/x86/kernel/x8664_ksyms_64.c
18057 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
18058 EXPORT_SYMBOL(copy_user_generic_string);
18059 EXPORT_SYMBOL(copy_user_generic_unrolled);
18060 EXPORT_SYMBOL(__copy_user_nocache);
18061 -EXPORT_SYMBOL(_copy_from_user);
18062 -EXPORT_SYMBOL(_copy_to_user);
18063
18064 EXPORT_SYMBOL(copy_page);
18065 EXPORT_SYMBOL(clear_page);
18066 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18067 index a391134..d0b63b6e 100644
18068 --- a/arch/x86/kernel/xsave.c
18069 +++ b/arch/x86/kernel/xsave.c
18070 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18071 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18072 return -EINVAL;
18073
18074 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18075 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18076 fx_sw_user->extended_size -
18077 FP_XSTATE_MAGIC2_SIZE));
18078 if (err)
18079 @@ -267,7 +267,7 @@ fx_only:
18080 * the other extended state.
18081 */
18082 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18083 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18084 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18085 }
18086
18087 /*
18088 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18089 if (use_xsave())
18090 err = restore_user_xstate(buf);
18091 else
18092 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18093 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18094 buf);
18095 if (unlikely(err)) {
18096 /*
18097 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18098 index 8b4cc5f..f086b5b 100644
18099 --- a/arch/x86/kvm/emulate.c
18100 +++ b/arch/x86/kvm/emulate.c
18101 @@ -96,7 +96,7 @@
18102 #define Src2ImmByte (2<<29)
18103 #define Src2One (3<<29)
18104 #define Src2Imm (4<<29)
18105 -#define Src2Mask (7<<29)
18106 +#define Src2Mask (7U<<29)
18107
18108 #define X2(x...) x, x
18109 #define X3(x...) X2(x), x
18110 @@ -207,6 +207,7 @@ struct gprefix {
18111
18112 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
18113 do { \
18114 + unsigned long _tmp; \
18115 __asm__ __volatile__ ( \
18116 _PRE_EFLAGS("0", "4", "2") \
18117 _op _suffix " %"_x"3,%1; " \
18118 @@ -220,8 +221,6 @@ struct gprefix {
18119 /* Raw emulation: instruction has two explicit operands. */
18120 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18121 do { \
18122 - unsigned long _tmp; \
18123 - \
18124 switch ((_dst).bytes) { \
18125 case 2: \
18126 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
18127 @@ -237,7 +236,6 @@ struct gprefix {
18128
18129 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18130 do { \
18131 - unsigned long _tmp; \
18132 switch ((_dst).bytes) { \
18133 case 1: \
18134 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
18135 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18136 index 57dcbd4..79aba9b 100644
18137 --- a/arch/x86/kvm/lapic.c
18138 +++ b/arch/x86/kvm/lapic.c
18139 @@ -53,7 +53,7 @@
18140 #define APIC_BUS_CYCLE_NS 1
18141
18142 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18143 -#define apic_debug(fmt, arg...)
18144 +#define apic_debug(fmt, arg...) do {} while (0)
18145
18146 #define APIC_LVT_NUM 6
18147 /* 14 is the version for Xeon and Pentium 8.4.8*/
18148 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18149 index 8e8da79..13bc641 100644
18150 --- a/arch/x86/kvm/mmu.c
18151 +++ b/arch/x86/kvm/mmu.c
18152 @@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18153
18154 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18155
18156 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18157 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18158
18159 /*
18160 * Assume that the pte write on a page table of the same type
18161 @@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18162 }
18163
18164 spin_lock(&vcpu->kvm->mmu_lock);
18165 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18166 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18167 gentry = 0;
18168 kvm_mmu_free_some_pages(vcpu);
18169 ++vcpu->kvm->stat.mmu_pte_write;
18170 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18171 index 507e2b8..fc55f89 100644
18172 --- a/arch/x86/kvm/paging_tmpl.h
18173 +++ b/arch/x86/kvm/paging_tmpl.h
18174 @@ -197,7 +197,7 @@ retry_walk:
18175 if (unlikely(kvm_is_error_hva(host_addr)))
18176 goto error;
18177
18178 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18179 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18180 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18181 goto error;
18182
18183 @@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
18184 unsigned long mmu_seq;
18185 bool map_writable;
18186
18187 + pax_track_stack();
18188 +
18189 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18190
18191 if (unlikely(error_code & PFERR_RSVD_MASK))
18192 @@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18193 if (need_flush)
18194 kvm_flush_remote_tlbs(vcpu->kvm);
18195
18196 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18197 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18198
18199 spin_unlock(&vcpu->kvm->mmu_lock);
18200
18201 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18202 index 475d1c9..33658ff 100644
18203 --- a/arch/x86/kvm/svm.c
18204 +++ b/arch/x86/kvm/svm.c
18205 @@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18206 int cpu = raw_smp_processor_id();
18207
18208 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18209 +
18210 + pax_open_kernel();
18211 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18212 + pax_close_kernel();
18213 +
18214 load_TR_desc();
18215 }
18216
18217 @@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18218 #endif
18219 #endif
18220
18221 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18222 + __set_fs(current_thread_info()->addr_limit);
18223 +#endif
18224 +
18225 reload_tss(vcpu);
18226
18227 local_irq_disable();
18228 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18229 index e65a158..656dc24 100644
18230 --- a/arch/x86/kvm/vmx.c
18231 +++ b/arch/x86/kvm/vmx.c
18232 @@ -1251,7 +1251,11 @@ static void reload_tss(void)
18233 struct desc_struct *descs;
18234
18235 descs = (void *)gdt->address;
18236 +
18237 + pax_open_kernel();
18238 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18239 + pax_close_kernel();
18240 +
18241 load_TR_desc();
18242 }
18243
18244 @@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
18245 if (!cpu_has_vmx_flexpriority())
18246 flexpriority_enabled = 0;
18247
18248 - if (!cpu_has_vmx_tpr_shadow())
18249 - kvm_x86_ops->update_cr8_intercept = NULL;
18250 + if (!cpu_has_vmx_tpr_shadow()) {
18251 + pax_open_kernel();
18252 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18253 + pax_close_kernel();
18254 + }
18255
18256 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18257 kvm_disable_largepages();
18258 @@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(void)
18259 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18260
18261 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18262 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18263 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18264
18265 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18266 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18267 @@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18268 "jmp .Lkvm_vmx_return \n\t"
18269 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18270 ".Lkvm_vmx_return: "
18271 +
18272 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18273 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18274 + ".Lkvm_vmx_return2: "
18275 +#endif
18276 +
18277 /* Save guest registers, load host registers, keep flags */
18278 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18279 "pop %0 \n\t"
18280 @@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18281 #endif
18282 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18283 [wordsize]"i"(sizeof(ulong))
18284 +
18285 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18286 + ,[cs]"i"(__KERNEL_CS)
18287 +#endif
18288 +
18289 : "cc", "memory"
18290 , R"ax", R"bx", R"di", R"si"
18291 #ifdef CONFIG_X86_64
18292 @@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18293 }
18294 }
18295
18296 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18297 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18298 +
18299 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18300 + loadsegment(fs, __KERNEL_PERCPU);
18301 +#endif
18302 +
18303 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18304 + __set_fs(current_thread_info()->addr_limit);
18305 +#endif
18306 +
18307 vmx->loaded_vmcs->launched = 1;
18308
18309 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18310 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18311 index 84a28ea..9326501 100644
18312 --- a/arch/x86/kvm/x86.c
18313 +++ b/arch/x86/kvm/x86.c
18314 @@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18315 {
18316 struct kvm *kvm = vcpu->kvm;
18317 int lm = is_long_mode(vcpu);
18318 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18319 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18320 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18321 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18322 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18323 : kvm->arch.xen_hvm_config.blob_size_32;
18324 u32 page_num = data & ~PAGE_MASK;
18325 @@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18326 if (n < msr_list.nmsrs)
18327 goto out;
18328 r = -EFAULT;
18329 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18330 + goto out;
18331 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18332 num_msrs_to_save * sizeof(u32)))
18333 goto out;
18334 @@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18335 struct kvm_cpuid2 *cpuid,
18336 struct kvm_cpuid_entry2 __user *entries)
18337 {
18338 - int r;
18339 + int r, i;
18340
18341 r = -E2BIG;
18342 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18343 goto out;
18344 r = -EFAULT;
18345 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18346 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18347 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18348 goto out;
18349 + for (i = 0; i < cpuid->nent; ++i) {
18350 + struct kvm_cpuid_entry2 cpuid_entry;
18351 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18352 + goto out;
18353 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18354 + }
18355 vcpu->arch.cpuid_nent = cpuid->nent;
18356 kvm_apic_set_version(vcpu);
18357 kvm_x86_ops->cpuid_update(vcpu);
18358 @@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18359 struct kvm_cpuid2 *cpuid,
18360 struct kvm_cpuid_entry2 __user *entries)
18361 {
18362 - int r;
18363 + int r, i;
18364
18365 r = -E2BIG;
18366 if (cpuid->nent < vcpu->arch.cpuid_nent)
18367 goto out;
18368 r = -EFAULT;
18369 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18370 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18371 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18372 goto out;
18373 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18374 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18375 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18376 + goto out;
18377 + }
18378 return 0;
18379
18380 out:
18381 @@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18382 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18383 struct kvm_interrupt *irq)
18384 {
18385 - if (irq->irq < 0 || irq->irq >= 256)
18386 + if (irq->irq >= 256)
18387 return -EINVAL;
18388 if (irqchip_in_kernel(vcpu->kvm))
18389 return -ENXIO;
18390 @@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
18391 kvm_mmu_set_mmio_spte_mask(mask);
18392 }
18393
18394 -int kvm_arch_init(void *opaque)
18395 +int kvm_arch_init(const void *opaque)
18396 {
18397 int r;
18398 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18399 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18400 index 13ee258..b9632f6 100644
18401 --- a/arch/x86/lguest/boot.c
18402 +++ b/arch/x86/lguest/boot.c
18403 @@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18404 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18405 * Launcher to reboot us.
18406 */
18407 -static void lguest_restart(char *reason)
18408 +static __noreturn void lguest_restart(char *reason)
18409 {
18410 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18411 + BUG();
18412 }
18413
18414 /*G:050
18415 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18416 index 042f682..c92afb6 100644
18417 --- a/arch/x86/lib/atomic64_32.c
18418 +++ b/arch/x86/lib/atomic64_32.c
18419 @@ -8,18 +8,30 @@
18420
18421 long long atomic64_read_cx8(long long, const atomic64_t *v);
18422 EXPORT_SYMBOL(atomic64_read_cx8);
18423 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18424 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18425 long long atomic64_set_cx8(long long, const atomic64_t *v);
18426 EXPORT_SYMBOL(atomic64_set_cx8);
18427 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18428 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18429 long long atomic64_xchg_cx8(long long, unsigned high);
18430 EXPORT_SYMBOL(atomic64_xchg_cx8);
18431 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18432 EXPORT_SYMBOL(atomic64_add_return_cx8);
18433 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18434 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18435 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18436 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18437 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18438 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18439 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18440 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18441 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18442 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18443 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18444 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18445 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18446 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18447 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18448 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18449 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18450 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18451 #ifndef CONFIG_X86_CMPXCHG64
18452 long long atomic64_read_386(long long, const atomic64_t *v);
18453 EXPORT_SYMBOL(atomic64_read_386);
18454 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18455 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18456 long long atomic64_set_386(long long, const atomic64_t *v);
18457 EXPORT_SYMBOL(atomic64_set_386);
18458 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18459 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18460 long long atomic64_xchg_386(long long, unsigned high);
18461 EXPORT_SYMBOL(atomic64_xchg_386);
18462 long long atomic64_add_return_386(long long a, atomic64_t *v);
18463 EXPORT_SYMBOL(atomic64_add_return_386);
18464 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18465 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18466 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18467 EXPORT_SYMBOL(atomic64_sub_return_386);
18468 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18469 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18470 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18471 EXPORT_SYMBOL(atomic64_inc_return_386);
18472 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18473 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18474 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18475 EXPORT_SYMBOL(atomic64_dec_return_386);
18476 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18477 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18478 long long atomic64_add_386(long long a, atomic64_t *v);
18479 EXPORT_SYMBOL(atomic64_add_386);
18480 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18481 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18482 long long atomic64_sub_386(long long a, atomic64_t *v);
18483 EXPORT_SYMBOL(atomic64_sub_386);
18484 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18485 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18486 long long atomic64_inc_386(long long a, atomic64_t *v);
18487 EXPORT_SYMBOL(atomic64_inc_386);
18488 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18489 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18490 long long atomic64_dec_386(long long a, atomic64_t *v);
18491 EXPORT_SYMBOL(atomic64_dec_386);
18492 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18493 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18494 long long atomic64_dec_if_positive_386(atomic64_t *v);
18495 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18496 int atomic64_inc_not_zero_386(atomic64_t *v);
18497 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18498 index e8e7e0d..56fd1b0 100644
18499 --- a/arch/x86/lib/atomic64_386_32.S
18500 +++ b/arch/x86/lib/atomic64_386_32.S
18501 @@ -48,6 +48,10 @@ BEGIN(read)
18502 movl (v), %eax
18503 movl 4(v), %edx
18504 RET_ENDP
18505 +BEGIN(read_unchecked)
18506 + movl (v), %eax
18507 + movl 4(v), %edx
18508 +RET_ENDP
18509 #undef v
18510
18511 #define v %esi
18512 @@ -55,6 +59,10 @@ BEGIN(set)
18513 movl %ebx, (v)
18514 movl %ecx, 4(v)
18515 RET_ENDP
18516 +BEGIN(set_unchecked)
18517 + movl %ebx, (v)
18518 + movl %ecx, 4(v)
18519 +RET_ENDP
18520 #undef v
18521
18522 #define v %esi
18523 @@ -70,6 +78,20 @@ RET_ENDP
18524 BEGIN(add)
18525 addl %eax, (v)
18526 adcl %edx, 4(v)
18527 +
18528 +#ifdef CONFIG_PAX_REFCOUNT
18529 + jno 0f
18530 + subl %eax, (v)
18531 + sbbl %edx, 4(v)
18532 + int $4
18533 +0:
18534 + _ASM_EXTABLE(0b, 0b)
18535 +#endif
18536 +
18537 +RET_ENDP
18538 +BEGIN(add_unchecked)
18539 + addl %eax, (v)
18540 + adcl %edx, 4(v)
18541 RET_ENDP
18542 #undef v
18543
18544 @@ -77,6 +99,24 @@ RET_ENDP
18545 BEGIN(add_return)
18546 addl (v), %eax
18547 adcl 4(v), %edx
18548 +
18549 +#ifdef CONFIG_PAX_REFCOUNT
18550 + into
18551 +1234:
18552 + _ASM_EXTABLE(1234b, 2f)
18553 +#endif
18554 +
18555 + movl %eax, (v)
18556 + movl %edx, 4(v)
18557 +
18558 +#ifdef CONFIG_PAX_REFCOUNT
18559 +2:
18560 +#endif
18561 +
18562 +RET_ENDP
18563 +BEGIN(add_return_unchecked)
18564 + addl (v), %eax
18565 + adcl 4(v), %edx
18566 movl %eax, (v)
18567 movl %edx, 4(v)
18568 RET_ENDP
18569 @@ -86,6 +126,20 @@ RET_ENDP
18570 BEGIN(sub)
18571 subl %eax, (v)
18572 sbbl %edx, 4(v)
18573 +
18574 +#ifdef CONFIG_PAX_REFCOUNT
18575 + jno 0f
18576 + addl %eax, (v)
18577 + adcl %edx, 4(v)
18578 + int $4
18579 +0:
18580 + _ASM_EXTABLE(0b, 0b)
18581 +#endif
18582 +
18583 +RET_ENDP
18584 +BEGIN(sub_unchecked)
18585 + subl %eax, (v)
18586 + sbbl %edx, 4(v)
18587 RET_ENDP
18588 #undef v
18589
18590 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18591 sbbl $0, %edx
18592 addl (v), %eax
18593 adcl 4(v), %edx
18594 +
18595 +#ifdef CONFIG_PAX_REFCOUNT
18596 + into
18597 +1234:
18598 + _ASM_EXTABLE(1234b, 2f)
18599 +#endif
18600 +
18601 + movl %eax, (v)
18602 + movl %edx, 4(v)
18603 +
18604 +#ifdef CONFIG_PAX_REFCOUNT
18605 +2:
18606 +#endif
18607 +
18608 +RET_ENDP
18609 +BEGIN(sub_return_unchecked)
18610 + negl %edx
18611 + negl %eax
18612 + sbbl $0, %edx
18613 + addl (v), %eax
18614 + adcl 4(v), %edx
18615 movl %eax, (v)
18616 movl %edx, 4(v)
18617 RET_ENDP
18618 @@ -105,6 +180,20 @@ RET_ENDP
18619 BEGIN(inc)
18620 addl $1, (v)
18621 adcl $0, 4(v)
18622 +
18623 +#ifdef CONFIG_PAX_REFCOUNT
18624 + jno 0f
18625 + subl $1, (v)
18626 + sbbl $0, 4(v)
18627 + int $4
18628 +0:
18629 + _ASM_EXTABLE(0b, 0b)
18630 +#endif
18631 +
18632 +RET_ENDP
18633 +BEGIN(inc_unchecked)
18634 + addl $1, (v)
18635 + adcl $0, 4(v)
18636 RET_ENDP
18637 #undef v
18638
18639 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18640 movl 4(v), %edx
18641 addl $1, %eax
18642 adcl $0, %edx
18643 +
18644 +#ifdef CONFIG_PAX_REFCOUNT
18645 + into
18646 +1234:
18647 + _ASM_EXTABLE(1234b, 2f)
18648 +#endif
18649 +
18650 + movl %eax, (v)
18651 + movl %edx, 4(v)
18652 +
18653 +#ifdef CONFIG_PAX_REFCOUNT
18654 +2:
18655 +#endif
18656 +
18657 +RET_ENDP
18658 +BEGIN(inc_return_unchecked)
18659 + movl (v), %eax
18660 + movl 4(v), %edx
18661 + addl $1, %eax
18662 + adcl $0, %edx
18663 movl %eax, (v)
18664 movl %edx, 4(v)
18665 RET_ENDP
18666 @@ -123,6 +232,20 @@ RET_ENDP
18667 BEGIN(dec)
18668 subl $1, (v)
18669 sbbl $0, 4(v)
18670 +
18671 +#ifdef CONFIG_PAX_REFCOUNT
18672 + jno 0f
18673 + addl $1, (v)
18674 + adcl $0, 4(v)
18675 + int $4
18676 +0:
18677 + _ASM_EXTABLE(0b, 0b)
18678 +#endif
18679 +
18680 +RET_ENDP
18681 +BEGIN(dec_unchecked)
18682 + subl $1, (v)
18683 + sbbl $0, 4(v)
18684 RET_ENDP
18685 #undef v
18686
18687 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18688 movl 4(v), %edx
18689 subl $1, %eax
18690 sbbl $0, %edx
18691 +
18692 +#ifdef CONFIG_PAX_REFCOUNT
18693 + into
18694 +1234:
18695 + _ASM_EXTABLE(1234b, 2f)
18696 +#endif
18697 +
18698 + movl %eax, (v)
18699 + movl %edx, 4(v)
18700 +
18701 +#ifdef CONFIG_PAX_REFCOUNT
18702 +2:
18703 +#endif
18704 +
18705 +RET_ENDP
18706 +BEGIN(dec_return_unchecked)
18707 + movl (v), %eax
18708 + movl 4(v), %edx
18709 + subl $1, %eax
18710 + sbbl $0, %edx
18711 movl %eax, (v)
18712 movl %edx, 4(v)
18713 RET_ENDP
18714 @@ -143,6 +286,13 @@ BEGIN(add_unless)
18715 adcl %edx, %edi
18716 addl (v), %eax
18717 adcl 4(v), %edx
18718 +
18719 +#ifdef CONFIG_PAX_REFCOUNT
18720 + into
18721 +1234:
18722 + _ASM_EXTABLE(1234b, 2f)
18723 +#endif
18724 +
18725 cmpl %eax, %esi
18726 je 3f
18727 1:
18728 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18729 1:
18730 addl $1, %eax
18731 adcl $0, %edx
18732 +
18733 +#ifdef CONFIG_PAX_REFCOUNT
18734 + into
18735 +1234:
18736 + _ASM_EXTABLE(1234b, 2f)
18737 +#endif
18738 +
18739 movl %eax, (v)
18740 movl %edx, 4(v)
18741 movl $1, %eax
18742 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18743 movl 4(v), %edx
18744 subl $1, %eax
18745 sbbl $0, %edx
18746 +
18747 +#ifdef CONFIG_PAX_REFCOUNT
18748 + into
18749 +1234:
18750 + _ASM_EXTABLE(1234b, 1f)
18751 +#endif
18752 +
18753 js 1f
18754 movl %eax, (v)
18755 movl %edx, 4(v)
18756 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18757 index 391a083..d658e9f 100644
18758 --- a/arch/x86/lib/atomic64_cx8_32.S
18759 +++ b/arch/x86/lib/atomic64_cx8_32.S
18760 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18761 CFI_STARTPROC
18762
18763 read64 %ecx
18764 + pax_force_retaddr
18765 ret
18766 CFI_ENDPROC
18767 ENDPROC(atomic64_read_cx8)
18768
18769 +ENTRY(atomic64_read_unchecked_cx8)
18770 + CFI_STARTPROC
18771 +
18772 + read64 %ecx
18773 + pax_force_retaddr
18774 + ret
18775 + CFI_ENDPROC
18776 +ENDPROC(atomic64_read_unchecked_cx8)
18777 +
18778 ENTRY(atomic64_set_cx8)
18779 CFI_STARTPROC
18780
18781 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18782 cmpxchg8b (%esi)
18783 jne 1b
18784
18785 + pax_force_retaddr
18786 ret
18787 CFI_ENDPROC
18788 ENDPROC(atomic64_set_cx8)
18789
18790 +ENTRY(atomic64_set_unchecked_cx8)
18791 + CFI_STARTPROC
18792 +
18793 +1:
18794 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
18795 + * are atomic on 586 and newer */
18796 + cmpxchg8b (%esi)
18797 + jne 1b
18798 +
18799 + pax_force_retaddr
18800 + ret
18801 + CFI_ENDPROC
18802 +ENDPROC(atomic64_set_unchecked_cx8)
18803 +
18804 ENTRY(atomic64_xchg_cx8)
18805 CFI_STARTPROC
18806
18807 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18808 cmpxchg8b (%esi)
18809 jne 1b
18810
18811 + pax_force_retaddr
18812 ret
18813 CFI_ENDPROC
18814 ENDPROC(atomic64_xchg_cx8)
18815
18816 -.macro addsub_return func ins insc
18817 -ENTRY(atomic64_\func\()_return_cx8)
18818 +.macro addsub_return func ins insc unchecked=""
18819 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18820 CFI_STARTPROC
18821 SAVE ebp
18822 SAVE ebx
18823 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18824 movl %edx, %ecx
18825 \ins\()l %esi, %ebx
18826 \insc\()l %edi, %ecx
18827 +
18828 +.ifb \unchecked
18829 +#ifdef CONFIG_PAX_REFCOUNT
18830 + into
18831 +2:
18832 + _ASM_EXTABLE(2b, 3f)
18833 +#endif
18834 +.endif
18835 +
18836 LOCK_PREFIX
18837 cmpxchg8b (%ebp)
18838 jne 1b
18839 -
18840 -10:
18841 movl %ebx, %eax
18842 movl %ecx, %edx
18843 +
18844 +.ifb \unchecked
18845 +#ifdef CONFIG_PAX_REFCOUNT
18846 +3:
18847 +#endif
18848 +.endif
18849 +
18850 RESTORE edi
18851 RESTORE esi
18852 RESTORE ebx
18853 RESTORE ebp
18854 + pax_force_retaddr
18855 ret
18856 CFI_ENDPROC
18857 -ENDPROC(atomic64_\func\()_return_cx8)
18858 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18859 .endm
18860
18861 addsub_return add add adc
18862 addsub_return sub sub sbb
18863 +addsub_return add add adc _unchecked
18864 +addsub_return sub sub sbb _unchecked
18865
18866 -.macro incdec_return func ins insc
18867 -ENTRY(atomic64_\func\()_return_cx8)
18868 +.macro incdec_return func ins insc unchecked
18869 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18870 CFI_STARTPROC
18871 SAVE ebx
18872
18873 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18874 movl %edx, %ecx
18875 \ins\()l $1, %ebx
18876 \insc\()l $0, %ecx
18877 +
18878 +.ifb \unchecked
18879 +#ifdef CONFIG_PAX_REFCOUNT
18880 + into
18881 +2:
18882 + _ASM_EXTABLE(2b, 3f)
18883 +#endif
18884 +.endif
18885 +
18886 LOCK_PREFIX
18887 cmpxchg8b (%esi)
18888 jne 1b
18889
18890 -10:
18891 movl %ebx, %eax
18892 movl %ecx, %edx
18893 +
18894 +.ifb \unchecked
18895 +#ifdef CONFIG_PAX_REFCOUNT
18896 +3:
18897 +#endif
18898 +.endif
18899 +
18900 RESTORE ebx
18901 + pax_force_retaddr
18902 ret
18903 CFI_ENDPROC
18904 -ENDPROC(atomic64_\func\()_return_cx8)
18905 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18906 .endm
18907
18908 incdec_return inc add adc
18909 incdec_return dec sub sbb
18910 +incdec_return inc add adc _unchecked
18911 +incdec_return dec sub sbb _unchecked
18912
18913 ENTRY(atomic64_dec_if_positive_cx8)
18914 CFI_STARTPROC
18915 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18916 movl %edx, %ecx
18917 subl $1, %ebx
18918 sbb $0, %ecx
18919 +
18920 +#ifdef CONFIG_PAX_REFCOUNT
18921 + into
18922 +1234:
18923 + _ASM_EXTABLE(1234b, 2f)
18924 +#endif
18925 +
18926 js 2f
18927 LOCK_PREFIX
18928 cmpxchg8b (%esi)
18929 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18930 movl %ebx, %eax
18931 movl %ecx, %edx
18932 RESTORE ebx
18933 + pax_force_retaddr
18934 ret
18935 CFI_ENDPROC
18936 ENDPROC(atomic64_dec_if_positive_cx8)
18937 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18938 movl %edx, %ecx
18939 addl %esi, %ebx
18940 adcl %edi, %ecx
18941 +
18942 +#ifdef CONFIG_PAX_REFCOUNT
18943 + into
18944 +1234:
18945 + _ASM_EXTABLE(1234b, 3f)
18946 +#endif
18947 +
18948 LOCK_PREFIX
18949 cmpxchg8b (%ebp)
18950 jne 1b
18951 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
18952 CFI_ADJUST_CFA_OFFSET -8
18953 RESTORE ebx
18954 RESTORE ebp
18955 + pax_force_retaddr
18956 ret
18957 4:
18958 cmpl %edx, 4(%esp)
18959 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
18960 movl %edx, %ecx
18961 addl $1, %ebx
18962 adcl $0, %ecx
18963 +
18964 +#ifdef CONFIG_PAX_REFCOUNT
18965 + into
18966 +1234:
18967 + _ASM_EXTABLE(1234b, 3f)
18968 +#endif
18969 +
18970 LOCK_PREFIX
18971 cmpxchg8b (%esi)
18972 jne 1b
18973 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
18974 movl $1, %eax
18975 3:
18976 RESTORE ebx
18977 + pax_force_retaddr
18978 ret
18979 4:
18980 testl %edx, %edx
18981 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
18982 index 78d16a5..fbcf666 100644
18983 --- a/arch/x86/lib/checksum_32.S
18984 +++ b/arch/x86/lib/checksum_32.S
18985 @@ -28,7 +28,8 @@
18986 #include <linux/linkage.h>
18987 #include <asm/dwarf2.h>
18988 #include <asm/errno.h>
18989 -
18990 +#include <asm/segment.h>
18991 +
18992 /*
18993 * computes a partial checksum, e.g. for TCP/UDP fragments
18994 */
18995 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
18996
18997 #define ARGBASE 16
18998 #define FP 12
18999 -
19000 -ENTRY(csum_partial_copy_generic)
19001 +
19002 +ENTRY(csum_partial_copy_generic_to_user)
19003 CFI_STARTPROC
19004 +
19005 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19006 + pushl_cfi %gs
19007 + popl_cfi %es
19008 + jmp csum_partial_copy_generic
19009 +#endif
19010 +
19011 +ENTRY(csum_partial_copy_generic_from_user)
19012 +
19013 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19014 + pushl_cfi %gs
19015 + popl_cfi %ds
19016 +#endif
19017 +
19018 +ENTRY(csum_partial_copy_generic)
19019 subl $4,%esp
19020 CFI_ADJUST_CFA_OFFSET 4
19021 pushl_cfi %edi
19022 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
19023 jmp 4f
19024 SRC(1: movw (%esi), %bx )
19025 addl $2, %esi
19026 -DST( movw %bx, (%edi) )
19027 +DST( movw %bx, %es:(%edi) )
19028 addl $2, %edi
19029 addw %bx, %ax
19030 adcl $0, %eax
19031 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
19032 SRC(1: movl (%esi), %ebx )
19033 SRC( movl 4(%esi), %edx )
19034 adcl %ebx, %eax
19035 -DST( movl %ebx, (%edi) )
19036 +DST( movl %ebx, %es:(%edi) )
19037 adcl %edx, %eax
19038 -DST( movl %edx, 4(%edi) )
19039 +DST( movl %edx, %es:4(%edi) )
19040
19041 SRC( movl 8(%esi), %ebx )
19042 SRC( movl 12(%esi), %edx )
19043 adcl %ebx, %eax
19044 -DST( movl %ebx, 8(%edi) )
19045 +DST( movl %ebx, %es:8(%edi) )
19046 adcl %edx, %eax
19047 -DST( movl %edx, 12(%edi) )
19048 +DST( movl %edx, %es:12(%edi) )
19049
19050 SRC( movl 16(%esi), %ebx )
19051 SRC( movl 20(%esi), %edx )
19052 adcl %ebx, %eax
19053 -DST( movl %ebx, 16(%edi) )
19054 +DST( movl %ebx, %es:16(%edi) )
19055 adcl %edx, %eax
19056 -DST( movl %edx, 20(%edi) )
19057 +DST( movl %edx, %es:20(%edi) )
19058
19059 SRC( movl 24(%esi), %ebx )
19060 SRC( movl 28(%esi), %edx )
19061 adcl %ebx, %eax
19062 -DST( movl %ebx, 24(%edi) )
19063 +DST( movl %ebx, %es:24(%edi) )
19064 adcl %edx, %eax
19065 -DST( movl %edx, 28(%edi) )
19066 +DST( movl %edx, %es:28(%edi) )
19067
19068 lea 32(%esi), %esi
19069 lea 32(%edi), %edi
19070 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19071 shrl $2, %edx # This clears CF
19072 SRC(3: movl (%esi), %ebx )
19073 adcl %ebx, %eax
19074 -DST( movl %ebx, (%edi) )
19075 +DST( movl %ebx, %es:(%edi) )
19076 lea 4(%esi), %esi
19077 lea 4(%edi), %edi
19078 dec %edx
19079 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19080 jb 5f
19081 SRC( movw (%esi), %cx )
19082 leal 2(%esi), %esi
19083 -DST( movw %cx, (%edi) )
19084 +DST( movw %cx, %es:(%edi) )
19085 leal 2(%edi), %edi
19086 je 6f
19087 shll $16,%ecx
19088 SRC(5: movb (%esi), %cl )
19089 -DST( movb %cl, (%edi) )
19090 +DST( movb %cl, %es:(%edi) )
19091 6: addl %ecx, %eax
19092 adcl $0, %eax
19093 7:
19094 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19095
19096 6001:
19097 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19098 - movl $-EFAULT, (%ebx)
19099 + movl $-EFAULT, %ss:(%ebx)
19100
19101 # zero the complete destination - computing the rest
19102 # is too much work
19103 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19104
19105 6002:
19106 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19107 - movl $-EFAULT,(%ebx)
19108 + movl $-EFAULT,%ss:(%ebx)
19109 jmp 5000b
19110
19111 .previous
19112
19113 + pushl_cfi %ss
19114 + popl_cfi %ds
19115 + pushl_cfi %ss
19116 + popl_cfi %es
19117 popl_cfi %ebx
19118 CFI_RESTORE ebx
19119 popl_cfi %esi
19120 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19121 popl_cfi %ecx # equivalent to addl $4,%esp
19122 ret
19123 CFI_ENDPROC
19124 -ENDPROC(csum_partial_copy_generic)
19125 +ENDPROC(csum_partial_copy_generic_to_user)
19126
19127 #else
19128
19129 /* Version for PentiumII/PPro */
19130
19131 #define ROUND1(x) \
19132 + nop; nop; nop; \
19133 SRC(movl x(%esi), %ebx ) ; \
19134 addl %ebx, %eax ; \
19135 - DST(movl %ebx, x(%edi) ) ;
19136 + DST(movl %ebx, %es:x(%edi)) ;
19137
19138 #define ROUND(x) \
19139 + nop; nop; nop; \
19140 SRC(movl x(%esi), %ebx ) ; \
19141 adcl %ebx, %eax ; \
19142 - DST(movl %ebx, x(%edi) ) ;
19143 + DST(movl %ebx, %es:x(%edi)) ;
19144
19145 #define ARGBASE 12
19146 -
19147 -ENTRY(csum_partial_copy_generic)
19148 +
19149 +ENTRY(csum_partial_copy_generic_to_user)
19150 CFI_STARTPROC
19151 +
19152 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19153 + pushl_cfi %gs
19154 + popl_cfi %es
19155 + jmp csum_partial_copy_generic
19156 +#endif
19157 +
19158 +ENTRY(csum_partial_copy_generic_from_user)
19159 +
19160 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19161 + pushl_cfi %gs
19162 + popl_cfi %ds
19163 +#endif
19164 +
19165 +ENTRY(csum_partial_copy_generic)
19166 pushl_cfi %ebx
19167 CFI_REL_OFFSET ebx, 0
19168 pushl_cfi %edi
19169 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19170 subl %ebx, %edi
19171 lea -1(%esi),%edx
19172 andl $-32,%edx
19173 - lea 3f(%ebx,%ebx), %ebx
19174 + lea 3f(%ebx,%ebx,2), %ebx
19175 testl %esi, %esi
19176 jmp *%ebx
19177 1: addl $64,%esi
19178 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19179 jb 5f
19180 SRC( movw (%esi), %dx )
19181 leal 2(%esi), %esi
19182 -DST( movw %dx, (%edi) )
19183 +DST( movw %dx, %es:(%edi) )
19184 leal 2(%edi), %edi
19185 je 6f
19186 shll $16,%edx
19187 5:
19188 SRC( movb (%esi), %dl )
19189 -DST( movb %dl, (%edi) )
19190 +DST( movb %dl, %es:(%edi) )
19191 6: addl %edx, %eax
19192 adcl $0, %eax
19193 7:
19194 .section .fixup, "ax"
19195 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19196 - movl $-EFAULT, (%ebx)
19197 + movl $-EFAULT, %ss:(%ebx)
19198 # zero the complete destination (computing the rest is too much work)
19199 movl ARGBASE+8(%esp),%edi # dst
19200 movl ARGBASE+12(%esp),%ecx # len
19201 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19202 rep; stosb
19203 jmp 7b
19204 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19205 - movl $-EFAULT, (%ebx)
19206 + movl $-EFAULT, %ss:(%ebx)
19207 jmp 7b
19208 .previous
19209
19210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19211 + pushl_cfi %ss
19212 + popl_cfi %ds
19213 + pushl_cfi %ss
19214 + popl_cfi %es
19215 +#endif
19216 +
19217 popl_cfi %esi
19218 CFI_RESTORE esi
19219 popl_cfi %edi
19220 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19221 CFI_RESTORE ebx
19222 ret
19223 CFI_ENDPROC
19224 -ENDPROC(csum_partial_copy_generic)
19225 +ENDPROC(csum_partial_copy_generic_to_user)
19226
19227 #undef ROUND
19228 #undef ROUND1
19229 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19230 index f2145cf..cea889d 100644
19231 --- a/arch/x86/lib/clear_page_64.S
19232 +++ b/arch/x86/lib/clear_page_64.S
19233 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19234 movl $4096/8,%ecx
19235 xorl %eax,%eax
19236 rep stosq
19237 + pax_force_retaddr
19238 ret
19239 CFI_ENDPROC
19240 ENDPROC(clear_page_c)
19241 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19242 movl $4096,%ecx
19243 xorl %eax,%eax
19244 rep stosb
19245 + pax_force_retaddr
19246 ret
19247 CFI_ENDPROC
19248 ENDPROC(clear_page_c_e)
19249 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19250 leaq 64(%rdi),%rdi
19251 jnz .Lloop
19252 nop
19253 + pax_force_retaddr
19254 ret
19255 CFI_ENDPROC
19256 .Lclear_page_end:
19257 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19258
19259 #include <asm/cpufeature.h>
19260
19261 - .section .altinstr_replacement,"ax"
19262 + .section .altinstr_replacement,"a"
19263 1: .byte 0xeb /* jmp <disp8> */
19264 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19265 2: .byte 0xeb /* jmp <disp8> */
19266 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19267 index 1e572c5..2a162cd 100644
19268 --- a/arch/x86/lib/cmpxchg16b_emu.S
19269 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19270 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19271
19272 popf
19273 mov $1, %al
19274 + pax_force_retaddr
19275 ret
19276
19277 not_same:
19278 popf
19279 xor %al,%al
19280 + pax_force_retaddr
19281 ret
19282
19283 CFI_ENDPROC
19284 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19285 index 01c805b..dccb07f 100644
19286 --- a/arch/x86/lib/copy_page_64.S
19287 +++ b/arch/x86/lib/copy_page_64.S
19288 @@ -9,6 +9,7 @@ copy_page_c:
19289 CFI_STARTPROC
19290 movl $4096/8,%ecx
19291 rep movsq
19292 + pax_force_retaddr
19293 ret
19294 CFI_ENDPROC
19295 ENDPROC(copy_page_c)
19296 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19297 movq 16 (%rsi), %rdx
19298 movq 24 (%rsi), %r8
19299 movq 32 (%rsi), %r9
19300 - movq 40 (%rsi), %r10
19301 + movq 40 (%rsi), %r13
19302 movq 48 (%rsi), %r11
19303 movq 56 (%rsi), %r12
19304
19305 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19306 movq %rdx, 16 (%rdi)
19307 movq %r8, 24 (%rdi)
19308 movq %r9, 32 (%rdi)
19309 - movq %r10, 40 (%rdi)
19310 + movq %r13, 40 (%rdi)
19311 movq %r11, 48 (%rdi)
19312 movq %r12, 56 (%rdi)
19313
19314 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19315 movq 16 (%rsi), %rdx
19316 movq 24 (%rsi), %r8
19317 movq 32 (%rsi), %r9
19318 - movq 40 (%rsi), %r10
19319 + movq 40 (%rsi), %r13
19320 movq 48 (%rsi), %r11
19321 movq 56 (%rsi), %r12
19322
19323 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19324 movq %rdx, 16 (%rdi)
19325 movq %r8, 24 (%rdi)
19326 movq %r9, 32 (%rdi)
19327 - movq %r10, 40 (%rdi)
19328 + movq %r13, 40 (%rdi)
19329 movq %r11, 48 (%rdi)
19330 movq %r12, 56 (%rdi)
19331
19332 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19333 CFI_RESTORE r13
19334 addq $3*8,%rsp
19335 CFI_ADJUST_CFA_OFFSET -3*8
19336 + pax_force_retaddr
19337 ret
19338 .Lcopy_page_end:
19339 CFI_ENDPROC
19340 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19341
19342 #include <asm/cpufeature.h>
19343
19344 - .section .altinstr_replacement,"ax"
19345 + .section .altinstr_replacement,"a"
19346 1: .byte 0xeb /* jmp <disp8> */
19347 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19348 2:
19349 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19350 index 0248402..821c786 100644
19351 --- a/arch/x86/lib/copy_user_64.S
19352 +++ b/arch/x86/lib/copy_user_64.S
19353 @@ -16,6 +16,7 @@
19354 #include <asm/thread_info.h>
19355 #include <asm/cpufeature.h>
19356 #include <asm/alternative-asm.h>
19357 +#include <asm/pgtable.h>
19358
19359 /*
19360 * By placing feature2 after feature1 in altinstructions section, we logically
19361 @@ -29,7 +30,7 @@
19362 .byte 0xe9 /* 32bit jump */
19363 .long \orig-1f /* by default jump to orig */
19364 1:
19365 - .section .altinstr_replacement,"ax"
19366 + .section .altinstr_replacement,"a"
19367 2: .byte 0xe9 /* near jump with 32bit immediate */
19368 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19369 3: .byte 0xe9 /* near jump with 32bit immediate */
19370 @@ -71,47 +72,20 @@
19371 #endif
19372 .endm
19373
19374 -/* Standard copy_to_user with segment limit checking */
19375 -ENTRY(_copy_to_user)
19376 - CFI_STARTPROC
19377 - GET_THREAD_INFO(%rax)
19378 - movq %rdi,%rcx
19379 - addq %rdx,%rcx
19380 - jc bad_to_user
19381 - cmpq TI_addr_limit(%rax),%rcx
19382 - ja bad_to_user
19383 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19384 - copy_user_generic_unrolled,copy_user_generic_string, \
19385 - copy_user_enhanced_fast_string
19386 - CFI_ENDPROC
19387 -ENDPROC(_copy_to_user)
19388 -
19389 -/* Standard copy_from_user with segment limit checking */
19390 -ENTRY(_copy_from_user)
19391 - CFI_STARTPROC
19392 - GET_THREAD_INFO(%rax)
19393 - movq %rsi,%rcx
19394 - addq %rdx,%rcx
19395 - jc bad_from_user
19396 - cmpq TI_addr_limit(%rax),%rcx
19397 - ja bad_from_user
19398 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19399 - copy_user_generic_unrolled,copy_user_generic_string, \
19400 - copy_user_enhanced_fast_string
19401 - CFI_ENDPROC
19402 -ENDPROC(_copy_from_user)
19403 -
19404 .section .fixup,"ax"
19405 /* must zero dest */
19406 ENTRY(bad_from_user)
19407 bad_from_user:
19408 CFI_STARTPROC
19409 + testl %edx,%edx
19410 + js bad_to_user
19411 movl %edx,%ecx
19412 xorl %eax,%eax
19413 rep
19414 stosb
19415 bad_to_user:
19416 movl %edx,%eax
19417 + pax_force_retaddr
19418 ret
19419 CFI_ENDPROC
19420 ENDPROC(bad_from_user)
19421 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19422 jz 17f
19423 1: movq (%rsi),%r8
19424 2: movq 1*8(%rsi),%r9
19425 -3: movq 2*8(%rsi),%r10
19426 +3: movq 2*8(%rsi),%rax
19427 4: movq 3*8(%rsi),%r11
19428 5: movq %r8,(%rdi)
19429 6: movq %r9,1*8(%rdi)
19430 -7: movq %r10,2*8(%rdi)
19431 +7: movq %rax,2*8(%rdi)
19432 8: movq %r11,3*8(%rdi)
19433 9: movq 4*8(%rsi),%r8
19434 10: movq 5*8(%rsi),%r9
19435 -11: movq 6*8(%rsi),%r10
19436 +11: movq 6*8(%rsi),%rax
19437 12: movq 7*8(%rsi),%r11
19438 13: movq %r8,4*8(%rdi)
19439 14: movq %r9,5*8(%rdi)
19440 -15: movq %r10,6*8(%rdi)
19441 +15: movq %rax,6*8(%rdi)
19442 16: movq %r11,7*8(%rdi)
19443 leaq 64(%rsi),%rsi
19444 leaq 64(%rdi),%rdi
19445 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19446 decl %ecx
19447 jnz 21b
19448 23: xor %eax,%eax
19449 + pax_force_retaddr
19450 ret
19451
19452 .section .fixup,"ax"
19453 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19454 3: rep
19455 movsb
19456 4: xorl %eax,%eax
19457 + pax_force_retaddr
19458 ret
19459
19460 .section .fixup,"ax"
19461 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19462 1: rep
19463 movsb
19464 2: xorl %eax,%eax
19465 + pax_force_retaddr
19466 ret
19467
19468 .section .fixup,"ax"
19469 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19470 index cb0c112..e3a6895 100644
19471 --- a/arch/x86/lib/copy_user_nocache_64.S
19472 +++ b/arch/x86/lib/copy_user_nocache_64.S
19473 @@ -8,12 +8,14 @@
19474
19475 #include <linux/linkage.h>
19476 #include <asm/dwarf2.h>
19477 +#include <asm/alternative-asm.h>
19478
19479 #define FIX_ALIGNMENT 1
19480
19481 #include <asm/current.h>
19482 #include <asm/asm-offsets.h>
19483 #include <asm/thread_info.h>
19484 +#include <asm/pgtable.h>
19485
19486 .macro ALIGN_DESTINATION
19487 #ifdef FIX_ALIGNMENT
19488 @@ -50,6 +52,15 @@
19489 */
19490 ENTRY(__copy_user_nocache)
19491 CFI_STARTPROC
19492 +
19493 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19494 + mov $PAX_USER_SHADOW_BASE,%rcx
19495 + cmp %rcx,%rsi
19496 + jae 1f
19497 + add %rcx,%rsi
19498 +1:
19499 +#endif
19500 +
19501 cmpl $8,%edx
19502 jb 20f /* less then 8 bytes, go to byte copy loop */
19503 ALIGN_DESTINATION
19504 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19505 jz 17f
19506 1: movq (%rsi),%r8
19507 2: movq 1*8(%rsi),%r9
19508 -3: movq 2*8(%rsi),%r10
19509 +3: movq 2*8(%rsi),%rax
19510 4: movq 3*8(%rsi),%r11
19511 5: movnti %r8,(%rdi)
19512 6: movnti %r9,1*8(%rdi)
19513 -7: movnti %r10,2*8(%rdi)
19514 +7: movnti %rax,2*8(%rdi)
19515 8: movnti %r11,3*8(%rdi)
19516 9: movq 4*8(%rsi),%r8
19517 10: movq 5*8(%rsi),%r9
19518 -11: movq 6*8(%rsi),%r10
19519 +11: movq 6*8(%rsi),%rax
19520 12: movq 7*8(%rsi),%r11
19521 13: movnti %r8,4*8(%rdi)
19522 14: movnti %r9,5*8(%rdi)
19523 -15: movnti %r10,6*8(%rdi)
19524 +15: movnti %rax,6*8(%rdi)
19525 16: movnti %r11,7*8(%rdi)
19526 leaq 64(%rsi),%rsi
19527 leaq 64(%rdi),%rdi
19528 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19529 jnz 21b
19530 23: xorl %eax,%eax
19531 sfence
19532 + pax_force_retaddr
19533 ret
19534
19535 .section .fixup,"ax"
19536 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19537 index fb903b7..c92b7f7 100644
19538 --- a/arch/x86/lib/csum-copy_64.S
19539 +++ b/arch/x86/lib/csum-copy_64.S
19540 @@ -8,6 +8,7 @@
19541 #include <linux/linkage.h>
19542 #include <asm/dwarf2.h>
19543 #include <asm/errno.h>
19544 +#include <asm/alternative-asm.h>
19545
19546 /*
19547 * Checksum copy with exception handling.
19548 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19549 CFI_RESTORE rbp
19550 addq $7*8, %rsp
19551 CFI_ADJUST_CFA_OFFSET -7*8
19552 + pax_force_retaddr 0, 1
19553 ret
19554 CFI_RESTORE_STATE
19555
19556 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19557 index 459b58a..9570bc7 100644
19558 --- a/arch/x86/lib/csum-wrappers_64.c
19559 +++ b/arch/x86/lib/csum-wrappers_64.c
19560 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19561 len -= 2;
19562 }
19563 }
19564 - isum = csum_partial_copy_generic((__force const void *)src,
19565 +
19566 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19567 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19568 + src += PAX_USER_SHADOW_BASE;
19569 +#endif
19570 +
19571 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19572 dst, len, isum, errp, NULL);
19573 if (unlikely(*errp))
19574 goto out_err;
19575 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19576 }
19577
19578 *errp = 0;
19579 - return csum_partial_copy_generic(src, (void __force *)dst,
19580 +
19581 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19582 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19583 + dst += PAX_USER_SHADOW_BASE;
19584 +#endif
19585 +
19586 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19587 len, isum, NULL, errp);
19588 }
19589 EXPORT_SYMBOL(csum_partial_copy_to_user);
19590 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19591 index 51f1504..ddac4c1 100644
19592 --- a/arch/x86/lib/getuser.S
19593 +++ b/arch/x86/lib/getuser.S
19594 @@ -33,15 +33,38 @@
19595 #include <asm/asm-offsets.h>
19596 #include <asm/thread_info.h>
19597 #include <asm/asm.h>
19598 +#include <asm/segment.h>
19599 +#include <asm/pgtable.h>
19600 +#include <asm/alternative-asm.h>
19601 +
19602 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19603 +#define __copyuser_seg gs;
19604 +#else
19605 +#define __copyuser_seg
19606 +#endif
19607
19608 .text
19609 ENTRY(__get_user_1)
19610 CFI_STARTPROC
19611 +
19612 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19613 GET_THREAD_INFO(%_ASM_DX)
19614 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19615 jae bad_get_user
19616 -1: movzb (%_ASM_AX),%edx
19617 +
19618 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19619 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19620 + cmp %_ASM_DX,%_ASM_AX
19621 + jae 1234f
19622 + add %_ASM_DX,%_ASM_AX
19623 +1234:
19624 +#endif
19625 +
19626 +#endif
19627 +
19628 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19629 xor %eax,%eax
19630 + pax_force_retaddr
19631 ret
19632 CFI_ENDPROC
19633 ENDPROC(__get_user_1)
19634 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19635 ENTRY(__get_user_2)
19636 CFI_STARTPROC
19637 add $1,%_ASM_AX
19638 +
19639 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19640 jc bad_get_user
19641 GET_THREAD_INFO(%_ASM_DX)
19642 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19643 jae bad_get_user
19644 -2: movzwl -1(%_ASM_AX),%edx
19645 +
19646 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19647 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19648 + cmp %_ASM_DX,%_ASM_AX
19649 + jae 1234f
19650 + add %_ASM_DX,%_ASM_AX
19651 +1234:
19652 +#endif
19653 +
19654 +#endif
19655 +
19656 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19657 xor %eax,%eax
19658 + pax_force_retaddr
19659 ret
19660 CFI_ENDPROC
19661 ENDPROC(__get_user_2)
19662 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19663 ENTRY(__get_user_4)
19664 CFI_STARTPROC
19665 add $3,%_ASM_AX
19666 +
19667 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19668 jc bad_get_user
19669 GET_THREAD_INFO(%_ASM_DX)
19670 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19671 jae bad_get_user
19672 -3: mov -3(%_ASM_AX),%edx
19673 +
19674 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19675 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19676 + cmp %_ASM_DX,%_ASM_AX
19677 + jae 1234f
19678 + add %_ASM_DX,%_ASM_AX
19679 +1234:
19680 +#endif
19681 +
19682 +#endif
19683 +
19684 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19685 xor %eax,%eax
19686 + pax_force_retaddr
19687 ret
19688 CFI_ENDPROC
19689 ENDPROC(__get_user_4)
19690 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19691 GET_THREAD_INFO(%_ASM_DX)
19692 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19693 jae bad_get_user
19694 +
19695 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19696 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19697 + cmp %_ASM_DX,%_ASM_AX
19698 + jae 1234f
19699 + add %_ASM_DX,%_ASM_AX
19700 +1234:
19701 +#endif
19702 +
19703 4: movq -7(%_ASM_AX),%_ASM_DX
19704 xor %eax,%eax
19705 + pax_force_retaddr
19706 ret
19707 CFI_ENDPROC
19708 ENDPROC(__get_user_8)
19709 @@ -91,6 +152,7 @@ bad_get_user:
19710 CFI_STARTPROC
19711 xor %edx,%edx
19712 mov $(-EFAULT),%_ASM_AX
19713 + pax_force_retaddr
19714 ret
19715 CFI_ENDPROC
19716 END(bad_get_user)
19717 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19718 index 9f33b98..dfc7678 100644
19719 --- a/arch/x86/lib/insn.c
19720 +++ b/arch/x86/lib/insn.c
19721 @@ -21,6 +21,11 @@
19722 #include <linux/string.h>
19723 #include <asm/inat.h>
19724 #include <asm/insn.h>
19725 +#ifdef __KERNEL__
19726 +#include <asm/pgtable_types.h>
19727 +#else
19728 +#define ktla_ktva(addr) addr
19729 +#endif
19730
19731 #define get_next(t, insn) \
19732 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
19733 @@ -40,8 +45,8 @@
19734 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19735 {
19736 memset(insn, 0, sizeof(*insn));
19737 - insn->kaddr = kaddr;
19738 - insn->next_byte = kaddr;
19739 + insn->kaddr = ktla_ktva(kaddr);
19740 + insn->next_byte = ktla_ktva(kaddr);
19741 insn->x86_64 = x86_64 ? 1 : 0;
19742 insn->opnd_bytes = 4;
19743 if (x86_64)
19744 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19745 index 05a95e7..326f2fa 100644
19746 --- a/arch/x86/lib/iomap_copy_64.S
19747 +++ b/arch/x86/lib/iomap_copy_64.S
19748 @@ -17,6 +17,7 @@
19749
19750 #include <linux/linkage.h>
19751 #include <asm/dwarf2.h>
19752 +#include <asm/alternative-asm.h>
19753
19754 /*
19755 * override generic version in lib/iomap_copy.c
19756 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19757 CFI_STARTPROC
19758 movl %edx,%ecx
19759 rep movsd
19760 + pax_force_retaddr
19761 ret
19762 CFI_ENDPROC
19763 ENDPROC(__iowrite32_copy)
19764 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19765 index efbf2a0..8893637 100644
19766 --- a/arch/x86/lib/memcpy_64.S
19767 +++ b/arch/x86/lib/memcpy_64.S
19768 @@ -34,6 +34,7 @@
19769 rep movsq
19770 movl %edx, %ecx
19771 rep movsb
19772 + pax_force_retaddr
19773 ret
19774 .Lmemcpy_e:
19775 .previous
19776 @@ -51,6 +52,7 @@
19777
19778 movl %edx, %ecx
19779 rep movsb
19780 + pax_force_retaddr
19781 ret
19782 .Lmemcpy_e_e:
19783 .previous
19784 @@ -81,13 +83,13 @@ ENTRY(memcpy)
19785 */
19786 movq 0*8(%rsi), %r8
19787 movq 1*8(%rsi), %r9
19788 - movq 2*8(%rsi), %r10
19789 + movq 2*8(%rsi), %rcx
19790 movq 3*8(%rsi), %r11
19791 leaq 4*8(%rsi), %rsi
19792
19793 movq %r8, 0*8(%rdi)
19794 movq %r9, 1*8(%rdi)
19795 - movq %r10, 2*8(%rdi)
19796 + movq %rcx, 2*8(%rdi)
19797 movq %r11, 3*8(%rdi)
19798 leaq 4*8(%rdi), %rdi
19799 jae .Lcopy_forward_loop
19800 @@ -110,12 +112,12 @@ ENTRY(memcpy)
19801 subq $0x20, %rdx
19802 movq -1*8(%rsi), %r8
19803 movq -2*8(%rsi), %r9
19804 - movq -3*8(%rsi), %r10
19805 + movq -3*8(%rsi), %rcx
19806 movq -4*8(%rsi), %r11
19807 leaq -4*8(%rsi), %rsi
19808 movq %r8, -1*8(%rdi)
19809 movq %r9, -2*8(%rdi)
19810 - movq %r10, -3*8(%rdi)
19811 + movq %rcx, -3*8(%rdi)
19812 movq %r11, -4*8(%rdi)
19813 leaq -4*8(%rdi), %rdi
19814 jae .Lcopy_backward_loop
19815 @@ -135,12 +137,13 @@ ENTRY(memcpy)
19816 */
19817 movq 0*8(%rsi), %r8
19818 movq 1*8(%rsi), %r9
19819 - movq -2*8(%rsi, %rdx), %r10
19820 + movq -2*8(%rsi, %rdx), %rcx
19821 movq -1*8(%rsi, %rdx), %r11
19822 movq %r8, 0*8(%rdi)
19823 movq %r9, 1*8(%rdi)
19824 - movq %r10, -2*8(%rdi, %rdx)
19825 + movq %rcx, -2*8(%rdi, %rdx)
19826 movq %r11, -1*8(%rdi, %rdx)
19827 + pax_force_retaddr
19828 retq
19829 .p2align 4
19830 .Lless_16bytes:
19831 @@ -153,6 +156,7 @@ ENTRY(memcpy)
19832 movq -1*8(%rsi, %rdx), %r9
19833 movq %r8, 0*8(%rdi)
19834 movq %r9, -1*8(%rdi, %rdx)
19835 + pax_force_retaddr
19836 retq
19837 .p2align 4
19838 .Lless_8bytes:
19839 @@ -166,6 +170,7 @@ ENTRY(memcpy)
19840 movl -4(%rsi, %rdx), %r8d
19841 movl %ecx, (%rdi)
19842 movl %r8d, -4(%rdi, %rdx)
19843 + pax_force_retaddr
19844 retq
19845 .p2align 4
19846 .Lless_3bytes:
19847 @@ -183,6 +188,7 @@ ENTRY(memcpy)
19848 jnz .Lloop_1
19849
19850 .Lend:
19851 + pax_force_retaddr
19852 retq
19853 CFI_ENDPROC
19854 ENDPROC(memcpy)
19855 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19856 index ee16461..c39c199 100644
19857 --- a/arch/x86/lib/memmove_64.S
19858 +++ b/arch/x86/lib/memmove_64.S
19859 @@ -61,13 +61,13 @@ ENTRY(memmove)
19860 5:
19861 sub $0x20, %rdx
19862 movq 0*8(%rsi), %r11
19863 - movq 1*8(%rsi), %r10
19864 + movq 1*8(%rsi), %rcx
19865 movq 2*8(%rsi), %r9
19866 movq 3*8(%rsi), %r8
19867 leaq 4*8(%rsi), %rsi
19868
19869 movq %r11, 0*8(%rdi)
19870 - movq %r10, 1*8(%rdi)
19871 + movq %rcx, 1*8(%rdi)
19872 movq %r9, 2*8(%rdi)
19873 movq %r8, 3*8(%rdi)
19874 leaq 4*8(%rdi), %rdi
19875 @@ -81,10 +81,10 @@ ENTRY(memmove)
19876 4:
19877 movq %rdx, %rcx
19878 movq -8(%rsi, %rdx), %r11
19879 - lea -8(%rdi, %rdx), %r10
19880 + lea -8(%rdi, %rdx), %r9
19881 shrq $3, %rcx
19882 rep movsq
19883 - movq %r11, (%r10)
19884 + movq %r11, (%r9)
19885 jmp 13f
19886 .Lmemmove_end_forward:
19887
19888 @@ -95,14 +95,14 @@ ENTRY(memmove)
19889 7:
19890 movq %rdx, %rcx
19891 movq (%rsi), %r11
19892 - movq %rdi, %r10
19893 + movq %rdi, %r9
19894 leaq -8(%rsi, %rdx), %rsi
19895 leaq -8(%rdi, %rdx), %rdi
19896 shrq $3, %rcx
19897 std
19898 rep movsq
19899 cld
19900 - movq %r11, (%r10)
19901 + movq %r11, (%r9)
19902 jmp 13f
19903
19904 /*
19905 @@ -127,13 +127,13 @@ ENTRY(memmove)
19906 8:
19907 subq $0x20, %rdx
19908 movq -1*8(%rsi), %r11
19909 - movq -2*8(%rsi), %r10
19910 + movq -2*8(%rsi), %rcx
19911 movq -3*8(%rsi), %r9
19912 movq -4*8(%rsi), %r8
19913 leaq -4*8(%rsi), %rsi
19914
19915 movq %r11, -1*8(%rdi)
19916 - movq %r10, -2*8(%rdi)
19917 + movq %rcx, -2*8(%rdi)
19918 movq %r9, -3*8(%rdi)
19919 movq %r8, -4*8(%rdi)
19920 leaq -4*8(%rdi), %rdi
19921 @@ -151,11 +151,11 @@ ENTRY(memmove)
19922 * Move data from 16 bytes to 31 bytes.
19923 */
19924 movq 0*8(%rsi), %r11
19925 - movq 1*8(%rsi), %r10
19926 + movq 1*8(%rsi), %rcx
19927 movq -2*8(%rsi, %rdx), %r9
19928 movq -1*8(%rsi, %rdx), %r8
19929 movq %r11, 0*8(%rdi)
19930 - movq %r10, 1*8(%rdi)
19931 + movq %rcx, 1*8(%rdi)
19932 movq %r9, -2*8(%rdi, %rdx)
19933 movq %r8, -1*8(%rdi, %rdx)
19934 jmp 13f
19935 @@ -167,9 +167,9 @@ ENTRY(memmove)
19936 * Move data from 8 bytes to 15 bytes.
19937 */
19938 movq 0*8(%rsi), %r11
19939 - movq -1*8(%rsi, %rdx), %r10
19940 + movq -1*8(%rsi, %rdx), %r9
19941 movq %r11, 0*8(%rdi)
19942 - movq %r10, -1*8(%rdi, %rdx)
19943 + movq %r9, -1*8(%rdi, %rdx)
19944 jmp 13f
19945 10:
19946 cmpq $4, %rdx
19947 @@ -178,9 +178,9 @@ ENTRY(memmove)
19948 * Move data from 4 bytes to 7 bytes.
19949 */
19950 movl (%rsi), %r11d
19951 - movl -4(%rsi, %rdx), %r10d
19952 + movl -4(%rsi, %rdx), %r9d
19953 movl %r11d, (%rdi)
19954 - movl %r10d, -4(%rdi, %rdx)
19955 + movl %r9d, -4(%rdi, %rdx)
19956 jmp 13f
19957 11:
19958 cmp $2, %rdx
19959 @@ -189,9 +189,9 @@ ENTRY(memmove)
19960 * Move data from 2 bytes to 3 bytes.
19961 */
19962 movw (%rsi), %r11w
19963 - movw -2(%rsi, %rdx), %r10w
19964 + movw -2(%rsi, %rdx), %r9w
19965 movw %r11w, (%rdi)
19966 - movw %r10w, -2(%rdi, %rdx)
19967 + movw %r9w, -2(%rdi, %rdx)
19968 jmp 13f
19969 12:
19970 cmp $1, %rdx
19971 @@ -202,6 +202,7 @@ ENTRY(memmove)
19972 movb (%rsi), %r11b
19973 movb %r11b, (%rdi)
19974 13:
19975 + pax_force_retaddr
19976 retq
19977 CFI_ENDPROC
19978
19979 @@ -210,6 +211,7 @@ ENTRY(memmove)
19980 /* Forward moving data. */
19981 movq %rdx, %rcx
19982 rep movsb
19983 + pax_force_retaddr
19984 retq
19985 .Lmemmove_end_forward_efs:
19986 .previous
19987 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
19988 index 79bd454..dff325a 100644
19989 --- a/arch/x86/lib/memset_64.S
19990 +++ b/arch/x86/lib/memset_64.S
19991 @@ -31,6 +31,7 @@
19992 movl %r8d,%ecx
19993 rep stosb
19994 movq %r9,%rax
19995 + pax_force_retaddr
19996 ret
19997 .Lmemset_e:
19998 .previous
19999 @@ -53,6 +54,7 @@
20000 movl %edx,%ecx
20001 rep stosb
20002 movq %r9,%rax
20003 + pax_force_retaddr
20004 ret
20005 .Lmemset_e_e:
20006 .previous
20007 @@ -60,13 +62,13 @@
20008 ENTRY(memset)
20009 ENTRY(__memset)
20010 CFI_STARTPROC
20011 - movq %rdi,%r10
20012 movq %rdx,%r11
20013
20014 /* expand byte value */
20015 movzbl %sil,%ecx
20016 movabs $0x0101010101010101,%rax
20017 mul %rcx /* with rax, clobbers rdx */
20018 + movq %rdi,%rdx
20019
20020 /* align dst */
20021 movl %edi,%r9d
20022 @@ -120,7 +122,8 @@ ENTRY(__memset)
20023 jnz .Lloop_1
20024
20025 .Lende:
20026 - movq %r10,%rax
20027 + movq %rdx,%rax
20028 + pax_force_retaddr
20029 ret
20030
20031 CFI_RESTORE_STATE
20032 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
20033 index c9f2d9b..e7fd2c0 100644
20034 --- a/arch/x86/lib/mmx_32.c
20035 +++ b/arch/x86/lib/mmx_32.c
20036 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20037 {
20038 void *p;
20039 int i;
20040 + unsigned long cr0;
20041
20042 if (unlikely(in_interrupt()))
20043 return __memcpy(to, from, len);
20044 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
20045 kernel_fpu_begin();
20046
20047 __asm__ __volatile__ (
20048 - "1: prefetch (%0)\n" /* This set is 28 bytes */
20049 - " prefetch 64(%0)\n"
20050 - " prefetch 128(%0)\n"
20051 - " prefetch 192(%0)\n"
20052 - " prefetch 256(%0)\n"
20053 + "1: prefetch (%1)\n" /* This set is 28 bytes */
20054 + " prefetch 64(%1)\n"
20055 + " prefetch 128(%1)\n"
20056 + " prefetch 192(%1)\n"
20057 + " prefetch 256(%1)\n"
20058 "2: \n"
20059 ".section .fixup, \"ax\"\n"
20060 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20061 + "3: \n"
20062 +
20063 +#ifdef CONFIG_PAX_KERNEXEC
20064 + " movl %%cr0, %0\n"
20065 + " movl %0, %%eax\n"
20066 + " andl $0xFFFEFFFF, %%eax\n"
20067 + " movl %%eax, %%cr0\n"
20068 +#endif
20069 +
20070 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20071 +
20072 +#ifdef CONFIG_PAX_KERNEXEC
20073 + " movl %0, %%cr0\n"
20074 +#endif
20075 +
20076 " jmp 2b\n"
20077 ".previous\n"
20078 _ASM_EXTABLE(1b, 3b)
20079 - : : "r" (from));
20080 + : "=&r" (cr0) : "r" (from) : "ax");
20081
20082 for ( ; i > 5; i--) {
20083 __asm__ __volatile__ (
20084 - "1: prefetch 320(%0)\n"
20085 - "2: movq (%0), %%mm0\n"
20086 - " movq 8(%0), %%mm1\n"
20087 - " movq 16(%0), %%mm2\n"
20088 - " movq 24(%0), %%mm3\n"
20089 - " movq %%mm0, (%1)\n"
20090 - " movq %%mm1, 8(%1)\n"
20091 - " movq %%mm2, 16(%1)\n"
20092 - " movq %%mm3, 24(%1)\n"
20093 - " movq 32(%0), %%mm0\n"
20094 - " movq 40(%0), %%mm1\n"
20095 - " movq 48(%0), %%mm2\n"
20096 - " movq 56(%0), %%mm3\n"
20097 - " movq %%mm0, 32(%1)\n"
20098 - " movq %%mm1, 40(%1)\n"
20099 - " movq %%mm2, 48(%1)\n"
20100 - " movq %%mm3, 56(%1)\n"
20101 + "1: prefetch 320(%1)\n"
20102 + "2: movq (%1), %%mm0\n"
20103 + " movq 8(%1), %%mm1\n"
20104 + " movq 16(%1), %%mm2\n"
20105 + " movq 24(%1), %%mm3\n"
20106 + " movq %%mm0, (%2)\n"
20107 + " movq %%mm1, 8(%2)\n"
20108 + " movq %%mm2, 16(%2)\n"
20109 + " movq %%mm3, 24(%2)\n"
20110 + " movq 32(%1), %%mm0\n"
20111 + " movq 40(%1), %%mm1\n"
20112 + " movq 48(%1), %%mm2\n"
20113 + " movq 56(%1), %%mm3\n"
20114 + " movq %%mm0, 32(%2)\n"
20115 + " movq %%mm1, 40(%2)\n"
20116 + " movq %%mm2, 48(%2)\n"
20117 + " movq %%mm3, 56(%2)\n"
20118 ".section .fixup, \"ax\"\n"
20119 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20120 + "3:\n"
20121 +
20122 +#ifdef CONFIG_PAX_KERNEXEC
20123 + " movl %%cr0, %0\n"
20124 + " movl %0, %%eax\n"
20125 + " andl $0xFFFEFFFF, %%eax\n"
20126 + " movl %%eax, %%cr0\n"
20127 +#endif
20128 +
20129 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20130 +
20131 +#ifdef CONFIG_PAX_KERNEXEC
20132 + " movl %0, %%cr0\n"
20133 +#endif
20134 +
20135 " jmp 2b\n"
20136 ".previous\n"
20137 _ASM_EXTABLE(1b, 3b)
20138 - : : "r" (from), "r" (to) : "memory");
20139 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20140
20141 from += 64;
20142 to += 64;
20143 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20144 static void fast_copy_page(void *to, void *from)
20145 {
20146 int i;
20147 + unsigned long cr0;
20148
20149 kernel_fpu_begin();
20150
20151 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20152 * but that is for later. -AV
20153 */
20154 __asm__ __volatile__(
20155 - "1: prefetch (%0)\n"
20156 - " prefetch 64(%0)\n"
20157 - " prefetch 128(%0)\n"
20158 - " prefetch 192(%0)\n"
20159 - " prefetch 256(%0)\n"
20160 + "1: prefetch (%1)\n"
20161 + " prefetch 64(%1)\n"
20162 + " prefetch 128(%1)\n"
20163 + " prefetch 192(%1)\n"
20164 + " prefetch 256(%1)\n"
20165 "2: \n"
20166 ".section .fixup, \"ax\"\n"
20167 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20168 + "3: \n"
20169 +
20170 +#ifdef CONFIG_PAX_KERNEXEC
20171 + " movl %%cr0, %0\n"
20172 + " movl %0, %%eax\n"
20173 + " andl $0xFFFEFFFF, %%eax\n"
20174 + " movl %%eax, %%cr0\n"
20175 +#endif
20176 +
20177 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20178 +
20179 +#ifdef CONFIG_PAX_KERNEXEC
20180 + " movl %0, %%cr0\n"
20181 +#endif
20182 +
20183 " jmp 2b\n"
20184 ".previous\n"
20185 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20186 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20187
20188 for (i = 0; i < (4096-320)/64; i++) {
20189 __asm__ __volatile__ (
20190 - "1: prefetch 320(%0)\n"
20191 - "2: movq (%0), %%mm0\n"
20192 - " movntq %%mm0, (%1)\n"
20193 - " movq 8(%0), %%mm1\n"
20194 - " movntq %%mm1, 8(%1)\n"
20195 - " movq 16(%0), %%mm2\n"
20196 - " movntq %%mm2, 16(%1)\n"
20197 - " movq 24(%0), %%mm3\n"
20198 - " movntq %%mm3, 24(%1)\n"
20199 - " movq 32(%0), %%mm4\n"
20200 - " movntq %%mm4, 32(%1)\n"
20201 - " movq 40(%0), %%mm5\n"
20202 - " movntq %%mm5, 40(%1)\n"
20203 - " movq 48(%0), %%mm6\n"
20204 - " movntq %%mm6, 48(%1)\n"
20205 - " movq 56(%0), %%mm7\n"
20206 - " movntq %%mm7, 56(%1)\n"
20207 + "1: prefetch 320(%1)\n"
20208 + "2: movq (%1), %%mm0\n"
20209 + " movntq %%mm0, (%2)\n"
20210 + " movq 8(%1), %%mm1\n"
20211 + " movntq %%mm1, 8(%2)\n"
20212 + " movq 16(%1), %%mm2\n"
20213 + " movntq %%mm2, 16(%2)\n"
20214 + " movq 24(%1), %%mm3\n"
20215 + " movntq %%mm3, 24(%2)\n"
20216 + " movq 32(%1), %%mm4\n"
20217 + " movntq %%mm4, 32(%2)\n"
20218 + " movq 40(%1), %%mm5\n"
20219 + " movntq %%mm5, 40(%2)\n"
20220 + " movq 48(%1), %%mm6\n"
20221 + " movntq %%mm6, 48(%2)\n"
20222 + " movq 56(%1), %%mm7\n"
20223 + " movntq %%mm7, 56(%2)\n"
20224 ".section .fixup, \"ax\"\n"
20225 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20226 + "3:\n"
20227 +
20228 +#ifdef CONFIG_PAX_KERNEXEC
20229 + " movl %%cr0, %0\n"
20230 + " movl %0, %%eax\n"
20231 + " andl $0xFFFEFFFF, %%eax\n"
20232 + " movl %%eax, %%cr0\n"
20233 +#endif
20234 +
20235 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20236 +
20237 +#ifdef CONFIG_PAX_KERNEXEC
20238 + " movl %0, %%cr0\n"
20239 +#endif
20240 +
20241 " jmp 2b\n"
20242 ".previous\n"
20243 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20244 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20245
20246 from += 64;
20247 to += 64;
20248 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20249 static void fast_copy_page(void *to, void *from)
20250 {
20251 int i;
20252 + unsigned long cr0;
20253
20254 kernel_fpu_begin();
20255
20256 __asm__ __volatile__ (
20257 - "1: prefetch (%0)\n"
20258 - " prefetch 64(%0)\n"
20259 - " prefetch 128(%0)\n"
20260 - " prefetch 192(%0)\n"
20261 - " prefetch 256(%0)\n"
20262 + "1: prefetch (%1)\n"
20263 + " prefetch 64(%1)\n"
20264 + " prefetch 128(%1)\n"
20265 + " prefetch 192(%1)\n"
20266 + " prefetch 256(%1)\n"
20267 "2: \n"
20268 ".section .fixup, \"ax\"\n"
20269 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20270 + "3: \n"
20271 +
20272 +#ifdef CONFIG_PAX_KERNEXEC
20273 + " movl %%cr0, %0\n"
20274 + " movl %0, %%eax\n"
20275 + " andl $0xFFFEFFFF, %%eax\n"
20276 + " movl %%eax, %%cr0\n"
20277 +#endif
20278 +
20279 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20280 +
20281 +#ifdef CONFIG_PAX_KERNEXEC
20282 + " movl %0, %%cr0\n"
20283 +#endif
20284 +
20285 " jmp 2b\n"
20286 ".previous\n"
20287 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20288 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20289
20290 for (i = 0; i < 4096/64; i++) {
20291 __asm__ __volatile__ (
20292 - "1: prefetch 320(%0)\n"
20293 - "2: movq (%0), %%mm0\n"
20294 - " movq 8(%0), %%mm1\n"
20295 - " movq 16(%0), %%mm2\n"
20296 - " movq 24(%0), %%mm3\n"
20297 - " movq %%mm0, (%1)\n"
20298 - " movq %%mm1, 8(%1)\n"
20299 - " movq %%mm2, 16(%1)\n"
20300 - " movq %%mm3, 24(%1)\n"
20301 - " movq 32(%0), %%mm0\n"
20302 - " movq 40(%0), %%mm1\n"
20303 - " movq 48(%0), %%mm2\n"
20304 - " movq 56(%0), %%mm3\n"
20305 - " movq %%mm0, 32(%1)\n"
20306 - " movq %%mm1, 40(%1)\n"
20307 - " movq %%mm2, 48(%1)\n"
20308 - " movq %%mm3, 56(%1)\n"
20309 + "1: prefetch 320(%1)\n"
20310 + "2: movq (%1), %%mm0\n"
20311 + " movq 8(%1), %%mm1\n"
20312 + " movq 16(%1), %%mm2\n"
20313 + " movq 24(%1), %%mm3\n"
20314 + " movq %%mm0, (%2)\n"
20315 + " movq %%mm1, 8(%2)\n"
20316 + " movq %%mm2, 16(%2)\n"
20317 + " movq %%mm3, 24(%2)\n"
20318 + " movq 32(%1), %%mm0\n"
20319 + " movq 40(%1), %%mm1\n"
20320 + " movq 48(%1), %%mm2\n"
20321 + " movq 56(%1), %%mm3\n"
20322 + " movq %%mm0, 32(%2)\n"
20323 + " movq %%mm1, 40(%2)\n"
20324 + " movq %%mm2, 48(%2)\n"
20325 + " movq %%mm3, 56(%2)\n"
20326 ".section .fixup, \"ax\"\n"
20327 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20328 + "3:\n"
20329 +
20330 +#ifdef CONFIG_PAX_KERNEXEC
20331 + " movl %%cr0, %0\n"
20332 + " movl %0, %%eax\n"
20333 + " andl $0xFFFEFFFF, %%eax\n"
20334 + " movl %%eax, %%cr0\n"
20335 +#endif
20336 +
20337 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20338 +
20339 +#ifdef CONFIG_PAX_KERNEXEC
20340 + " movl %0, %%cr0\n"
20341 +#endif
20342 +
20343 " jmp 2b\n"
20344 ".previous\n"
20345 _ASM_EXTABLE(1b, 3b)
20346 - : : "r" (from), "r" (to) : "memory");
20347 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20348
20349 from += 64;
20350 to += 64;
20351 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20352 index 69fa106..adda88b 100644
20353 --- a/arch/x86/lib/msr-reg.S
20354 +++ b/arch/x86/lib/msr-reg.S
20355 @@ -3,6 +3,7 @@
20356 #include <asm/dwarf2.h>
20357 #include <asm/asm.h>
20358 #include <asm/msr.h>
20359 +#include <asm/alternative-asm.h>
20360
20361 #ifdef CONFIG_X86_64
20362 /*
20363 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20364 CFI_STARTPROC
20365 pushq_cfi %rbx
20366 pushq_cfi %rbp
20367 - movq %rdi, %r10 /* Save pointer */
20368 + movq %rdi, %r9 /* Save pointer */
20369 xorl %r11d, %r11d /* Return value */
20370 movl (%rdi), %eax
20371 movl 4(%rdi), %ecx
20372 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20373 movl 28(%rdi), %edi
20374 CFI_REMEMBER_STATE
20375 1: \op
20376 -2: movl %eax, (%r10)
20377 +2: movl %eax, (%r9)
20378 movl %r11d, %eax /* Return value */
20379 - movl %ecx, 4(%r10)
20380 - movl %edx, 8(%r10)
20381 - movl %ebx, 12(%r10)
20382 - movl %ebp, 20(%r10)
20383 - movl %esi, 24(%r10)
20384 - movl %edi, 28(%r10)
20385 + movl %ecx, 4(%r9)
20386 + movl %edx, 8(%r9)
20387 + movl %ebx, 12(%r9)
20388 + movl %ebp, 20(%r9)
20389 + movl %esi, 24(%r9)
20390 + movl %edi, 28(%r9)
20391 popq_cfi %rbp
20392 popq_cfi %rbx
20393 + pax_force_retaddr
20394 ret
20395 3:
20396 CFI_RESTORE_STATE
20397 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20398 index 36b0d15..d381858 100644
20399 --- a/arch/x86/lib/putuser.S
20400 +++ b/arch/x86/lib/putuser.S
20401 @@ -15,7 +15,9 @@
20402 #include <asm/thread_info.h>
20403 #include <asm/errno.h>
20404 #include <asm/asm.h>
20405 -
20406 +#include <asm/segment.h>
20407 +#include <asm/pgtable.h>
20408 +#include <asm/alternative-asm.h>
20409
20410 /*
20411 * __put_user_X
20412 @@ -29,52 +31,119 @@
20413 * as they get called from within inline assembly.
20414 */
20415
20416 -#define ENTER CFI_STARTPROC ; \
20417 - GET_THREAD_INFO(%_ASM_BX)
20418 -#define EXIT ret ; \
20419 +#define ENTER CFI_STARTPROC
20420 +#define EXIT pax_force_retaddr; ret ; \
20421 CFI_ENDPROC
20422
20423 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20424 +#define _DEST %_ASM_CX,%_ASM_BX
20425 +#else
20426 +#define _DEST %_ASM_CX
20427 +#endif
20428 +
20429 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20430 +#define __copyuser_seg gs;
20431 +#else
20432 +#define __copyuser_seg
20433 +#endif
20434 +
20435 .text
20436 ENTRY(__put_user_1)
20437 ENTER
20438 +
20439 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20440 + GET_THREAD_INFO(%_ASM_BX)
20441 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20442 jae bad_put_user
20443 -1: movb %al,(%_ASM_CX)
20444 +
20445 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20446 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20447 + cmp %_ASM_BX,%_ASM_CX
20448 + jb 1234f
20449 + xor %ebx,%ebx
20450 +1234:
20451 +#endif
20452 +
20453 +#endif
20454 +
20455 +1: __copyuser_seg movb %al,(_DEST)
20456 xor %eax,%eax
20457 EXIT
20458 ENDPROC(__put_user_1)
20459
20460 ENTRY(__put_user_2)
20461 ENTER
20462 +
20463 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20464 + GET_THREAD_INFO(%_ASM_BX)
20465 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20466 sub $1,%_ASM_BX
20467 cmp %_ASM_BX,%_ASM_CX
20468 jae bad_put_user
20469 -2: movw %ax,(%_ASM_CX)
20470 +
20471 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20472 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20473 + cmp %_ASM_BX,%_ASM_CX
20474 + jb 1234f
20475 + xor %ebx,%ebx
20476 +1234:
20477 +#endif
20478 +
20479 +#endif
20480 +
20481 +2: __copyuser_seg movw %ax,(_DEST)
20482 xor %eax,%eax
20483 EXIT
20484 ENDPROC(__put_user_2)
20485
20486 ENTRY(__put_user_4)
20487 ENTER
20488 +
20489 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20490 + GET_THREAD_INFO(%_ASM_BX)
20491 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20492 sub $3,%_ASM_BX
20493 cmp %_ASM_BX,%_ASM_CX
20494 jae bad_put_user
20495 -3: movl %eax,(%_ASM_CX)
20496 +
20497 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20498 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20499 + cmp %_ASM_BX,%_ASM_CX
20500 + jb 1234f
20501 + xor %ebx,%ebx
20502 +1234:
20503 +#endif
20504 +
20505 +#endif
20506 +
20507 +3: __copyuser_seg movl %eax,(_DEST)
20508 xor %eax,%eax
20509 EXIT
20510 ENDPROC(__put_user_4)
20511
20512 ENTRY(__put_user_8)
20513 ENTER
20514 +
20515 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20516 + GET_THREAD_INFO(%_ASM_BX)
20517 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20518 sub $7,%_ASM_BX
20519 cmp %_ASM_BX,%_ASM_CX
20520 jae bad_put_user
20521 -4: mov %_ASM_AX,(%_ASM_CX)
20522 +
20523 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20524 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20525 + cmp %_ASM_BX,%_ASM_CX
20526 + jb 1234f
20527 + xor %ebx,%ebx
20528 +1234:
20529 +#endif
20530 +
20531 +#endif
20532 +
20533 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20534 #ifdef CONFIG_X86_32
20535 -5: movl %edx,4(%_ASM_CX)
20536 +5: __copyuser_seg movl %edx,4(_DEST)
20537 #endif
20538 xor %eax,%eax
20539 EXIT
20540 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20541 index 1cad221..de671ee 100644
20542 --- a/arch/x86/lib/rwlock.S
20543 +++ b/arch/x86/lib/rwlock.S
20544 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
20545 FRAME
20546 0: LOCK_PREFIX
20547 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20548 +
20549 +#ifdef CONFIG_PAX_REFCOUNT
20550 + jno 1234f
20551 + LOCK_PREFIX
20552 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20553 + int $4
20554 +1234:
20555 + _ASM_EXTABLE(1234b, 1234b)
20556 +#endif
20557 +
20558 1: rep; nop
20559 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
20560 jne 1b
20561 LOCK_PREFIX
20562 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20563 +
20564 +#ifdef CONFIG_PAX_REFCOUNT
20565 + jno 1234f
20566 + LOCK_PREFIX
20567 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
20568 + int $4
20569 +1234:
20570 + _ASM_EXTABLE(1234b, 1234b)
20571 +#endif
20572 +
20573 jnz 0b
20574 ENDFRAME
20575 + pax_force_retaddr
20576 ret
20577 CFI_ENDPROC
20578 END(__write_lock_failed)
20579 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
20580 FRAME
20581 0: LOCK_PREFIX
20582 READ_LOCK_SIZE(inc) (%__lock_ptr)
20583 +
20584 +#ifdef CONFIG_PAX_REFCOUNT
20585 + jno 1234f
20586 + LOCK_PREFIX
20587 + READ_LOCK_SIZE(dec) (%__lock_ptr)
20588 + int $4
20589 +1234:
20590 + _ASM_EXTABLE(1234b, 1234b)
20591 +#endif
20592 +
20593 1: rep; nop
20594 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
20595 js 1b
20596 LOCK_PREFIX
20597 READ_LOCK_SIZE(dec) (%__lock_ptr)
20598 +
20599 +#ifdef CONFIG_PAX_REFCOUNT
20600 + jno 1234f
20601 + LOCK_PREFIX
20602 + READ_LOCK_SIZE(inc) (%__lock_ptr)
20603 + int $4
20604 +1234:
20605 + _ASM_EXTABLE(1234b, 1234b)
20606 +#endif
20607 +
20608 js 0b
20609 ENDFRAME
20610 + pax_force_retaddr
20611 ret
20612 CFI_ENDPROC
20613 END(__read_lock_failed)
20614 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20615 index 5dff5f0..cadebf4 100644
20616 --- a/arch/x86/lib/rwsem.S
20617 +++ b/arch/x86/lib/rwsem.S
20618 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20619 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20620 CFI_RESTORE __ASM_REG(dx)
20621 restore_common_regs
20622 + pax_force_retaddr
20623 ret
20624 CFI_ENDPROC
20625 ENDPROC(call_rwsem_down_read_failed)
20626 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20627 movq %rax,%rdi
20628 call rwsem_down_write_failed
20629 restore_common_regs
20630 + pax_force_retaddr
20631 ret
20632 CFI_ENDPROC
20633 ENDPROC(call_rwsem_down_write_failed)
20634 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20635 movq %rax,%rdi
20636 call rwsem_wake
20637 restore_common_regs
20638 -1: ret
20639 +1: pax_force_retaddr
20640 + ret
20641 CFI_ENDPROC
20642 ENDPROC(call_rwsem_wake)
20643
20644 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20645 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20646 CFI_RESTORE __ASM_REG(dx)
20647 restore_common_regs
20648 + pax_force_retaddr
20649 ret
20650 CFI_ENDPROC
20651 ENDPROC(call_rwsem_downgrade_wake)
20652 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20653 index a63efd6..ccecad8 100644
20654 --- a/arch/x86/lib/thunk_64.S
20655 +++ b/arch/x86/lib/thunk_64.S
20656 @@ -8,6 +8,7 @@
20657 #include <linux/linkage.h>
20658 #include <asm/dwarf2.h>
20659 #include <asm/calling.h>
20660 +#include <asm/alternative-asm.h>
20661
20662 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20663 .macro THUNK name, func, put_ret_addr_in_rdi=0
20664 @@ -41,5 +42,6 @@
20665 SAVE_ARGS
20666 restore:
20667 RESTORE_ARGS
20668 + pax_force_retaddr
20669 ret
20670 CFI_ENDPROC
20671 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20672 index e218d5d..35679b4 100644
20673 --- a/arch/x86/lib/usercopy_32.c
20674 +++ b/arch/x86/lib/usercopy_32.c
20675 @@ -43,7 +43,7 @@ do { \
20676 __asm__ __volatile__( \
20677 " testl %1,%1\n" \
20678 " jz 2f\n" \
20679 - "0: lodsb\n" \
20680 + "0: "__copyuser_seg"lodsb\n" \
20681 " stosb\n" \
20682 " testb %%al,%%al\n" \
20683 " jz 1f\n" \
20684 @@ -128,10 +128,12 @@ do { \
20685 int __d0; \
20686 might_fault(); \
20687 __asm__ __volatile__( \
20688 + __COPYUSER_SET_ES \
20689 "0: rep; stosl\n" \
20690 " movl %2,%0\n" \
20691 "1: rep; stosb\n" \
20692 "2:\n" \
20693 + __COPYUSER_RESTORE_ES \
20694 ".section .fixup,\"ax\"\n" \
20695 "3: lea 0(%2,%0,4),%0\n" \
20696 " jmp 2b\n" \
20697 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20698 might_fault();
20699
20700 __asm__ __volatile__(
20701 + __COPYUSER_SET_ES
20702 " testl %0, %0\n"
20703 " jz 3f\n"
20704 " andl %0,%%ecx\n"
20705 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20706 " subl %%ecx,%0\n"
20707 " addl %0,%%eax\n"
20708 "1:\n"
20709 + __COPYUSER_RESTORE_ES
20710 ".section .fixup,\"ax\"\n"
20711 "2: xorl %%eax,%%eax\n"
20712 " jmp 1b\n"
20713 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20714
20715 #ifdef CONFIG_X86_INTEL_USERCOPY
20716 static unsigned long
20717 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20718 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20719 {
20720 int d0, d1;
20721 __asm__ __volatile__(
20722 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20723 " .align 2,0x90\n"
20724 "3: movl 0(%4), %%eax\n"
20725 "4: movl 4(%4), %%edx\n"
20726 - "5: movl %%eax, 0(%3)\n"
20727 - "6: movl %%edx, 4(%3)\n"
20728 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20729 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20730 "7: movl 8(%4), %%eax\n"
20731 "8: movl 12(%4),%%edx\n"
20732 - "9: movl %%eax, 8(%3)\n"
20733 - "10: movl %%edx, 12(%3)\n"
20734 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20735 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20736 "11: movl 16(%4), %%eax\n"
20737 "12: movl 20(%4), %%edx\n"
20738 - "13: movl %%eax, 16(%3)\n"
20739 - "14: movl %%edx, 20(%3)\n"
20740 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20741 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20742 "15: movl 24(%4), %%eax\n"
20743 "16: movl 28(%4), %%edx\n"
20744 - "17: movl %%eax, 24(%3)\n"
20745 - "18: movl %%edx, 28(%3)\n"
20746 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20747 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20748 "19: movl 32(%4), %%eax\n"
20749 "20: movl 36(%4), %%edx\n"
20750 - "21: movl %%eax, 32(%3)\n"
20751 - "22: movl %%edx, 36(%3)\n"
20752 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20753 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20754 "23: movl 40(%4), %%eax\n"
20755 "24: movl 44(%4), %%edx\n"
20756 - "25: movl %%eax, 40(%3)\n"
20757 - "26: movl %%edx, 44(%3)\n"
20758 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20759 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20760 "27: movl 48(%4), %%eax\n"
20761 "28: movl 52(%4), %%edx\n"
20762 - "29: movl %%eax, 48(%3)\n"
20763 - "30: movl %%edx, 52(%3)\n"
20764 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20765 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20766 "31: movl 56(%4), %%eax\n"
20767 "32: movl 60(%4), %%edx\n"
20768 - "33: movl %%eax, 56(%3)\n"
20769 - "34: movl %%edx, 60(%3)\n"
20770 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20771 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20772 " addl $-64, %0\n"
20773 " addl $64, %4\n"
20774 " addl $64, %3\n"
20775 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20776 " shrl $2, %0\n"
20777 " andl $3, %%eax\n"
20778 " cld\n"
20779 + __COPYUSER_SET_ES
20780 "99: rep; movsl\n"
20781 "36: movl %%eax, %0\n"
20782 "37: rep; movsb\n"
20783 "100:\n"
20784 + __COPYUSER_RESTORE_ES
20785 + ".section .fixup,\"ax\"\n"
20786 + "101: lea 0(%%eax,%0,4),%0\n"
20787 + " jmp 100b\n"
20788 + ".previous\n"
20789 + ".section __ex_table,\"a\"\n"
20790 + " .align 4\n"
20791 + " .long 1b,100b\n"
20792 + " .long 2b,100b\n"
20793 + " .long 3b,100b\n"
20794 + " .long 4b,100b\n"
20795 + " .long 5b,100b\n"
20796 + " .long 6b,100b\n"
20797 + " .long 7b,100b\n"
20798 + " .long 8b,100b\n"
20799 + " .long 9b,100b\n"
20800 + " .long 10b,100b\n"
20801 + " .long 11b,100b\n"
20802 + " .long 12b,100b\n"
20803 + " .long 13b,100b\n"
20804 + " .long 14b,100b\n"
20805 + " .long 15b,100b\n"
20806 + " .long 16b,100b\n"
20807 + " .long 17b,100b\n"
20808 + " .long 18b,100b\n"
20809 + " .long 19b,100b\n"
20810 + " .long 20b,100b\n"
20811 + " .long 21b,100b\n"
20812 + " .long 22b,100b\n"
20813 + " .long 23b,100b\n"
20814 + " .long 24b,100b\n"
20815 + " .long 25b,100b\n"
20816 + " .long 26b,100b\n"
20817 + " .long 27b,100b\n"
20818 + " .long 28b,100b\n"
20819 + " .long 29b,100b\n"
20820 + " .long 30b,100b\n"
20821 + " .long 31b,100b\n"
20822 + " .long 32b,100b\n"
20823 + " .long 33b,100b\n"
20824 + " .long 34b,100b\n"
20825 + " .long 35b,100b\n"
20826 + " .long 36b,100b\n"
20827 + " .long 37b,100b\n"
20828 + " .long 99b,101b\n"
20829 + ".previous"
20830 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20831 + : "1"(to), "2"(from), "0"(size)
20832 + : "eax", "edx", "memory");
20833 + return size;
20834 +}
20835 +
20836 +static unsigned long
20837 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20838 +{
20839 + int d0, d1;
20840 + __asm__ __volatile__(
20841 + " .align 2,0x90\n"
20842 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20843 + " cmpl $67, %0\n"
20844 + " jbe 3f\n"
20845 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20846 + " .align 2,0x90\n"
20847 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20848 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20849 + "5: movl %%eax, 0(%3)\n"
20850 + "6: movl %%edx, 4(%3)\n"
20851 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20852 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20853 + "9: movl %%eax, 8(%3)\n"
20854 + "10: movl %%edx, 12(%3)\n"
20855 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20856 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20857 + "13: movl %%eax, 16(%3)\n"
20858 + "14: movl %%edx, 20(%3)\n"
20859 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20860 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20861 + "17: movl %%eax, 24(%3)\n"
20862 + "18: movl %%edx, 28(%3)\n"
20863 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20864 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20865 + "21: movl %%eax, 32(%3)\n"
20866 + "22: movl %%edx, 36(%3)\n"
20867 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20868 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20869 + "25: movl %%eax, 40(%3)\n"
20870 + "26: movl %%edx, 44(%3)\n"
20871 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20872 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20873 + "29: movl %%eax, 48(%3)\n"
20874 + "30: movl %%edx, 52(%3)\n"
20875 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20876 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20877 + "33: movl %%eax, 56(%3)\n"
20878 + "34: movl %%edx, 60(%3)\n"
20879 + " addl $-64, %0\n"
20880 + " addl $64, %4\n"
20881 + " addl $64, %3\n"
20882 + " cmpl $63, %0\n"
20883 + " ja 1b\n"
20884 + "35: movl %0, %%eax\n"
20885 + " shrl $2, %0\n"
20886 + " andl $3, %%eax\n"
20887 + " cld\n"
20888 + "99: rep; "__copyuser_seg" movsl\n"
20889 + "36: movl %%eax, %0\n"
20890 + "37: rep; "__copyuser_seg" movsb\n"
20891 + "100:\n"
20892 ".section .fixup,\"ax\"\n"
20893 "101: lea 0(%%eax,%0,4),%0\n"
20894 " jmp 100b\n"
20895 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20896 int d0, d1;
20897 __asm__ __volatile__(
20898 " .align 2,0x90\n"
20899 - "0: movl 32(%4), %%eax\n"
20900 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20901 " cmpl $67, %0\n"
20902 " jbe 2f\n"
20903 - "1: movl 64(%4), %%eax\n"
20904 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20905 " .align 2,0x90\n"
20906 - "2: movl 0(%4), %%eax\n"
20907 - "21: movl 4(%4), %%edx\n"
20908 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20909 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20910 " movl %%eax, 0(%3)\n"
20911 " movl %%edx, 4(%3)\n"
20912 - "3: movl 8(%4), %%eax\n"
20913 - "31: movl 12(%4),%%edx\n"
20914 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20915 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20916 " movl %%eax, 8(%3)\n"
20917 " movl %%edx, 12(%3)\n"
20918 - "4: movl 16(%4), %%eax\n"
20919 - "41: movl 20(%4), %%edx\n"
20920 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20921 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20922 " movl %%eax, 16(%3)\n"
20923 " movl %%edx, 20(%3)\n"
20924 - "10: movl 24(%4), %%eax\n"
20925 - "51: movl 28(%4), %%edx\n"
20926 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20927 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20928 " movl %%eax, 24(%3)\n"
20929 " movl %%edx, 28(%3)\n"
20930 - "11: movl 32(%4), %%eax\n"
20931 - "61: movl 36(%4), %%edx\n"
20932 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20933 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20934 " movl %%eax, 32(%3)\n"
20935 " movl %%edx, 36(%3)\n"
20936 - "12: movl 40(%4), %%eax\n"
20937 - "71: movl 44(%4), %%edx\n"
20938 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20939 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20940 " movl %%eax, 40(%3)\n"
20941 " movl %%edx, 44(%3)\n"
20942 - "13: movl 48(%4), %%eax\n"
20943 - "81: movl 52(%4), %%edx\n"
20944 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20945 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20946 " movl %%eax, 48(%3)\n"
20947 " movl %%edx, 52(%3)\n"
20948 - "14: movl 56(%4), %%eax\n"
20949 - "91: movl 60(%4), %%edx\n"
20950 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20951 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20952 " movl %%eax, 56(%3)\n"
20953 " movl %%edx, 60(%3)\n"
20954 " addl $-64, %0\n"
20955 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20956 " shrl $2, %0\n"
20957 " andl $3, %%eax\n"
20958 " cld\n"
20959 - "6: rep; movsl\n"
20960 + "6: rep; "__copyuser_seg" movsl\n"
20961 " movl %%eax,%0\n"
20962 - "7: rep; movsb\n"
20963 + "7: rep; "__copyuser_seg" movsb\n"
20964 "8:\n"
20965 ".section .fixup,\"ax\"\n"
20966 "9: lea 0(%%eax,%0,4),%0\n"
20967 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
20968
20969 __asm__ __volatile__(
20970 " .align 2,0x90\n"
20971 - "0: movl 32(%4), %%eax\n"
20972 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20973 " cmpl $67, %0\n"
20974 " jbe 2f\n"
20975 - "1: movl 64(%4), %%eax\n"
20976 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20977 " .align 2,0x90\n"
20978 - "2: movl 0(%4), %%eax\n"
20979 - "21: movl 4(%4), %%edx\n"
20980 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20981 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20982 " movnti %%eax, 0(%3)\n"
20983 " movnti %%edx, 4(%3)\n"
20984 - "3: movl 8(%4), %%eax\n"
20985 - "31: movl 12(%4),%%edx\n"
20986 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20987 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20988 " movnti %%eax, 8(%3)\n"
20989 " movnti %%edx, 12(%3)\n"
20990 - "4: movl 16(%4), %%eax\n"
20991 - "41: movl 20(%4), %%edx\n"
20992 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20993 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20994 " movnti %%eax, 16(%3)\n"
20995 " movnti %%edx, 20(%3)\n"
20996 - "10: movl 24(%4), %%eax\n"
20997 - "51: movl 28(%4), %%edx\n"
20998 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20999 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21000 " movnti %%eax, 24(%3)\n"
21001 " movnti %%edx, 28(%3)\n"
21002 - "11: movl 32(%4), %%eax\n"
21003 - "61: movl 36(%4), %%edx\n"
21004 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21005 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21006 " movnti %%eax, 32(%3)\n"
21007 " movnti %%edx, 36(%3)\n"
21008 - "12: movl 40(%4), %%eax\n"
21009 - "71: movl 44(%4), %%edx\n"
21010 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21011 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21012 " movnti %%eax, 40(%3)\n"
21013 " movnti %%edx, 44(%3)\n"
21014 - "13: movl 48(%4), %%eax\n"
21015 - "81: movl 52(%4), %%edx\n"
21016 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21017 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21018 " movnti %%eax, 48(%3)\n"
21019 " movnti %%edx, 52(%3)\n"
21020 - "14: movl 56(%4), %%eax\n"
21021 - "91: movl 60(%4), %%edx\n"
21022 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21023 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21024 " movnti %%eax, 56(%3)\n"
21025 " movnti %%edx, 60(%3)\n"
21026 " addl $-64, %0\n"
21027 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
21028 " shrl $2, %0\n"
21029 " andl $3, %%eax\n"
21030 " cld\n"
21031 - "6: rep; movsl\n"
21032 + "6: rep; "__copyuser_seg" movsl\n"
21033 " movl %%eax,%0\n"
21034 - "7: rep; movsb\n"
21035 + "7: rep; "__copyuser_seg" movsb\n"
21036 "8:\n"
21037 ".section .fixup,\"ax\"\n"
21038 "9: lea 0(%%eax,%0,4),%0\n"
21039 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
21040
21041 __asm__ __volatile__(
21042 " .align 2,0x90\n"
21043 - "0: movl 32(%4), %%eax\n"
21044 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
21045 " cmpl $67, %0\n"
21046 " jbe 2f\n"
21047 - "1: movl 64(%4), %%eax\n"
21048 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
21049 " .align 2,0x90\n"
21050 - "2: movl 0(%4), %%eax\n"
21051 - "21: movl 4(%4), %%edx\n"
21052 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
21053 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
21054 " movnti %%eax, 0(%3)\n"
21055 " movnti %%edx, 4(%3)\n"
21056 - "3: movl 8(%4), %%eax\n"
21057 - "31: movl 12(%4),%%edx\n"
21058 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
21059 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
21060 " movnti %%eax, 8(%3)\n"
21061 " movnti %%edx, 12(%3)\n"
21062 - "4: movl 16(%4), %%eax\n"
21063 - "41: movl 20(%4), %%edx\n"
21064 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
21065 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
21066 " movnti %%eax, 16(%3)\n"
21067 " movnti %%edx, 20(%3)\n"
21068 - "10: movl 24(%4), %%eax\n"
21069 - "51: movl 28(%4), %%edx\n"
21070 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
21071 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
21072 " movnti %%eax, 24(%3)\n"
21073 " movnti %%edx, 28(%3)\n"
21074 - "11: movl 32(%4), %%eax\n"
21075 - "61: movl 36(%4), %%edx\n"
21076 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
21077 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
21078 " movnti %%eax, 32(%3)\n"
21079 " movnti %%edx, 36(%3)\n"
21080 - "12: movl 40(%4), %%eax\n"
21081 - "71: movl 44(%4), %%edx\n"
21082 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
21083 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
21084 " movnti %%eax, 40(%3)\n"
21085 " movnti %%edx, 44(%3)\n"
21086 - "13: movl 48(%4), %%eax\n"
21087 - "81: movl 52(%4), %%edx\n"
21088 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
21089 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
21090 " movnti %%eax, 48(%3)\n"
21091 " movnti %%edx, 52(%3)\n"
21092 - "14: movl 56(%4), %%eax\n"
21093 - "91: movl 60(%4), %%edx\n"
21094 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
21095 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
21096 " movnti %%eax, 56(%3)\n"
21097 " movnti %%edx, 60(%3)\n"
21098 " addl $-64, %0\n"
21099 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
21100 " shrl $2, %0\n"
21101 " andl $3, %%eax\n"
21102 " cld\n"
21103 - "6: rep; movsl\n"
21104 + "6: rep; "__copyuser_seg" movsl\n"
21105 " movl %%eax,%0\n"
21106 - "7: rep; movsb\n"
21107 + "7: rep; "__copyuser_seg" movsb\n"
21108 "8:\n"
21109 ".section .fixup,\"ax\"\n"
21110 "9: lea 0(%%eax,%0,4),%0\n"
21111 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
21112 */
21113 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
21114 unsigned long size);
21115 -unsigned long __copy_user_intel(void __user *to, const void *from,
21116 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
21117 + unsigned long size);
21118 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21119 unsigned long size);
21120 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21121 const void __user *from, unsigned long size);
21122 #endif /* CONFIG_X86_INTEL_USERCOPY */
21123
21124 /* Generic arbitrary sized copy. */
21125 -#define __copy_user(to, from, size) \
21126 +#define __copy_user(to, from, size, prefix, set, restore) \
21127 do { \
21128 int __d0, __d1, __d2; \
21129 __asm__ __volatile__( \
21130 + set \
21131 " cmp $7,%0\n" \
21132 " jbe 1f\n" \
21133 " movl %1,%0\n" \
21134 " negl %0\n" \
21135 " andl $7,%0\n" \
21136 " subl %0,%3\n" \
21137 - "4: rep; movsb\n" \
21138 + "4: rep; "prefix"movsb\n" \
21139 " movl %3,%0\n" \
21140 " shrl $2,%0\n" \
21141 " andl $3,%3\n" \
21142 " .align 2,0x90\n" \
21143 - "0: rep; movsl\n" \
21144 + "0: rep; "prefix"movsl\n" \
21145 " movl %3,%0\n" \
21146 - "1: rep; movsb\n" \
21147 + "1: rep; "prefix"movsb\n" \
21148 "2:\n" \
21149 + restore \
21150 ".section .fixup,\"ax\"\n" \
21151 "5: addl %3,%0\n" \
21152 " jmp 2b\n" \
21153 @@ -682,14 +799,14 @@ do { \
21154 " negl %0\n" \
21155 " andl $7,%0\n" \
21156 " subl %0,%3\n" \
21157 - "4: rep; movsb\n" \
21158 + "4: rep; "__copyuser_seg"movsb\n" \
21159 " movl %3,%0\n" \
21160 " shrl $2,%0\n" \
21161 " andl $3,%3\n" \
21162 " .align 2,0x90\n" \
21163 - "0: rep; movsl\n" \
21164 + "0: rep; "__copyuser_seg"movsl\n" \
21165 " movl %3,%0\n" \
21166 - "1: rep; movsb\n" \
21167 + "1: rep; "__copyuser_seg"movsb\n" \
21168 "2:\n" \
21169 ".section .fixup,\"ax\"\n" \
21170 "5: addl %3,%0\n" \
21171 @@ -775,9 +892,9 @@ survive:
21172 }
21173 #endif
21174 if (movsl_is_ok(to, from, n))
21175 - __copy_user(to, from, n);
21176 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21177 else
21178 - n = __copy_user_intel(to, from, n);
21179 + n = __generic_copy_to_user_intel(to, from, n);
21180 return n;
21181 }
21182 EXPORT_SYMBOL(__copy_to_user_ll);
21183 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21184 unsigned long n)
21185 {
21186 if (movsl_is_ok(to, from, n))
21187 - __copy_user(to, from, n);
21188 + __copy_user(to, from, n, __copyuser_seg, "", "");
21189 else
21190 - n = __copy_user_intel((void __user *)to,
21191 - (const void *)from, n);
21192 + n = __generic_copy_from_user_intel(to, from, n);
21193 return n;
21194 }
21195 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21196 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21197 if (n > 64 && cpu_has_xmm2)
21198 n = __copy_user_intel_nocache(to, from, n);
21199 else
21200 - __copy_user(to, from, n);
21201 + __copy_user(to, from, n, __copyuser_seg, "", "");
21202 #else
21203 - __copy_user(to, from, n);
21204 + __copy_user(to, from, n, __copyuser_seg, "", "");
21205 #endif
21206 return n;
21207 }
21208 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21209
21210 -/**
21211 - * copy_to_user: - Copy a block of data into user space.
21212 - * @to: Destination address, in user space.
21213 - * @from: Source address, in kernel space.
21214 - * @n: Number of bytes to copy.
21215 - *
21216 - * Context: User context only. This function may sleep.
21217 - *
21218 - * Copy data from kernel space to user space.
21219 - *
21220 - * Returns number of bytes that could not be copied.
21221 - * On success, this will be zero.
21222 - */
21223 -unsigned long
21224 -copy_to_user(void __user *to, const void *from, unsigned long n)
21225 +void copy_from_user_overflow(void)
21226 {
21227 - if (access_ok(VERIFY_WRITE, to, n))
21228 - n = __copy_to_user(to, from, n);
21229 - return n;
21230 + WARN(1, "Buffer overflow detected!\n");
21231 }
21232 -EXPORT_SYMBOL(copy_to_user);
21233 +EXPORT_SYMBOL(copy_from_user_overflow);
21234
21235 -/**
21236 - * copy_from_user: - Copy a block of data from user space.
21237 - * @to: Destination address, in kernel space.
21238 - * @from: Source address, in user space.
21239 - * @n: Number of bytes to copy.
21240 - *
21241 - * Context: User context only. This function may sleep.
21242 - *
21243 - * Copy data from user space to kernel space.
21244 - *
21245 - * Returns number of bytes that could not be copied.
21246 - * On success, this will be zero.
21247 - *
21248 - * If some data could not be copied, this function will pad the copied
21249 - * data to the requested size using zero bytes.
21250 - */
21251 -unsigned long
21252 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21253 +void copy_to_user_overflow(void)
21254 {
21255 - if (access_ok(VERIFY_READ, from, n))
21256 - n = __copy_from_user(to, from, n);
21257 - else
21258 - memset(to, 0, n);
21259 - return n;
21260 + WARN(1, "Buffer overflow detected!\n");
21261 }
21262 -EXPORT_SYMBOL(_copy_from_user);
21263 +EXPORT_SYMBOL(copy_to_user_overflow);
21264
21265 -void copy_from_user_overflow(void)
21266 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21267 +void __set_fs(mm_segment_t x)
21268 {
21269 - WARN(1, "Buffer overflow detected!\n");
21270 + switch (x.seg) {
21271 + case 0:
21272 + loadsegment(gs, 0);
21273 + break;
21274 + case TASK_SIZE_MAX:
21275 + loadsegment(gs, __USER_DS);
21276 + break;
21277 + case -1UL:
21278 + loadsegment(gs, __KERNEL_DS);
21279 + break;
21280 + default:
21281 + BUG();
21282 + }
21283 + return;
21284 }
21285 -EXPORT_SYMBOL(copy_from_user_overflow);
21286 +EXPORT_SYMBOL(__set_fs);
21287 +
21288 +void set_fs(mm_segment_t x)
21289 +{
21290 + current_thread_info()->addr_limit = x;
21291 + __set_fs(x);
21292 +}
21293 +EXPORT_SYMBOL(set_fs);
21294 +#endif
21295 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21296 index b7c2849..8633ad8 100644
21297 --- a/arch/x86/lib/usercopy_64.c
21298 +++ b/arch/x86/lib/usercopy_64.c
21299 @@ -42,6 +42,12 @@ long
21300 __strncpy_from_user(char *dst, const char __user *src, long count)
21301 {
21302 long res;
21303 +
21304 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21305 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21306 + src += PAX_USER_SHADOW_BASE;
21307 +#endif
21308 +
21309 __do_strncpy_from_user(dst, src, count, res);
21310 return res;
21311 }
21312 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21313 {
21314 long __d0;
21315 might_fault();
21316 +
21317 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21318 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21319 + addr += PAX_USER_SHADOW_BASE;
21320 +#endif
21321 +
21322 /* no memory constraint because it doesn't change any memory gcc knows
21323 about */
21324 asm volatile(
21325 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
21326 }
21327 EXPORT_SYMBOL(strlen_user);
21328
21329 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21330 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
21331 {
21332 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21333 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21334 - }
21335 - return len;
21336 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21337 +
21338 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21339 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21340 + to += PAX_USER_SHADOW_BASE;
21341 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21342 + from += PAX_USER_SHADOW_BASE;
21343 +#endif
21344 +
21345 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21346 + }
21347 + return len;
21348 }
21349 EXPORT_SYMBOL(copy_in_user);
21350
21351 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21352 * it is not necessary to optimize tail handling.
21353 */
21354 unsigned long
21355 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21356 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
21357 {
21358 char c;
21359 unsigned zero_len;
21360 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21361 index d0474ad..36e9257 100644
21362 --- a/arch/x86/mm/extable.c
21363 +++ b/arch/x86/mm/extable.c
21364 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21365 const struct exception_table_entry *fixup;
21366
21367 #ifdef CONFIG_PNPBIOS
21368 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21369 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21370 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21371 extern u32 pnp_bios_is_utter_crap;
21372 pnp_bios_is_utter_crap = 1;
21373 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21374 index 0d17c8c..4f4764f 100644
21375 --- a/arch/x86/mm/fault.c
21376 +++ b/arch/x86/mm/fault.c
21377 @@ -13,11 +13,18 @@
21378 #include <linux/perf_event.h> /* perf_sw_event */
21379 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21380 #include <linux/prefetch.h> /* prefetchw */
21381 +#include <linux/unistd.h>
21382 +#include <linux/compiler.h>
21383
21384 #include <asm/traps.h> /* dotraplinkage, ... */
21385 #include <asm/pgalloc.h> /* pgd_*(), ... */
21386 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21387 #include <asm/vsyscall.h>
21388 +#include <asm/tlbflush.h>
21389 +
21390 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21391 +#include <asm/stacktrace.h>
21392 +#endif
21393
21394 /*
21395 * Page fault error code bits:
21396 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21397 int ret = 0;
21398
21399 /* kprobe_running() needs smp_processor_id() */
21400 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21401 + if (kprobes_built_in() && !user_mode(regs)) {
21402 preempt_disable();
21403 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21404 ret = 1;
21405 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21406 return !instr_lo || (instr_lo>>1) == 1;
21407 case 0x00:
21408 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21409 - if (probe_kernel_address(instr, opcode))
21410 + if (user_mode(regs)) {
21411 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21412 + return 0;
21413 + } else if (probe_kernel_address(instr, opcode))
21414 return 0;
21415
21416 *prefetch = (instr_lo == 0xF) &&
21417 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21418 while (instr < max_instr) {
21419 unsigned char opcode;
21420
21421 - if (probe_kernel_address(instr, opcode))
21422 + if (user_mode(regs)) {
21423 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21424 + break;
21425 + } else if (probe_kernel_address(instr, opcode))
21426 break;
21427
21428 instr++;
21429 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21430 force_sig_info(si_signo, &info, tsk);
21431 }
21432
21433 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21434 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21435 +#endif
21436 +
21437 +#ifdef CONFIG_PAX_EMUTRAMP
21438 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21439 +#endif
21440 +
21441 +#ifdef CONFIG_PAX_PAGEEXEC
21442 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21443 +{
21444 + pgd_t *pgd;
21445 + pud_t *pud;
21446 + pmd_t *pmd;
21447 +
21448 + pgd = pgd_offset(mm, address);
21449 + if (!pgd_present(*pgd))
21450 + return NULL;
21451 + pud = pud_offset(pgd, address);
21452 + if (!pud_present(*pud))
21453 + return NULL;
21454 + pmd = pmd_offset(pud, address);
21455 + if (!pmd_present(*pmd))
21456 + return NULL;
21457 + return pmd;
21458 +}
21459 +#endif
21460 +
21461 DEFINE_SPINLOCK(pgd_lock);
21462 LIST_HEAD(pgd_list);
21463
21464 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21465 for (address = VMALLOC_START & PMD_MASK;
21466 address >= TASK_SIZE && address < FIXADDR_TOP;
21467 address += PMD_SIZE) {
21468 +
21469 +#ifdef CONFIG_PAX_PER_CPU_PGD
21470 + unsigned long cpu;
21471 +#else
21472 struct page *page;
21473 +#endif
21474
21475 spin_lock(&pgd_lock);
21476 +
21477 +#ifdef CONFIG_PAX_PER_CPU_PGD
21478 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21479 + pgd_t *pgd = get_cpu_pgd(cpu);
21480 + pmd_t *ret;
21481 +#else
21482 list_for_each_entry(page, &pgd_list, lru) {
21483 + pgd_t *pgd = page_address(page);
21484 spinlock_t *pgt_lock;
21485 pmd_t *ret;
21486
21487 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21488 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21489
21490 spin_lock(pgt_lock);
21491 - ret = vmalloc_sync_one(page_address(page), address);
21492 +#endif
21493 +
21494 + ret = vmalloc_sync_one(pgd, address);
21495 +
21496 +#ifndef CONFIG_PAX_PER_CPU_PGD
21497 spin_unlock(pgt_lock);
21498 +#endif
21499
21500 if (!ret)
21501 break;
21502 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21503 * an interrupt in the middle of a task switch..
21504 */
21505 pgd_paddr = read_cr3();
21506 +
21507 +#ifdef CONFIG_PAX_PER_CPU_PGD
21508 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21509 +#endif
21510 +
21511 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21512 if (!pmd_k)
21513 return -1;
21514 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21515 * happen within a race in page table update. In the later
21516 * case just flush:
21517 */
21518 +
21519 +#ifdef CONFIG_PAX_PER_CPU_PGD
21520 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21521 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21522 +#else
21523 pgd = pgd_offset(current->active_mm, address);
21524 +#endif
21525 +
21526 pgd_ref = pgd_offset_k(address);
21527 if (pgd_none(*pgd_ref))
21528 return -1;
21529 @@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21530 static int is_errata100(struct pt_regs *regs, unsigned long address)
21531 {
21532 #ifdef CONFIG_X86_64
21533 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21534 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21535 return 1;
21536 #endif
21537 return 0;
21538 @@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21539 }
21540
21541 static const char nx_warning[] = KERN_CRIT
21542 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21543 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21544
21545 static void
21546 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21547 @@ -570,15 +640,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21548 if (!oops_may_print())
21549 return;
21550
21551 - if (error_code & PF_INSTR) {
21552 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21553 unsigned int level;
21554
21555 pte_t *pte = lookup_address(address, &level);
21556
21557 if (pte && pte_present(*pte) && !pte_exec(*pte))
21558 - printk(nx_warning, current_uid());
21559 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21560 }
21561
21562 +#ifdef CONFIG_PAX_KERNEXEC
21563 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21564 + if (current->signal->curr_ip)
21565 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21566 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21567 + else
21568 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21569 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21570 + }
21571 +#endif
21572 +
21573 printk(KERN_ALERT "BUG: unable to handle kernel ");
21574 if (address < PAGE_SIZE)
21575 printk(KERN_CONT "NULL pointer dereference");
21576 @@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21577 }
21578 #endif
21579
21580 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21581 + if (pax_is_fetch_fault(regs, error_code, address)) {
21582 +
21583 +#ifdef CONFIG_PAX_EMUTRAMP
21584 + switch (pax_handle_fetch_fault(regs)) {
21585 + case 2:
21586 + return;
21587 + }
21588 +#endif
21589 +
21590 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21591 + do_group_exit(SIGKILL);
21592 + }
21593 +#endif
21594 +
21595 if (unlikely(show_unhandled_signals))
21596 show_signal_msg(regs, error_code, address, tsk);
21597
21598 @@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21599 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21600 printk(KERN_ERR
21601 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21602 - tsk->comm, tsk->pid, address);
21603 + tsk->comm, task_pid_nr(tsk), address);
21604 code = BUS_MCEERR_AR;
21605 }
21606 #endif
21607 @@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21608 return 1;
21609 }
21610
21611 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21612 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21613 +{
21614 + pte_t *pte;
21615 + pmd_t *pmd;
21616 + spinlock_t *ptl;
21617 + unsigned char pte_mask;
21618 +
21619 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21620 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21621 + return 0;
21622 +
21623 + /* PaX: it's our fault, let's handle it if we can */
21624 +
21625 + /* PaX: take a look at read faults before acquiring any locks */
21626 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21627 + /* instruction fetch attempt from a protected page in user mode */
21628 + up_read(&mm->mmap_sem);
21629 +
21630 +#ifdef CONFIG_PAX_EMUTRAMP
21631 + switch (pax_handle_fetch_fault(regs)) {
21632 + case 2:
21633 + return 1;
21634 + }
21635 +#endif
21636 +
21637 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21638 + do_group_exit(SIGKILL);
21639 + }
21640 +
21641 + pmd = pax_get_pmd(mm, address);
21642 + if (unlikely(!pmd))
21643 + return 0;
21644 +
21645 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21646 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21647 + pte_unmap_unlock(pte, ptl);
21648 + return 0;
21649 + }
21650 +
21651 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21652 + /* write attempt to a protected page in user mode */
21653 + pte_unmap_unlock(pte, ptl);
21654 + return 0;
21655 + }
21656 +
21657 +#ifdef CONFIG_SMP
21658 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21659 +#else
21660 + if (likely(address > get_limit(regs->cs)))
21661 +#endif
21662 + {
21663 + set_pte(pte, pte_mkread(*pte));
21664 + __flush_tlb_one(address);
21665 + pte_unmap_unlock(pte, ptl);
21666 + up_read(&mm->mmap_sem);
21667 + return 1;
21668 + }
21669 +
21670 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21671 +
21672 + /*
21673 + * PaX: fill DTLB with user rights and retry
21674 + */
21675 + __asm__ __volatile__ (
21676 + "orb %2,(%1)\n"
21677 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21678 +/*
21679 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21680 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21681 + * page fault when examined during a TLB load attempt. this is true not only
21682 + * for PTEs holding a non-present entry but also present entries that will
21683 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21684 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21685 + * for our target pages since their PTEs are simply not in the TLBs at all.
21686 +
21687 + * the best thing in omitting it is that we gain around 15-20% speed in the
21688 + * fast path of the page fault handler and can get rid of tracing since we
21689 + * can no longer flush unintended entries.
21690 + */
21691 + "invlpg (%0)\n"
21692 +#endif
21693 + __copyuser_seg"testb $0,(%0)\n"
21694 + "xorb %3,(%1)\n"
21695 + :
21696 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21697 + : "memory", "cc");
21698 + pte_unmap_unlock(pte, ptl);
21699 + up_read(&mm->mmap_sem);
21700 + return 1;
21701 +}
21702 +#endif
21703 +
21704 /*
21705 * Handle a spurious fault caused by a stale TLB entry.
21706 *
21707 @@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
21708 static inline int
21709 access_error(unsigned long error_code, struct vm_area_struct *vma)
21710 {
21711 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21712 + return 1;
21713 +
21714 if (error_code & PF_WRITE) {
21715 /* write, present and write, not present: */
21716 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21717 @@ -989,19 +1181,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21718 {
21719 struct vm_area_struct *vma;
21720 struct task_struct *tsk;
21721 - unsigned long address;
21722 struct mm_struct *mm;
21723 int fault;
21724 int write = error_code & PF_WRITE;
21725 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21726 (write ? FAULT_FLAG_WRITE : 0);
21727
21728 + /* Get the faulting address: */
21729 + unsigned long address = read_cr2();
21730 +
21731 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21732 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21733 + if (!search_exception_tables(regs->ip)) {
21734 + bad_area_nosemaphore(regs, error_code, address);
21735 + return;
21736 + }
21737 + if (address < PAX_USER_SHADOW_BASE) {
21738 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21739 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21740 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21741 + } else
21742 + address -= PAX_USER_SHADOW_BASE;
21743 + }
21744 +#endif
21745 +
21746 tsk = current;
21747 mm = tsk->mm;
21748
21749 - /* Get the faulting address: */
21750 - address = read_cr2();
21751 -
21752 /*
21753 * Detect and handle instructions that would cause a page fault for
21754 * both a tracked kernel page and a userspace page.
21755 @@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21756 * User-mode registers count as a user access even for any
21757 * potential system fault or CPU buglet:
21758 */
21759 - if (user_mode_vm(regs)) {
21760 + if (user_mode(regs)) {
21761 local_irq_enable();
21762 error_code |= PF_USER;
21763 } else {
21764 @@ -1116,6 +1322,11 @@ retry:
21765 might_sleep();
21766 }
21767
21768 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21769 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21770 + return;
21771 +#endif
21772 +
21773 vma = find_vma(mm, address);
21774 if (unlikely(!vma)) {
21775 bad_area(regs, error_code, address);
21776 @@ -1127,18 +1338,24 @@ retry:
21777 bad_area(regs, error_code, address);
21778 return;
21779 }
21780 - if (error_code & PF_USER) {
21781 - /*
21782 - * Accessing the stack below %sp is always a bug.
21783 - * The large cushion allows instructions like enter
21784 - * and pusha to work. ("enter $65535, $31" pushes
21785 - * 32 pointers and then decrements %sp by 65535.)
21786 - */
21787 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21788 - bad_area(regs, error_code, address);
21789 - return;
21790 - }
21791 + /*
21792 + * Accessing the stack below %sp is always a bug.
21793 + * The large cushion allows instructions like enter
21794 + * and pusha to work. ("enter $65535, $31" pushes
21795 + * 32 pointers and then decrements %sp by 65535.)
21796 + */
21797 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21798 + bad_area(regs, error_code, address);
21799 + return;
21800 + }
21801 +
21802 +#ifdef CONFIG_PAX_SEGMEXEC
21803 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21804 + bad_area(regs, error_code, address);
21805 + return;
21806 }
21807 +#endif
21808 +
21809 if (unlikely(expand_stack(vma, address))) {
21810 bad_area(regs, error_code, address);
21811 return;
21812 @@ -1193,3 +1410,240 @@ good_area:
21813
21814 up_read(&mm->mmap_sem);
21815 }
21816 +
21817 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21818 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21819 +{
21820 + struct mm_struct *mm = current->mm;
21821 + unsigned long ip = regs->ip;
21822 +
21823 + if (v8086_mode(regs))
21824 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21825 +
21826 +#ifdef CONFIG_PAX_PAGEEXEC
21827 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21828 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21829 + return true;
21830 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21831 + return true;
21832 + return false;
21833 + }
21834 +#endif
21835 +
21836 +#ifdef CONFIG_PAX_SEGMEXEC
21837 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21838 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21839 + return true;
21840 + return false;
21841 + }
21842 +#endif
21843 +
21844 + return false;
21845 +}
21846 +#endif
21847 +
21848 +#ifdef CONFIG_PAX_EMUTRAMP
21849 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21850 +{
21851 + int err;
21852 +
21853 + do { /* PaX: gcc trampoline emulation #1 */
21854 + unsigned char mov1, mov2;
21855 + unsigned short jmp;
21856 + unsigned int addr1, addr2;
21857 +
21858 +#ifdef CONFIG_X86_64
21859 + if ((regs->ip + 11) >> 32)
21860 + break;
21861 +#endif
21862 +
21863 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21864 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21865 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21866 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21867 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21868 +
21869 + if (err)
21870 + break;
21871 +
21872 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21873 + regs->cx = addr1;
21874 + regs->ax = addr2;
21875 + regs->ip = addr2;
21876 + return 2;
21877 + }
21878 + } while (0);
21879 +
21880 + do { /* PaX: gcc trampoline emulation #2 */
21881 + unsigned char mov, jmp;
21882 + unsigned int addr1, addr2;
21883 +
21884 +#ifdef CONFIG_X86_64
21885 + if ((regs->ip + 9) >> 32)
21886 + break;
21887 +#endif
21888 +
21889 + err = get_user(mov, (unsigned char __user *)regs->ip);
21890 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21891 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21892 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21893 +
21894 + if (err)
21895 + break;
21896 +
21897 + if (mov == 0xB9 && jmp == 0xE9) {
21898 + regs->cx = addr1;
21899 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21900 + return 2;
21901 + }
21902 + } while (0);
21903 +
21904 + return 1; /* PaX in action */
21905 +}
21906 +
21907 +#ifdef CONFIG_X86_64
21908 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21909 +{
21910 + int err;
21911 +
21912 + do { /* PaX: gcc trampoline emulation #1 */
21913 + unsigned short mov1, mov2, jmp1;
21914 + unsigned char jmp2;
21915 + unsigned int addr1;
21916 + unsigned long addr2;
21917 +
21918 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21919 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21920 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21921 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21922 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21923 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21924 +
21925 + if (err)
21926 + break;
21927 +
21928 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21929 + regs->r11 = addr1;
21930 + regs->r10 = addr2;
21931 + regs->ip = addr1;
21932 + return 2;
21933 + }
21934 + } while (0);
21935 +
21936 + do { /* PaX: gcc trampoline emulation #2 */
21937 + unsigned short mov1, mov2, jmp1;
21938 + unsigned char jmp2;
21939 + unsigned long addr1, addr2;
21940 +
21941 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21942 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21943 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21944 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21945 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21946 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21947 +
21948 + if (err)
21949 + break;
21950 +
21951 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21952 + regs->r11 = addr1;
21953 + regs->r10 = addr2;
21954 + regs->ip = addr1;
21955 + return 2;
21956 + }
21957 + } while (0);
21958 +
21959 + return 1; /* PaX in action */
21960 +}
21961 +#endif
21962 +
21963 +/*
21964 + * PaX: decide what to do with offenders (regs->ip = fault address)
21965 + *
21966 + * returns 1 when task should be killed
21967 + * 2 when gcc trampoline was detected
21968 + */
21969 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21970 +{
21971 + if (v8086_mode(regs))
21972 + return 1;
21973 +
21974 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21975 + return 1;
21976 +
21977 +#ifdef CONFIG_X86_32
21978 + return pax_handle_fetch_fault_32(regs);
21979 +#else
21980 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21981 + return pax_handle_fetch_fault_32(regs);
21982 + else
21983 + return pax_handle_fetch_fault_64(regs);
21984 +#endif
21985 +}
21986 +#endif
21987 +
21988 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21989 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
21990 +{
21991 + long i;
21992 +
21993 + printk(KERN_ERR "PAX: bytes at PC: ");
21994 + for (i = 0; i < 20; i++) {
21995 + unsigned char c;
21996 + if (get_user(c, (unsigned char __force_user *)pc+i))
21997 + printk(KERN_CONT "?? ");
21998 + else
21999 + printk(KERN_CONT "%02x ", c);
22000 + }
22001 + printk("\n");
22002 +
22003 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
22004 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
22005 + unsigned long c;
22006 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
22007 +#ifdef CONFIG_X86_32
22008 + printk(KERN_CONT "???????? ");
22009 +#else
22010 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
22011 + printk(KERN_CONT "???????? ???????? ");
22012 + else
22013 + printk(KERN_CONT "???????????????? ");
22014 +#endif
22015 + } else {
22016 +#ifdef CONFIG_X86_64
22017 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
22018 + printk(KERN_CONT "%08x ", (unsigned int)c);
22019 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
22020 + } else
22021 +#endif
22022 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
22023 + }
22024 + }
22025 + printk("\n");
22026 +}
22027 +#endif
22028 +
22029 +/**
22030 + * probe_kernel_write(): safely attempt to write to a location
22031 + * @dst: address to write to
22032 + * @src: pointer to the data that shall be written
22033 + * @size: size of the data chunk
22034 + *
22035 + * Safely write to address @dst from the buffer at @src. If a kernel fault
22036 + * happens, handle that and return -EFAULT.
22037 + */
22038 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
22039 +{
22040 + long ret;
22041 + mm_segment_t old_fs = get_fs();
22042 +
22043 + set_fs(KERNEL_DS);
22044 + pagefault_disable();
22045 + pax_open_kernel();
22046 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
22047 + pax_close_kernel();
22048 + pagefault_enable();
22049 + set_fs(old_fs);
22050 +
22051 + return ret ? -EFAULT : 0;
22052 +}
22053 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
22054 index ea30585..b5e1508 100644
22055 --- a/arch/x86/mm/gup.c
22056 +++ b/arch/x86/mm/gup.c
22057 @@ -253,7 +253,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
22058 addr = start;
22059 len = (unsigned long) nr_pages << PAGE_SHIFT;
22060 end = start + len;
22061 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22062 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
22063 (void __user *)start, len)))
22064 return 0;
22065
22066 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
22067 index b499626..6fd1882 100644
22068 --- a/arch/x86/mm/highmem_32.c
22069 +++ b/arch/x86/mm/highmem_32.c
22070 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
22071 idx = type + KM_TYPE_NR*smp_processor_id();
22072 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22073 BUG_ON(!pte_none(*(kmap_pte-idx)));
22074 +
22075 + pax_open_kernel();
22076 set_pte(kmap_pte-idx, mk_pte(page, prot));
22077 + pax_close_kernel();
22078
22079 return (void *)vaddr;
22080 }
22081 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
22082 index f581a18..29efd37 100644
22083 --- a/arch/x86/mm/hugetlbpage.c
22084 +++ b/arch/x86/mm/hugetlbpage.c
22085 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
22086 struct hstate *h = hstate_file(file);
22087 struct mm_struct *mm = current->mm;
22088 struct vm_area_struct *vma;
22089 - unsigned long start_addr;
22090 + unsigned long start_addr, pax_task_size = TASK_SIZE;
22091 +
22092 +#ifdef CONFIG_PAX_SEGMEXEC
22093 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22094 + pax_task_size = SEGMEXEC_TASK_SIZE;
22095 +#endif
22096 +
22097 + pax_task_size -= PAGE_SIZE;
22098
22099 if (len > mm->cached_hole_size) {
22100 - start_addr = mm->free_area_cache;
22101 + start_addr = mm->free_area_cache;
22102 } else {
22103 - start_addr = TASK_UNMAPPED_BASE;
22104 - mm->cached_hole_size = 0;
22105 + start_addr = mm->mmap_base;
22106 + mm->cached_hole_size = 0;
22107 }
22108
22109 full_search:
22110 @@ -280,26 +287,27 @@ full_search:
22111
22112 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22113 /* At this point: (!vma || addr < vma->vm_end). */
22114 - if (TASK_SIZE - len < addr) {
22115 + if (pax_task_size - len < addr) {
22116 /*
22117 * Start a new search - just in case we missed
22118 * some holes.
22119 */
22120 - if (start_addr != TASK_UNMAPPED_BASE) {
22121 - start_addr = TASK_UNMAPPED_BASE;
22122 + if (start_addr != mm->mmap_base) {
22123 + start_addr = mm->mmap_base;
22124 mm->cached_hole_size = 0;
22125 goto full_search;
22126 }
22127 return -ENOMEM;
22128 }
22129 - if (!vma || addr + len <= vma->vm_start) {
22130 - mm->free_area_cache = addr + len;
22131 - return addr;
22132 - }
22133 + if (check_heap_stack_gap(vma, addr, len))
22134 + break;
22135 if (addr + mm->cached_hole_size < vma->vm_start)
22136 mm->cached_hole_size = vma->vm_start - addr;
22137 addr = ALIGN(vma->vm_end, huge_page_size(h));
22138 }
22139 +
22140 + mm->free_area_cache = addr + len;
22141 + return addr;
22142 }
22143
22144 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22145 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22146 {
22147 struct hstate *h = hstate_file(file);
22148 struct mm_struct *mm = current->mm;
22149 - struct vm_area_struct *vma, *prev_vma;
22150 - unsigned long base = mm->mmap_base, addr = addr0;
22151 + struct vm_area_struct *vma;
22152 + unsigned long base = mm->mmap_base, addr;
22153 unsigned long largest_hole = mm->cached_hole_size;
22154 - int first_time = 1;
22155
22156 /* don't allow allocations above current base */
22157 if (mm->free_area_cache > base)
22158 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22159 largest_hole = 0;
22160 mm->free_area_cache = base;
22161 }
22162 -try_again:
22163 +
22164 /* make sure it can fit in the remaining address space */
22165 if (mm->free_area_cache < len)
22166 goto fail;
22167
22168 /* either no address requested or can't fit in requested address hole */
22169 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22170 + addr = (mm->free_area_cache - len);
22171 do {
22172 + addr &= huge_page_mask(h);
22173 + vma = find_vma(mm, addr);
22174 /*
22175 * Lookup failure means no vma is above this address,
22176 * i.e. return with success:
22177 - */
22178 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22179 - return addr;
22180 -
22181 - /*
22182 * new region fits between prev_vma->vm_end and
22183 * vma->vm_start, use it:
22184 */
22185 - if (addr + len <= vma->vm_start &&
22186 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22187 + if (check_heap_stack_gap(vma, addr, len)) {
22188 /* remember the address as a hint for next time */
22189 - mm->cached_hole_size = largest_hole;
22190 - return (mm->free_area_cache = addr);
22191 - } else {
22192 - /* pull free_area_cache down to the first hole */
22193 - if (mm->free_area_cache == vma->vm_end) {
22194 - mm->free_area_cache = vma->vm_start;
22195 - mm->cached_hole_size = largest_hole;
22196 - }
22197 + mm->cached_hole_size = largest_hole;
22198 + return (mm->free_area_cache = addr);
22199 + }
22200 + /* pull free_area_cache down to the first hole */
22201 + if (mm->free_area_cache == vma->vm_end) {
22202 + mm->free_area_cache = vma->vm_start;
22203 + mm->cached_hole_size = largest_hole;
22204 }
22205
22206 /* remember the largest hole we saw so far */
22207 if (addr + largest_hole < vma->vm_start)
22208 - largest_hole = vma->vm_start - addr;
22209 + largest_hole = vma->vm_start - addr;
22210
22211 /* try just below the current vma->vm_start */
22212 - addr = (vma->vm_start - len) & huge_page_mask(h);
22213 - } while (len <= vma->vm_start);
22214 + addr = skip_heap_stack_gap(vma, len);
22215 + } while (!IS_ERR_VALUE(addr));
22216
22217 fail:
22218 /*
22219 - * if hint left us with no space for the requested
22220 - * mapping then try again:
22221 - */
22222 - if (first_time) {
22223 - mm->free_area_cache = base;
22224 - largest_hole = 0;
22225 - first_time = 0;
22226 - goto try_again;
22227 - }
22228 - /*
22229 * A failed mmap() very likely causes application failure,
22230 * so fall back to the bottom-up function here. This scenario
22231 * can happen with large stack limits and large mmap()
22232 * allocations.
22233 */
22234 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22235 +
22236 +#ifdef CONFIG_PAX_SEGMEXEC
22237 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22238 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22239 + else
22240 +#endif
22241 +
22242 + mm->mmap_base = TASK_UNMAPPED_BASE;
22243 +
22244 +#ifdef CONFIG_PAX_RANDMMAP
22245 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22246 + mm->mmap_base += mm->delta_mmap;
22247 +#endif
22248 +
22249 + mm->free_area_cache = mm->mmap_base;
22250 mm->cached_hole_size = ~0UL;
22251 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22252 len, pgoff, flags);
22253 @@ -386,6 +392,7 @@ fail:
22254 /*
22255 * Restore the topdown base:
22256 */
22257 + mm->mmap_base = base;
22258 mm->free_area_cache = base;
22259 mm->cached_hole_size = ~0UL;
22260
22261 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22262 struct hstate *h = hstate_file(file);
22263 struct mm_struct *mm = current->mm;
22264 struct vm_area_struct *vma;
22265 + unsigned long pax_task_size = TASK_SIZE;
22266
22267 if (len & ~huge_page_mask(h))
22268 return -EINVAL;
22269 - if (len > TASK_SIZE)
22270 +
22271 +#ifdef CONFIG_PAX_SEGMEXEC
22272 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22273 + pax_task_size = SEGMEXEC_TASK_SIZE;
22274 +#endif
22275 +
22276 + pax_task_size -= PAGE_SIZE;
22277 +
22278 + if (len > pax_task_size)
22279 return -ENOMEM;
22280
22281 if (flags & MAP_FIXED) {
22282 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22283 if (addr) {
22284 addr = ALIGN(addr, huge_page_size(h));
22285 vma = find_vma(mm, addr);
22286 - if (TASK_SIZE - len >= addr &&
22287 - (!vma || addr + len <= vma->vm_start))
22288 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22289 return addr;
22290 }
22291 if (mm->get_unmapped_area == arch_get_unmapped_area)
22292 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22293 index 87488b9..7129f32 100644
22294 --- a/arch/x86/mm/init.c
22295 +++ b/arch/x86/mm/init.c
22296 @@ -31,7 +31,7 @@ int direct_gbpages
22297 static void __init find_early_table_space(unsigned long end, int use_pse,
22298 int use_gbpages)
22299 {
22300 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22301 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22302 phys_addr_t base;
22303
22304 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22305 @@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22306 */
22307 int devmem_is_allowed(unsigned long pagenr)
22308 {
22309 - if (pagenr <= 256)
22310 +#ifdef CONFIG_GRKERNSEC_KMEM
22311 + /* allow BDA */
22312 + if (!pagenr)
22313 + return 1;
22314 + /* allow EBDA */
22315 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22316 + return 1;
22317 +#else
22318 + if (!pagenr)
22319 + return 1;
22320 +#ifdef CONFIG_VM86
22321 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22322 + return 1;
22323 +#endif
22324 +#endif
22325 +
22326 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22327 return 1;
22328 +#ifdef CONFIG_GRKERNSEC_KMEM
22329 + /* throw out everything else below 1MB */
22330 + if (pagenr <= 256)
22331 + return 0;
22332 +#endif
22333 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22334 return 0;
22335 if (!page_is_ram(pagenr))
22336 @@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22337
22338 void free_initmem(void)
22339 {
22340 +
22341 +#ifdef CONFIG_PAX_KERNEXEC
22342 +#ifdef CONFIG_X86_32
22343 + /* PaX: limit KERNEL_CS to actual size */
22344 + unsigned long addr, limit;
22345 + struct desc_struct d;
22346 + int cpu;
22347 +
22348 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22349 + limit = (limit - 1UL) >> PAGE_SHIFT;
22350 +
22351 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22352 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22353 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22354 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22355 + }
22356 +
22357 + /* PaX: make KERNEL_CS read-only */
22358 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22359 + if (!paravirt_enabled())
22360 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22361 +/*
22362 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22363 + pgd = pgd_offset_k(addr);
22364 + pud = pud_offset(pgd, addr);
22365 + pmd = pmd_offset(pud, addr);
22366 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22367 + }
22368 +*/
22369 +#ifdef CONFIG_X86_PAE
22370 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22371 +/*
22372 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22373 + pgd = pgd_offset_k(addr);
22374 + pud = pud_offset(pgd, addr);
22375 + pmd = pmd_offset(pud, addr);
22376 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22377 + }
22378 +*/
22379 +#endif
22380 +
22381 +#ifdef CONFIG_MODULES
22382 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22383 +#endif
22384 +
22385 +#else
22386 + pgd_t *pgd;
22387 + pud_t *pud;
22388 + pmd_t *pmd;
22389 + unsigned long addr, end;
22390 +
22391 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22392 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22393 + pgd = pgd_offset_k(addr);
22394 + pud = pud_offset(pgd, addr);
22395 + pmd = pmd_offset(pud, addr);
22396 + if (!pmd_present(*pmd))
22397 + continue;
22398 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22399 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22400 + else
22401 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22402 + }
22403 +
22404 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22405 + end = addr + KERNEL_IMAGE_SIZE;
22406 + for (; addr < end; addr += PMD_SIZE) {
22407 + pgd = pgd_offset_k(addr);
22408 + pud = pud_offset(pgd, addr);
22409 + pmd = pmd_offset(pud, addr);
22410 + if (!pmd_present(*pmd))
22411 + continue;
22412 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22413 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22414 + }
22415 +#endif
22416 +
22417 + flush_tlb_all();
22418 +#endif
22419 +
22420 free_init_pages("unused kernel memory",
22421 (unsigned long)(&__init_begin),
22422 (unsigned long)(&__init_end));
22423 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22424 index 29f7c6d..b46b35b 100644
22425 --- a/arch/x86/mm/init_32.c
22426 +++ b/arch/x86/mm/init_32.c
22427 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22428 }
22429
22430 /*
22431 - * Creates a middle page table and puts a pointer to it in the
22432 - * given global directory entry. This only returns the gd entry
22433 - * in non-PAE compilation mode, since the middle layer is folded.
22434 - */
22435 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22436 -{
22437 - pud_t *pud;
22438 - pmd_t *pmd_table;
22439 -
22440 -#ifdef CONFIG_X86_PAE
22441 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22442 - if (after_bootmem)
22443 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22444 - else
22445 - pmd_table = (pmd_t *)alloc_low_page();
22446 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22447 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22448 - pud = pud_offset(pgd, 0);
22449 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22450 -
22451 - return pmd_table;
22452 - }
22453 -#endif
22454 - pud = pud_offset(pgd, 0);
22455 - pmd_table = pmd_offset(pud, 0);
22456 -
22457 - return pmd_table;
22458 -}
22459 -
22460 -/*
22461 * Create a page table and place a pointer to it in a middle page
22462 * directory entry:
22463 */
22464 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22465 page_table = (pte_t *)alloc_low_page();
22466
22467 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22468 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22469 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22470 +#else
22471 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22472 +#endif
22473 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22474 }
22475
22476 return pte_offset_kernel(pmd, 0);
22477 }
22478
22479 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22480 +{
22481 + pud_t *pud;
22482 + pmd_t *pmd_table;
22483 +
22484 + pud = pud_offset(pgd, 0);
22485 + pmd_table = pmd_offset(pud, 0);
22486 +
22487 + return pmd_table;
22488 +}
22489 +
22490 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22491 {
22492 int pgd_idx = pgd_index(vaddr);
22493 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22494 int pgd_idx, pmd_idx;
22495 unsigned long vaddr;
22496 pgd_t *pgd;
22497 + pud_t *pud;
22498 pmd_t *pmd;
22499 pte_t *pte = NULL;
22500
22501 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22502 pgd = pgd_base + pgd_idx;
22503
22504 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22505 - pmd = one_md_table_init(pgd);
22506 - pmd = pmd + pmd_index(vaddr);
22507 + pud = pud_offset(pgd, vaddr);
22508 + pmd = pmd_offset(pud, vaddr);
22509 +
22510 +#ifdef CONFIG_X86_PAE
22511 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22512 +#endif
22513 +
22514 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22515 pmd++, pmd_idx++) {
22516 pte = page_table_kmap_check(one_page_table_init(pmd),
22517 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22518 }
22519 }
22520
22521 -static inline int is_kernel_text(unsigned long addr)
22522 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22523 {
22524 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22525 - return 1;
22526 - return 0;
22527 + if ((start > ktla_ktva((unsigned long)_etext) ||
22528 + end <= ktla_ktva((unsigned long)_stext)) &&
22529 + (start > ktla_ktva((unsigned long)_einittext) ||
22530 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22531 +
22532 +#ifdef CONFIG_ACPI_SLEEP
22533 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22534 +#endif
22535 +
22536 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22537 + return 0;
22538 + return 1;
22539 }
22540
22541 /*
22542 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22543 unsigned long last_map_addr = end;
22544 unsigned long start_pfn, end_pfn;
22545 pgd_t *pgd_base = swapper_pg_dir;
22546 - int pgd_idx, pmd_idx, pte_ofs;
22547 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22548 unsigned long pfn;
22549 pgd_t *pgd;
22550 + pud_t *pud;
22551 pmd_t *pmd;
22552 pte_t *pte;
22553 unsigned pages_2m, pages_4k;
22554 @@ -281,8 +282,13 @@ repeat:
22555 pfn = start_pfn;
22556 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22557 pgd = pgd_base + pgd_idx;
22558 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22559 - pmd = one_md_table_init(pgd);
22560 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22561 + pud = pud_offset(pgd, 0);
22562 + pmd = pmd_offset(pud, 0);
22563 +
22564 +#ifdef CONFIG_X86_PAE
22565 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22566 +#endif
22567
22568 if (pfn >= end_pfn)
22569 continue;
22570 @@ -294,14 +300,13 @@ repeat:
22571 #endif
22572 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22573 pmd++, pmd_idx++) {
22574 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22575 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22576
22577 /*
22578 * Map with big pages if possible, otherwise
22579 * create normal page tables:
22580 */
22581 if (use_pse) {
22582 - unsigned int addr2;
22583 pgprot_t prot = PAGE_KERNEL_LARGE;
22584 /*
22585 * first pass will use the same initial
22586 @@ -311,11 +316,7 @@ repeat:
22587 __pgprot(PTE_IDENT_ATTR |
22588 _PAGE_PSE);
22589
22590 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22591 - PAGE_OFFSET + PAGE_SIZE-1;
22592 -
22593 - if (is_kernel_text(addr) ||
22594 - is_kernel_text(addr2))
22595 + if (is_kernel_text(address, address + PMD_SIZE))
22596 prot = PAGE_KERNEL_LARGE_EXEC;
22597
22598 pages_2m++;
22599 @@ -332,7 +333,7 @@ repeat:
22600 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22601 pte += pte_ofs;
22602 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22603 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22604 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22605 pgprot_t prot = PAGE_KERNEL;
22606 /*
22607 * first pass will use the same initial
22608 @@ -340,7 +341,7 @@ repeat:
22609 */
22610 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22611
22612 - if (is_kernel_text(addr))
22613 + if (is_kernel_text(address, address + PAGE_SIZE))
22614 prot = PAGE_KERNEL_EXEC;
22615
22616 pages_4k++;
22617 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22618
22619 pud = pud_offset(pgd, va);
22620 pmd = pmd_offset(pud, va);
22621 - if (!pmd_present(*pmd))
22622 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22623 break;
22624
22625 pte = pte_offset_kernel(pmd, va);
22626 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22627
22628 static void __init pagetable_init(void)
22629 {
22630 - pgd_t *pgd_base = swapper_pg_dir;
22631 -
22632 - permanent_kmaps_init(pgd_base);
22633 + permanent_kmaps_init(swapper_pg_dir);
22634 }
22635
22636 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22637 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22638 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22639
22640 /* user-defined highmem size */
22641 @@ -757,6 +756,12 @@ void __init mem_init(void)
22642
22643 pci_iommu_alloc();
22644
22645 +#ifdef CONFIG_PAX_PER_CPU_PGD
22646 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22647 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22648 + KERNEL_PGD_PTRS);
22649 +#endif
22650 +
22651 #ifdef CONFIG_FLATMEM
22652 BUG_ON(!mem_map);
22653 #endif
22654 @@ -774,7 +779,7 @@ void __init mem_init(void)
22655 set_highmem_pages_init();
22656
22657 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22658 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22659 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22660 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22661
22662 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22663 @@ -815,10 +820,10 @@ void __init mem_init(void)
22664 ((unsigned long)&__init_end -
22665 (unsigned long)&__init_begin) >> 10,
22666
22667 - (unsigned long)&_etext, (unsigned long)&_edata,
22668 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22669 + (unsigned long)&_sdata, (unsigned long)&_edata,
22670 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22671
22672 - (unsigned long)&_text, (unsigned long)&_etext,
22673 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22674 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22675
22676 /*
22677 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22678 if (!kernel_set_to_readonly)
22679 return;
22680
22681 + start = ktla_ktva(start);
22682 pr_debug("Set kernel text: %lx - %lx for read write\n",
22683 start, start+size);
22684
22685 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22686 if (!kernel_set_to_readonly)
22687 return;
22688
22689 + start = ktla_ktva(start);
22690 pr_debug("Set kernel text: %lx - %lx for read only\n",
22691 start, start+size);
22692
22693 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22694 unsigned long start = PFN_ALIGN(_text);
22695 unsigned long size = PFN_ALIGN(_etext) - start;
22696
22697 + start = ktla_ktva(start);
22698 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22699 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22700 size >> 10);
22701 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22702 index bbaaa00..16dffad 100644
22703 --- a/arch/x86/mm/init_64.c
22704 +++ b/arch/x86/mm/init_64.c
22705 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22706 * around without checking the pgd every time.
22707 */
22708
22709 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22710 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22711 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22712
22713 int force_personality32;
22714 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22715
22716 for (address = start; address <= end; address += PGDIR_SIZE) {
22717 const pgd_t *pgd_ref = pgd_offset_k(address);
22718 +
22719 +#ifdef CONFIG_PAX_PER_CPU_PGD
22720 + unsigned long cpu;
22721 +#else
22722 struct page *page;
22723 +#endif
22724
22725 if (pgd_none(*pgd_ref))
22726 continue;
22727
22728 spin_lock(&pgd_lock);
22729 +
22730 +#ifdef CONFIG_PAX_PER_CPU_PGD
22731 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22732 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
22733 +#else
22734 list_for_each_entry(page, &pgd_list, lru) {
22735 pgd_t *pgd;
22736 spinlock_t *pgt_lock;
22737 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22738 /* the pgt_lock only for Xen */
22739 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22740 spin_lock(pgt_lock);
22741 +#endif
22742
22743 if (pgd_none(*pgd))
22744 set_pgd(pgd, *pgd_ref);
22745 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22746 BUG_ON(pgd_page_vaddr(*pgd)
22747 != pgd_page_vaddr(*pgd_ref));
22748
22749 +#ifndef CONFIG_PAX_PER_CPU_PGD
22750 spin_unlock(pgt_lock);
22751 +#endif
22752 +
22753 }
22754 spin_unlock(&pgd_lock);
22755 }
22756 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22757 pmd = fill_pmd(pud, vaddr);
22758 pte = fill_pte(pmd, vaddr);
22759
22760 + pax_open_kernel();
22761 set_pte(pte, new_pte);
22762 + pax_close_kernel();
22763
22764 /*
22765 * It's enough to flush this one mapping.
22766 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22767 pgd = pgd_offset_k((unsigned long)__va(phys));
22768 if (pgd_none(*pgd)) {
22769 pud = (pud_t *) spp_getpage();
22770 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22771 - _PAGE_USER));
22772 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22773 }
22774 pud = pud_offset(pgd, (unsigned long)__va(phys));
22775 if (pud_none(*pud)) {
22776 pmd = (pmd_t *) spp_getpage();
22777 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22778 - _PAGE_USER));
22779 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22780 }
22781 pmd = pmd_offset(pud, phys);
22782 BUG_ON(!pmd_none(*pmd));
22783 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22784 if (pfn >= pgt_buf_top)
22785 panic("alloc_low_page: ran out of memory");
22786
22787 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22788 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22789 clear_page(adr);
22790 *phys = pfn * PAGE_SIZE;
22791 return adr;
22792 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22793
22794 phys = __pa(virt);
22795 left = phys & (PAGE_SIZE - 1);
22796 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22797 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22798 adr = (void *)(((unsigned long)adr) | left);
22799
22800 return adr;
22801 @@ -693,6 +707,12 @@ void __init mem_init(void)
22802
22803 pci_iommu_alloc();
22804
22805 +#ifdef CONFIG_PAX_PER_CPU_PGD
22806 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22807 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22808 + KERNEL_PGD_PTRS);
22809 +#endif
22810 +
22811 /* clear_bss() already clear the empty_zero_page */
22812
22813 reservedpages = 0;
22814 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22815 static struct vm_area_struct gate_vma = {
22816 .vm_start = VSYSCALL_START,
22817 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22818 - .vm_page_prot = PAGE_READONLY_EXEC,
22819 - .vm_flags = VM_READ | VM_EXEC
22820 + .vm_page_prot = PAGE_READONLY,
22821 + .vm_flags = VM_READ
22822 };
22823
22824 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22825 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22826
22827 const char *arch_vma_name(struct vm_area_struct *vma)
22828 {
22829 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22830 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22831 return "[vdso]";
22832 if (vma == &gate_vma)
22833 return "[vsyscall]";
22834 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22835 index 7b179b4..6bd1777 100644
22836 --- a/arch/x86/mm/iomap_32.c
22837 +++ b/arch/x86/mm/iomap_32.c
22838 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22839 type = kmap_atomic_idx_push();
22840 idx = type + KM_TYPE_NR * smp_processor_id();
22841 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22842 +
22843 + pax_open_kernel();
22844 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22845 + pax_close_kernel();
22846 +
22847 arch_flush_lazy_mmu_mode();
22848
22849 return (void *)vaddr;
22850 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22851 index be1ef57..9680edc 100644
22852 --- a/arch/x86/mm/ioremap.c
22853 +++ b/arch/x86/mm/ioremap.c
22854 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22855 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22856 int is_ram = page_is_ram(pfn);
22857
22858 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22859 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22860 return NULL;
22861 WARN_ON_ONCE(is_ram);
22862 }
22863 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22864 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22865
22866 static __initdata int after_paging_init;
22867 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22868 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22869
22870 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22871 {
22872 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22873 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22874
22875 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22876 - memset(bm_pte, 0, sizeof(bm_pte));
22877 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22878 + pmd_populate_user(&init_mm, pmd, bm_pte);
22879
22880 /*
22881 * The boot-ioremap range spans multiple pmds, for which
22882 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22883 index d87dd6d..bf3fa66 100644
22884 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
22885 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
22886 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
22887 * memory (e.g. tracked pages)? For now, we need this to avoid
22888 * invoking kmemcheck for PnP BIOS calls.
22889 */
22890 - if (regs->flags & X86_VM_MASK)
22891 + if (v8086_mode(regs))
22892 return false;
22893 - if (regs->cs != __KERNEL_CS)
22894 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22895 return false;
22896
22897 pte = kmemcheck_pte_lookup(address);
22898 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
22899 index 1dab519..60a7e5f 100644
22900 --- a/arch/x86/mm/mmap.c
22901 +++ b/arch/x86/mm/mmap.c
22902 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
22903 * Leave an at least ~128 MB hole with possible stack randomization.
22904 */
22905 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22906 -#define MAX_GAP (TASK_SIZE/6*5)
22907 +#define MAX_GAP (pax_task_size/6*5)
22908
22909 /*
22910 * True on X86_32 or when emulating IA32 on X86_64
22911 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22912 return rnd << PAGE_SHIFT;
22913 }
22914
22915 -static unsigned long mmap_base(void)
22916 +static unsigned long mmap_base(struct mm_struct *mm)
22917 {
22918 unsigned long gap = rlimit(RLIMIT_STACK);
22919 + unsigned long pax_task_size = TASK_SIZE;
22920 +
22921 +#ifdef CONFIG_PAX_SEGMEXEC
22922 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22923 + pax_task_size = SEGMEXEC_TASK_SIZE;
22924 +#endif
22925
22926 if (gap < MIN_GAP)
22927 gap = MIN_GAP;
22928 else if (gap > MAX_GAP)
22929 gap = MAX_GAP;
22930
22931 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22932 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22933 }
22934
22935 /*
22936 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22937 * does, but not when emulating X86_32
22938 */
22939 -static unsigned long mmap_legacy_base(void)
22940 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22941 {
22942 - if (mmap_is_ia32())
22943 + if (mmap_is_ia32()) {
22944 +
22945 +#ifdef CONFIG_PAX_SEGMEXEC
22946 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22947 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22948 + else
22949 +#endif
22950 +
22951 return TASK_UNMAPPED_BASE;
22952 - else
22953 + } else
22954 return TASK_UNMAPPED_BASE + mmap_rnd();
22955 }
22956
22957 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
22958 void arch_pick_mmap_layout(struct mm_struct *mm)
22959 {
22960 if (mmap_is_legacy()) {
22961 - mm->mmap_base = mmap_legacy_base();
22962 + mm->mmap_base = mmap_legacy_base(mm);
22963 +
22964 +#ifdef CONFIG_PAX_RANDMMAP
22965 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22966 + mm->mmap_base += mm->delta_mmap;
22967 +#endif
22968 +
22969 mm->get_unmapped_area = arch_get_unmapped_area;
22970 mm->unmap_area = arch_unmap_area;
22971 } else {
22972 - mm->mmap_base = mmap_base();
22973 + mm->mmap_base = mmap_base(mm);
22974 +
22975 +#ifdef CONFIG_PAX_RANDMMAP
22976 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22977 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22978 +#endif
22979 +
22980 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22981 mm->unmap_area = arch_unmap_area_topdown;
22982 }
22983 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
22984 index 67421f3..8d6b107 100644
22985 --- a/arch/x86/mm/mmio-mod.c
22986 +++ b/arch/x86/mm/mmio-mod.c
22987 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
22988 break;
22989 default:
22990 {
22991 - unsigned char *ip = (unsigned char *)instptr;
22992 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22993 my_trace->opcode = MMIO_UNKNOWN_OP;
22994 my_trace->width = 0;
22995 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22996 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
22997 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22998 void __iomem *addr)
22999 {
23000 - static atomic_t next_id;
23001 + static atomic_unchecked_t next_id;
23002 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
23003 /* These are page-unaligned. */
23004 struct mmiotrace_map map = {
23005 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
23006 .private = trace
23007 },
23008 .phys = offset,
23009 - .id = atomic_inc_return(&next_id)
23010 + .id = atomic_inc_return_unchecked(&next_id)
23011 };
23012 map.map_id = trace->id;
23013
23014 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
23015 index b008656..773eac2 100644
23016 --- a/arch/x86/mm/pageattr-test.c
23017 +++ b/arch/x86/mm/pageattr-test.c
23018 @@ -36,7 +36,7 @@ enum {
23019
23020 static int pte_testbit(pte_t pte)
23021 {
23022 - return pte_flags(pte) & _PAGE_UNUSED1;
23023 + return pte_flags(pte) & _PAGE_CPA_TEST;
23024 }
23025
23026 struct split_state {
23027 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
23028 index f9e5267..6f6e27f 100644
23029 --- a/arch/x86/mm/pageattr.c
23030 +++ b/arch/x86/mm/pageattr.c
23031 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23032 */
23033 #ifdef CONFIG_PCI_BIOS
23034 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
23035 - pgprot_val(forbidden) |= _PAGE_NX;
23036 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23037 #endif
23038
23039 /*
23040 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23041 * Does not cover __inittext since that is gone later on. On
23042 * 64bit we do not enforce !NX on the low mapping
23043 */
23044 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
23045 - pgprot_val(forbidden) |= _PAGE_NX;
23046 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
23047 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23048
23049 +#ifdef CONFIG_DEBUG_RODATA
23050 /*
23051 * The .rodata section needs to be read-only. Using the pfn
23052 * catches all aliases.
23053 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23054 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
23055 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
23056 pgprot_val(forbidden) |= _PAGE_RW;
23057 +#endif
23058
23059 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
23060 /*
23061 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
23062 }
23063 #endif
23064
23065 +#ifdef CONFIG_PAX_KERNEXEC
23066 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
23067 + pgprot_val(forbidden) |= _PAGE_RW;
23068 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
23069 + }
23070 +#endif
23071 +
23072 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
23073
23074 return prot;
23075 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
23076 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
23077 {
23078 /* change init_mm */
23079 + pax_open_kernel();
23080 set_pte_atomic(kpte, pte);
23081 +
23082 #ifdef CONFIG_X86_32
23083 if (!SHARED_KERNEL_PMD) {
23084 +
23085 +#ifdef CONFIG_PAX_PER_CPU_PGD
23086 + unsigned long cpu;
23087 +#else
23088 struct page *page;
23089 +#endif
23090
23091 +#ifdef CONFIG_PAX_PER_CPU_PGD
23092 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23093 + pgd_t *pgd = get_cpu_pgd(cpu);
23094 +#else
23095 list_for_each_entry(page, &pgd_list, lru) {
23096 - pgd_t *pgd;
23097 + pgd_t *pgd = (pgd_t *)page_address(page);
23098 +#endif
23099 +
23100 pud_t *pud;
23101 pmd_t *pmd;
23102
23103 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
23104 + pgd += pgd_index(address);
23105 pud = pud_offset(pgd, address);
23106 pmd = pmd_offset(pud, address);
23107 set_pte_atomic((pte_t *)pmd, pte);
23108 }
23109 }
23110 #endif
23111 + pax_close_kernel();
23112 }
23113
23114 static int
23115 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
23116 index f6ff57b..481690f 100644
23117 --- a/arch/x86/mm/pat.c
23118 +++ b/arch/x86/mm/pat.c
23119 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
23120
23121 if (!entry) {
23122 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23123 - current->comm, current->pid, start, end);
23124 + current->comm, task_pid_nr(current), start, end);
23125 return -EINVAL;
23126 }
23127
23128 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23129 while (cursor < to) {
23130 if (!devmem_is_allowed(pfn)) {
23131 printk(KERN_INFO
23132 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23133 - current->comm, from, to);
23134 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23135 + current->comm, from, to, cursor);
23136 return 0;
23137 }
23138 cursor += PAGE_SIZE;
23139 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23140 printk(KERN_INFO
23141 "%s:%d ioremap_change_attr failed %s "
23142 "for %Lx-%Lx\n",
23143 - current->comm, current->pid,
23144 + current->comm, task_pid_nr(current),
23145 cattr_name(flags),
23146 base, (unsigned long long)(base + size));
23147 return -EINVAL;
23148 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23149 if (want_flags != flags) {
23150 printk(KERN_WARNING
23151 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23152 - current->comm, current->pid,
23153 + current->comm, task_pid_nr(current),
23154 cattr_name(want_flags),
23155 (unsigned long long)paddr,
23156 (unsigned long long)(paddr + size),
23157 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23158 free_memtype(paddr, paddr + size);
23159 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23160 " for %Lx-%Lx, got %s\n",
23161 - current->comm, current->pid,
23162 + current->comm, task_pid_nr(current),
23163 cattr_name(want_flags),
23164 (unsigned long long)paddr,
23165 (unsigned long long)(paddr + size),
23166 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23167 index 9f0614d..92ae64a 100644
23168 --- a/arch/x86/mm/pf_in.c
23169 +++ b/arch/x86/mm/pf_in.c
23170 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23171 int i;
23172 enum reason_type rv = OTHERS;
23173
23174 - p = (unsigned char *)ins_addr;
23175 + p = (unsigned char *)ktla_ktva(ins_addr);
23176 p += skip_prefix(p, &prf);
23177 p += get_opcode(p, &opcode);
23178
23179 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23180 struct prefix_bits prf;
23181 int i;
23182
23183 - p = (unsigned char *)ins_addr;
23184 + p = (unsigned char *)ktla_ktva(ins_addr);
23185 p += skip_prefix(p, &prf);
23186 p += get_opcode(p, &opcode);
23187
23188 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23189 struct prefix_bits prf;
23190 int i;
23191
23192 - p = (unsigned char *)ins_addr;
23193 + p = (unsigned char *)ktla_ktva(ins_addr);
23194 p += skip_prefix(p, &prf);
23195 p += get_opcode(p, &opcode);
23196
23197 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23198 struct prefix_bits prf;
23199 int i;
23200
23201 - p = (unsigned char *)ins_addr;
23202 + p = (unsigned char *)ktla_ktva(ins_addr);
23203 p += skip_prefix(p, &prf);
23204 p += get_opcode(p, &opcode);
23205 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23206 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23207 struct prefix_bits prf;
23208 int i;
23209
23210 - p = (unsigned char *)ins_addr;
23211 + p = (unsigned char *)ktla_ktva(ins_addr);
23212 p += skip_prefix(p, &prf);
23213 p += get_opcode(p, &opcode);
23214 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23215 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23216 index 8573b83..6372501 100644
23217 --- a/arch/x86/mm/pgtable.c
23218 +++ b/arch/x86/mm/pgtable.c
23219 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23220 list_del(&page->lru);
23221 }
23222
23223 -#define UNSHARED_PTRS_PER_PGD \
23224 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23225 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23226 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23227
23228 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23229 +{
23230 + while (count--)
23231 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23232 +}
23233 +#endif
23234 +
23235 +#ifdef CONFIG_PAX_PER_CPU_PGD
23236 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23237 +{
23238 + while (count--)
23239 +
23240 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23241 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23242 +#else
23243 + *dst++ = *src++;
23244 +#endif
23245 +
23246 +}
23247 +#endif
23248 +
23249 +#ifdef CONFIG_X86_64
23250 +#define pxd_t pud_t
23251 +#define pyd_t pgd_t
23252 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23253 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23254 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23255 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23256 +#define PYD_SIZE PGDIR_SIZE
23257 +#else
23258 +#define pxd_t pmd_t
23259 +#define pyd_t pud_t
23260 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23261 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23262 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23263 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
23264 +#define PYD_SIZE PUD_SIZE
23265 +#endif
23266
23267 +#ifdef CONFIG_PAX_PER_CPU_PGD
23268 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23269 +static inline void pgd_dtor(pgd_t *pgd) {}
23270 +#else
23271 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23272 {
23273 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23274 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23275 pgd_list_del(pgd);
23276 spin_unlock(&pgd_lock);
23277 }
23278 +#endif
23279
23280 /*
23281 * List of all pgd's needed for non-PAE so it can invalidate entries
23282 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23283 * -- wli
23284 */
23285
23286 -#ifdef CONFIG_X86_PAE
23287 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23288 /*
23289 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23290 * updating the top-level pagetable entries to guarantee the
23291 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23292 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23293 * and initialize the kernel pmds here.
23294 */
23295 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23296 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23297
23298 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23299 {
23300 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23301 */
23302 flush_tlb_mm(mm);
23303 }
23304 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23305 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23306 #else /* !CONFIG_X86_PAE */
23307
23308 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23309 -#define PREALLOCATED_PMDS 0
23310 +#define PREALLOCATED_PXDS 0
23311
23312 #endif /* CONFIG_X86_PAE */
23313
23314 -static void free_pmds(pmd_t *pmds[])
23315 +static void free_pxds(pxd_t *pxds[])
23316 {
23317 int i;
23318
23319 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23320 - if (pmds[i])
23321 - free_page((unsigned long)pmds[i]);
23322 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23323 + if (pxds[i])
23324 + free_page((unsigned long)pxds[i]);
23325 }
23326
23327 -static int preallocate_pmds(pmd_t *pmds[])
23328 +static int preallocate_pxds(pxd_t *pxds[])
23329 {
23330 int i;
23331 bool failed = false;
23332
23333 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23334 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23335 - if (pmd == NULL)
23336 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23337 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23338 + if (pxd == NULL)
23339 failed = true;
23340 - pmds[i] = pmd;
23341 + pxds[i] = pxd;
23342 }
23343
23344 if (failed) {
23345 - free_pmds(pmds);
23346 + free_pxds(pxds);
23347 return -ENOMEM;
23348 }
23349
23350 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23351 * preallocate which never got a corresponding vma will need to be
23352 * freed manually.
23353 */
23354 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23355 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23356 {
23357 int i;
23358
23359 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23360 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23361 pgd_t pgd = pgdp[i];
23362
23363 if (pgd_val(pgd) != 0) {
23364 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23365 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23366
23367 - pgdp[i] = native_make_pgd(0);
23368 + set_pgd(pgdp + i, native_make_pgd(0));
23369
23370 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23371 - pmd_free(mm, pmd);
23372 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23373 + pxd_free(mm, pxd);
23374 }
23375 }
23376 }
23377
23378 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23379 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23380 {
23381 - pud_t *pud;
23382 + pyd_t *pyd;
23383 unsigned long addr;
23384 int i;
23385
23386 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23387 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23388 return;
23389
23390 - pud = pud_offset(pgd, 0);
23391 +#ifdef CONFIG_X86_64
23392 + pyd = pyd_offset(mm, 0L);
23393 +#else
23394 + pyd = pyd_offset(pgd, 0L);
23395 +#endif
23396
23397 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23398 - i++, pud++, addr += PUD_SIZE) {
23399 - pmd_t *pmd = pmds[i];
23400 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23401 + i++, pyd++, addr += PYD_SIZE) {
23402 + pxd_t *pxd = pxds[i];
23403
23404 if (i >= KERNEL_PGD_BOUNDARY)
23405 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23406 - sizeof(pmd_t) * PTRS_PER_PMD);
23407 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23408 + sizeof(pxd_t) * PTRS_PER_PMD);
23409
23410 - pud_populate(mm, pud, pmd);
23411 + pyd_populate(mm, pyd, pxd);
23412 }
23413 }
23414
23415 pgd_t *pgd_alloc(struct mm_struct *mm)
23416 {
23417 pgd_t *pgd;
23418 - pmd_t *pmds[PREALLOCATED_PMDS];
23419 + pxd_t *pxds[PREALLOCATED_PXDS];
23420
23421 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23422
23423 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23424
23425 mm->pgd = pgd;
23426
23427 - if (preallocate_pmds(pmds) != 0)
23428 + if (preallocate_pxds(pxds) != 0)
23429 goto out_free_pgd;
23430
23431 if (paravirt_pgd_alloc(mm) != 0)
23432 - goto out_free_pmds;
23433 + goto out_free_pxds;
23434
23435 /*
23436 * Make sure that pre-populating the pmds is atomic with
23437 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23438 spin_lock(&pgd_lock);
23439
23440 pgd_ctor(mm, pgd);
23441 - pgd_prepopulate_pmd(mm, pgd, pmds);
23442 + pgd_prepopulate_pxd(mm, pgd, pxds);
23443
23444 spin_unlock(&pgd_lock);
23445
23446 return pgd;
23447
23448 -out_free_pmds:
23449 - free_pmds(pmds);
23450 +out_free_pxds:
23451 + free_pxds(pxds);
23452 out_free_pgd:
23453 free_page((unsigned long)pgd);
23454 out:
23455 @@ -295,7 +344,7 @@ out:
23456
23457 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23458 {
23459 - pgd_mop_up_pmds(mm, pgd);
23460 + pgd_mop_up_pxds(mm, pgd);
23461 pgd_dtor(pgd);
23462 paravirt_pgd_free(mm, pgd);
23463 free_page((unsigned long)pgd);
23464 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23465 index cac7184..09a39fa 100644
23466 --- a/arch/x86/mm/pgtable_32.c
23467 +++ b/arch/x86/mm/pgtable_32.c
23468 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23469 return;
23470 }
23471 pte = pte_offset_kernel(pmd, vaddr);
23472 +
23473 + pax_open_kernel();
23474 if (pte_val(pteval))
23475 set_pte_at(&init_mm, vaddr, pte, pteval);
23476 else
23477 pte_clear(&init_mm, vaddr, pte);
23478 + pax_close_kernel();
23479
23480 /*
23481 * It's enough to flush this one mapping.
23482 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23483 index 410531d..0f16030 100644
23484 --- a/arch/x86/mm/setup_nx.c
23485 +++ b/arch/x86/mm/setup_nx.c
23486 @@ -5,8 +5,10 @@
23487 #include <asm/pgtable.h>
23488 #include <asm/proto.h>
23489
23490 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23491 static int disable_nx __cpuinitdata;
23492
23493 +#ifndef CONFIG_PAX_PAGEEXEC
23494 /*
23495 * noexec = on|off
23496 *
23497 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23498 return 0;
23499 }
23500 early_param("noexec", noexec_setup);
23501 +#endif
23502 +
23503 +#endif
23504
23505 void __cpuinit x86_configure_nx(void)
23506 {
23507 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23508 if (cpu_has_nx && !disable_nx)
23509 __supported_pte_mask |= _PAGE_NX;
23510 else
23511 +#endif
23512 __supported_pte_mask &= ~_PAGE_NX;
23513 }
23514
23515 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23516 index d6c0418..06a0ad5 100644
23517 --- a/arch/x86/mm/tlb.c
23518 +++ b/arch/x86/mm/tlb.c
23519 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23520 BUG();
23521 cpumask_clear_cpu(cpu,
23522 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23523 +
23524 +#ifndef CONFIG_PAX_PER_CPU_PGD
23525 load_cr3(swapper_pg_dir);
23526 +#endif
23527 +
23528 }
23529 EXPORT_SYMBOL_GPL(leave_mm);
23530
23531 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23532 index 6687022..ceabcfa 100644
23533 --- a/arch/x86/net/bpf_jit.S
23534 +++ b/arch/x86/net/bpf_jit.S
23535 @@ -9,6 +9,7 @@
23536 */
23537 #include <linux/linkage.h>
23538 #include <asm/dwarf2.h>
23539 +#include <asm/alternative-asm.h>
23540
23541 /*
23542 * Calling convention :
23543 @@ -35,6 +36,7 @@ sk_load_word:
23544 jle bpf_slow_path_word
23545 mov (SKBDATA,%rsi),%eax
23546 bswap %eax /* ntohl() */
23547 + pax_force_retaddr
23548 ret
23549
23550
23551 @@ -53,6 +55,7 @@ sk_load_half:
23552 jle bpf_slow_path_half
23553 movzwl (SKBDATA,%rsi),%eax
23554 rol $8,%ax # ntohs()
23555 + pax_force_retaddr
23556 ret
23557
23558 sk_load_byte_ind:
23559 @@ -66,6 +69,7 @@ sk_load_byte:
23560 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23561 jle bpf_slow_path_byte
23562 movzbl (SKBDATA,%rsi),%eax
23563 + pax_force_retaddr
23564 ret
23565
23566 /**
23567 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23568 movzbl (SKBDATA,%rsi),%ebx
23569 and $15,%bl
23570 shl $2,%bl
23571 + pax_force_retaddr
23572 ret
23573 CFI_ENDPROC
23574 ENDPROC(sk_load_byte_msh)
23575 @@ -91,6 +96,7 @@ bpf_error:
23576 xor %eax,%eax
23577 mov -8(%rbp),%rbx
23578 leaveq
23579 + pax_force_retaddr
23580 ret
23581
23582 /* rsi contains offset and can be scratched */
23583 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23584 js bpf_error
23585 mov -12(%rbp),%eax
23586 bswap %eax
23587 + pax_force_retaddr
23588 ret
23589
23590 bpf_slow_path_half:
23591 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23592 mov -12(%rbp),%ax
23593 rol $8,%ax
23594 movzwl %ax,%eax
23595 + pax_force_retaddr
23596 ret
23597
23598 bpf_slow_path_byte:
23599 bpf_slow_path_common(1)
23600 js bpf_error
23601 movzbl -12(%rbp),%eax
23602 + pax_force_retaddr
23603 ret
23604
23605 bpf_slow_path_byte_msh:
23606 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23607 and $15,%al
23608 shl $2,%al
23609 xchg %eax,%ebx
23610 + pax_force_retaddr
23611 ret
23612 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23613 index bfab3fa..05aac3a 100644
23614 --- a/arch/x86/net/bpf_jit_comp.c
23615 +++ b/arch/x86/net/bpf_jit_comp.c
23616 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23617 set_fs(old_fs);
23618 }
23619
23620 +struct bpf_jit_work {
23621 + struct work_struct work;
23622 + void *image;
23623 +};
23624
23625 void bpf_jit_compile(struct sk_filter *fp)
23626 {
23627 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23628 if (addrs == NULL)
23629 return;
23630
23631 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23632 + if (!fp->work)
23633 + goto out;
23634 +
23635 /* Before first pass, make a rough estimation of addrs[]
23636 * each bpf instruction is translated to less than 64 bytes
23637 */
23638 @@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23639 if (image) {
23640 if (unlikely(proglen + ilen > oldproglen)) {
23641 pr_err("bpb_jit_compile fatal error\n");
23642 - kfree(addrs);
23643 - module_free(NULL, image);
23644 - return;
23645 + module_free_exec(NULL, image);
23646 + goto out;
23647 }
23648 + pax_open_kernel();
23649 memcpy(image + proglen, temp, ilen);
23650 + pax_close_kernel();
23651 }
23652 proglen += ilen;
23653 addrs[i] = proglen;
23654 @@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23655 break;
23656 }
23657 if (proglen == oldproglen) {
23658 - image = module_alloc(max_t(unsigned int,
23659 + image = module_alloc_exec(max_t(unsigned int,
23660 proglen,
23661 sizeof(struct work_struct)));
23662 if (!image)
23663 @@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23664 fp->bpf_func = (void *)image;
23665 }
23666 out:
23667 + kfree(fp->work);
23668 kfree(addrs);
23669 return;
23670 }
23671
23672 static void jit_free_defer(struct work_struct *arg)
23673 {
23674 - module_free(NULL, arg);
23675 + module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23676 + kfree(arg);
23677 }
23678
23679 /* run from softirq, we must use a work_struct to call
23680 - * module_free() from process context
23681 + * module_free_exec() from process context
23682 */
23683 void bpf_jit_free(struct sk_filter *fp)
23684 {
23685 if (fp->bpf_func != sk_run_filter) {
23686 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
23687 + struct work_struct *work = &fp->work->work;
23688
23689 INIT_WORK(work, jit_free_defer);
23690 + fp->work->image = fp->bpf_func;
23691 schedule_work(work);
23692 }
23693 }
23694 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23695 index bff89df..377758a 100644
23696 --- a/arch/x86/oprofile/backtrace.c
23697 +++ b/arch/x86/oprofile/backtrace.c
23698 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23699 struct stack_frame_ia32 *fp;
23700 unsigned long bytes;
23701
23702 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23703 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23704 if (bytes != sizeof(bufhead))
23705 return NULL;
23706
23707 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23708 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23709
23710 oprofile_add_trace(bufhead[0].return_address);
23711
23712 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23713 struct stack_frame bufhead[2];
23714 unsigned long bytes;
23715
23716 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23717 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23718 if (bytes != sizeof(bufhead))
23719 return NULL;
23720
23721 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23722 {
23723 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23724
23725 - if (!user_mode_vm(regs)) {
23726 + if (!user_mode(regs)) {
23727 unsigned long stack = kernel_stack_pointer(regs);
23728 if (depth)
23729 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23730 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23731 index cb29191..036766d 100644
23732 --- a/arch/x86/pci/mrst.c
23733 +++ b/arch/x86/pci/mrst.c
23734 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23735 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23736 pci_mmcfg_late_init();
23737 pcibios_enable_irq = mrst_pci_irq_enable;
23738 - pci_root_ops = pci_mrst_ops;
23739 + pax_open_kernel();
23740 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23741 + pax_close_kernel();
23742 /* Continue with standard init */
23743 return 1;
23744 }
23745 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23746 index f685535..2b76a81 100644
23747 --- a/arch/x86/pci/pcbios.c
23748 +++ b/arch/x86/pci/pcbios.c
23749 @@ -79,50 +79,93 @@ union bios32 {
23750 static struct {
23751 unsigned long address;
23752 unsigned short segment;
23753 -} bios32_indirect = { 0, __KERNEL_CS };
23754 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23755
23756 /*
23757 * Returns the entry point for the given service, NULL on error
23758 */
23759
23760 -static unsigned long bios32_service(unsigned long service)
23761 +static unsigned long __devinit bios32_service(unsigned long service)
23762 {
23763 unsigned char return_code; /* %al */
23764 unsigned long address; /* %ebx */
23765 unsigned long length; /* %ecx */
23766 unsigned long entry; /* %edx */
23767 unsigned long flags;
23768 + struct desc_struct d, *gdt;
23769
23770 local_irq_save(flags);
23771 - __asm__("lcall *(%%edi); cld"
23772 +
23773 + gdt = get_cpu_gdt_table(smp_processor_id());
23774 +
23775 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23776 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23777 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23778 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23779 +
23780 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23781 : "=a" (return_code),
23782 "=b" (address),
23783 "=c" (length),
23784 "=d" (entry)
23785 : "0" (service),
23786 "1" (0),
23787 - "D" (&bios32_indirect));
23788 + "D" (&bios32_indirect),
23789 + "r"(__PCIBIOS_DS)
23790 + : "memory");
23791 +
23792 + pax_open_kernel();
23793 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23794 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23795 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23796 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23797 + pax_close_kernel();
23798 +
23799 local_irq_restore(flags);
23800
23801 switch (return_code) {
23802 - case 0:
23803 - return address + entry;
23804 - case 0x80: /* Not present */
23805 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23806 - return 0;
23807 - default: /* Shouldn't happen */
23808 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23809 - service, return_code);
23810 + case 0: {
23811 + int cpu;
23812 + unsigned char flags;
23813 +
23814 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23815 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23816 + printk(KERN_WARNING "bios32_service: not valid\n");
23817 return 0;
23818 + }
23819 + address = address + PAGE_OFFSET;
23820 + length += 16UL; /* some BIOSs underreport this... */
23821 + flags = 4;
23822 + if (length >= 64*1024*1024) {
23823 + length >>= PAGE_SHIFT;
23824 + flags |= 8;
23825 + }
23826 +
23827 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23828 + gdt = get_cpu_gdt_table(cpu);
23829 + pack_descriptor(&d, address, length, 0x9b, flags);
23830 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23831 + pack_descriptor(&d, address, length, 0x93, flags);
23832 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23833 + }
23834 + return entry;
23835 + }
23836 + case 0x80: /* Not present */
23837 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23838 + return 0;
23839 + default: /* Shouldn't happen */
23840 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23841 + service, return_code);
23842 + return 0;
23843 }
23844 }
23845
23846 static struct {
23847 unsigned long address;
23848 unsigned short segment;
23849 -} pci_indirect = { 0, __KERNEL_CS };
23850 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23851
23852 -static int pci_bios_present;
23853 +static int pci_bios_present __read_only;
23854
23855 static int __devinit check_pcibios(void)
23856 {
23857 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23858 unsigned long flags, pcibios_entry;
23859
23860 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23861 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23862 + pci_indirect.address = pcibios_entry;
23863
23864 local_irq_save(flags);
23865 - __asm__(
23866 - "lcall *(%%edi); cld\n\t"
23867 + __asm__("movw %w6, %%ds\n\t"
23868 + "lcall *%%ss:(%%edi); cld\n\t"
23869 + "push %%ss\n\t"
23870 + "pop %%ds\n\t"
23871 "jc 1f\n\t"
23872 "xor %%ah, %%ah\n"
23873 "1:"
23874 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23875 "=b" (ebx),
23876 "=c" (ecx)
23877 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23878 - "D" (&pci_indirect)
23879 + "D" (&pci_indirect),
23880 + "r" (__PCIBIOS_DS)
23881 : "memory");
23882 local_irq_restore(flags);
23883
23884 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23885
23886 switch (len) {
23887 case 1:
23888 - __asm__("lcall *(%%esi); cld\n\t"
23889 + __asm__("movw %w6, %%ds\n\t"
23890 + "lcall *%%ss:(%%esi); cld\n\t"
23891 + "push %%ss\n\t"
23892 + "pop %%ds\n\t"
23893 "jc 1f\n\t"
23894 "xor %%ah, %%ah\n"
23895 "1:"
23896 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23897 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23898 "b" (bx),
23899 "D" ((long)reg),
23900 - "S" (&pci_indirect));
23901 + "S" (&pci_indirect),
23902 + "r" (__PCIBIOS_DS));
23903 /*
23904 * Zero-extend the result beyond 8 bits, do not trust the
23905 * BIOS having done it:
23906 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23907 *value &= 0xff;
23908 break;
23909 case 2:
23910 - __asm__("lcall *(%%esi); cld\n\t"
23911 + __asm__("movw %w6, %%ds\n\t"
23912 + "lcall *%%ss:(%%esi); cld\n\t"
23913 + "push %%ss\n\t"
23914 + "pop %%ds\n\t"
23915 "jc 1f\n\t"
23916 "xor %%ah, %%ah\n"
23917 "1:"
23918 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23919 : "1" (PCIBIOS_READ_CONFIG_WORD),
23920 "b" (bx),
23921 "D" ((long)reg),
23922 - "S" (&pci_indirect));
23923 + "S" (&pci_indirect),
23924 + "r" (__PCIBIOS_DS));
23925 /*
23926 * Zero-extend the result beyond 16 bits, do not trust the
23927 * BIOS having done it:
23928 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23929 *value &= 0xffff;
23930 break;
23931 case 4:
23932 - __asm__("lcall *(%%esi); cld\n\t"
23933 + __asm__("movw %w6, %%ds\n\t"
23934 + "lcall *%%ss:(%%esi); cld\n\t"
23935 + "push %%ss\n\t"
23936 + "pop %%ds\n\t"
23937 "jc 1f\n\t"
23938 "xor %%ah, %%ah\n"
23939 "1:"
23940 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23941 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23942 "b" (bx),
23943 "D" ((long)reg),
23944 - "S" (&pci_indirect));
23945 + "S" (&pci_indirect),
23946 + "r" (__PCIBIOS_DS));
23947 break;
23948 }
23949
23950 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23951
23952 switch (len) {
23953 case 1:
23954 - __asm__("lcall *(%%esi); cld\n\t"
23955 + __asm__("movw %w6, %%ds\n\t"
23956 + "lcall *%%ss:(%%esi); cld\n\t"
23957 + "push %%ss\n\t"
23958 + "pop %%ds\n\t"
23959 "jc 1f\n\t"
23960 "xor %%ah, %%ah\n"
23961 "1:"
23962 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23963 "c" (value),
23964 "b" (bx),
23965 "D" ((long)reg),
23966 - "S" (&pci_indirect));
23967 + "S" (&pci_indirect),
23968 + "r" (__PCIBIOS_DS));
23969 break;
23970 case 2:
23971 - __asm__("lcall *(%%esi); cld\n\t"
23972 + __asm__("movw %w6, %%ds\n\t"
23973 + "lcall *%%ss:(%%esi); cld\n\t"
23974 + "push %%ss\n\t"
23975 + "pop %%ds\n\t"
23976 "jc 1f\n\t"
23977 "xor %%ah, %%ah\n"
23978 "1:"
23979 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23980 "c" (value),
23981 "b" (bx),
23982 "D" ((long)reg),
23983 - "S" (&pci_indirect));
23984 + "S" (&pci_indirect),
23985 + "r" (__PCIBIOS_DS));
23986 break;
23987 case 4:
23988 - __asm__("lcall *(%%esi); cld\n\t"
23989 + __asm__("movw %w6, %%ds\n\t"
23990 + "lcall *%%ss:(%%esi); cld\n\t"
23991 + "push %%ss\n\t"
23992 + "pop %%ds\n\t"
23993 "jc 1f\n\t"
23994 "xor %%ah, %%ah\n"
23995 "1:"
23996 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23997 "c" (value),
23998 "b" (bx),
23999 "D" ((long)reg),
24000 - "S" (&pci_indirect));
24001 + "S" (&pci_indirect),
24002 + "r" (__PCIBIOS_DS));
24003 break;
24004 }
24005
24006 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24007
24008 DBG("PCI: Fetching IRQ routing table... ");
24009 __asm__("push %%es\n\t"
24010 + "movw %w8, %%ds\n\t"
24011 "push %%ds\n\t"
24012 "pop %%es\n\t"
24013 - "lcall *(%%esi); cld\n\t"
24014 + "lcall *%%ss:(%%esi); cld\n\t"
24015 "pop %%es\n\t"
24016 + "push %%ss\n\t"
24017 + "pop %%ds\n"
24018 "jc 1f\n\t"
24019 "xor %%ah, %%ah\n"
24020 "1:"
24021 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
24022 "1" (0),
24023 "D" ((long) &opt),
24024 "S" (&pci_indirect),
24025 - "m" (opt)
24026 + "m" (opt),
24027 + "r" (__PCIBIOS_DS)
24028 : "memory");
24029 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
24030 if (ret & 0xff00)
24031 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24032 {
24033 int ret;
24034
24035 - __asm__("lcall *(%%esi); cld\n\t"
24036 + __asm__("movw %w5, %%ds\n\t"
24037 + "lcall *%%ss:(%%esi); cld\n\t"
24038 + "push %%ss\n\t"
24039 + "pop %%ds\n"
24040 "jc 1f\n\t"
24041 "xor %%ah, %%ah\n"
24042 "1:"
24043 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
24044 : "0" (PCIBIOS_SET_PCI_HW_INT),
24045 "b" ((dev->bus->number << 8) | dev->devfn),
24046 "c" ((irq << 8) | (pin + 10)),
24047 - "S" (&pci_indirect));
24048 + "S" (&pci_indirect),
24049 + "r" (__PCIBIOS_DS));
24050 return !(ret & 0xff00);
24051 }
24052 EXPORT_SYMBOL(pcibios_set_irq_routing);
24053 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
24054 index 5cab48e..b025f9b 100644
24055 --- a/arch/x86/platform/efi/efi_32.c
24056 +++ b/arch/x86/platform/efi/efi_32.c
24057 @@ -38,70 +38,56 @@
24058 */
24059
24060 static unsigned long efi_rt_eflags;
24061 -static pgd_t efi_bak_pg_dir_pointer[2];
24062 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
24063
24064 -void efi_call_phys_prelog(void)
24065 +void __init efi_call_phys_prelog(void)
24066 {
24067 - unsigned long cr4;
24068 - unsigned long temp;
24069 struct desc_ptr gdt_descr;
24070
24071 +#ifdef CONFIG_PAX_KERNEXEC
24072 + struct desc_struct d;
24073 +#endif
24074 +
24075 local_irq_save(efi_rt_eflags);
24076
24077 - /*
24078 - * If I don't have PAE, I should just duplicate two entries in page
24079 - * directory. If I have PAE, I just need to duplicate one entry in
24080 - * page directory.
24081 - */
24082 - cr4 = read_cr4_safe();
24083 -
24084 - if (cr4 & X86_CR4_PAE) {
24085 - efi_bak_pg_dir_pointer[0].pgd =
24086 - swapper_pg_dir[pgd_index(0)].pgd;
24087 - swapper_pg_dir[0].pgd =
24088 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24089 - } else {
24090 - efi_bak_pg_dir_pointer[0].pgd =
24091 - swapper_pg_dir[pgd_index(0)].pgd;
24092 - efi_bak_pg_dir_pointer[1].pgd =
24093 - swapper_pg_dir[pgd_index(0x400000)].pgd;
24094 - swapper_pg_dir[pgd_index(0)].pgd =
24095 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
24096 - temp = PAGE_OFFSET + 0x400000;
24097 - swapper_pg_dir[pgd_index(0x400000)].pgd =
24098 - swapper_pg_dir[pgd_index(temp)].pgd;
24099 - }
24100 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
24101 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24102 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
24103
24104 /*
24105 * After the lock is released, the original page table is restored.
24106 */
24107 __flush_tlb_all();
24108
24109 +#ifdef CONFIG_PAX_KERNEXEC
24110 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
24111 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24112 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
24113 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24114 +#endif
24115 +
24116 gdt_descr.address = __pa(get_cpu_gdt_table(0));
24117 gdt_descr.size = GDT_SIZE - 1;
24118 load_gdt(&gdt_descr);
24119 }
24120
24121 -void efi_call_phys_epilog(void)
24122 +void __init efi_call_phys_epilog(void)
24123 {
24124 - unsigned long cr4;
24125 struct desc_ptr gdt_descr;
24126
24127 +#ifdef CONFIG_PAX_KERNEXEC
24128 + struct desc_struct d;
24129 +
24130 + memset(&d, 0, sizeof d);
24131 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24132 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24133 +#endif
24134 +
24135 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24136 gdt_descr.size = GDT_SIZE - 1;
24137 load_gdt(&gdt_descr);
24138
24139 - cr4 = read_cr4_safe();
24140 -
24141 - if (cr4 & X86_CR4_PAE) {
24142 - swapper_pg_dir[pgd_index(0)].pgd =
24143 - efi_bak_pg_dir_pointer[0].pgd;
24144 - } else {
24145 - swapper_pg_dir[pgd_index(0)].pgd =
24146 - efi_bak_pg_dir_pointer[0].pgd;
24147 - swapper_pg_dir[pgd_index(0x400000)].pgd =
24148 - efi_bak_pg_dir_pointer[1].pgd;
24149 - }
24150 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
24151
24152 /*
24153 * After the lock is released, the original page table is restored.
24154 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24155 index fbe66e6..c5c0dd2 100644
24156 --- a/arch/x86/platform/efi/efi_stub_32.S
24157 +++ b/arch/x86/platform/efi/efi_stub_32.S
24158 @@ -6,7 +6,9 @@
24159 */
24160
24161 #include <linux/linkage.h>
24162 +#include <linux/init.h>
24163 #include <asm/page_types.h>
24164 +#include <asm/segment.h>
24165
24166 /*
24167 * efi_call_phys(void *, ...) is a function with variable parameters.
24168 @@ -20,7 +22,7 @@
24169 * service functions will comply with gcc calling convention, too.
24170 */
24171
24172 -.text
24173 +__INIT
24174 ENTRY(efi_call_phys)
24175 /*
24176 * 0. The function can only be called in Linux kernel. So CS has been
24177 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24178 * The mapping of lower virtual memory has been created in prelog and
24179 * epilog.
24180 */
24181 - movl $1f, %edx
24182 - subl $__PAGE_OFFSET, %edx
24183 - jmp *%edx
24184 + movl $(__KERNEXEC_EFI_DS), %edx
24185 + mov %edx, %ds
24186 + mov %edx, %es
24187 + mov %edx, %ss
24188 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24189 1:
24190
24191 /*
24192 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24193 * parameter 2, ..., param n. To make things easy, we save the return
24194 * address of efi_call_phys in a global variable.
24195 */
24196 - popl %edx
24197 - movl %edx, saved_return_addr
24198 - /* get the function pointer into ECX*/
24199 - popl %ecx
24200 - movl %ecx, efi_rt_function_ptr
24201 - movl $2f, %edx
24202 - subl $__PAGE_OFFSET, %edx
24203 - pushl %edx
24204 + popl (saved_return_addr)
24205 + popl (efi_rt_function_ptr)
24206
24207 /*
24208 * 3. Clear PG bit in %CR0.
24209 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24210 /*
24211 * 5. Call the physical function.
24212 */
24213 - jmp *%ecx
24214 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24215
24216 -2:
24217 /*
24218 * 6. After EFI runtime service returns, control will return to
24219 * following instruction. We'd better readjust stack pointer first.
24220 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24221 movl %cr0, %edx
24222 orl $0x80000000, %edx
24223 movl %edx, %cr0
24224 - jmp 1f
24225 -1:
24226 +
24227 /*
24228 * 8. Now restore the virtual mode from flat mode by
24229 * adding EIP with PAGE_OFFSET.
24230 */
24231 - movl $1f, %edx
24232 - jmp *%edx
24233 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24234 1:
24235 + movl $(__KERNEL_DS), %edx
24236 + mov %edx, %ds
24237 + mov %edx, %es
24238 + mov %edx, %ss
24239
24240 /*
24241 * 9. Balance the stack. And because EAX contain the return value,
24242 * we'd better not clobber it.
24243 */
24244 - leal efi_rt_function_ptr, %edx
24245 - movl (%edx), %ecx
24246 - pushl %ecx
24247 + pushl (efi_rt_function_ptr)
24248
24249 /*
24250 - * 10. Push the saved return address onto the stack and return.
24251 + * 10. Return to the saved return address.
24252 */
24253 - leal saved_return_addr, %edx
24254 - movl (%edx), %ecx
24255 - pushl %ecx
24256 - ret
24257 + jmpl *(saved_return_addr)
24258 ENDPROC(efi_call_phys)
24259 .previous
24260
24261 -.data
24262 +__INITDATA
24263 saved_return_addr:
24264 .long 0
24265 efi_rt_function_ptr:
24266 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24267 index 4c07cca..2c8427d 100644
24268 --- a/arch/x86/platform/efi/efi_stub_64.S
24269 +++ b/arch/x86/platform/efi/efi_stub_64.S
24270 @@ -7,6 +7,7 @@
24271 */
24272
24273 #include <linux/linkage.h>
24274 +#include <asm/alternative-asm.h>
24275
24276 #define SAVE_XMM \
24277 mov %rsp, %rax; \
24278 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24279 call *%rdi
24280 addq $32, %rsp
24281 RESTORE_XMM
24282 + pax_force_retaddr 0, 1
24283 ret
24284 ENDPROC(efi_call0)
24285
24286 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24287 call *%rdi
24288 addq $32, %rsp
24289 RESTORE_XMM
24290 + pax_force_retaddr 0, 1
24291 ret
24292 ENDPROC(efi_call1)
24293
24294 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24295 call *%rdi
24296 addq $32, %rsp
24297 RESTORE_XMM
24298 + pax_force_retaddr 0, 1
24299 ret
24300 ENDPROC(efi_call2)
24301
24302 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24303 call *%rdi
24304 addq $32, %rsp
24305 RESTORE_XMM
24306 + pax_force_retaddr 0, 1
24307 ret
24308 ENDPROC(efi_call3)
24309
24310 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24311 call *%rdi
24312 addq $32, %rsp
24313 RESTORE_XMM
24314 + pax_force_retaddr 0, 1
24315 ret
24316 ENDPROC(efi_call4)
24317
24318 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24319 call *%rdi
24320 addq $48, %rsp
24321 RESTORE_XMM
24322 + pax_force_retaddr 0, 1
24323 ret
24324 ENDPROC(efi_call5)
24325
24326 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24327 call *%rdi
24328 addq $48, %rsp
24329 RESTORE_XMM
24330 + pax_force_retaddr 0, 1
24331 ret
24332 ENDPROC(efi_call6)
24333 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24334 index fe73276..70fe25a 100644
24335 --- a/arch/x86/platform/mrst/mrst.c
24336 +++ b/arch/x86/platform/mrst/mrst.c
24337 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
24338 }
24339
24340 /* Reboot and power off are handled by the SCU on a MID device */
24341 -static void mrst_power_off(void)
24342 +static __noreturn void mrst_power_off(void)
24343 {
24344 intel_scu_ipc_simple_command(0xf1, 1);
24345 + BUG();
24346 }
24347
24348 -static void mrst_reboot(void)
24349 +static __noreturn void mrst_reboot(void)
24350 {
24351 intel_scu_ipc_simple_command(0xf1, 0);
24352 + BUG();
24353 }
24354
24355 /*
24356 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
24357 index 5b55219..b326540 100644
24358 --- a/arch/x86/platform/uv/tlb_uv.c
24359 +++ b/arch/x86/platform/uv/tlb_uv.c
24360 @@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
24361 struct bau_control *smaster = bcp->socket_master;
24362 struct reset_args reset_args;
24363
24364 + pax_track_stack();
24365 +
24366 reset_args.sender = sender;
24367 cpus_clear(*mask);
24368 /* find a single cpu for each uvhub in this distribution mask */
24369 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24370 index 87bb35e..eff2da8 100644
24371 --- a/arch/x86/power/cpu.c
24372 +++ b/arch/x86/power/cpu.c
24373 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
24374 static void fix_processor_context(void)
24375 {
24376 int cpu = smp_processor_id();
24377 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24378 + struct tss_struct *t = init_tss + cpu;
24379
24380 set_tss_desc(cpu, t); /*
24381 * This just modifies memory; should not be
24382 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
24383 */
24384
24385 #ifdef CONFIG_X86_64
24386 + pax_open_kernel();
24387 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24388 + pax_close_kernel();
24389
24390 syscall_init(); /* This sets MSR_*STAR and related */
24391 #endif
24392 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24393 index 5d17950..2253fc9 100644
24394 --- a/arch/x86/vdso/Makefile
24395 +++ b/arch/x86/vdso/Makefile
24396 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24397 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24398 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24399
24400 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24401 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24402 GCOV_PROFILE := n
24403
24404 #
24405 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24406 index 468d591..8e80a0a 100644
24407 --- a/arch/x86/vdso/vdso32-setup.c
24408 +++ b/arch/x86/vdso/vdso32-setup.c
24409 @@ -25,6 +25,7 @@
24410 #include <asm/tlbflush.h>
24411 #include <asm/vdso.h>
24412 #include <asm/proto.h>
24413 +#include <asm/mman.h>
24414
24415 enum {
24416 VDSO_DISABLED = 0,
24417 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24418 void enable_sep_cpu(void)
24419 {
24420 int cpu = get_cpu();
24421 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24422 + struct tss_struct *tss = init_tss + cpu;
24423
24424 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24425 put_cpu();
24426 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24427 gate_vma.vm_start = FIXADDR_USER_START;
24428 gate_vma.vm_end = FIXADDR_USER_END;
24429 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24430 - gate_vma.vm_page_prot = __P101;
24431 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24432 /*
24433 * Make sure the vDSO gets into every core dump.
24434 * Dumping its contents makes post-mortem fully interpretable later
24435 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24436 if (compat)
24437 addr = VDSO_HIGH_BASE;
24438 else {
24439 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24440 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24441 if (IS_ERR_VALUE(addr)) {
24442 ret = addr;
24443 goto up_fail;
24444 }
24445 }
24446
24447 - current->mm->context.vdso = (void *)addr;
24448 + current->mm->context.vdso = addr;
24449
24450 if (compat_uses_vma || !compat) {
24451 /*
24452 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24453 }
24454
24455 current_thread_info()->sysenter_return =
24456 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24457 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24458
24459 up_fail:
24460 if (ret)
24461 - current->mm->context.vdso = NULL;
24462 + current->mm->context.vdso = 0;
24463
24464 up_write(&mm->mmap_sem);
24465
24466 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24467
24468 const char *arch_vma_name(struct vm_area_struct *vma)
24469 {
24470 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24471 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24472 return "[vdso]";
24473 +
24474 +#ifdef CONFIG_PAX_SEGMEXEC
24475 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24476 + return "[vdso]";
24477 +#endif
24478 +
24479 return NULL;
24480 }
24481
24482 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24483 * Check to see if the corresponding task was created in compat vdso
24484 * mode.
24485 */
24486 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24487 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24488 return &gate_vma;
24489 return NULL;
24490 }
24491 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24492 index 316fbca..4638633 100644
24493 --- a/arch/x86/vdso/vma.c
24494 +++ b/arch/x86/vdso/vma.c
24495 @@ -16,8 +16,6 @@
24496 #include <asm/vdso.h>
24497 #include <asm/page.h>
24498
24499 -unsigned int __read_mostly vdso_enabled = 1;
24500 -
24501 extern char vdso_start[], vdso_end[];
24502 extern unsigned short vdso_sync_cpuid;
24503
24504 @@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24505 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24506 {
24507 struct mm_struct *mm = current->mm;
24508 - unsigned long addr;
24509 + unsigned long addr = 0;
24510 int ret;
24511
24512 - if (!vdso_enabled)
24513 - return 0;
24514 -
24515 down_write(&mm->mmap_sem);
24516 +
24517 +#ifdef CONFIG_PAX_RANDMMAP
24518 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24519 +#endif
24520 +
24521 addr = vdso_addr(mm->start_stack, vdso_size);
24522 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24523 if (IS_ERR_VALUE(addr)) {
24524 @@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24525 goto up_fail;
24526 }
24527
24528 - current->mm->context.vdso = (void *)addr;
24529 + mm->context.vdso = addr;
24530
24531 ret = install_special_mapping(mm, addr, vdso_size,
24532 VM_READ|VM_EXEC|
24533 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24534 VM_ALWAYSDUMP,
24535 vdso_pages);
24536 - if (ret) {
24537 - current->mm->context.vdso = NULL;
24538 - goto up_fail;
24539 - }
24540 +
24541 + if (ret)
24542 + mm->context.vdso = 0;
24543
24544 up_fail:
24545 up_write(&mm->mmap_sem);
24546 return ret;
24547 }
24548 -
24549 -static __init int vdso_setup(char *s)
24550 -{
24551 - vdso_enabled = simple_strtoul(s, NULL, 0);
24552 - return 0;
24553 -}
24554 -__setup("vdso=", vdso_setup);
24555 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24556 index 46c8069..6330d3c 100644
24557 --- a/arch/x86/xen/enlighten.c
24558 +++ b/arch/x86/xen/enlighten.c
24559 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24560
24561 struct shared_info xen_dummy_shared_info;
24562
24563 -void *xen_initial_gdt;
24564 -
24565 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24566 __read_mostly int xen_have_vector_callback;
24567 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24568 @@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24569 #endif
24570 };
24571
24572 -static void xen_reboot(int reason)
24573 +static __noreturn void xen_reboot(int reason)
24574 {
24575 struct sched_shutdown r = { .reason = reason };
24576
24577 @@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
24578 BUG();
24579 }
24580
24581 -static void xen_restart(char *msg)
24582 +static __noreturn void xen_restart(char *msg)
24583 {
24584 xen_reboot(SHUTDOWN_reboot);
24585 }
24586
24587 -static void xen_emergency_restart(void)
24588 +static __noreturn void xen_emergency_restart(void)
24589 {
24590 xen_reboot(SHUTDOWN_reboot);
24591 }
24592
24593 -static void xen_machine_halt(void)
24594 +static __noreturn void xen_machine_halt(void)
24595 {
24596 xen_reboot(SHUTDOWN_poweroff);
24597 }
24598 @@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(void)
24599 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24600
24601 /* Work out if we support NX */
24602 - x86_configure_nx();
24603 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24604 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24605 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24606 + unsigned l, h;
24607 +
24608 + __supported_pte_mask |= _PAGE_NX;
24609 + rdmsr(MSR_EFER, l, h);
24610 + l |= EFER_NX;
24611 + wrmsr(MSR_EFER, l, h);
24612 + }
24613 +#endif
24614
24615 xen_setup_features();
24616
24617 @@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(void)
24618
24619 machine_ops = xen_machine_ops;
24620
24621 - /*
24622 - * The only reliable way to retain the initial address of the
24623 - * percpu gdt_page is to remember it here, so we can go and
24624 - * mark it RW later, when the initial percpu area is freed.
24625 - */
24626 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24627 -
24628 xen_smp_init();
24629
24630 #ifdef CONFIG_ACPI_NUMA
24631 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24632 index 3dd53f9..9e8ba48 100644
24633 --- a/arch/x86/xen/mmu.c
24634 +++ b/arch/x86/xen/mmu.c
24635 @@ -1768,6 +1768,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24636 convert_pfn_mfn(init_level4_pgt);
24637 convert_pfn_mfn(level3_ident_pgt);
24638 convert_pfn_mfn(level3_kernel_pgt);
24639 + convert_pfn_mfn(level3_vmalloc_pgt);
24640 + convert_pfn_mfn(level3_vmemmap_pgt);
24641
24642 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24643 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24644 @@ -1786,7 +1788,10 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24645 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24646 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24647 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24648 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
24649 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24650 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24651 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24652 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24653 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24654
24655 @@ -2000,6 +2005,7 @@ static void __init xen_post_allocator_init(void)
24656 pv_mmu_ops.set_pud = xen_set_pud;
24657 #if PAGETABLE_LEVELS == 4
24658 pv_mmu_ops.set_pgd = xen_set_pgd;
24659 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24660 #endif
24661
24662 /* This will work as long as patching hasn't happened yet
24663 @@ -2081,6 +2087,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24664 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24665 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24666 .set_pgd = xen_set_pgd_hyper,
24667 + .set_pgd_batched = xen_set_pgd_hyper,
24668
24669 .alloc_pud = xen_alloc_pmd_init,
24670 .release_pud = xen_release_pmd_init,
24671 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24672 index 041d4fe..7666b7e 100644
24673 --- a/arch/x86/xen/smp.c
24674 +++ b/arch/x86/xen/smp.c
24675 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24676 {
24677 BUG_ON(smp_processor_id() != 0);
24678 native_smp_prepare_boot_cpu();
24679 -
24680 - /* We've switched to the "real" per-cpu gdt, so make sure the
24681 - old memory can be recycled */
24682 - make_lowmem_page_readwrite(xen_initial_gdt);
24683 -
24684 xen_filter_cpu_maps();
24685 xen_setup_vcpu_info_placement();
24686 }
24687 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24688 gdt = get_cpu_gdt_table(cpu);
24689
24690 ctxt->flags = VGCF_IN_KERNEL;
24691 - ctxt->user_regs.ds = __USER_DS;
24692 - ctxt->user_regs.es = __USER_DS;
24693 + ctxt->user_regs.ds = __KERNEL_DS;
24694 + ctxt->user_regs.es = __KERNEL_DS;
24695 ctxt->user_regs.ss = __KERNEL_DS;
24696 #ifdef CONFIG_X86_32
24697 ctxt->user_regs.fs = __KERNEL_PERCPU;
24698 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24699 + savesegment(gs, ctxt->user_regs.gs);
24700 #else
24701 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24702 #endif
24703 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24704 int rc;
24705
24706 per_cpu(current_task, cpu) = idle;
24707 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
24708 #ifdef CONFIG_X86_32
24709 irq_ctx_init(cpu);
24710 #else
24711 clear_tsk_thread_flag(idle, TIF_FORK);
24712 - per_cpu(kernel_stack, cpu) =
24713 - (unsigned long)task_stack_page(idle) -
24714 - KERNEL_STACK_OFFSET + THREAD_SIZE;
24715 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24716 #endif
24717 xen_setup_runstate_info(cpu);
24718 xen_setup_timer(cpu);
24719 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24720 index b040b0e..8cc4fe0 100644
24721 --- a/arch/x86/xen/xen-asm_32.S
24722 +++ b/arch/x86/xen/xen-asm_32.S
24723 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
24724 ESP_OFFSET=4 # bytes pushed onto stack
24725
24726 /*
24727 - * Store vcpu_info pointer for easy access. Do it this way to
24728 - * avoid having to reload %fs
24729 + * Store vcpu_info pointer for easy access.
24730 */
24731 #ifdef CONFIG_SMP
24732 - GET_THREAD_INFO(%eax)
24733 - movl TI_cpu(%eax), %eax
24734 - movl __per_cpu_offset(,%eax,4), %eax
24735 - mov xen_vcpu(%eax), %eax
24736 + push %fs
24737 + mov $(__KERNEL_PERCPU), %eax
24738 + mov %eax, %fs
24739 + mov PER_CPU_VAR(xen_vcpu), %eax
24740 + pop %fs
24741 #else
24742 movl xen_vcpu, %eax
24743 #endif
24744 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24745 index aaa7291..3f77960 100644
24746 --- a/arch/x86/xen/xen-head.S
24747 +++ b/arch/x86/xen/xen-head.S
24748 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24749 #ifdef CONFIG_X86_32
24750 mov %esi,xen_start_info
24751 mov $init_thread_union+THREAD_SIZE,%esp
24752 +#ifdef CONFIG_SMP
24753 + movl $cpu_gdt_table,%edi
24754 + movl $__per_cpu_load,%eax
24755 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24756 + rorl $16,%eax
24757 + movb %al,__KERNEL_PERCPU + 4(%edi)
24758 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24759 + movl $__per_cpu_end - 1,%eax
24760 + subl $__per_cpu_start,%eax
24761 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24762 +#endif
24763 #else
24764 mov %rsi,xen_start_info
24765 mov $init_thread_union+THREAD_SIZE,%rsp
24766 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24767 index b095739..8c17bcd 100644
24768 --- a/arch/x86/xen/xen-ops.h
24769 +++ b/arch/x86/xen/xen-ops.h
24770 @@ -10,8 +10,6 @@
24771 extern const char xen_hypervisor_callback[];
24772 extern const char xen_failsafe_callback[];
24773
24774 -extern void *xen_initial_gdt;
24775 -
24776 struct trap_info;
24777 void xen_copy_trap_info(struct trap_info *traps);
24778
24779 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24780 index 58916af..9cb880b 100644
24781 --- a/block/blk-iopoll.c
24782 +++ b/block/blk-iopoll.c
24783 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24784 }
24785 EXPORT_SYMBOL(blk_iopoll_complete);
24786
24787 -static void blk_iopoll_softirq(struct softirq_action *h)
24788 +static void blk_iopoll_softirq(void)
24789 {
24790 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24791 int rearm = 0, budget = blk_iopoll_budget;
24792 diff --git a/block/blk-map.c b/block/blk-map.c
24793 index 164cd00..6d96fc1 100644
24794 --- a/block/blk-map.c
24795 +++ b/block/blk-map.c
24796 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24797 if (!len || !kbuf)
24798 return -EINVAL;
24799
24800 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24801 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24802 if (do_copy)
24803 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24804 else
24805 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24806 index 1366a89..e17f54b 100644
24807 --- a/block/blk-softirq.c
24808 +++ b/block/blk-softirq.c
24809 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24810 * Softirq action handler - move entries to local list and loop over them
24811 * while passing them to the queue registered handler.
24812 */
24813 -static void blk_done_softirq(struct softirq_action *h)
24814 +static void blk_done_softirq(void)
24815 {
24816 struct list_head *cpu_list, local_list;
24817
24818 diff --git a/block/bsg.c b/block/bsg.c
24819 index 702f131..37808bf 100644
24820 --- a/block/bsg.c
24821 +++ b/block/bsg.c
24822 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24823 struct sg_io_v4 *hdr, struct bsg_device *bd,
24824 fmode_t has_write_perm)
24825 {
24826 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24827 + unsigned char *cmdptr;
24828 +
24829 if (hdr->request_len > BLK_MAX_CDB) {
24830 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24831 if (!rq->cmd)
24832 return -ENOMEM;
24833 - }
24834 + cmdptr = rq->cmd;
24835 + } else
24836 + cmdptr = tmpcmd;
24837
24838 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24839 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24840 hdr->request_len))
24841 return -EFAULT;
24842
24843 + if (cmdptr != rq->cmd)
24844 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24845 +
24846 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24847 if (blk_verify_command(rq->cmd, has_write_perm))
24848 return -EPERM;
24849 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24850 index 7b72502..646105c 100644
24851 --- a/block/compat_ioctl.c
24852 +++ b/block/compat_ioctl.c
24853 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24854 err |= __get_user(f->spec1, &uf->spec1);
24855 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24856 err |= __get_user(name, &uf->name);
24857 - f->name = compat_ptr(name);
24858 + f->name = (void __force_kernel *)compat_ptr(name);
24859 if (err) {
24860 err = -EFAULT;
24861 goto out;
24862 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24863 index 4f4230b..0feae9a 100644
24864 --- a/block/scsi_ioctl.c
24865 +++ b/block/scsi_ioctl.c
24866 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
24867 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24868 struct sg_io_hdr *hdr, fmode_t mode)
24869 {
24870 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24871 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24872 + unsigned char *cmdptr;
24873 +
24874 + if (rq->cmd != rq->__cmd)
24875 + cmdptr = rq->cmd;
24876 + else
24877 + cmdptr = tmpcmd;
24878 +
24879 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24880 return -EFAULT;
24881 +
24882 + if (cmdptr != rq->cmd)
24883 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24884 +
24885 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24886 return -EPERM;
24887
24888 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24889 int err;
24890 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24891 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24892 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24893 + unsigned char *cmdptr;
24894
24895 if (!sic)
24896 return -EINVAL;
24897 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24898 */
24899 err = -EFAULT;
24900 rq->cmd_len = cmdlen;
24901 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
24902 +
24903 + if (rq->cmd != rq->__cmd)
24904 + cmdptr = rq->cmd;
24905 + else
24906 + cmdptr = tmpcmd;
24907 +
24908 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24909 goto error;
24910
24911 + if (rq->cmd != cmdptr)
24912 + memcpy(rq->cmd, cmdptr, cmdlen);
24913 +
24914 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24915 goto error;
24916
24917 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24918 index 671d4d6..5f24030 100644
24919 --- a/crypto/cryptd.c
24920 +++ b/crypto/cryptd.c
24921 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24922
24923 struct cryptd_blkcipher_request_ctx {
24924 crypto_completion_t complete;
24925 -};
24926 +} __no_const;
24927
24928 struct cryptd_hash_ctx {
24929 struct crypto_shash *child;
24930 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24931
24932 struct cryptd_aead_request_ctx {
24933 crypto_completion_t complete;
24934 -};
24935 +} __no_const;
24936
24937 static void cryptd_queue_worker(struct work_struct *work);
24938
24939 diff --git a/crypto/serpent.c b/crypto/serpent.c
24940 index b651a55..a9ddd79b 100644
24941 --- a/crypto/serpent.c
24942 +++ b/crypto/serpent.c
24943 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
24944 u32 r0,r1,r2,r3,r4;
24945 int i;
24946
24947 + pax_track_stack();
24948 +
24949 /* Copy key, add padding */
24950
24951 for (i = 0; i < keylen; ++i)
24952 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
24953 index 5d41894..22021e4 100644
24954 --- a/drivers/acpi/apei/cper.c
24955 +++ b/drivers/acpi/apei/cper.c
24956 @@ -38,12 +38,12 @@
24957 */
24958 u64 cper_next_record_id(void)
24959 {
24960 - static atomic64_t seq;
24961 + static atomic64_unchecked_t seq;
24962
24963 - if (!atomic64_read(&seq))
24964 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
24965 + if (!atomic64_read_unchecked(&seq))
24966 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
24967
24968 - return atomic64_inc_return(&seq);
24969 + return atomic64_inc_return_unchecked(&seq);
24970 }
24971 EXPORT_SYMBOL_GPL(cper_next_record_id);
24972
24973 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
24974 index 22f918b..9fafb84 100644
24975 --- a/drivers/acpi/ec_sys.c
24976 +++ b/drivers/acpi/ec_sys.c
24977 @@ -11,6 +11,7 @@
24978 #include <linux/kernel.h>
24979 #include <linux/acpi.h>
24980 #include <linux/debugfs.h>
24981 +#include <asm/uaccess.h>
24982 #include "internal.h"
24983
24984 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
24985 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
24986 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
24987 */
24988 unsigned int size = EC_SPACE_SIZE;
24989 - u8 *data = (u8 *) buf;
24990 + u8 data;
24991 loff_t init_off = *off;
24992 int err = 0;
24993
24994 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
24995 size = count;
24996
24997 while (size) {
24998 - err = ec_read(*off, &data[*off - init_off]);
24999 + err = ec_read(*off, &data);
25000 if (err)
25001 return err;
25002 + if (put_user(data, &buf[*off - init_off]))
25003 + return -EFAULT;
25004 *off += 1;
25005 size--;
25006 }
25007 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25008
25009 unsigned int size = count;
25010 loff_t init_off = *off;
25011 - u8 *data = (u8 *) buf;
25012 int err = 0;
25013
25014 if (*off >= EC_SPACE_SIZE)
25015 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
25016 }
25017
25018 while (size) {
25019 - u8 byte_write = data[*off - init_off];
25020 + u8 byte_write;
25021 + if (get_user(byte_write, &buf[*off - init_off]))
25022 + return -EFAULT;
25023 err = ec_write(*off, byte_write);
25024 if (err)
25025 return err;
25026 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
25027 index f5f9869..da87aeb 100644
25028 --- a/drivers/acpi/proc.c
25029 +++ b/drivers/acpi/proc.c
25030 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct file *file,
25031 size_t count, loff_t * ppos)
25032 {
25033 struct list_head *node, *next;
25034 - char strbuf[5];
25035 - char str[5] = "";
25036 - unsigned int len = count;
25037 + char strbuf[5] = {0};
25038
25039 - if (len > 4)
25040 - len = 4;
25041 - if (len < 0)
25042 + if (count > 4)
25043 + count = 4;
25044 + if (copy_from_user(strbuf, buffer, count))
25045 return -EFAULT;
25046 -
25047 - if (copy_from_user(strbuf, buffer, len))
25048 - return -EFAULT;
25049 - strbuf[len] = '\0';
25050 - sscanf(strbuf, "%s", str);
25051 + strbuf[count] = '\0';
25052
25053 mutex_lock(&acpi_device_lock);
25054 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
25055 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct file *file,
25056 if (!dev->wakeup.flags.valid)
25057 continue;
25058
25059 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
25060 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
25061 if (device_can_wakeup(&dev->dev)) {
25062 bool enable = !device_may_wakeup(&dev->dev);
25063 device_set_wakeup_enable(&dev->dev, enable);
25064 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
25065 index a4e0f1b..9793b28 100644
25066 --- a/drivers/acpi/processor_driver.c
25067 +++ b/drivers/acpi/processor_driver.c
25068 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
25069 return 0;
25070 #endif
25071
25072 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
25073 + BUG_ON(pr->id >= nr_cpu_ids);
25074
25075 /*
25076 * Buggy BIOS check
25077 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
25078 index 4a3a5ae..cbee192 100644
25079 --- a/drivers/ata/libata-core.c
25080 +++ b/drivers/ata/libata-core.c
25081 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
25082 struct ata_port *ap;
25083 unsigned int tag;
25084
25085 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25086 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25087 ap = qc->ap;
25088
25089 qc->flags = 0;
25090 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
25091 struct ata_port *ap;
25092 struct ata_link *link;
25093
25094 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25095 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
25096 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
25097 ap = qc->ap;
25098 link = qc->dev->link;
25099 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25100 return;
25101
25102 spin_lock(&lock);
25103 + pax_open_kernel();
25104
25105 for (cur = ops->inherits; cur; cur = cur->inherits) {
25106 void **inherit = (void **)cur;
25107 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
25108 if (IS_ERR(*pp))
25109 *pp = NULL;
25110
25111 - ops->inherits = NULL;
25112 + *(struct ata_port_operations **)&ops->inherits = NULL;
25113
25114 + pax_close_kernel();
25115 spin_unlock(&lock);
25116 }
25117
25118 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
25119 index ed16fbe..fc92cb8 100644
25120 --- a/drivers/ata/libata-eh.c
25121 +++ b/drivers/ata/libata-eh.c
25122 @@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
25123 {
25124 struct ata_link *link;
25125
25126 + pax_track_stack();
25127 +
25128 ata_for_each_link(link, ap, HOST_FIRST)
25129 ata_eh_link_report(link);
25130 }
25131 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25132 index 719bb73..79ce858 100644
25133 --- a/drivers/ata/pata_arasan_cf.c
25134 +++ b/drivers/ata/pata_arasan_cf.c
25135 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25136 /* Handle platform specific quirks */
25137 if (pdata->quirk) {
25138 if (pdata->quirk & CF_BROKEN_PIO) {
25139 - ap->ops->set_piomode = NULL;
25140 + pax_open_kernel();
25141 + *(void **)&ap->ops->set_piomode = NULL;
25142 + pax_close_kernel();
25143 ap->pio_mask = 0;
25144 }
25145 if (pdata->quirk & CF_BROKEN_MWDMA)
25146 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25147 index f9b983a..887b9d8 100644
25148 --- a/drivers/atm/adummy.c
25149 +++ b/drivers/atm/adummy.c
25150 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25151 vcc->pop(vcc, skb);
25152 else
25153 dev_kfree_skb_any(skb);
25154 - atomic_inc(&vcc->stats->tx);
25155 + atomic_inc_unchecked(&vcc->stats->tx);
25156
25157 return 0;
25158 }
25159 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25160 index f8f41e0..1f987dd 100644
25161 --- a/drivers/atm/ambassador.c
25162 +++ b/drivers/atm/ambassador.c
25163 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25164 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25165
25166 // VC layer stats
25167 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25168 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25169
25170 // free the descriptor
25171 kfree (tx_descr);
25172 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25173 dump_skb ("<<<", vc, skb);
25174
25175 // VC layer stats
25176 - atomic_inc(&atm_vcc->stats->rx);
25177 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25178 __net_timestamp(skb);
25179 // end of our responsibility
25180 atm_vcc->push (atm_vcc, skb);
25181 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25182 } else {
25183 PRINTK (KERN_INFO, "dropped over-size frame");
25184 // should we count this?
25185 - atomic_inc(&atm_vcc->stats->rx_drop);
25186 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25187 }
25188
25189 } else {
25190 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25191 }
25192
25193 if (check_area (skb->data, skb->len)) {
25194 - atomic_inc(&atm_vcc->stats->tx_err);
25195 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25196 return -ENOMEM; // ?
25197 }
25198
25199 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25200 index b22d71c..d6e1049 100644
25201 --- a/drivers/atm/atmtcp.c
25202 +++ b/drivers/atm/atmtcp.c
25203 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25204 if (vcc->pop) vcc->pop(vcc,skb);
25205 else dev_kfree_skb(skb);
25206 if (dev_data) return 0;
25207 - atomic_inc(&vcc->stats->tx_err);
25208 + atomic_inc_unchecked(&vcc->stats->tx_err);
25209 return -ENOLINK;
25210 }
25211 size = skb->len+sizeof(struct atmtcp_hdr);
25212 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25213 if (!new_skb) {
25214 if (vcc->pop) vcc->pop(vcc,skb);
25215 else dev_kfree_skb(skb);
25216 - atomic_inc(&vcc->stats->tx_err);
25217 + atomic_inc_unchecked(&vcc->stats->tx_err);
25218 return -ENOBUFS;
25219 }
25220 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25221 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25222 if (vcc->pop) vcc->pop(vcc,skb);
25223 else dev_kfree_skb(skb);
25224 out_vcc->push(out_vcc,new_skb);
25225 - atomic_inc(&vcc->stats->tx);
25226 - atomic_inc(&out_vcc->stats->rx);
25227 + atomic_inc_unchecked(&vcc->stats->tx);
25228 + atomic_inc_unchecked(&out_vcc->stats->rx);
25229 return 0;
25230 }
25231
25232 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25233 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25234 read_unlock(&vcc_sklist_lock);
25235 if (!out_vcc) {
25236 - atomic_inc(&vcc->stats->tx_err);
25237 + atomic_inc_unchecked(&vcc->stats->tx_err);
25238 goto done;
25239 }
25240 skb_pull(skb,sizeof(struct atmtcp_hdr));
25241 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25242 __net_timestamp(new_skb);
25243 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25244 out_vcc->push(out_vcc,new_skb);
25245 - atomic_inc(&vcc->stats->tx);
25246 - atomic_inc(&out_vcc->stats->rx);
25247 + atomic_inc_unchecked(&vcc->stats->tx);
25248 + atomic_inc_unchecked(&out_vcc->stats->rx);
25249 done:
25250 if (vcc->pop) vcc->pop(vcc,skb);
25251 else dev_kfree_skb(skb);
25252 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25253 index 9307141..d8521bf 100644
25254 --- a/drivers/atm/eni.c
25255 +++ b/drivers/atm/eni.c
25256 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25257 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25258 vcc->dev->number);
25259 length = 0;
25260 - atomic_inc(&vcc->stats->rx_err);
25261 + atomic_inc_unchecked(&vcc->stats->rx_err);
25262 }
25263 else {
25264 length = ATM_CELL_SIZE-1; /* no HEC */
25265 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25266 size);
25267 }
25268 eff = length = 0;
25269 - atomic_inc(&vcc->stats->rx_err);
25270 + atomic_inc_unchecked(&vcc->stats->rx_err);
25271 }
25272 else {
25273 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25274 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25275 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25276 vcc->dev->number,vcc->vci,length,size << 2,descr);
25277 length = eff = 0;
25278 - atomic_inc(&vcc->stats->rx_err);
25279 + atomic_inc_unchecked(&vcc->stats->rx_err);
25280 }
25281 }
25282 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25283 @@ -771,7 +771,7 @@ rx_dequeued++;
25284 vcc->push(vcc,skb);
25285 pushed++;
25286 }
25287 - atomic_inc(&vcc->stats->rx);
25288 + atomic_inc_unchecked(&vcc->stats->rx);
25289 }
25290 wake_up(&eni_dev->rx_wait);
25291 }
25292 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *dev)
25293 PCI_DMA_TODEVICE);
25294 if (vcc->pop) vcc->pop(vcc,skb);
25295 else dev_kfree_skb_irq(skb);
25296 - atomic_inc(&vcc->stats->tx);
25297 + atomic_inc_unchecked(&vcc->stats->tx);
25298 wake_up(&eni_dev->tx_wait);
25299 dma_complete++;
25300 }
25301 @@ -1568,7 +1568,7 @@ tx_complete++;
25302 /*--------------------------------- entries ---------------------------------*/
25303
25304
25305 -static const char *media_name[] __devinitdata = {
25306 +static const char *media_name[] __devinitconst = {
25307 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25308 "UTP", "05?", "06?", "07?", /* 4- 7 */
25309 "TAXI","09?", "10?", "11?", /* 8-11 */
25310 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25311 index 5072f8a..fa52520 100644
25312 --- a/drivers/atm/firestream.c
25313 +++ b/drivers/atm/firestream.c
25314 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25315 }
25316 }
25317
25318 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25319 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25320
25321 fs_dprintk (FS_DEBUG_TXMEM, "i");
25322 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25323 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25324 #endif
25325 skb_put (skb, qe->p1 & 0xffff);
25326 ATM_SKB(skb)->vcc = atm_vcc;
25327 - atomic_inc(&atm_vcc->stats->rx);
25328 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25329 __net_timestamp(skb);
25330 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25331 atm_vcc->push (atm_vcc, skb);
25332 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25333 kfree (pe);
25334 }
25335 if (atm_vcc)
25336 - atomic_inc(&atm_vcc->stats->rx_drop);
25337 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25338 break;
25339 case 0x1f: /* Reassembly abort: no buffers. */
25340 /* Silently increment error counter. */
25341 if (atm_vcc)
25342 - atomic_inc(&atm_vcc->stats->rx_drop);
25343 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25344 break;
25345 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25346 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25347 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25348 index 361f5ae..7fc552d 100644
25349 --- a/drivers/atm/fore200e.c
25350 +++ b/drivers/atm/fore200e.c
25351 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25352 #endif
25353 /* check error condition */
25354 if (*entry->status & STATUS_ERROR)
25355 - atomic_inc(&vcc->stats->tx_err);
25356 + atomic_inc_unchecked(&vcc->stats->tx_err);
25357 else
25358 - atomic_inc(&vcc->stats->tx);
25359 + atomic_inc_unchecked(&vcc->stats->tx);
25360 }
25361 }
25362
25363 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25364 if (skb == NULL) {
25365 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25366
25367 - atomic_inc(&vcc->stats->rx_drop);
25368 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25369 return -ENOMEM;
25370 }
25371
25372 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25373
25374 dev_kfree_skb_any(skb);
25375
25376 - atomic_inc(&vcc->stats->rx_drop);
25377 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25378 return -ENOMEM;
25379 }
25380
25381 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25382
25383 vcc->push(vcc, skb);
25384 - atomic_inc(&vcc->stats->rx);
25385 + atomic_inc_unchecked(&vcc->stats->rx);
25386
25387 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25388
25389 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25390 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25391 fore200e->atm_dev->number,
25392 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25393 - atomic_inc(&vcc->stats->rx_err);
25394 + atomic_inc_unchecked(&vcc->stats->rx_err);
25395 }
25396 }
25397
25398 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25399 goto retry_here;
25400 }
25401
25402 - atomic_inc(&vcc->stats->tx_err);
25403 + atomic_inc_unchecked(&vcc->stats->tx_err);
25404
25405 fore200e->tx_sat++;
25406 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25407 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25408 index 9a51df4..f3bb5f8 100644
25409 --- a/drivers/atm/he.c
25410 +++ b/drivers/atm/he.c
25411 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25412
25413 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25414 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25415 - atomic_inc(&vcc->stats->rx_drop);
25416 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25417 goto return_host_buffers;
25418 }
25419
25420 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25421 RBRQ_LEN_ERR(he_dev->rbrq_head)
25422 ? "LEN_ERR" : "",
25423 vcc->vpi, vcc->vci);
25424 - atomic_inc(&vcc->stats->rx_err);
25425 + atomic_inc_unchecked(&vcc->stats->rx_err);
25426 goto return_host_buffers;
25427 }
25428
25429 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25430 vcc->push(vcc, skb);
25431 spin_lock(&he_dev->global_lock);
25432
25433 - atomic_inc(&vcc->stats->rx);
25434 + atomic_inc_unchecked(&vcc->stats->rx);
25435
25436 return_host_buffers:
25437 ++pdus_assembled;
25438 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25439 tpd->vcc->pop(tpd->vcc, tpd->skb);
25440 else
25441 dev_kfree_skb_any(tpd->skb);
25442 - atomic_inc(&tpd->vcc->stats->tx_err);
25443 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25444 }
25445 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25446 return;
25447 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25448 vcc->pop(vcc, skb);
25449 else
25450 dev_kfree_skb_any(skb);
25451 - atomic_inc(&vcc->stats->tx_err);
25452 + atomic_inc_unchecked(&vcc->stats->tx_err);
25453 return -EINVAL;
25454 }
25455
25456 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25457 vcc->pop(vcc, skb);
25458 else
25459 dev_kfree_skb_any(skb);
25460 - atomic_inc(&vcc->stats->tx_err);
25461 + atomic_inc_unchecked(&vcc->stats->tx_err);
25462 return -EINVAL;
25463 }
25464 #endif
25465 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25466 vcc->pop(vcc, skb);
25467 else
25468 dev_kfree_skb_any(skb);
25469 - atomic_inc(&vcc->stats->tx_err);
25470 + atomic_inc_unchecked(&vcc->stats->tx_err);
25471 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25472 return -ENOMEM;
25473 }
25474 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25475 vcc->pop(vcc, skb);
25476 else
25477 dev_kfree_skb_any(skb);
25478 - atomic_inc(&vcc->stats->tx_err);
25479 + atomic_inc_unchecked(&vcc->stats->tx_err);
25480 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25481 return -ENOMEM;
25482 }
25483 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25484 __enqueue_tpd(he_dev, tpd, cid);
25485 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25486
25487 - atomic_inc(&vcc->stats->tx);
25488 + atomic_inc_unchecked(&vcc->stats->tx);
25489
25490 return 0;
25491 }
25492 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25493 index b812103..e391a49 100644
25494 --- a/drivers/atm/horizon.c
25495 +++ b/drivers/atm/horizon.c
25496 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25497 {
25498 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25499 // VC layer stats
25500 - atomic_inc(&vcc->stats->rx);
25501 + atomic_inc_unchecked(&vcc->stats->rx);
25502 __net_timestamp(skb);
25503 // end of our responsibility
25504 vcc->push (vcc, skb);
25505 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25506 dev->tx_iovec = NULL;
25507
25508 // VC layer stats
25509 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25510 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25511
25512 // free the skb
25513 hrz_kfree_skb (skb);
25514 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25515 index db06f34..dcebb61 100644
25516 --- a/drivers/atm/idt77252.c
25517 +++ b/drivers/atm/idt77252.c
25518 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25519 else
25520 dev_kfree_skb(skb);
25521
25522 - atomic_inc(&vcc->stats->tx);
25523 + atomic_inc_unchecked(&vcc->stats->tx);
25524 }
25525
25526 atomic_dec(&scq->used);
25527 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25528 if ((sb = dev_alloc_skb(64)) == NULL) {
25529 printk("%s: Can't allocate buffers for aal0.\n",
25530 card->name);
25531 - atomic_add(i, &vcc->stats->rx_drop);
25532 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25533 break;
25534 }
25535 if (!atm_charge(vcc, sb->truesize)) {
25536 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25537 card->name);
25538 - atomic_add(i - 1, &vcc->stats->rx_drop);
25539 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25540 dev_kfree_skb(sb);
25541 break;
25542 }
25543 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25544 ATM_SKB(sb)->vcc = vcc;
25545 __net_timestamp(sb);
25546 vcc->push(vcc, sb);
25547 - atomic_inc(&vcc->stats->rx);
25548 + atomic_inc_unchecked(&vcc->stats->rx);
25549
25550 cell += ATM_CELL_PAYLOAD;
25551 }
25552 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25553 "(CDC: %08x)\n",
25554 card->name, len, rpp->len, readl(SAR_REG_CDC));
25555 recycle_rx_pool_skb(card, rpp);
25556 - atomic_inc(&vcc->stats->rx_err);
25557 + atomic_inc_unchecked(&vcc->stats->rx_err);
25558 return;
25559 }
25560 if (stat & SAR_RSQE_CRC) {
25561 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25562 recycle_rx_pool_skb(card, rpp);
25563 - atomic_inc(&vcc->stats->rx_err);
25564 + atomic_inc_unchecked(&vcc->stats->rx_err);
25565 return;
25566 }
25567 if (skb_queue_len(&rpp->queue) > 1) {
25568 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25569 RXPRINTK("%s: Can't alloc RX skb.\n",
25570 card->name);
25571 recycle_rx_pool_skb(card, rpp);
25572 - atomic_inc(&vcc->stats->rx_err);
25573 + atomic_inc_unchecked(&vcc->stats->rx_err);
25574 return;
25575 }
25576 if (!atm_charge(vcc, skb->truesize)) {
25577 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25578 __net_timestamp(skb);
25579
25580 vcc->push(vcc, skb);
25581 - atomic_inc(&vcc->stats->rx);
25582 + atomic_inc_unchecked(&vcc->stats->rx);
25583
25584 return;
25585 }
25586 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25587 __net_timestamp(skb);
25588
25589 vcc->push(vcc, skb);
25590 - atomic_inc(&vcc->stats->rx);
25591 + atomic_inc_unchecked(&vcc->stats->rx);
25592
25593 if (skb->truesize > SAR_FB_SIZE_3)
25594 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25595 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25596 if (vcc->qos.aal != ATM_AAL0) {
25597 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25598 card->name, vpi, vci);
25599 - atomic_inc(&vcc->stats->rx_drop);
25600 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25601 goto drop;
25602 }
25603
25604 if ((sb = dev_alloc_skb(64)) == NULL) {
25605 printk("%s: Can't allocate buffers for AAL0.\n",
25606 card->name);
25607 - atomic_inc(&vcc->stats->rx_err);
25608 + atomic_inc_unchecked(&vcc->stats->rx_err);
25609 goto drop;
25610 }
25611
25612 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25613 ATM_SKB(sb)->vcc = vcc;
25614 __net_timestamp(sb);
25615 vcc->push(vcc, sb);
25616 - atomic_inc(&vcc->stats->rx);
25617 + atomic_inc_unchecked(&vcc->stats->rx);
25618
25619 drop:
25620 skb_pull(queue, 64);
25621 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25622
25623 if (vc == NULL) {
25624 printk("%s: NULL connection in send().\n", card->name);
25625 - atomic_inc(&vcc->stats->tx_err);
25626 + atomic_inc_unchecked(&vcc->stats->tx_err);
25627 dev_kfree_skb(skb);
25628 return -EINVAL;
25629 }
25630 if (!test_bit(VCF_TX, &vc->flags)) {
25631 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25632 - atomic_inc(&vcc->stats->tx_err);
25633 + atomic_inc_unchecked(&vcc->stats->tx_err);
25634 dev_kfree_skb(skb);
25635 return -EINVAL;
25636 }
25637 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25638 break;
25639 default:
25640 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25641 - atomic_inc(&vcc->stats->tx_err);
25642 + atomic_inc_unchecked(&vcc->stats->tx_err);
25643 dev_kfree_skb(skb);
25644 return -EINVAL;
25645 }
25646
25647 if (skb_shinfo(skb)->nr_frags != 0) {
25648 printk("%s: No scatter-gather yet.\n", card->name);
25649 - atomic_inc(&vcc->stats->tx_err);
25650 + atomic_inc_unchecked(&vcc->stats->tx_err);
25651 dev_kfree_skb(skb);
25652 return -EINVAL;
25653 }
25654 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25655
25656 err = queue_skb(card, vc, skb, oam);
25657 if (err) {
25658 - atomic_inc(&vcc->stats->tx_err);
25659 + atomic_inc_unchecked(&vcc->stats->tx_err);
25660 dev_kfree_skb(skb);
25661 return err;
25662 }
25663 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25664 skb = dev_alloc_skb(64);
25665 if (!skb) {
25666 printk("%s: Out of memory in send_oam().\n", card->name);
25667 - atomic_inc(&vcc->stats->tx_err);
25668 + atomic_inc_unchecked(&vcc->stats->tx_err);
25669 return -ENOMEM;
25670 }
25671 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25672 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25673 index cb90f7a..bd33566 100644
25674 --- a/drivers/atm/iphase.c
25675 +++ b/drivers/atm/iphase.c
25676 @@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
25677 status = (u_short) (buf_desc_ptr->desc_mode);
25678 if (status & (RX_CER | RX_PTE | RX_OFL))
25679 {
25680 - atomic_inc(&vcc->stats->rx_err);
25681 + atomic_inc_unchecked(&vcc->stats->rx_err);
25682 IF_ERR(printk("IA: bad packet, dropping it");)
25683 if (status & RX_CER) {
25684 IF_ERR(printk(" cause: packet CRC error\n");)
25685 @@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
25686 len = dma_addr - buf_addr;
25687 if (len > iadev->rx_buf_sz) {
25688 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25689 - atomic_inc(&vcc->stats->rx_err);
25690 + atomic_inc_unchecked(&vcc->stats->rx_err);
25691 goto out_free_desc;
25692 }
25693
25694 @@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25695 ia_vcc = INPH_IA_VCC(vcc);
25696 if (ia_vcc == NULL)
25697 {
25698 - atomic_inc(&vcc->stats->rx_err);
25699 + atomic_inc_unchecked(&vcc->stats->rx_err);
25700 dev_kfree_skb_any(skb);
25701 atm_return(vcc, atm_guess_pdu2truesize(len));
25702 goto INCR_DLE;
25703 @@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25704 if ((length > iadev->rx_buf_sz) || (length >
25705 (skb->len - sizeof(struct cpcs_trailer))))
25706 {
25707 - atomic_inc(&vcc->stats->rx_err);
25708 + atomic_inc_unchecked(&vcc->stats->rx_err);
25709 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25710 length, skb->len);)
25711 dev_kfree_skb_any(skb);
25712 @@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25713
25714 IF_RX(printk("rx_dle_intr: skb push");)
25715 vcc->push(vcc,skb);
25716 - atomic_inc(&vcc->stats->rx);
25717 + atomic_inc_unchecked(&vcc->stats->rx);
25718 iadev->rx_pkt_cnt++;
25719 }
25720 INCR_DLE:
25721 @@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25722 {
25723 struct k_sonet_stats *stats;
25724 stats = &PRIV(_ia_dev[board])->sonet_stats;
25725 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25726 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25727 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25728 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25729 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25730 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25731 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25732 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25733 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25734 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25735 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25736 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25737 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25738 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25739 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25740 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25741 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25742 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25743 }
25744 ia_cmds.status = 0;
25745 break;
25746 @@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25747 if ((desc == 0) || (desc > iadev->num_tx_desc))
25748 {
25749 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25750 - atomic_inc(&vcc->stats->tx);
25751 + atomic_inc_unchecked(&vcc->stats->tx);
25752 if (vcc->pop)
25753 vcc->pop(vcc, skb);
25754 else
25755 @@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25756 ATM_DESC(skb) = vcc->vci;
25757 skb_queue_tail(&iadev->tx_dma_q, skb);
25758
25759 - atomic_inc(&vcc->stats->tx);
25760 + atomic_inc_unchecked(&vcc->stats->tx);
25761 iadev->tx_pkt_cnt++;
25762 /* Increment transaction counter */
25763 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25764
25765 #if 0
25766 /* add flow control logic */
25767 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25768 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25769 if (iavcc->vc_desc_cnt > 10) {
25770 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25771 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25772 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25773 index e828c54..ae83976 100644
25774 --- a/drivers/atm/lanai.c
25775 +++ b/drivers/atm/lanai.c
25776 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25777 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25778 lanai_endtx(lanai, lvcc);
25779 lanai_free_skb(lvcc->tx.atmvcc, skb);
25780 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25781 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25782 }
25783
25784 /* Try to fill the buffer - don't call unless there is backlog */
25785 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25786 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25787 __net_timestamp(skb);
25788 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25789 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25790 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25791 out:
25792 lvcc->rx.buf.ptr = end;
25793 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25794 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25795 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25796 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25797 lanai->stats.service_rxnotaal5++;
25798 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25799 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25800 return 0;
25801 }
25802 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25803 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25804 int bytes;
25805 read_unlock(&vcc_sklist_lock);
25806 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25807 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25808 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25809 lvcc->stats.x.aal5.service_trash++;
25810 bytes = (SERVICE_GET_END(s) * 16) -
25811 (((unsigned long) lvcc->rx.buf.ptr) -
25812 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25813 }
25814 if (s & SERVICE_STREAM) {
25815 read_unlock(&vcc_sklist_lock);
25816 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25817 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25818 lvcc->stats.x.aal5.service_stream++;
25819 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25820 "PDU on VCI %d!\n", lanai->number, vci);
25821 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25822 return 0;
25823 }
25824 DPRINTK("got rx crc error on vci %d\n", vci);
25825 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25826 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25827 lvcc->stats.x.aal5.service_rxcrc++;
25828 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25829 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25830 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25831 index 1c70c45..300718d 100644
25832 --- a/drivers/atm/nicstar.c
25833 +++ b/drivers/atm/nicstar.c
25834 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25835 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25836 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25837 card->index);
25838 - atomic_inc(&vcc->stats->tx_err);
25839 + atomic_inc_unchecked(&vcc->stats->tx_err);
25840 dev_kfree_skb_any(skb);
25841 return -EINVAL;
25842 }
25843 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25844 if (!vc->tx) {
25845 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25846 card->index);
25847 - atomic_inc(&vcc->stats->tx_err);
25848 + atomic_inc_unchecked(&vcc->stats->tx_err);
25849 dev_kfree_skb_any(skb);
25850 return -EINVAL;
25851 }
25852 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25853 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25854 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25855 card->index);
25856 - atomic_inc(&vcc->stats->tx_err);
25857 + atomic_inc_unchecked(&vcc->stats->tx_err);
25858 dev_kfree_skb_any(skb);
25859 return -EINVAL;
25860 }
25861
25862 if (skb_shinfo(skb)->nr_frags != 0) {
25863 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25864 - atomic_inc(&vcc->stats->tx_err);
25865 + atomic_inc_unchecked(&vcc->stats->tx_err);
25866 dev_kfree_skb_any(skb);
25867 return -EINVAL;
25868 }
25869 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25870 }
25871
25872 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25873 - atomic_inc(&vcc->stats->tx_err);
25874 + atomic_inc_unchecked(&vcc->stats->tx_err);
25875 dev_kfree_skb_any(skb);
25876 return -EIO;
25877 }
25878 - atomic_inc(&vcc->stats->tx);
25879 + atomic_inc_unchecked(&vcc->stats->tx);
25880
25881 return 0;
25882 }
25883 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25884 printk
25885 ("nicstar%d: Can't allocate buffers for aal0.\n",
25886 card->index);
25887 - atomic_add(i, &vcc->stats->rx_drop);
25888 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25889 break;
25890 }
25891 if (!atm_charge(vcc, sb->truesize)) {
25892 RXPRINTK
25893 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25894 card->index);
25895 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25896 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25897 dev_kfree_skb_any(sb);
25898 break;
25899 }
25900 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25901 ATM_SKB(sb)->vcc = vcc;
25902 __net_timestamp(sb);
25903 vcc->push(vcc, sb);
25904 - atomic_inc(&vcc->stats->rx);
25905 + atomic_inc_unchecked(&vcc->stats->rx);
25906 cell += ATM_CELL_PAYLOAD;
25907 }
25908
25909 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25910 if (iovb == NULL) {
25911 printk("nicstar%d: Out of iovec buffers.\n",
25912 card->index);
25913 - atomic_inc(&vcc->stats->rx_drop);
25914 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25915 recycle_rx_buf(card, skb);
25916 return;
25917 }
25918 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25919 small or large buffer itself. */
25920 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25921 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25922 - atomic_inc(&vcc->stats->rx_err);
25923 + atomic_inc_unchecked(&vcc->stats->rx_err);
25924 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25925 NS_MAX_IOVECS);
25926 NS_PRV_IOVCNT(iovb) = 0;
25927 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25928 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25929 card->index);
25930 which_list(card, skb);
25931 - atomic_inc(&vcc->stats->rx_err);
25932 + atomic_inc_unchecked(&vcc->stats->rx_err);
25933 recycle_rx_buf(card, skb);
25934 vc->rx_iov = NULL;
25935 recycle_iov_buf(card, iovb);
25936 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25937 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25938 card->index);
25939 which_list(card, skb);
25940 - atomic_inc(&vcc->stats->rx_err);
25941 + atomic_inc_unchecked(&vcc->stats->rx_err);
25942 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25943 NS_PRV_IOVCNT(iovb));
25944 vc->rx_iov = NULL;
25945 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25946 printk(" - PDU size mismatch.\n");
25947 else
25948 printk(".\n");
25949 - atomic_inc(&vcc->stats->rx_err);
25950 + atomic_inc_unchecked(&vcc->stats->rx_err);
25951 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25952 NS_PRV_IOVCNT(iovb));
25953 vc->rx_iov = NULL;
25954 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25955 /* skb points to a small buffer */
25956 if (!atm_charge(vcc, skb->truesize)) {
25957 push_rxbufs(card, skb);
25958 - atomic_inc(&vcc->stats->rx_drop);
25959 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25960 } else {
25961 skb_put(skb, len);
25962 dequeue_sm_buf(card, skb);
25963 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25964 ATM_SKB(skb)->vcc = vcc;
25965 __net_timestamp(skb);
25966 vcc->push(vcc, skb);
25967 - atomic_inc(&vcc->stats->rx);
25968 + atomic_inc_unchecked(&vcc->stats->rx);
25969 }
25970 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
25971 struct sk_buff *sb;
25972 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25973 if (len <= NS_SMBUFSIZE) {
25974 if (!atm_charge(vcc, sb->truesize)) {
25975 push_rxbufs(card, sb);
25976 - atomic_inc(&vcc->stats->rx_drop);
25977 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25978 } else {
25979 skb_put(sb, len);
25980 dequeue_sm_buf(card, sb);
25981 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25982 ATM_SKB(sb)->vcc = vcc;
25983 __net_timestamp(sb);
25984 vcc->push(vcc, sb);
25985 - atomic_inc(&vcc->stats->rx);
25986 + atomic_inc_unchecked(&vcc->stats->rx);
25987 }
25988
25989 push_rxbufs(card, skb);
25990 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25991
25992 if (!atm_charge(vcc, skb->truesize)) {
25993 push_rxbufs(card, skb);
25994 - atomic_inc(&vcc->stats->rx_drop);
25995 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25996 } else {
25997 dequeue_lg_buf(card, skb);
25998 #ifdef NS_USE_DESTRUCTORS
25999 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26000 ATM_SKB(skb)->vcc = vcc;
26001 __net_timestamp(skb);
26002 vcc->push(vcc, skb);
26003 - atomic_inc(&vcc->stats->rx);
26004 + atomic_inc_unchecked(&vcc->stats->rx);
26005 }
26006
26007 push_rxbufs(card, sb);
26008 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26009 printk
26010 ("nicstar%d: Out of huge buffers.\n",
26011 card->index);
26012 - atomic_inc(&vcc->stats->rx_drop);
26013 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26014 recycle_iovec_rx_bufs(card,
26015 (struct iovec *)
26016 iovb->data,
26017 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26018 card->hbpool.count++;
26019 } else
26020 dev_kfree_skb_any(hb);
26021 - atomic_inc(&vcc->stats->rx_drop);
26022 + atomic_inc_unchecked(&vcc->stats->rx_drop);
26023 } else {
26024 /* Copy the small buffer to the huge buffer */
26025 sb = (struct sk_buff *)iov->iov_base;
26026 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
26027 #endif /* NS_USE_DESTRUCTORS */
26028 __net_timestamp(hb);
26029 vcc->push(vcc, hb);
26030 - atomic_inc(&vcc->stats->rx);
26031 + atomic_inc_unchecked(&vcc->stats->rx);
26032 }
26033 }
26034
26035 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
26036 index 5d1d076..4f31f42 100644
26037 --- a/drivers/atm/solos-pci.c
26038 +++ b/drivers/atm/solos-pci.c
26039 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
26040 }
26041 atm_charge(vcc, skb->truesize);
26042 vcc->push(vcc, skb);
26043 - atomic_inc(&vcc->stats->rx);
26044 + atomic_inc_unchecked(&vcc->stats->rx);
26045 break;
26046
26047 case PKT_STATUS:
26048 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *buf)
26049 char msg[500];
26050 char item[10];
26051
26052 + pax_track_stack();
26053 +
26054 len = buf->len;
26055 for (i = 0; i < len; i++){
26056 if(i % 8 == 0)
26057 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
26058 vcc = SKB_CB(oldskb)->vcc;
26059
26060 if (vcc) {
26061 - atomic_inc(&vcc->stats->tx);
26062 + atomic_inc_unchecked(&vcc->stats->tx);
26063 solos_pop(vcc, oldskb);
26064 } else
26065 dev_kfree_skb_irq(oldskb);
26066 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
26067 index 90f1ccc..04c4a1e 100644
26068 --- a/drivers/atm/suni.c
26069 +++ b/drivers/atm/suni.c
26070 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26071
26072
26073 #define ADD_LIMITED(s,v) \
26074 - atomic_add((v),&stats->s); \
26075 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26076 + atomic_add_unchecked((v),&stats->s); \
26077 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26078
26079
26080 static void suni_hz(unsigned long from_timer)
26081 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
26082 index 5120a96..e2572bd 100644
26083 --- a/drivers/atm/uPD98402.c
26084 +++ b/drivers/atm/uPD98402.c
26085 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
26086 struct sonet_stats tmp;
26087 int error = 0;
26088
26089 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26090 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26091 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26092 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26093 if (zero && !error) {
26094 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
26095
26096
26097 #define ADD_LIMITED(s,v) \
26098 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26099 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26100 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26101 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26102 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26103 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26104
26105
26106 static void stat_event(struct atm_dev *dev)
26107 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
26108 if (reason & uPD98402_INT_PFM) stat_event(dev);
26109 if (reason & uPD98402_INT_PCO) {
26110 (void) GET(PCOCR); /* clear interrupt cause */
26111 - atomic_add(GET(HECCT),
26112 + atomic_add_unchecked(GET(HECCT),
26113 &PRIV(dev)->sonet_stats.uncorr_hcs);
26114 }
26115 if ((reason & uPD98402_INT_RFO) &&
26116 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
26117 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26118 uPD98402_INT_LOS),PIMR); /* enable them */
26119 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26120 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26121 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26122 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26123 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26124 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26125 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26126 return 0;
26127 }
26128
26129 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26130 index d889f56..17eb71e 100644
26131 --- a/drivers/atm/zatm.c
26132 +++ b/drivers/atm/zatm.c
26133 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26134 }
26135 if (!size) {
26136 dev_kfree_skb_irq(skb);
26137 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26138 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26139 continue;
26140 }
26141 if (!atm_charge(vcc,skb->truesize)) {
26142 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26143 skb->len = size;
26144 ATM_SKB(skb)->vcc = vcc;
26145 vcc->push(vcc,skb);
26146 - atomic_inc(&vcc->stats->rx);
26147 + atomic_inc_unchecked(&vcc->stats->rx);
26148 }
26149 zout(pos & 0xffff,MTA(mbx));
26150 #if 0 /* probably a stupid idea */
26151 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26152 skb_queue_head(&zatm_vcc->backlog,skb);
26153 break;
26154 }
26155 - atomic_inc(&vcc->stats->tx);
26156 + atomic_inc_unchecked(&vcc->stats->tx);
26157 wake_up(&zatm_vcc->tx_wait);
26158 }
26159
26160 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26161 index a4760e0..51283cf 100644
26162 --- a/drivers/base/devtmpfs.c
26163 +++ b/drivers/base/devtmpfs.c
26164 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26165 if (!thread)
26166 return 0;
26167
26168 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26169 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26170 if (err)
26171 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26172 else
26173 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26174 index 84f7c7d..37cfd87 100644
26175 --- a/drivers/base/power/wakeup.c
26176 +++ b/drivers/base/power/wakeup.c
26177 @@ -29,14 +29,14 @@ bool events_check_enabled;
26178 * They need to be modified together atomically, so it's better to use one
26179 * atomic variable to hold them both.
26180 */
26181 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26182 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26183
26184 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26185 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26186
26187 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26188 {
26189 - unsigned int comb = atomic_read(&combined_event_count);
26190 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26191
26192 *cnt = (comb >> IN_PROGRESS_BITS);
26193 *inpr = comb & MAX_IN_PROGRESS;
26194 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26195 ws->last_time = ktime_get();
26196
26197 /* Increment the counter of events in progress. */
26198 - atomic_inc(&combined_event_count);
26199 + atomic_inc_unchecked(&combined_event_count);
26200 }
26201
26202 /**
26203 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26204 * Increment the counter of registered wakeup events and decrement the
26205 * couter of wakeup events in progress simultaneously.
26206 */
26207 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26208 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26209 }
26210
26211 /**
26212 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
26213 index e086fbb..398e1fe 100644
26214 --- a/drivers/block/DAC960.c
26215 +++ b/drivers/block/DAC960.c
26216 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
26217 unsigned long flags;
26218 int Channel, TargetID;
26219
26220 + pax_track_stack();
26221 +
26222 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26223 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26224 sizeof(DAC960_SCSI_Inquiry_T) +
26225 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26226 index c2f9b3e..5911988 100644
26227 --- a/drivers/block/cciss.c
26228 +++ b/drivers/block/cciss.c
26229 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26230 int err;
26231 u32 cp;
26232
26233 + memset(&arg64, 0, sizeof(arg64));
26234 +
26235 err = 0;
26236 err |=
26237 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26238 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
26239 while (!list_empty(&h->reqQ)) {
26240 c = list_entry(h->reqQ.next, CommandList_struct, list);
26241 /* can't do anything if fifo is full */
26242 - if ((h->access.fifo_full(h))) {
26243 + if ((h->access->fifo_full(h))) {
26244 dev_warn(&h->pdev->dev, "fifo full\n");
26245 break;
26246 }
26247 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
26248 h->Qdepth--;
26249
26250 /* Tell the controller execute command */
26251 - h->access.submit_command(h, c);
26252 + h->access->submit_command(h, c);
26253
26254 /* Put job onto the completed Q */
26255 addQ(&h->cmpQ, c);
26256 @@ -3422,17 +3424,17 @@ startio:
26257
26258 static inline unsigned long get_next_completion(ctlr_info_t *h)
26259 {
26260 - return h->access.command_completed(h);
26261 + return h->access->command_completed(h);
26262 }
26263
26264 static inline int interrupt_pending(ctlr_info_t *h)
26265 {
26266 - return h->access.intr_pending(h);
26267 + return h->access->intr_pending(h);
26268 }
26269
26270 static inline long interrupt_not_for_us(ctlr_info_t *h)
26271 {
26272 - return ((h->access.intr_pending(h) == 0) ||
26273 + return ((h->access->intr_pending(h) == 0) ||
26274 (h->interrupts_enabled == 0));
26275 }
26276
26277 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info_t *h)
26278 u32 a;
26279
26280 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26281 - return h->access.command_completed(h);
26282 + return h->access->command_completed(h);
26283
26284 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26285 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26286 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26287 trans_support & CFGTBL_Trans_use_short_tags);
26288
26289 /* Change the access methods to the performant access methods */
26290 - h->access = SA5_performant_access;
26291 + h->access = &SA5_performant_access;
26292 h->transMethod = CFGTBL_Trans_Performant;
26293
26294 return;
26295 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26296 if (prod_index < 0)
26297 return -ENODEV;
26298 h->product_name = products[prod_index].product_name;
26299 - h->access = *(products[prod_index].access);
26300 + h->access = products[prod_index].access;
26301
26302 if (cciss_board_disabled(h)) {
26303 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26304 @@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
26305 }
26306
26307 /* make sure the board interrupts are off */
26308 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26309 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26310 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26311 if (rc)
26312 goto clean2;
26313 @@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
26314 * fake ones to scoop up any residual completions.
26315 */
26316 spin_lock_irqsave(&h->lock, flags);
26317 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26318 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26319 spin_unlock_irqrestore(&h->lock, flags);
26320 free_irq(h->intr[PERF_MODE_INT], h);
26321 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26322 @@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
26323 dev_info(&h->pdev->dev, "Board READY.\n");
26324 dev_info(&h->pdev->dev,
26325 "Waiting for stale completions to drain.\n");
26326 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26327 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26328 msleep(10000);
26329 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26330 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26331
26332 rc = controller_reset_failed(h->cfgtable);
26333 if (rc)
26334 @@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
26335 cciss_scsi_setup(h);
26336
26337 /* Turn the interrupts on so we can service requests */
26338 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26339 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26340
26341 /* Get the firmware version */
26342 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26343 @@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26344 kfree(flush_buf);
26345 if (return_code != IO_OK)
26346 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26347 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26348 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26349 free_irq(h->intr[PERF_MODE_INT], h);
26350 }
26351
26352 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26353 index c049548..a09cb6e 100644
26354 --- a/drivers/block/cciss.h
26355 +++ b/drivers/block/cciss.h
26356 @@ -100,7 +100,7 @@ struct ctlr_info
26357 /* information about each logical volume */
26358 drive_info_struct *drv[CISS_MAX_LUN];
26359
26360 - struct access_method access;
26361 + struct access_method *access;
26362
26363 /* queue and queue Info */
26364 struct list_head reqQ;
26365 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26366 index b2fceb5..87fec83 100644
26367 --- a/drivers/block/cpqarray.c
26368 +++ b/drivers/block/cpqarray.c
26369 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26370 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26371 goto Enomem4;
26372 }
26373 - hba[i]->access.set_intr_mask(hba[i], 0);
26374 + hba[i]->access->set_intr_mask(hba[i], 0);
26375 if (request_irq(hba[i]->intr, do_ida_intr,
26376 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26377 {
26378 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26379 add_timer(&hba[i]->timer);
26380
26381 /* Enable IRQ now that spinlock and rate limit timer are set up */
26382 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26383 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26384
26385 for(j=0; j<NWD; j++) {
26386 struct gendisk *disk = ida_gendisk[i][j];
26387 @@ -694,7 +694,7 @@ DBGINFO(
26388 for(i=0; i<NR_PRODUCTS; i++) {
26389 if (board_id == products[i].board_id) {
26390 c->product_name = products[i].product_name;
26391 - c->access = *(products[i].access);
26392 + c->access = products[i].access;
26393 break;
26394 }
26395 }
26396 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26397 hba[ctlr]->intr = intr;
26398 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26399 hba[ctlr]->product_name = products[j].product_name;
26400 - hba[ctlr]->access = *(products[j].access);
26401 + hba[ctlr]->access = products[j].access;
26402 hba[ctlr]->ctlr = ctlr;
26403 hba[ctlr]->board_id = board_id;
26404 hba[ctlr]->pci_dev = NULL; /* not PCI */
26405 @@ -911,6 +911,8 @@ static void do_ida_request(struct request_queue *q)
26406 struct scatterlist tmp_sg[SG_MAX];
26407 int i, dir, seg;
26408
26409 + pax_track_stack();
26410 +
26411 queue_next:
26412 creq = blk_peek_request(q);
26413 if (!creq)
26414 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
26415
26416 while((c = h->reqQ) != NULL) {
26417 /* Can't do anything if we're busy */
26418 - if (h->access.fifo_full(h) == 0)
26419 + if (h->access->fifo_full(h) == 0)
26420 return;
26421
26422 /* Get the first entry from the request Q */
26423 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
26424 h->Qdepth--;
26425
26426 /* Tell the controller to do our bidding */
26427 - h->access.submit_command(h, c);
26428 + h->access->submit_command(h, c);
26429
26430 /* Get onto the completion Q */
26431 addQ(&h->cmpQ, c);
26432 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26433 unsigned long flags;
26434 __u32 a,a1;
26435
26436 - istat = h->access.intr_pending(h);
26437 + istat = h->access->intr_pending(h);
26438 /* Is this interrupt for us? */
26439 if (istat == 0)
26440 return IRQ_NONE;
26441 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26442 */
26443 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26444 if (istat & FIFO_NOT_EMPTY) {
26445 - while((a = h->access.command_completed(h))) {
26446 + while((a = h->access->command_completed(h))) {
26447 a1 = a; a &= ~3;
26448 if ((c = h->cmpQ) == NULL)
26449 {
26450 @@ -1449,11 +1451,11 @@ static int sendcmd(
26451 /*
26452 * Disable interrupt
26453 */
26454 - info_p->access.set_intr_mask(info_p, 0);
26455 + info_p->access->set_intr_mask(info_p, 0);
26456 /* Make sure there is room in the command FIFO */
26457 /* Actually it should be completely empty at this time. */
26458 for (i = 200000; i > 0; i--) {
26459 - temp = info_p->access.fifo_full(info_p);
26460 + temp = info_p->access->fifo_full(info_p);
26461 if (temp != 0) {
26462 break;
26463 }
26464 @@ -1466,7 +1468,7 @@ DBG(
26465 /*
26466 * Send the cmd
26467 */
26468 - info_p->access.submit_command(info_p, c);
26469 + info_p->access->submit_command(info_p, c);
26470 complete = pollcomplete(ctlr);
26471
26472 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26473 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26474 * we check the new geometry. Then turn interrupts back on when
26475 * we're done.
26476 */
26477 - host->access.set_intr_mask(host, 0);
26478 + host->access->set_intr_mask(host, 0);
26479 getgeometry(ctlr);
26480 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26481 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26482
26483 for(i=0; i<NWD; i++) {
26484 struct gendisk *disk = ida_gendisk[ctlr][i];
26485 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
26486 /* Wait (up to 2 seconds) for a command to complete */
26487
26488 for (i = 200000; i > 0; i--) {
26489 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26490 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26491 if (done == 0) {
26492 udelay(10); /* a short fixed delay */
26493 } else
26494 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26495 index be73e9d..7fbf140 100644
26496 --- a/drivers/block/cpqarray.h
26497 +++ b/drivers/block/cpqarray.h
26498 @@ -99,7 +99,7 @@ struct ctlr_info {
26499 drv_info_t drv[NWD];
26500 struct proc_dir_entry *proc;
26501
26502 - struct access_method access;
26503 + struct access_method *access;
26504
26505 cmdlist_t *reqQ;
26506 cmdlist_t *cmpQ;
26507 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26508 index ef2ceed..c9cb18e 100644
26509 --- a/drivers/block/drbd/drbd_int.h
26510 +++ b/drivers/block/drbd/drbd_int.h
26511 @@ -737,7 +737,7 @@ struct drbd_request;
26512 struct drbd_epoch {
26513 struct list_head list;
26514 unsigned int barrier_nr;
26515 - atomic_t epoch_size; /* increased on every request added. */
26516 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26517 atomic_t active; /* increased on every req. added, and dec on every finished. */
26518 unsigned long flags;
26519 };
26520 @@ -1109,7 +1109,7 @@ struct drbd_conf {
26521 void *int_dig_in;
26522 void *int_dig_vv;
26523 wait_queue_head_t seq_wait;
26524 - atomic_t packet_seq;
26525 + atomic_unchecked_t packet_seq;
26526 unsigned int peer_seq;
26527 spinlock_t peer_seq_lock;
26528 unsigned int minor;
26529 @@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26530
26531 static inline void drbd_tcp_cork(struct socket *sock)
26532 {
26533 - int __user val = 1;
26534 + int val = 1;
26535 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26536 - (char __user *)&val, sizeof(val));
26537 + (char __force_user *)&val, sizeof(val));
26538 }
26539
26540 static inline void drbd_tcp_uncork(struct socket *sock)
26541 {
26542 - int __user val = 0;
26543 + int val = 0;
26544 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26545 - (char __user *)&val, sizeof(val));
26546 + (char __force_user *)&val, sizeof(val));
26547 }
26548
26549 static inline void drbd_tcp_nodelay(struct socket *sock)
26550 {
26551 - int __user val = 1;
26552 + int val = 1;
26553 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26554 - (char __user *)&val, sizeof(val));
26555 + (char __force_user *)&val, sizeof(val));
26556 }
26557
26558 static inline void drbd_tcp_quickack(struct socket *sock)
26559 {
26560 - int __user val = 2;
26561 + int val = 2;
26562 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26563 - (char __user *)&val, sizeof(val));
26564 + (char __force_user *)&val, sizeof(val));
26565 }
26566
26567 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26568 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26569 index 0358e55..bc33689 100644
26570 --- a/drivers/block/drbd/drbd_main.c
26571 +++ b/drivers/block/drbd/drbd_main.c
26572 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26573 p.sector = sector;
26574 p.block_id = block_id;
26575 p.blksize = blksize;
26576 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26577 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26578
26579 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26580 return false;
26581 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26582 p.sector = cpu_to_be64(req->sector);
26583 p.block_id = (unsigned long)req;
26584 p.seq_num = cpu_to_be32(req->seq_num =
26585 - atomic_add_return(1, &mdev->packet_seq));
26586 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26587
26588 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26589
26590 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26591 atomic_set(&mdev->unacked_cnt, 0);
26592 atomic_set(&mdev->local_cnt, 0);
26593 atomic_set(&mdev->net_cnt, 0);
26594 - atomic_set(&mdev->packet_seq, 0);
26595 + atomic_set_unchecked(&mdev->packet_seq, 0);
26596 atomic_set(&mdev->pp_in_use, 0);
26597 atomic_set(&mdev->pp_in_use_by_net, 0);
26598 atomic_set(&mdev->rs_sect_in, 0);
26599 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26600 mdev->receiver.t_state);
26601
26602 /* no need to lock it, I'm the only thread alive */
26603 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26604 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26605 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26606 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26607 mdev->al_writ_cnt =
26608 mdev->bm_writ_cnt =
26609 mdev->read_cnt =
26610 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26611 index 0feab26..5d9b3dd 100644
26612 --- a/drivers/block/drbd/drbd_nl.c
26613 +++ b/drivers/block/drbd/drbd_nl.c
26614 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26615 module_put(THIS_MODULE);
26616 }
26617
26618 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26619 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26620
26621 static unsigned short *
26622 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26623 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26624 cn_reply->id.idx = CN_IDX_DRBD;
26625 cn_reply->id.val = CN_VAL_DRBD;
26626
26627 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26628 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26629 cn_reply->ack = 0; /* not used here. */
26630 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26631 (int)((char *)tl - (char *)reply->tag_list);
26632 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26633 cn_reply->id.idx = CN_IDX_DRBD;
26634 cn_reply->id.val = CN_VAL_DRBD;
26635
26636 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26637 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26638 cn_reply->ack = 0; /* not used here. */
26639 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26640 (int)((char *)tl - (char *)reply->tag_list);
26641 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26642 cn_reply->id.idx = CN_IDX_DRBD;
26643 cn_reply->id.val = CN_VAL_DRBD;
26644
26645 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26646 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26647 cn_reply->ack = 0; // not used here.
26648 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26649 (int)((char*)tl - (char*)reply->tag_list);
26650 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26651 cn_reply->id.idx = CN_IDX_DRBD;
26652 cn_reply->id.val = CN_VAL_DRBD;
26653
26654 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26655 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26656 cn_reply->ack = 0; /* not used here. */
26657 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26658 (int)((char *)tl - (char *)reply->tag_list);
26659 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26660 index 43beaca..4a5b1dd 100644
26661 --- a/drivers/block/drbd/drbd_receiver.c
26662 +++ b/drivers/block/drbd/drbd_receiver.c
26663 @@ -894,7 +894,7 @@ retry:
26664 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26665 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26666
26667 - atomic_set(&mdev->packet_seq, 0);
26668 + atomic_set_unchecked(&mdev->packet_seq, 0);
26669 mdev->peer_seq = 0;
26670
26671 drbd_thread_start(&mdev->asender);
26672 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26673 do {
26674 next_epoch = NULL;
26675
26676 - epoch_size = atomic_read(&epoch->epoch_size);
26677 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26678
26679 switch (ev & ~EV_CLEANUP) {
26680 case EV_PUT:
26681 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26682 rv = FE_DESTROYED;
26683 } else {
26684 epoch->flags = 0;
26685 - atomic_set(&epoch->epoch_size, 0);
26686 + atomic_set_unchecked(&epoch->epoch_size, 0);
26687 /* atomic_set(&epoch->active, 0); is already zero */
26688 if (rv == FE_STILL_LIVE)
26689 rv = FE_RECYCLED;
26690 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26691 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26692 drbd_flush(mdev);
26693
26694 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26695 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26696 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26697 if (epoch)
26698 break;
26699 }
26700
26701 epoch = mdev->current_epoch;
26702 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26703 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26704
26705 D_ASSERT(atomic_read(&epoch->active) == 0);
26706 D_ASSERT(epoch->flags == 0);
26707 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26708 }
26709
26710 epoch->flags = 0;
26711 - atomic_set(&epoch->epoch_size, 0);
26712 + atomic_set_unchecked(&epoch->epoch_size, 0);
26713 atomic_set(&epoch->active, 0);
26714
26715 spin_lock(&mdev->epoch_lock);
26716 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26717 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26718 list_add(&epoch->list, &mdev->current_epoch->list);
26719 mdev->current_epoch = epoch;
26720 mdev->epochs++;
26721 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26722 spin_unlock(&mdev->peer_seq_lock);
26723
26724 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26725 - atomic_inc(&mdev->current_epoch->epoch_size);
26726 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26727 return drbd_drain_block(mdev, data_size);
26728 }
26729
26730 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26731
26732 spin_lock(&mdev->epoch_lock);
26733 e->epoch = mdev->current_epoch;
26734 - atomic_inc(&e->epoch->epoch_size);
26735 + atomic_inc_unchecked(&e->epoch->epoch_size);
26736 atomic_inc(&e->epoch->active);
26737 spin_unlock(&mdev->epoch_lock);
26738
26739 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26740 D_ASSERT(list_empty(&mdev->done_ee));
26741
26742 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26743 - atomic_set(&mdev->current_epoch->epoch_size, 0);
26744 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26745 D_ASSERT(list_empty(&mdev->current_epoch->list));
26746 }
26747
26748 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26749 index 4720c7a..2c49af1 100644
26750 --- a/drivers/block/loop.c
26751 +++ b/drivers/block/loop.c
26752 @@ -283,7 +283,7 @@ static int __do_lo_send_write(struct file *file,
26753 mm_segment_t old_fs = get_fs();
26754
26755 set_fs(get_ds());
26756 - bw = file->f_op->write(file, buf, len, &pos);
26757 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26758 set_fs(old_fs);
26759 if (likely(bw == len))
26760 return 0;
26761 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
26762 index f533f33..6177bcb 100644
26763 --- a/drivers/block/nbd.c
26764 +++ b/drivers/block/nbd.c
26765 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
26766 struct kvec iov;
26767 sigset_t blocked, oldset;
26768
26769 + pax_track_stack();
26770 +
26771 if (unlikely(!sock)) {
26772 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26773 lo->disk->disk_name, (send ? "send" : "recv"));
26774 @@ -572,6 +574,8 @@ static void do_nbd_request(struct request_queue *q)
26775 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26776 unsigned int cmd, unsigned long arg)
26777 {
26778 + pax_track_stack();
26779 +
26780 switch (cmd) {
26781 case NBD_DISCONNECT: {
26782 struct request sreq;
26783 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26784 index 423fd56..06d3be0 100644
26785 --- a/drivers/char/Kconfig
26786 +++ b/drivers/char/Kconfig
26787 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26788
26789 config DEVKMEM
26790 bool "/dev/kmem virtual device support"
26791 - default y
26792 + default n
26793 + depends on !GRKERNSEC_KMEM
26794 help
26795 Say Y here if you want to support the /dev/kmem device. The
26796 /dev/kmem device is rarely used, but can be used for certain
26797 @@ -596,6 +597,7 @@ config DEVPORT
26798 bool
26799 depends on !M68K
26800 depends on ISA || PCI
26801 + depends on !GRKERNSEC_KMEM
26802 default y
26803
26804 source "drivers/s390/char/Kconfig"
26805 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26806 index 2e04433..22afc64 100644
26807 --- a/drivers/char/agp/frontend.c
26808 +++ b/drivers/char/agp/frontend.c
26809 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26810 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26811 return -EFAULT;
26812
26813 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26814 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26815 return -EFAULT;
26816
26817 client = agp_find_client_by_pid(reserve.pid);
26818 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26819 index 095ab90..afad0a4 100644
26820 --- a/drivers/char/briq_panel.c
26821 +++ b/drivers/char/briq_panel.c
26822 @@ -9,6 +9,7 @@
26823 #include <linux/types.h>
26824 #include <linux/errno.h>
26825 #include <linux/tty.h>
26826 +#include <linux/mutex.h>
26827 #include <linux/timer.h>
26828 #include <linux/kernel.h>
26829 #include <linux/wait.h>
26830 @@ -34,6 +35,7 @@ static int vfd_is_open;
26831 static unsigned char vfd[40];
26832 static int vfd_cursor;
26833 static unsigned char ledpb, led;
26834 +static DEFINE_MUTEX(vfd_mutex);
26835
26836 static void update_vfd(void)
26837 {
26838 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26839 if (!vfd_is_open)
26840 return -EBUSY;
26841
26842 + mutex_lock(&vfd_mutex);
26843 for (;;) {
26844 char c;
26845 if (!indx)
26846 break;
26847 - if (get_user(c, buf))
26848 + if (get_user(c, buf)) {
26849 + mutex_unlock(&vfd_mutex);
26850 return -EFAULT;
26851 + }
26852 if (esc) {
26853 set_led(c);
26854 esc = 0;
26855 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26856 buf++;
26857 }
26858 update_vfd();
26859 + mutex_unlock(&vfd_mutex);
26860
26861 return len;
26862 }
26863 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26864 index f773a9d..65cd683 100644
26865 --- a/drivers/char/genrtc.c
26866 +++ b/drivers/char/genrtc.c
26867 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26868 switch (cmd) {
26869
26870 case RTC_PLL_GET:
26871 + memset(&pll, 0, sizeof(pll));
26872 if (get_rtc_pll(&pll))
26873 return -EINVAL;
26874 else
26875 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26876 index 0833896..cccce52 100644
26877 --- a/drivers/char/hpet.c
26878 +++ b/drivers/char/hpet.c
26879 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26880 }
26881
26882 static int
26883 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26884 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26885 struct hpet_info *info)
26886 {
26887 struct hpet_timer __iomem *timer;
26888 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26889 index 58c0e63..25aed94 100644
26890 --- a/drivers/char/ipmi/ipmi_msghandler.c
26891 +++ b/drivers/char/ipmi/ipmi_msghandler.c
26892 @@ -415,7 +415,7 @@ struct ipmi_smi {
26893 struct proc_dir_entry *proc_dir;
26894 char proc_dir_name[10];
26895
26896 - atomic_t stats[IPMI_NUM_STATS];
26897 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26898
26899 /*
26900 * run_to_completion duplicate of smb_info, smi_info
26901 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26902
26903
26904 #define ipmi_inc_stat(intf, stat) \
26905 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26906 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26907 #define ipmi_get_stat(intf, stat) \
26908 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26909 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26910
26911 static int is_lan_addr(struct ipmi_addr *addr)
26912 {
26913 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26914 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26915 init_waitqueue_head(&intf->waitq);
26916 for (i = 0; i < IPMI_NUM_STATS; i++)
26917 - atomic_set(&intf->stats[i], 0);
26918 + atomic_set_unchecked(&intf->stats[i], 0);
26919
26920 intf->proc_dir = NULL;
26921
26922 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
26923 struct ipmi_smi_msg smi_msg;
26924 struct ipmi_recv_msg recv_msg;
26925
26926 + pax_track_stack();
26927 +
26928 si = (struct ipmi_system_interface_addr *) &addr;
26929 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26930 si->channel = IPMI_BMC_CHANNEL;
26931 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26932 index 9397ab4..d01bee1 100644
26933 --- a/drivers/char/ipmi/ipmi_si_intf.c
26934 +++ b/drivers/char/ipmi/ipmi_si_intf.c
26935 @@ -277,7 +277,7 @@ struct smi_info {
26936 unsigned char slave_addr;
26937
26938 /* Counters and things for the proc filesystem. */
26939 - atomic_t stats[SI_NUM_STATS];
26940 + atomic_unchecked_t stats[SI_NUM_STATS];
26941
26942 struct task_struct *thread;
26943
26944 @@ -286,9 +286,9 @@ struct smi_info {
26945 };
26946
26947 #define smi_inc_stat(smi, stat) \
26948 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26949 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26950 #define smi_get_stat(smi, stat) \
26951 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26952 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26953
26954 #define SI_MAX_PARMS 4
26955
26956 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26957 atomic_set(&new_smi->req_events, 0);
26958 new_smi->run_to_completion = 0;
26959 for (i = 0; i < SI_NUM_STATS; i++)
26960 - atomic_set(&new_smi->stats[i], 0);
26961 + atomic_set_unchecked(&new_smi->stats[i], 0);
26962
26963 new_smi->interrupt_disabled = 1;
26964 atomic_set(&new_smi->stop_operation, 0);
26965 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26966 index 1aeaaba..e018570 100644
26967 --- a/drivers/char/mbcs.c
26968 +++ b/drivers/char/mbcs.c
26969 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26970 return 0;
26971 }
26972
26973 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26974 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26975 {
26976 .part_num = MBCS_PART_NUM,
26977 .mfg_num = MBCS_MFG_NUM,
26978 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
26979 index 8fc04b4..cebdeec 100644
26980 --- a/drivers/char/mem.c
26981 +++ b/drivers/char/mem.c
26982 @@ -18,6 +18,7 @@
26983 #include <linux/raw.h>
26984 #include <linux/tty.h>
26985 #include <linux/capability.h>
26986 +#include <linux/security.h>
26987 #include <linux/ptrace.h>
26988 #include <linux/device.h>
26989 #include <linux/highmem.h>
26990 @@ -34,6 +35,10 @@
26991 # include <linux/efi.h>
26992 #endif
26993
26994 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26995 +extern const struct file_operations grsec_fops;
26996 +#endif
26997 +
26998 static inline unsigned long size_inside_page(unsigned long start,
26999 unsigned long size)
27000 {
27001 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27002
27003 while (cursor < to) {
27004 if (!devmem_is_allowed(pfn)) {
27005 +#ifdef CONFIG_GRKERNSEC_KMEM
27006 + gr_handle_mem_readwrite(from, to);
27007 +#else
27008 printk(KERN_INFO
27009 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27010 current->comm, from, to);
27011 +#endif
27012 return 0;
27013 }
27014 cursor += PAGE_SIZE;
27015 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27016 }
27017 return 1;
27018 }
27019 +#elif defined(CONFIG_GRKERNSEC_KMEM)
27020 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27021 +{
27022 + return 0;
27023 +}
27024 #else
27025 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27026 {
27027 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27028
27029 while (count > 0) {
27030 unsigned long remaining;
27031 + char *temp;
27032
27033 sz = size_inside_page(p, count);
27034
27035 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
27036 if (!ptr)
27037 return -EFAULT;
27038
27039 - remaining = copy_to_user(buf, ptr, sz);
27040 +#ifdef CONFIG_PAX_USERCOPY
27041 + temp = kmalloc(sz, GFP_KERNEL);
27042 + if (!temp) {
27043 + unxlate_dev_mem_ptr(p, ptr);
27044 + return -ENOMEM;
27045 + }
27046 + memcpy(temp, ptr, sz);
27047 +#else
27048 + temp = ptr;
27049 +#endif
27050 +
27051 + remaining = copy_to_user(buf, temp, sz);
27052 +
27053 +#ifdef CONFIG_PAX_USERCOPY
27054 + kfree(temp);
27055 +#endif
27056 +
27057 unxlate_dev_mem_ptr(p, ptr);
27058 if (remaining)
27059 return -EFAULT;
27060 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27061 size_t count, loff_t *ppos)
27062 {
27063 unsigned long p = *ppos;
27064 - ssize_t low_count, read, sz;
27065 + ssize_t low_count, read, sz, err = 0;
27066 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27067 - int err = 0;
27068
27069 read = 0;
27070 if (p < (unsigned long) high_memory) {
27071 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27072 }
27073 #endif
27074 while (low_count > 0) {
27075 + char *temp;
27076 +
27077 sz = size_inside_page(p, low_count);
27078
27079 /*
27080 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
27081 */
27082 kbuf = xlate_dev_kmem_ptr((char *)p);
27083
27084 - if (copy_to_user(buf, kbuf, sz))
27085 +#ifdef CONFIG_PAX_USERCOPY
27086 + temp = kmalloc(sz, GFP_KERNEL);
27087 + if (!temp)
27088 + return -ENOMEM;
27089 + memcpy(temp, kbuf, sz);
27090 +#else
27091 + temp = kbuf;
27092 +#endif
27093 +
27094 + err = copy_to_user(buf, temp, sz);
27095 +
27096 +#ifdef CONFIG_PAX_USERCOPY
27097 + kfree(temp);
27098 +#endif
27099 +
27100 + if (err)
27101 return -EFAULT;
27102 buf += sz;
27103 p += sz;
27104 @@ -866,6 +913,9 @@ static const struct memdev {
27105 #ifdef CONFIG_CRASH_DUMP
27106 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27107 #endif
27108 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27109 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27110 +#endif
27111 };
27112
27113 static int memory_open(struct inode *inode, struct file *filp)
27114 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
27115 index da3cfee..a5a6606 100644
27116 --- a/drivers/char/nvram.c
27117 +++ b/drivers/char/nvram.c
27118 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
27119
27120 spin_unlock_irq(&rtc_lock);
27121
27122 - if (copy_to_user(buf, contents, tmp - contents))
27123 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27124 return -EFAULT;
27125
27126 *ppos = i;
27127 diff --git a/drivers/char/random.c b/drivers/char/random.c
27128 index c35a785..6d82202 100644
27129 --- a/drivers/char/random.c
27130 +++ b/drivers/char/random.c
27131 @@ -261,8 +261,13 @@
27132 /*
27133 * Configuration information
27134 */
27135 +#ifdef CONFIG_GRKERNSEC_RANDNET
27136 +#define INPUT_POOL_WORDS 512
27137 +#define OUTPUT_POOL_WORDS 128
27138 +#else
27139 #define INPUT_POOL_WORDS 128
27140 #define OUTPUT_POOL_WORDS 32
27141 +#endif
27142 #define SEC_XFER_SIZE 512
27143 #define EXTRACT_SIZE 10
27144
27145 @@ -300,10 +305,17 @@ static struct poolinfo {
27146 int poolwords;
27147 int tap1, tap2, tap3, tap4, tap5;
27148 } poolinfo_table[] = {
27149 +#ifdef CONFIG_GRKERNSEC_RANDNET
27150 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27151 + { 512, 411, 308, 208, 104, 1 },
27152 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27153 + { 128, 103, 76, 51, 25, 1 },
27154 +#else
27155 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27156 { 128, 103, 76, 51, 25, 1 },
27157 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27158 { 32, 26, 20, 14, 7, 1 },
27159 +#endif
27160 #if 0
27161 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27162 { 2048, 1638, 1231, 819, 411, 1 },
27163 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27164
27165 extract_buf(r, tmp);
27166 i = min_t(int, nbytes, EXTRACT_SIZE);
27167 - if (copy_to_user(buf, tmp, i)) {
27168 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27169 ret = -EFAULT;
27170 break;
27171 }
27172 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27173 #include <linux/sysctl.h>
27174
27175 static int min_read_thresh = 8, min_write_thresh;
27176 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27177 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27178 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27179 static char sysctl_bootid[16];
27180
27181 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27182 index 1ee8ce7..b778bef 100644
27183 --- a/drivers/char/sonypi.c
27184 +++ b/drivers/char/sonypi.c
27185 @@ -55,6 +55,7 @@
27186 #include <asm/uaccess.h>
27187 #include <asm/io.h>
27188 #include <asm/system.h>
27189 +#include <asm/local.h>
27190
27191 #include <linux/sonypi.h>
27192
27193 @@ -491,7 +492,7 @@ static struct sonypi_device {
27194 spinlock_t fifo_lock;
27195 wait_queue_head_t fifo_proc_list;
27196 struct fasync_struct *fifo_async;
27197 - int open_count;
27198 + local_t open_count;
27199 int model;
27200 struct input_dev *input_jog_dev;
27201 struct input_dev *input_key_dev;
27202 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27203 static int sonypi_misc_release(struct inode *inode, struct file *file)
27204 {
27205 mutex_lock(&sonypi_device.lock);
27206 - sonypi_device.open_count--;
27207 + local_dec(&sonypi_device.open_count);
27208 mutex_unlock(&sonypi_device.lock);
27209 return 0;
27210 }
27211 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27212 {
27213 mutex_lock(&sonypi_device.lock);
27214 /* Flush input queue on first open */
27215 - if (!sonypi_device.open_count)
27216 + if (!local_read(&sonypi_device.open_count))
27217 kfifo_reset(&sonypi_device.fifo);
27218 - sonypi_device.open_count++;
27219 + local_inc(&sonypi_device.open_count);
27220 mutex_unlock(&sonypi_device.lock);
27221
27222 return 0;
27223 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27224 index 9ca5c02..7ce352c 100644
27225 --- a/drivers/char/tpm/tpm.c
27226 +++ b/drivers/char/tpm/tpm.c
27227 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27228 chip->vendor.req_complete_val)
27229 goto out_recv;
27230
27231 - if ((status == chip->vendor.req_canceled)) {
27232 + if (status == chip->vendor.req_canceled) {
27233 dev_err(chip->dev, "Operation Canceled\n");
27234 rc = -ECANCELED;
27235 goto out;
27236 @@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
27237
27238 struct tpm_chip *chip = dev_get_drvdata(dev);
27239
27240 + pax_track_stack();
27241 +
27242 tpm_cmd.header.in = tpm_readpubek_header;
27243 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27244 "attempting to read the PUBEK");
27245 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27246 index 0636520..169c1d0 100644
27247 --- a/drivers/char/tpm/tpm_bios.c
27248 +++ b/drivers/char/tpm/tpm_bios.c
27249 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27250 event = addr;
27251
27252 if ((event->event_type == 0 && event->event_size == 0) ||
27253 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27254 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27255 return NULL;
27256
27257 return addr;
27258 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27259 return NULL;
27260
27261 if ((event->event_type == 0 && event->event_size == 0) ||
27262 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27263 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27264 return NULL;
27265
27266 (*pos)++;
27267 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27268 int i;
27269
27270 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27271 - seq_putc(m, data[i]);
27272 + if (!seq_putc(m, data[i]))
27273 + return -EFAULT;
27274
27275 return 0;
27276 }
27277 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27278 log->bios_event_log_end = log->bios_event_log + len;
27279
27280 virt = acpi_os_map_memory(start, len);
27281 + if (!virt) {
27282 + kfree(log->bios_event_log);
27283 + log->bios_event_log = NULL;
27284 + return -EFAULT;
27285 + }
27286
27287 - memcpy(log->bios_event_log, virt, len);
27288 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27289
27290 acpi_os_unmap_memory(virt, len);
27291 return 0;
27292 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27293 index fb68b12..0f6c6ca 100644
27294 --- a/drivers/char/virtio_console.c
27295 +++ b/drivers/char/virtio_console.c
27296 @@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27297 if (to_user) {
27298 ssize_t ret;
27299
27300 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27301 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27302 if (ret)
27303 return -EFAULT;
27304 } else {
27305 @@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27306 if (!port_has_data(port) && !port->host_connected)
27307 return 0;
27308
27309 - return fill_readbuf(port, ubuf, count, true);
27310 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27311 }
27312
27313 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27314 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
27315 index a84250a..68c725e 100644
27316 --- a/drivers/crypto/hifn_795x.c
27317 +++ b/drivers/crypto/hifn_795x.c
27318 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
27319 0xCA, 0x34, 0x2B, 0x2E};
27320 struct scatterlist sg;
27321
27322 + pax_track_stack();
27323 +
27324 memset(src, 0, sizeof(src));
27325 memset(ctx.key, 0, sizeof(ctx.key));
27326
27327 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
27328 index db33d30..7823369 100644
27329 --- a/drivers/crypto/padlock-aes.c
27330 +++ b/drivers/crypto/padlock-aes.c
27331 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
27332 struct crypto_aes_ctx gen_aes;
27333 int cpu;
27334
27335 + pax_track_stack();
27336 +
27337 if (key_len % 8) {
27338 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27339 return -EINVAL;
27340 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27341 index 9a8bebc..b1e4989 100644
27342 --- a/drivers/edac/amd64_edac.c
27343 +++ b/drivers/edac/amd64_edac.c
27344 @@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27345 * PCI core identifies what devices are on a system during boot, and then
27346 * inquiry this table to see if this driver is for a given device found.
27347 */
27348 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27349 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27350 {
27351 .vendor = PCI_VENDOR_ID_AMD,
27352 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27353 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27354 index e47e73b..348e0bd 100644
27355 --- a/drivers/edac/amd76x_edac.c
27356 +++ b/drivers/edac/amd76x_edac.c
27357 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27358 edac_mc_free(mci);
27359 }
27360
27361 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27362 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27363 {
27364 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27365 AMD762},
27366 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27367 index 1af531a..3a8ff27 100644
27368 --- a/drivers/edac/e752x_edac.c
27369 +++ b/drivers/edac/e752x_edac.c
27370 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27371 edac_mc_free(mci);
27372 }
27373
27374 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27375 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27376 {
27377 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27378 E7520},
27379 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27380 index 6ffb6d2..383d8d7 100644
27381 --- a/drivers/edac/e7xxx_edac.c
27382 +++ b/drivers/edac/e7xxx_edac.c
27383 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27384 edac_mc_free(mci);
27385 }
27386
27387 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27388 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27389 {
27390 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27391 E7205},
27392 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27393 index 495198a..ac08c85 100644
27394 --- a/drivers/edac/edac_pci_sysfs.c
27395 +++ b/drivers/edac/edac_pci_sysfs.c
27396 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27397 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27398 static int edac_pci_poll_msec = 1000; /* one second workq period */
27399
27400 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27401 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27402 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27403 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27404
27405 static struct kobject *edac_pci_top_main_kobj;
27406 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27407 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27408 edac_printk(KERN_CRIT, EDAC_PCI,
27409 "Signaled System Error on %s\n",
27410 pci_name(dev));
27411 - atomic_inc(&pci_nonparity_count);
27412 + atomic_inc_unchecked(&pci_nonparity_count);
27413 }
27414
27415 if (status & (PCI_STATUS_PARITY)) {
27416 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27417 "Master Data Parity Error on %s\n",
27418 pci_name(dev));
27419
27420 - atomic_inc(&pci_parity_count);
27421 + atomic_inc_unchecked(&pci_parity_count);
27422 }
27423
27424 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27425 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27426 "Detected Parity Error on %s\n",
27427 pci_name(dev));
27428
27429 - atomic_inc(&pci_parity_count);
27430 + atomic_inc_unchecked(&pci_parity_count);
27431 }
27432 }
27433
27434 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27435 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27436 "Signaled System Error on %s\n",
27437 pci_name(dev));
27438 - atomic_inc(&pci_nonparity_count);
27439 + atomic_inc_unchecked(&pci_nonparity_count);
27440 }
27441
27442 if (status & (PCI_STATUS_PARITY)) {
27443 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27444 "Master Data Parity Error on "
27445 "%s\n", pci_name(dev));
27446
27447 - atomic_inc(&pci_parity_count);
27448 + atomic_inc_unchecked(&pci_parity_count);
27449 }
27450
27451 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27452 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27453 "Detected Parity Error on %s\n",
27454 pci_name(dev));
27455
27456 - atomic_inc(&pci_parity_count);
27457 + atomic_inc_unchecked(&pci_parity_count);
27458 }
27459 }
27460 }
27461 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27462 if (!check_pci_errors)
27463 return;
27464
27465 - before_count = atomic_read(&pci_parity_count);
27466 + before_count = atomic_read_unchecked(&pci_parity_count);
27467
27468 /* scan all PCI devices looking for a Parity Error on devices and
27469 * bridges.
27470 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27471 /* Only if operator has selected panic on PCI Error */
27472 if (edac_pci_get_panic_on_pe()) {
27473 /* If the count is different 'after' from 'before' */
27474 - if (before_count != atomic_read(&pci_parity_count))
27475 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27476 panic("EDAC: PCI Parity Error");
27477 }
27478 }
27479 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27480 index c0510b3..6e2a954 100644
27481 --- a/drivers/edac/i3000_edac.c
27482 +++ b/drivers/edac/i3000_edac.c
27483 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27484 edac_mc_free(mci);
27485 }
27486
27487 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27488 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27489 {
27490 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27491 I3000},
27492 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27493 index aa08497..7e6822a 100644
27494 --- a/drivers/edac/i3200_edac.c
27495 +++ b/drivers/edac/i3200_edac.c
27496 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27497 edac_mc_free(mci);
27498 }
27499
27500 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27501 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27502 {
27503 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27504 I3200},
27505 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27506 index 4dc3ac2..67d05a6 100644
27507 --- a/drivers/edac/i5000_edac.c
27508 +++ b/drivers/edac/i5000_edac.c
27509 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27510 *
27511 * The "E500P" device is the first device supported.
27512 */
27513 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27514 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27515 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27516 .driver_data = I5000P},
27517
27518 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27519 index bcbdeec..9886d16 100644
27520 --- a/drivers/edac/i5100_edac.c
27521 +++ b/drivers/edac/i5100_edac.c
27522 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27523 edac_mc_free(mci);
27524 }
27525
27526 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27527 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27528 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27529 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27530 { 0, }
27531 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27532 index 74d6ec34..baff517 100644
27533 --- a/drivers/edac/i5400_edac.c
27534 +++ b/drivers/edac/i5400_edac.c
27535 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27536 *
27537 * The "E500P" device is the first device supported.
27538 */
27539 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27540 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27541 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27542 {0,} /* 0 terminated list. */
27543 };
27544 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27545 index a76fe83..15479e6 100644
27546 --- a/drivers/edac/i7300_edac.c
27547 +++ b/drivers/edac/i7300_edac.c
27548 @@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27549 *
27550 * Has only 8086:360c PCI ID
27551 */
27552 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27553 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27554 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27555 {0,} /* 0 terminated list. */
27556 };
27557 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27558 index f6cf448..3f612e9 100644
27559 --- a/drivers/edac/i7core_edac.c
27560 +++ b/drivers/edac/i7core_edac.c
27561 @@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev_table[] = {
27562 /*
27563 * pci_device_id table for which devices we are looking for
27564 */
27565 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27566 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27567 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27568 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27569 {0,} /* 0 terminated list. */
27570 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27571 index 4329d39..f3022ef 100644
27572 --- a/drivers/edac/i82443bxgx_edac.c
27573 +++ b/drivers/edac/i82443bxgx_edac.c
27574 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27575
27576 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27577
27578 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27579 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27580 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27581 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27582 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27583 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27584 index 931a057..fd28340 100644
27585 --- a/drivers/edac/i82860_edac.c
27586 +++ b/drivers/edac/i82860_edac.c
27587 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27588 edac_mc_free(mci);
27589 }
27590
27591 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27592 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27593 {
27594 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27595 I82860},
27596 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27597 index 33864c6..01edc61 100644
27598 --- a/drivers/edac/i82875p_edac.c
27599 +++ b/drivers/edac/i82875p_edac.c
27600 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27601 edac_mc_free(mci);
27602 }
27603
27604 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27605 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27606 {
27607 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27608 I82875P},
27609 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27610 index a5da732..983363b 100644
27611 --- a/drivers/edac/i82975x_edac.c
27612 +++ b/drivers/edac/i82975x_edac.c
27613 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27614 edac_mc_free(mci);
27615 }
27616
27617 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27618 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27619 {
27620 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27621 I82975X
27622 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27623 index 795a320..3bbc3d3 100644
27624 --- a/drivers/edac/mce_amd.h
27625 +++ b/drivers/edac/mce_amd.h
27626 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27627 bool (*dc_mce)(u16, u8);
27628 bool (*ic_mce)(u16, u8);
27629 bool (*nb_mce)(u16, u8);
27630 -};
27631 +} __no_const;
27632
27633 void amd_report_gart_errors(bool);
27634 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
27635 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27636 index b153674..ad2ba9b 100644
27637 --- a/drivers/edac/r82600_edac.c
27638 +++ b/drivers/edac/r82600_edac.c
27639 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27640 edac_mc_free(mci);
27641 }
27642
27643 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27644 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27645 {
27646 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27647 },
27648 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27649 index b6f47de..c5acf3a 100644
27650 --- a/drivers/edac/x38_edac.c
27651 +++ b/drivers/edac/x38_edac.c
27652 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27653 edac_mc_free(mci);
27654 }
27655
27656 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27657 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27658 {
27659 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27660 X38},
27661 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27662 index 85661b0..c784559a 100644
27663 --- a/drivers/firewire/core-card.c
27664 +++ b/drivers/firewire/core-card.c
27665 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27666
27667 void fw_core_remove_card(struct fw_card *card)
27668 {
27669 - struct fw_card_driver dummy_driver = dummy_driver_template;
27670 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27671
27672 card->driver->update_phy_reg(card, 4,
27673 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27674 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27675 index 4799393..37bd3ab 100644
27676 --- a/drivers/firewire/core-cdev.c
27677 +++ b/drivers/firewire/core-cdev.c
27678 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27679 int ret;
27680
27681 if ((request->channels == 0 && request->bandwidth == 0) ||
27682 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27683 - request->bandwidth < 0)
27684 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27685 return -EINVAL;
27686
27687 r = kmalloc(sizeof(*r), GFP_KERNEL);
27688 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27689 index 334b82a..ea5261d 100644
27690 --- a/drivers/firewire/core-transaction.c
27691 +++ b/drivers/firewire/core-transaction.c
27692 @@ -37,6 +37,7 @@
27693 #include <linux/timer.h>
27694 #include <linux/types.h>
27695 #include <linux/workqueue.h>
27696 +#include <linux/sched.h>
27697
27698 #include <asm/byteorder.h>
27699
27700 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
27701 struct transaction_callback_data d;
27702 struct fw_transaction t;
27703
27704 + pax_track_stack();
27705 +
27706 init_timer_on_stack(&t.split_timeout_timer);
27707 init_completion(&d.done);
27708 d.payload = payload;
27709 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27710 index b45be57..5fad18b 100644
27711 --- a/drivers/firewire/core.h
27712 +++ b/drivers/firewire/core.h
27713 @@ -101,6 +101,7 @@ struct fw_card_driver {
27714
27715 int (*stop_iso)(struct fw_iso_context *ctx);
27716 };
27717 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27718
27719 void fw_card_initialize(struct fw_card *card,
27720 const struct fw_card_driver *driver, struct device *device);
27721 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27722 index bcb1126..2cc2121 100644
27723 --- a/drivers/firmware/dmi_scan.c
27724 +++ b/drivers/firmware/dmi_scan.c
27725 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27726 }
27727 }
27728 else {
27729 - /*
27730 - * no iounmap() for that ioremap(); it would be a no-op, but
27731 - * it's so early in setup that sucker gets confused into doing
27732 - * what it shouldn't if we actually call it.
27733 - */
27734 p = dmi_ioremap(0xF0000, 0x10000);
27735 if (p == NULL)
27736 goto error;
27737 @@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27738 if (buf == NULL)
27739 return -1;
27740
27741 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27742 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27743
27744 iounmap(buf);
27745 return 0;
27746 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27747 index 98723cb..10ca85b 100644
27748 --- a/drivers/gpio/gpio-vr41xx.c
27749 +++ b/drivers/gpio/gpio-vr41xx.c
27750 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27751 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27752 maskl, pendl, maskh, pendh);
27753
27754 - atomic_inc(&irq_err_count);
27755 + atomic_inc_unchecked(&irq_err_count);
27756
27757 return -EINVAL;
27758 }
27759 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27760 index fe738f0..2d03563 100644
27761 --- a/drivers/gpu/drm/drm_crtc.c
27762 +++ b/drivers/gpu/drm/drm_crtc.c
27763 @@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27764 */
27765 if ((out_resp->count_modes >= mode_count) && mode_count) {
27766 copied = 0;
27767 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27768 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27769 list_for_each_entry(mode, &connector->modes, head) {
27770 drm_crtc_convert_to_umode(&u_mode, mode);
27771 if (copy_to_user(mode_ptr + copied,
27772 @@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27773
27774 if ((out_resp->count_props >= props_count) && props_count) {
27775 copied = 0;
27776 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27777 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27778 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27779 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27780 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27781 if (connector->property_ids[i] != 0) {
27782 if (put_user(connector->property_ids[i],
27783 @@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27784
27785 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27786 copied = 0;
27787 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27788 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27789 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27790 if (connector->encoder_ids[i] != 0) {
27791 if (put_user(connector->encoder_ids[i],
27792 @@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27793 }
27794
27795 for (i = 0; i < crtc_req->count_connectors; i++) {
27796 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27797 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27798 if (get_user(out_id, &set_connectors_ptr[i])) {
27799 ret = -EFAULT;
27800 goto out;
27801 @@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27802 fb = obj_to_fb(obj);
27803
27804 num_clips = r->num_clips;
27805 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27806 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27807
27808 if (!num_clips != !clips_ptr) {
27809 ret = -EINVAL;
27810 @@ -1868,6 +1868,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27811 }
27812
27813 if (num_clips && clips_ptr) {
27814 + if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
27815 + ret = -EINVAL;
27816 + goto out_err1;
27817 + }
27818 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
27819 if (!clips) {
27820 ret = -ENOMEM;
27821 @@ -2272,7 +2276,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27822 out_resp->flags = property->flags;
27823
27824 if ((out_resp->count_values >= value_count) && value_count) {
27825 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27826 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27827 for (i = 0; i < value_count; i++) {
27828 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27829 ret = -EFAULT;
27830 @@ -2285,7 +2289,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27831 if (property->flags & DRM_MODE_PROP_ENUM) {
27832 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27833 copied = 0;
27834 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27835 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27836 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27837
27838 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27839 @@ -2308,7 +2312,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27840 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27841 copied = 0;
27842 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27843 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27844 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27845
27846 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27847 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27848 @@ -2369,7 +2373,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27849 struct drm_mode_get_blob *out_resp = data;
27850 struct drm_property_blob *blob;
27851 int ret = 0;
27852 - void *blob_ptr;
27853 + void __user *blob_ptr;
27854
27855 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27856 return -EINVAL;
27857 @@ -2383,7 +2387,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27858 blob = obj_to_blob(obj);
27859
27860 if (out_resp->length == blob->length) {
27861 - blob_ptr = (void *)(unsigned long)out_resp->data;
27862 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
27863 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27864 ret = -EFAULT;
27865 goto done;
27866 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27867 index f88a9b2..8f4078f 100644
27868 --- a/drivers/gpu/drm/drm_crtc_helper.c
27869 +++ b/drivers/gpu/drm/drm_crtc_helper.c
27870 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27871 struct drm_crtc *tmp;
27872 int crtc_mask = 1;
27873
27874 - WARN(!crtc, "checking null crtc?\n");
27875 + BUG_ON(!crtc);
27876
27877 dev = crtc->dev;
27878
27879 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
27880 struct drm_encoder *encoder;
27881 bool ret = true;
27882
27883 + pax_track_stack();
27884 +
27885 crtc->enabled = drm_helper_crtc_in_use(crtc);
27886 if (!crtc->enabled)
27887 return true;
27888 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27889 index 93a112d..c8b065d 100644
27890 --- a/drivers/gpu/drm/drm_drv.c
27891 +++ b/drivers/gpu/drm/drm_drv.c
27892 @@ -307,7 +307,7 @@ module_exit(drm_core_exit);
27893 /**
27894 * Copy and IOCTL return string to user space
27895 */
27896 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27897 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27898 {
27899 int len;
27900
27901 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
27902
27903 dev = file_priv->minor->dev;
27904 atomic_inc(&dev->ioctl_count);
27905 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27906 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27907 ++file_priv->ioctl_count;
27908
27909 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27910 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27911 index 2ec7d48..be14bb1 100644
27912 --- a/drivers/gpu/drm/drm_fops.c
27913 +++ b/drivers/gpu/drm/drm_fops.c
27914 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device * dev)
27915 }
27916
27917 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27918 - atomic_set(&dev->counts[i], 0);
27919 + atomic_set_unchecked(&dev->counts[i], 0);
27920
27921 dev->sigdata.lock = NULL;
27922
27923 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct file *filp)
27924
27925 retcode = drm_open_helper(inode, filp, dev);
27926 if (!retcode) {
27927 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27928 - if (!dev->open_count++)
27929 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27930 + if (local_inc_return(&dev->open_count) == 1)
27931 retcode = drm_setup(dev);
27932 }
27933 if (!retcode) {
27934 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, struct file *filp)
27935
27936 mutex_lock(&drm_global_mutex);
27937
27938 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27939 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27940
27941 if (dev->driver->preclose)
27942 dev->driver->preclose(dev, file_priv);
27943 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, struct file *filp)
27944 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27945 task_pid_nr(current),
27946 (long)old_encode_dev(file_priv->minor->device),
27947 - dev->open_count);
27948 + local_read(&dev->open_count));
27949
27950 /* if the master has gone away we can't do anything with the lock */
27951 if (file_priv->minor->master)
27952 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, struct file *filp)
27953 * End inline drm_release
27954 */
27955
27956 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27957 - if (!--dev->open_count) {
27958 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27959 + if (local_dec_and_test(&dev->open_count)) {
27960 if (atomic_read(&dev->ioctl_count)) {
27961 DRM_ERROR("Device busy: %d\n",
27962 atomic_read(&dev->ioctl_count));
27963 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27964 index c87dc96..326055d 100644
27965 --- a/drivers/gpu/drm/drm_global.c
27966 +++ b/drivers/gpu/drm/drm_global.c
27967 @@ -36,7 +36,7 @@
27968 struct drm_global_item {
27969 struct mutex mutex;
27970 void *object;
27971 - int refcount;
27972 + atomic_t refcount;
27973 };
27974
27975 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27976 @@ -49,7 +49,7 @@ void drm_global_init(void)
27977 struct drm_global_item *item = &glob[i];
27978 mutex_init(&item->mutex);
27979 item->object = NULL;
27980 - item->refcount = 0;
27981 + atomic_set(&item->refcount, 0);
27982 }
27983 }
27984
27985 @@ -59,7 +59,7 @@ void drm_global_release(void)
27986 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27987 struct drm_global_item *item = &glob[i];
27988 BUG_ON(item->object != NULL);
27989 - BUG_ON(item->refcount != 0);
27990 + BUG_ON(atomic_read(&item->refcount) != 0);
27991 }
27992 }
27993
27994 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27995 void *object;
27996
27997 mutex_lock(&item->mutex);
27998 - if (item->refcount == 0) {
27999 + if (atomic_read(&item->refcount) == 0) {
28000 item->object = kzalloc(ref->size, GFP_KERNEL);
28001 if (unlikely(item->object == NULL)) {
28002 ret = -ENOMEM;
28003 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
28004 goto out_err;
28005
28006 }
28007 - ++item->refcount;
28008 + atomic_inc(&item->refcount);
28009 ref->object = item->object;
28010 object = item->object;
28011 mutex_unlock(&item->mutex);
28012 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
28013 struct drm_global_item *item = &glob[ref->global_type];
28014
28015 mutex_lock(&item->mutex);
28016 - BUG_ON(item->refcount == 0);
28017 + BUG_ON(atomic_read(&item->refcount) == 0);
28018 BUG_ON(ref->object != item->object);
28019 - if (--item->refcount == 0) {
28020 + if (atomic_dec_and_test(&item->refcount)) {
28021 ref->release(ref);
28022 item->object = NULL;
28023 }
28024 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
28025 index ab1162d..42587b2 100644
28026 --- a/drivers/gpu/drm/drm_info.c
28027 +++ b/drivers/gpu/drm/drm_info.c
28028 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
28029 struct drm_local_map *map;
28030 struct drm_map_list *r_list;
28031
28032 - /* Hardcoded from _DRM_FRAME_BUFFER,
28033 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28034 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28035 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28036 + static const char * const types[] = {
28037 + [_DRM_FRAME_BUFFER] = "FB",
28038 + [_DRM_REGISTERS] = "REG",
28039 + [_DRM_SHM] = "SHM",
28040 + [_DRM_AGP] = "AGP",
28041 + [_DRM_SCATTER_GATHER] = "SG",
28042 + [_DRM_CONSISTENT] = "PCI",
28043 + [_DRM_GEM] = "GEM" };
28044 const char *type;
28045 int i;
28046
28047 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
28048 map = r_list->map;
28049 if (!map)
28050 continue;
28051 - if (map->type < 0 || map->type > 5)
28052 + if (map->type >= ARRAY_SIZE(types))
28053 type = "??";
28054 else
28055 type = types[map->type];
28056 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
28057 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28058 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28059 vma->vm_flags & VM_IO ? 'i' : '-',
28060 +#ifdef CONFIG_GRKERNSEC_HIDESYM
28061 + 0);
28062 +#else
28063 vma->vm_pgoff);
28064 +#endif
28065
28066 #if defined(__i386__)
28067 pgprot = pgprot_val(vma->vm_page_prot);
28068 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
28069 index 4a058c7..b42cd92 100644
28070 --- a/drivers/gpu/drm/drm_ioc32.c
28071 +++ b/drivers/gpu/drm/drm_ioc32.c
28072 @@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
28073 request = compat_alloc_user_space(nbytes);
28074 if (!access_ok(VERIFY_WRITE, request, nbytes))
28075 return -EFAULT;
28076 - list = (struct drm_buf_desc *) (request + 1);
28077 + list = (struct drm_buf_desc __user *) (request + 1);
28078
28079 if (__put_user(count, &request->count)
28080 || __put_user(list, &request->list))
28081 @@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
28082 request = compat_alloc_user_space(nbytes);
28083 if (!access_ok(VERIFY_WRITE, request, nbytes))
28084 return -EFAULT;
28085 - list = (struct drm_buf_pub *) (request + 1);
28086 + list = (struct drm_buf_pub __user *) (request + 1);
28087
28088 if (__put_user(count, &request->count)
28089 || __put_user(list, &request->list))
28090 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
28091 index 904d7e9..ab88581 100644
28092 --- a/drivers/gpu/drm/drm_ioctl.c
28093 +++ b/drivers/gpu/drm/drm_ioctl.c
28094 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
28095 stats->data[i].value =
28096 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28097 else
28098 - stats->data[i].value = atomic_read(&dev->counts[i]);
28099 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28100 stats->data[i].type = dev->types[i];
28101 }
28102
28103 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
28104 index 632ae24..244cf4a 100644
28105 --- a/drivers/gpu/drm/drm_lock.c
28106 +++ b/drivers/gpu/drm/drm_lock.c
28107 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28108 if (drm_lock_take(&master->lock, lock->context)) {
28109 master->lock.file_priv = file_priv;
28110 master->lock.lock_time = jiffies;
28111 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28112 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28113 break; /* Got lock */
28114 }
28115
28116 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
28117 return -EINVAL;
28118 }
28119
28120 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28121 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28122
28123 if (drm_lock_free(&master->lock, lock->context)) {
28124 /* FIXME: Should really bail out here. */
28125 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28126 index 8f371e8..9f85d52 100644
28127 --- a/drivers/gpu/drm/i810/i810_dma.c
28128 +++ b/drivers/gpu/drm/i810/i810_dma.c
28129 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28130 dma->buflist[vertex->idx],
28131 vertex->discard, vertex->used);
28132
28133 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28134 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28135 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28136 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28137 sarea_priv->last_enqueue = dev_priv->counter - 1;
28138 sarea_priv->last_dispatch = (int)hw_status[5];
28139
28140 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28141 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28142 mc->last_render);
28143
28144 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28145 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28146 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28147 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28148 sarea_priv->last_enqueue = dev_priv->counter - 1;
28149 sarea_priv->last_dispatch = (int)hw_status[5];
28150
28151 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28152 index c9339f4..f5e1b9d 100644
28153 --- a/drivers/gpu/drm/i810/i810_drv.h
28154 +++ b/drivers/gpu/drm/i810/i810_drv.h
28155 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28156 int page_flipping;
28157
28158 wait_queue_head_t irq_queue;
28159 - atomic_t irq_received;
28160 - atomic_t irq_emitted;
28161 + atomic_unchecked_t irq_received;
28162 + atomic_unchecked_t irq_emitted;
28163
28164 int front_offset;
28165 } drm_i810_private_t;
28166 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28167 index 3c395a5..02889c2 100644
28168 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28169 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28170 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28171 I915_READ(GTIMR));
28172 }
28173 seq_printf(m, "Interrupts received: %d\n",
28174 - atomic_read(&dev_priv->irq_received));
28175 + atomic_read_unchecked(&dev_priv->irq_received));
28176 for (i = 0; i < I915_NUM_RINGS; i++) {
28177 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28178 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28179 @@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28180 return ret;
28181
28182 if (opregion->header)
28183 - seq_write(m, opregion->header, OPREGION_SIZE);
28184 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28185
28186 mutex_unlock(&dev->struct_mutex);
28187
28188 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28189 index 8a3942c..1b73bf1 100644
28190 --- a/drivers/gpu/drm/i915/i915_dma.c
28191 +++ b/drivers/gpu/drm/i915/i915_dma.c
28192 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28193 bool can_switch;
28194
28195 spin_lock(&dev->count_lock);
28196 - can_switch = (dev->open_count == 0);
28197 + can_switch = (local_read(&dev->open_count) == 0);
28198 spin_unlock(&dev->count_lock);
28199 return can_switch;
28200 }
28201 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28202 index 7916bd9..7c17a0f 100644
28203 --- a/drivers/gpu/drm/i915/i915_drv.h
28204 +++ b/drivers/gpu/drm/i915/i915_drv.h
28205 @@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
28206 /* render clock increase/decrease */
28207 /* display clock increase/decrease */
28208 /* pll clock increase/decrease */
28209 -};
28210 +} __no_const;
28211
28212 struct intel_device_info {
28213 u8 gen;
28214 @@ -305,7 +305,7 @@ typedef struct drm_i915_private {
28215 int current_page;
28216 int page_flipping;
28217
28218 - atomic_t irq_received;
28219 + atomic_unchecked_t irq_received;
28220
28221 /* protects the irq masks */
28222 spinlock_t irq_lock;
28223 @@ -882,7 +882,7 @@ struct drm_i915_gem_object {
28224 * will be page flipped away on the next vblank. When it
28225 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28226 */
28227 - atomic_t pending_flip;
28228 + atomic_unchecked_t pending_flip;
28229 };
28230
28231 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28232 @@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28233 extern void intel_teardown_gmbus(struct drm_device *dev);
28234 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28235 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28236 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28237 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28238 {
28239 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28240 }
28241 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28242 index 4934cf8..1da9c84 100644
28243 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28244 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28245 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28246 i915_gem_clflush_object(obj);
28247
28248 if (obj->base.pending_write_domain)
28249 - cd->flips |= atomic_read(&obj->pending_flip);
28250 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28251
28252 /* The actual obj->write_domain will be updated with
28253 * pending_write_domain after we emit the accumulated flush for all
28254 @@ -864,9 +864,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
28255
28256 static int
28257 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
28258 - int count)
28259 + unsigned int count)
28260 {
28261 - int i;
28262 + unsigned int i;
28263
28264 for (i = 0; i < count; i++) {
28265 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
28266 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28267 index 9cbb0cd..958a31f 100644
28268 --- a/drivers/gpu/drm/i915/i915_irq.c
28269 +++ b/drivers/gpu/drm/i915/i915_irq.c
28270 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28271 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28272 struct drm_i915_master_private *master_priv;
28273
28274 - atomic_inc(&dev_priv->irq_received);
28275 + atomic_inc_unchecked(&dev_priv->irq_received);
28276
28277 /* disable master interrupt before clearing iir */
28278 de_ier = I915_READ(DEIER);
28279 @@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28280 struct drm_i915_master_private *master_priv;
28281 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28282
28283 - atomic_inc(&dev_priv->irq_received);
28284 + atomic_inc_unchecked(&dev_priv->irq_received);
28285
28286 if (IS_GEN6(dev))
28287 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28288 @@ -1228,7 +1228,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28289 int ret = IRQ_NONE, pipe;
28290 bool blc_event = false;
28291
28292 - atomic_inc(&dev_priv->irq_received);
28293 + atomic_inc_unchecked(&dev_priv->irq_received);
28294
28295 iir = I915_READ(IIR);
28296
28297 @@ -1740,7 +1740,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28298 {
28299 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28300
28301 - atomic_set(&dev_priv->irq_received, 0);
28302 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28303
28304 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28305 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28306 @@ -1904,7 +1904,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28307 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28308 int pipe;
28309
28310 - atomic_set(&dev_priv->irq_received, 0);
28311 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28312
28313 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28314 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28315 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28316 index e1340a2..24f40c3 100644
28317 --- a/drivers/gpu/drm/i915/intel_display.c
28318 +++ b/drivers/gpu/drm/i915/intel_display.c
28319 @@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28320
28321 wait_event(dev_priv->pending_flip_queue,
28322 atomic_read(&dev_priv->mm.wedged) ||
28323 - atomic_read(&obj->pending_flip) == 0);
28324 + atomic_read_unchecked(&obj->pending_flip) == 0);
28325
28326 /* Big Hammer, we also need to ensure that any pending
28327 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28328 @@ -2826,7 +2826,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28329 obj = to_intel_framebuffer(crtc->fb)->obj;
28330 dev_priv = crtc->dev->dev_private;
28331 wait_event(dev_priv->pending_flip_queue,
28332 - atomic_read(&obj->pending_flip) == 0);
28333 + atomic_read_unchecked(&obj->pending_flip) == 0);
28334 }
28335
28336 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28337 @@ -6676,7 +6676,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28338
28339 atomic_clear_mask(1 << intel_crtc->plane,
28340 &obj->pending_flip.counter);
28341 - if (atomic_read(&obj->pending_flip) == 0)
28342 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28343 wake_up(&dev_priv->pending_flip_queue);
28344
28345 schedule_work(&work->work);
28346 @@ -6965,7 +6965,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28347 /* Block clients from rendering to the new back buffer until
28348 * the flip occurs and the object is no longer visible.
28349 */
28350 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28351 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28352
28353 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28354 if (ret)
28355 @@ -6979,7 +6979,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28356 return 0;
28357
28358 cleanup_pending:
28359 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28360 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28361 cleanup_objs:
28362 drm_gem_object_unreference(&work->old_fb_obj->base);
28363 drm_gem_object_unreference(&obj->base);
28364 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28365 index 54558a0..2d97005 100644
28366 --- a/drivers/gpu/drm/mga/mga_drv.h
28367 +++ b/drivers/gpu/drm/mga/mga_drv.h
28368 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28369 u32 clear_cmd;
28370 u32 maccess;
28371
28372 - atomic_t vbl_received; /**< Number of vblanks received. */
28373 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28374 wait_queue_head_t fence_queue;
28375 - atomic_t last_fence_retired;
28376 + atomic_unchecked_t last_fence_retired;
28377 u32 next_fence_to_post;
28378
28379 unsigned int fb_cpp;
28380 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28381 index 2581202..f230a8d9 100644
28382 --- a/drivers/gpu/drm/mga/mga_irq.c
28383 +++ b/drivers/gpu/drm/mga/mga_irq.c
28384 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28385 if (crtc != 0)
28386 return 0;
28387
28388 - return atomic_read(&dev_priv->vbl_received);
28389 + return atomic_read_unchecked(&dev_priv->vbl_received);
28390 }
28391
28392
28393 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28394 /* VBLANK interrupt */
28395 if (status & MGA_VLINEPEN) {
28396 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28397 - atomic_inc(&dev_priv->vbl_received);
28398 + atomic_inc_unchecked(&dev_priv->vbl_received);
28399 drm_handle_vblank(dev, 0);
28400 handled = 1;
28401 }
28402 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28403 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28404 MGA_WRITE(MGA_PRIMEND, prim_end);
28405
28406 - atomic_inc(&dev_priv->last_fence_retired);
28407 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28408 DRM_WAKEUP(&dev_priv->fence_queue);
28409 handled = 1;
28410 }
28411 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28412 * using fences.
28413 */
28414 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28415 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28416 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28417 - *sequence) <= (1 << 23)));
28418
28419 *sequence = cur_fence;
28420 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28421 index b311fab..dc11d6a 100644
28422 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28423 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28424 @@ -201,7 +201,7 @@ struct methods {
28425 const char desc[8];
28426 void (*loadbios)(struct drm_device *, uint8_t *);
28427 const bool rw;
28428 -};
28429 +} __do_const;
28430
28431 static struct methods shadow_methods[] = {
28432 { "PRAMIN", load_vbios_pramin, true },
28433 @@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28434 struct bit_table {
28435 const char id;
28436 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28437 -};
28438 +} __no_const;
28439
28440 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28441
28442 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28443 index d7d51de..7c6a7f1 100644
28444 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28445 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28446 @@ -238,7 +238,7 @@ struct nouveau_channel {
28447 struct list_head pending;
28448 uint32_t sequence;
28449 uint32_t sequence_ack;
28450 - atomic_t last_sequence_irq;
28451 + atomic_unchecked_t last_sequence_irq;
28452 struct nouveau_vma vma;
28453 } fence;
28454
28455 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28456 u32 handle, u16 class);
28457 void (*set_tile_region)(struct drm_device *dev, int i);
28458 void (*tlb_flush)(struct drm_device *, int engine);
28459 -};
28460 +} __no_const;
28461
28462 struct nouveau_instmem_engine {
28463 void *priv;
28464 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28465 struct nouveau_mc_engine {
28466 int (*init)(struct drm_device *dev);
28467 void (*takedown)(struct drm_device *dev);
28468 -};
28469 +} __no_const;
28470
28471 struct nouveau_timer_engine {
28472 int (*init)(struct drm_device *dev);
28473 void (*takedown)(struct drm_device *dev);
28474 uint64_t (*read)(struct drm_device *dev);
28475 -};
28476 +} __no_const;
28477
28478 struct nouveau_fb_engine {
28479 int num_tiles;
28480 @@ -513,7 +513,7 @@ struct nouveau_vram_engine {
28481 void (*put)(struct drm_device *, struct nouveau_mem **);
28482
28483 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28484 -};
28485 +} __no_const;
28486
28487 struct nouveau_engine {
28488 struct nouveau_instmem_engine instmem;
28489 @@ -660,7 +660,7 @@ struct drm_nouveau_private {
28490 struct drm_global_reference mem_global_ref;
28491 struct ttm_bo_global_ref bo_global_ref;
28492 struct ttm_bo_device bdev;
28493 - atomic_t validate_sequence;
28494 + atomic_unchecked_t validate_sequence;
28495 } ttm;
28496
28497 struct {
28498 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28499 index ae22dfa..4f09960 100644
28500 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28501 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28502 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28503 if (USE_REFCNT(dev))
28504 sequence = nvchan_rd32(chan, 0x48);
28505 else
28506 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28507 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28508
28509 if (chan->fence.sequence_ack == sequence)
28510 goto out;
28511 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28512 return ret;
28513 }
28514
28515 - atomic_set(&chan->fence.last_sequence_irq, 0);
28516 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28517 return 0;
28518 }
28519
28520 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28521 index 5f0bc57..eb9fac8 100644
28522 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28523 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28524 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28525 int trycnt = 0;
28526 int ret, i;
28527
28528 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28529 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28530 retry:
28531 if (++trycnt > 100000) {
28532 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28533 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28534 index 10656e4..59bf2a4 100644
28535 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28536 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28537 @@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28538 bool can_switch;
28539
28540 spin_lock(&dev->count_lock);
28541 - can_switch = (dev->open_count == 0);
28542 + can_switch = (local_read(&dev->open_count) == 0);
28543 spin_unlock(&dev->count_lock);
28544 return can_switch;
28545 }
28546 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28547 index dbdea8e..cd6eeeb 100644
28548 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28549 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28550 @@ -554,7 +554,7 @@ static int
28551 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28552 u32 class, u32 mthd, u32 data)
28553 {
28554 - atomic_set(&chan->fence.last_sequence_irq, data);
28555 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28556 return 0;
28557 }
28558
28559 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28560 index 570e190..084a31a 100644
28561 --- a/drivers/gpu/drm/r128/r128_cce.c
28562 +++ b/drivers/gpu/drm/r128/r128_cce.c
28563 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28564
28565 /* GH: Simple idle check.
28566 */
28567 - atomic_set(&dev_priv->idle_count, 0);
28568 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28569
28570 /* We don't support anything other than bus-mastering ring mode,
28571 * but the ring can be in either AGP or PCI space for the ring
28572 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28573 index 930c71b..499aded 100644
28574 --- a/drivers/gpu/drm/r128/r128_drv.h
28575 +++ b/drivers/gpu/drm/r128/r128_drv.h
28576 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28577 int is_pci;
28578 unsigned long cce_buffers_offset;
28579
28580 - atomic_t idle_count;
28581 + atomic_unchecked_t idle_count;
28582
28583 int page_flipping;
28584 int current_page;
28585 u32 crtc_offset;
28586 u32 crtc_offset_cntl;
28587
28588 - atomic_t vbl_received;
28589 + atomic_unchecked_t vbl_received;
28590
28591 u32 color_fmt;
28592 unsigned int front_offset;
28593 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28594 index 429d5a0..7e899ed 100644
28595 --- a/drivers/gpu/drm/r128/r128_irq.c
28596 +++ b/drivers/gpu/drm/r128/r128_irq.c
28597 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28598 if (crtc != 0)
28599 return 0;
28600
28601 - return atomic_read(&dev_priv->vbl_received);
28602 + return atomic_read_unchecked(&dev_priv->vbl_received);
28603 }
28604
28605 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28606 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28607 /* VBLANK interrupt */
28608 if (status & R128_CRTC_VBLANK_INT) {
28609 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28610 - atomic_inc(&dev_priv->vbl_received);
28611 + atomic_inc_unchecked(&dev_priv->vbl_received);
28612 drm_handle_vblank(dev, 0);
28613 return IRQ_HANDLED;
28614 }
28615 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28616 index a9e33ce..09edd4b 100644
28617 --- a/drivers/gpu/drm/r128/r128_state.c
28618 +++ b/drivers/gpu/drm/r128/r128_state.c
28619 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28620
28621 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28622 {
28623 - if (atomic_read(&dev_priv->idle_count) == 0)
28624 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28625 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28626 else
28627 - atomic_set(&dev_priv->idle_count, 0);
28628 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28629 }
28630
28631 #endif
28632 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
28633 index 14cc88a..cc7b3a5 100644
28634 --- a/drivers/gpu/drm/radeon/atom.c
28635 +++ b/drivers/gpu/drm/radeon/atom.c
28636 @@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
28637 char name[512];
28638 int i;
28639
28640 + pax_track_stack();
28641 +
28642 if (!ctx)
28643 return NULL;
28644
28645 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28646 index 5a82b6b..9e69c73 100644
28647 --- a/drivers/gpu/drm/radeon/mkregtable.c
28648 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28649 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28650 regex_t mask_rex;
28651 regmatch_t match[4];
28652 char buf[1024];
28653 - size_t end;
28654 + long end;
28655 int len;
28656 int done = 0;
28657 int r;
28658 unsigned o;
28659 struct offset *offset;
28660 char last_reg_s[10];
28661 - int last_reg;
28662 + unsigned long last_reg;
28663
28664 if (regcomp
28665 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28666 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28667 index 184628c..30e1725 100644
28668 --- a/drivers/gpu/drm/radeon/radeon.h
28669 +++ b/drivers/gpu/drm/radeon/radeon.h
28670 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28671 */
28672 struct radeon_fence_driver {
28673 uint32_t scratch_reg;
28674 - atomic_t seq;
28675 + atomic_unchecked_t seq;
28676 uint32_t last_seq;
28677 unsigned long last_jiffies;
28678 unsigned long last_timeout;
28679 @@ -962,7 +962,7 @@ struct radeon_asic {
28680 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28681 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28682 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28683 -};
28684 +} __no_const;
28685
28686 /*
28687 * Asic structures
28688 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
28689 index bf2b615..c821ec8 100644
28690 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
28691 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
28692 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
28693 struct radeon_gpio_rec gpio;
28694 struct radeon_hpd hpd;
28695
28696 + pax_track_stack();
28697 +
28698 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
28699 return false;
28700
28701 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28702 index b51e157..8f14fb9 100644
28703 --- a/drivers/gpu/drm/radeon/radeon_device.c
28704 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28705 @@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28706 bool can_switch;
28707
28708 spin_lock(&dev->count_lock);
28709 - can_switch = (dev->open_count == 0);
28710 + can_switch = (local_read(&dev->open_count) == 0);
28711 spin_unlock(&dev->count_lock);
28712 return can_switch;
28713 }
28714 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
28715 index 6adb3e5..b91553e2 100644
28716 --- a/drivers/gpu/drm/radeon/radeon_display.c
28717 +++ b/drivers/gpu/drm/radeon/radeon_display.c
28718 @@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
28719 uint32_t post_div;
28720 u32 pll_out_min, pll_out_max;
28721
28722 + pax_track_stack();
28723 +
28724 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
28725 freq = freq * 1000;
28726
28727 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28728 index a1b59ca..86f2d44 100644
28729 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28730 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28731 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28732
28733 /* SW interrupt */
28734 wait_queue_head_t swi_queue;
28735 - atomic_t swi_emitted;
28736 + atomic_unchecked_t swi_emitted;
28737 int vblank_crtc;
28738 uint32_t irq_enable_reg;
28739 uint32_t r500_disp_irq_reg;
28740 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28741 index 7fd4e3e..9748ab5 100644
28742 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28743 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28744 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28745 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28746 return 0;
28747 }
28748 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28749 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28750 if (!rdev->cp.ready)
28751 /* FIXME: cp is not running assume everythings is done right
28752 * away
28753 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28754 return r;
28755 }
28756 radeon_fence_write(rdev, 0);
28757 - atomic_set(&rdev->fence_drv.seq, 0);
28758 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28759 INIT_LIST_HEAD(&rdev->fence_drv.created);
28760 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28761 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28762 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28763 index 48b7cea..342236f 100644
28764 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28765 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28766 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28767 request = compat_alloc_user_space(sizeof(*request));
28768 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28769 || __put_user(req32.param, &request->param)
28770 - || __put_user((void __user *)(unsigned long)req32.value,
28771 + || __put_user((unsigned long)req32.value,
28772 &request->value))
28773 return -EFAULT;
28774
28775 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28776 index 465746b..cb2b055 100644
28777 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28778 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28779 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28780 unsigned int ret;
28781 RING_LOCALS;
28782
28783 - atomic_inc(&dev_priv->swi_emitted);
28784 - ret = atomic_read(&dev_priv->swi_emitted);
28785 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28786 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28787
28788 BEGIN_RING(4);
28789 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28790 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28791 drm_radeon_private_t *dev_priv =
28792 (drm_radeon_private_t *) dev->dev_private;
28793
28794 - atomic_set(&dev_priv->swi_emitted, 0);
28795 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28796 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28797
28798 dev->max_vblank_count = 0x001fffff;
28799 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28800 index 92e7ea7..147ffad 100644
28801 --- a/drivers/gpu/drm/radeon/radeon_state.c
28802 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28803 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28804 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28805 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28806
28807 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28808 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28809 sarea_priv->nbox * sizeof(depth_boxes[0])))
28810 return -EFAULT;
28811
28812 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28813 {
28814 drm_radeon_private_t *dev_priv = dev->dev_private;
28815 drm_radeon_getparam_t *param = data;
28816 - int value;
28817 + int value = 0;
28818
28819 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28820
28821 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28822 index 0b5468b..9c4b308 100644
28823 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
28824 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28825 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28826 }
28827 if (unlikely(ttm_vm_ops == NULL)) {
28828 ttm_vm_ops = vma->vm_ops;
28829 - radeon_ttm_vm_ops = *ttm_vm_ops;
28830 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28831 + pax_open_kernel();
28832 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28833 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28834 + pax_close_kernel();
28835 }
28836 vma->vm_ops = &radeon_ttm_vm_ops;
28837 return 0;
28838 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28839 index a9049ed..501f284 100644
28840 --- a/drivers/gpu/drm/radeon/rs690.c
28841 +++ b/drivers/gpu/drm/radeon/rs690.c
28842 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28843 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28844 rdev->pm.sideport_bandwidth.full)
28845 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28846 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28847 + read_delay_latency.full = dfixed_const(800 * 1000);
28848 read_delay_latency.full = dfixed_div(read_delay_latency,
28849 rdev->pm.igp_sideport_mclk);
28850 + a.full = dfixed_const(370);
28851 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28852 } else {
28853 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28854 rdev->pm.k8_bandwidth.full)
28855 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28856 index 727e93d..1565650 100644
28857 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28858 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28859 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28860 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28861 struct shrink_control *sc)
28862 {
28863 - static atomic_t start_pool = ATOMIC_INIT(0);
28864 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28865 unsigned i;
28866 - unsigned pool_offset = atomic_add_return(1, &start_pool);
28867 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28868 struct ttm_page_pool *pool;
28869 int shrink_pages = sc->nr_to_scan;
28870
28871 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28872 index 9cf87d9..2000b7d 100644
28873 --- a/drivers/gpu/drm/via/via_drv.h
28874 +++ b/drivers/gpu/drm/via/via_drv.h
28875 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28876 typedef uint32_t maskarray_t[5];
28877
28878 typedef struct drm_via_irq {
28879 - atomic_t irq_received;
28880 + atomic_unchecked_t irq_received;
28881 uint32_t pending_mask;
28882 uint32_t enable_mask;
28883 wait_queue_head_t irq_queue;
28884 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28885 struct timeval last_vblank;
28886 int last_vblank_valid;
28887 unsigned usec_per_vblank;
28888 - atomic_t vbl_received;
28889 + atomic_unchecked_t vbl_received;
28890 drm_via_state_t hc_state;
28891 char pci_buf[VIA_PCI_BUF_SIZE];
28892 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28893 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28894 index d391f48..10c8ca3 100644
28895 --- a/drivers/gpu/drm/via/via_irq.c
28896 +++ b/drivers/gpu/drm/via/via_irq.c
28897 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28898 if (crtc != 0)
28899 return 0;
28900
28901 - return atomic_read(&dev_priv->vbl_received);
28902 + return atomic_read_unchecked(&dev_priv->vbl_received);
28903 }
28904
28905 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28906 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28907
28908 status = VIA_READ(VIA_REG_INTERRUPT);
28909 if (status & VIA_IRQ_VBLANK_PENDING) {
28910 - atomic_inc(&dev_priv->vbl_received);
28911 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28912 + atomic_inc_unchecked(&dev_priv->vbl_received);
28913 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28914 do_gettimeofday(&cur_vblank);
28915 if (dev_priv->last_vblank_valid) {
28916 dev_priv->usec_per_vblank =
28917 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28918 dev_priv->last_vblank = cur_vblank;
28919 dev_priv->last_vblank_valid = 1;
28920 }
28921 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28922 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28923 DRM_DEBUG("US per vblank is: %u\n",
28924 dev_priv->usec_per_vblank);
28925 }
28926 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28927
28928 for (i = 0; i < dev_priv->num_irqs; ++i) {
28929 if (status & cur_irq->pending_mask) {
28930 - atomic_inc(&cur_irq->irq_received);
28931 + atomic_inc_unchecked(&cur_irq->irq_received);
28932 DRM_WAKEUP(&cur_irq->irq_queue);
28933 handled = 1;
28934 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28935 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28936 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28937 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28938 masks[irq][4]));
28939 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28940 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28941 } else {
28942 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28943 (((cur_irq_sequence =
28944 - atomic_read(&cur_irq->irq_received)) -
28945 + atomic_read_unchecked(&cur_irq->irq_received)) -
28946 *sequence) <= (1 << 23)));
28947 }
28948 *sequence = cur_irq_sequence;
28949 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28950 }
28951
28952 for (i = 0; i < dev_priv->num_irqs; ++i) {
28953 - atomic_set(&cur_irq->irq_received, 0);
28954 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28955 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28956 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28957 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28958 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28959 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28960 case VIA_IRQ_RELATIVE:
28961 irqwait->request.sequence +=
28962 - atomic_read(&cur_irq->irq_received);
28963 + atomic_read_unchecked(&cur_irq->irq_received);
28964 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28965 case VIA_IRQ_ABSOLUTE:
28966 break;
28967 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28968 index 10fc01f..b4e9822 100644
28969 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28970 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28971 @@ -240,7 +240,7 @@ struct vmw_private {
28972 * Fencing and IRQs.
28973 */
28974
28975 - atomic_t fence_seq;
28976 + atomic_unchecked_t fence_seq;
28977 wait_queue_head_t fence_queue;
28978 wait_queue_head_t fifo_queue;
28979 atomic_t fence_queue_waiters;
28980 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28981 index 41b95ed..69ea504 100644
28982 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28983 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28984 @@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
28985 struct drm_vmw_fence_rep fence_rep;
28986 struct drm_vmw_fence_rep __user *user_fence_rep;
28987 int ret;
28988 - void *user_cmd;
28989 + void __user *user_cmd;
28990 void *cmd;
28991 uint32_t sequence;
28992 struct vmw_sw_context *sw_context = &dev_priv->ctx;
28993 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
28994 index 61eacc1..ee38ce8 100644
28995 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
28996 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
28997 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
28998 while (!vmw_lag_lt(queue, us)) {
28999 spin_lock(&queue->lock);
29000 if (list_empty(&queue->head))
29001 - sequence = atomic_read(&dev_priv->fence_seq);
29002 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29003 else {
29004 fence = list_first_entry(&queue->head,
29005 struct vmw_fence, head);
29006 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29007 index 635c0ff..2641bbb 100644
29008 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29009 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
29010 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
29011 (unsigned int) min,
29012 (unsigned int) fifo->capabilities);
29013
29014 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29015 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
29016 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
29017 vmw_fence_queue_init(&fifo->fence_queue);
29018 return vmw_fifo_send_fence(dev_priv, &dummy);
29019 @@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
29020 if (reserveable)
29021 iowrite32(bytes, fifo_mem +
29022 SVGA_FIFO_RESERVED);
29023 - return fifo_mem + (next_cmd >> 2);
29024 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
29025 } else {
29026 need_bounce = true;
29027 }
29028 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
29029
29030 fm = vmw_fifo_reserve(dev_priv, bytes);
29031 if (unlikely(fm == NULL)) {
29032 - *sequence = atomic_read(&dev_priv->fence_seq);
29033 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
29034 ret = -ENOMEM;
29035 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
29036 false, 3*HZ);
29037 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
29038 }
29039
29040 do {
29041 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
29042 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
29043 } while (*sequence == 0);
29044
29045 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
29046 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29047 index e92298a..f68f2d6 100644
29048 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29049 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
29050 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
29051 * emitted. Then the fence is stale and signaled.
29052 */
29053
29054 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
29055 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
29056 > VMW_FENCE_WRAP);
29057
29058 return ret;
29059 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
29060
29061 if (fifo_idle)
29062 down_read(&fifo_state->rwsem);
29063 - signal_seq = atomic_read(&dev_priv->fence_seq);
29064 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
29065 ret = 0;
29066
29067 for (;;) {
29068 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
29069 index c72f1c0..18376f1 100644
29070 --- a/drivers/gpu/vga/vgaarb.c
29071 +++ b/drivers/gpu/vga/vgaarb.c
29072 @@ -993,14 +993,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
29073 uc = &priv->cards[i];
29074 }
29075
29076 - if (!uc)
29077 - return -EINVAL;
29078 + if (!uc) {
29079 + ret_val = -EINVAL;
29080 + goto done;
29081 + }
29082
29083 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
29084 - return -EINVAL;
29085 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
29086 + ret_val = -EINVAL;
29087 + goto done;
29088 + }
29089
29090 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
29091 - return -EINVAL;
29092 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
29093 + ret_val = -EINVAL;
29094 + goto done;
29095 + }
29096
29097 vga_put(pdev, io_state);
29098
29099 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
29100 index 5be9f47..aa81d42 100644
29101 --- a/drivers/hid/hid-core.c
29102 +++ b/drivers/hid/hid-core.c
29103 @@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device *hdev)
29104
29105 int hid_add_device(struct hid_device *hdev)
29106 {
29107 - static atomic_t id = ATOMIC_INIT(0);
29108 + static atomic_unchecked_t id = ATOMIC_INIT(0);
29109 int ret;
29110
29111 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29112 @@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hdev)
29113 /* XXX hack, any other cleaner solution after the driver core
29114 * is converted to allow more than 20 bytes as the device name? */
29115 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29116 - hdev->vendor, hdev->product, atomic_inc_return(&id));
29117 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29118
29119 hid_debug_register(hdev, dev_name(&hdev->dev));
29120 ret = device_add(&hdev->dev);
29121 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
29122 index 7c1188b..5a64357 100644
29123 --- a/drivers/hid/usbhid/hiddev.c
29124 +++ b/drivers/hid/usbhid/hiddev.c
29125 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
29126 break;
29127
29128 case HIDIOCAPPLICATION:
29129 - if (arg < 0 || arg >= hid->maxapplication)
29130 + if (arg >= hid->maxapplication)
29131 break;
29132
29133 for (i = 0; i < hid->maxcollection; i++)
29134 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29135 index 66f6729..2d6de0a 100644
29136 --- a/drivers/hwmon/acpi_power_meter.c
29137 +++ b/drivers/hwmon/acpi_power_meter.c
29138 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29139 return res;
29140
29141 temp /= 1000;
29142 - if (temp < 0)
29143 - return -EINVAL;
29144
29145 mutex_lock(&resource->lock);
29146 resource->trip[attr->index - 7] = temp;
29147 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29148 index fe4104c..346febb 100644
29149 --- a/drivers/hwmon/sht15.c
29150 +++ b/drivers/hwmon/sht15.c
29151 @@ -166,7 +166,7 @@ struct sht15_data {
29152 int supply_uV;
29153 bool supply_uV_valid;
29154 struct work_struct update_supply_work;
29155 - atomic_t interrupt_handled;
29156 + atomic_unchecked_t interrupt_handled;
29157 };
29158
29159 /**
29160 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29161 return ret;
29162
29163 gpio_direction_input(data->pdata->gpio_data);
29164 - atomic_set(&data->interrupt_handled, 0);
29165 + atomic_set_unchecked(&data->interrupt_handled, 0);
29166
29167 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29168 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29169 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29170 /* Only relevant if the interrupt hasn't occurred. */
29171 - if (!atomic_read(&data->interrupt_handled))
29172 + if (!atomic_read_unchecked(&data->interrupt_handled))
29173 schedule_work(&data->read_work);
29174 }
29175 ret = wait_event_timeout(data->wait_queue,
29176 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29177
29178 /* First disable the interrupt */
29179 disable_irq_nosync(irq);
29180 - atomic_inc(&data->interrupt_handled);
29181 + atomic_inc_unchecked(&data->interrupt_handled);
29182 /* Then schedule a reading work struct */
29183 if (data->state != SHT15_READING_NOTHING)
29184 schedule_work(&data->read_work);
29185 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29186 * If not, then start the interrupt again - care here as could
29187 * have gone low in meantime so verify it hasn't!
29188 */
29189 - atomic_set(&data->interrupt_handled, 0);
29190 + atomic_set_unchecked(&data->interrupt_handled, 0);
29191 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29192 /* If still not occurred or another handler has been scheduled */
29193 if (gpio_get_value(data->pdata->gpio_data)
29194 - || atomic_read(&data->interrupt_handled))
29195 + || atomic_read_unchecked(&data->interrupt_handled))
29196 return;
29197 }
29198
29199 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29200 index 378fcb5..5e91fa8 100644
29201 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29202 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29203 @@ -43,7 +43,7 @@
29204 extern struct i2c_adapter amd756_smbus;
29205
29206 static struct i2c_adapter *s4882_adapter;
29207 -static struct i2c_algorithm *s4882_algo;
29208 +static i2c_algorithm_no_const *s4882_algo;
29209
29210 /* Wrapper access functions for multiplexed SMBus */
29211 static DEFINE_MUTEX(amd756_lock);
29212 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29213 index 29015eb..af2d8e9 100644
29214 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29215 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29216 @@ -41,7 +41,7 @@
29217 extern struct i2c_adapter *nforce2_smbus;
29218
29219 static struct i2c_adapter *s4985_adapter;
29220 -static struct i2c_algorithm *s4985_algo;
29221 +static i2c_algorithm_no_const *s4985_algo;
29222
29223 /* Wrapper access functions for multiplexed SMBus */
29224 static DEFINE_MUTEX(nforce2_lock);
29225 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29226 index d7a4833..7fae376 100644
29227 --- a/drivers/i2c/i2c-mux.c
29228 +++ b/drivers/i2c/i2c-mux.c
29229 @@ -28,7 +28,7 @@
29230 /* multiplexer per channel data */
29231 struct i2c_mux_priv {
29232 struct i2c_adapter adap;
29233 - struct i2c_algorithm algo;
29234 + i2c_algorithm_no_const algo;
29235
29236 struct i2c_adapter *parent;
29237 void *mux_dev; /* the mux chip/device */
29238 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29239 index 57d00ca..0145194 100644
29240 --- a/drivers/ide/aec62xx.c
29241 +++ b/drivers/ide/aec62xx.c
29242 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29243 .cable_detect = atp86x_cable_detect,
29244 };
29245
29246 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29247 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29248 { /* 0: AEC6210 */
29249 .name = DRV_NAME,
29250 .init_chipset = init_chipset_aec62xx,
29251 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29252 index 2c8016a..911a27c 100644
29253 --- a/drivers/ide/alim15x3.c
29254 +++ b/drivers/ide/alim15x3.c
29255 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29256 .dma_sff_read_status = ide_dma_sff_read_status,
29257 };
29258
29259 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29260 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29261 .name = DRV_NAME,
29262 .init_chipset = init_chipset_ali15x3,
29263 .init_hwif = init_hwif_ali15x3,
29264 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29265 index 3747b25..56fc995 100644
29266 --- a/drivers/ide/amd74xx.c
29267 +++ b/drivers/ide/amd74xx.c
29268 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29269 .udma_mask = udma, \
29270 }
29271
29272 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29273 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29274 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29275 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29276 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29277 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29278 index 15f0ead..cb43480 100644
29279 --- a/drivers/ide/atiixp.c
29280 +++ b/drivers/ide/atiixp.c
29281 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29282 .cable_detect = atiixp_cable_detect,
29283 };
29284
29285 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29286 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29287 { /* 0: IXP200/300/400/700 */
29288 .name = DRV_NAME,
29289 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29290 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29291 index 5f80312..d1fc438 100644
29292 --- a/drivers/ide/cmd64x.c
29293 +++ b/drivers/ide/cmd64x.c
29294 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29295 .dma_sff_read_status = ide_dma_sff_read_status,
29296 };
29297
29298 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29299 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29300 { /* 0: CMD643 */
29301 .name = DRV_NAME,
29302 .init_chipset = init_chipset_cmd64x,
29303 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29304 index 2c1e5f7..1444762 100644
29305 --- a/drivers/ide/cs5520.c
29306 +++ b/drivers/ide/cs5520.c
29307 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29308 .set_dma_mode = cs5520_set_dma_mode,
29309 };
29310
29311 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29312 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29313 .name = DRV_NAME,
29314 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29315 .port_ops = &cs5520_port_ops,
29316 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29317 index 4dc4eb9..49b40ad 100644
29318 --- a/drivers/ide/cs5530.c
29319 +++ b/drivers/ide/cs5530.c
29320 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29321 .udma_filter = cs5530_udma_filter,
29322 };
29323
29324 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29325 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29326 .name = DRV_NAME,
29327 .init_chipset = init_chipset_cs5530,
29328 .init_hwif = init_hwif_cs5530,
29329 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29330 index 5059faf..18d4c85 100644
29331 --- a/drivers/ide/cs5535.c
29332 +++ b/drivers/ide/cs5535.c
29333 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29334 .cable_detect = cs5535_cable_detect,
29335 };
29336
29337 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29338 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29339 .name = DRV_NAME,
29340 .port_ops = &cs5535_port_ops,
29341 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29342 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29343 index 67cbcfa..37ea151 100644
29344 --- a/drivers/ide/cy82c693.c
29345 +++ b/drivers/ide/cy82c693.c
29346 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29347 .set_dma_mode = cy82c693_set_dma_mode,
29348 };
29349
29350 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29351 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29352 .name = DRV_NAME,
29353 .init_iops = init_iops_cy82c693,
29354 .port_ops = &cy82c693_port_ops,
29355 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29356 index 58c51cd..4aec3b8 100644
29357 --- a/drivers/ide/hpt366.c
29358 +++ b/drivers/ide/hpt366.c
29359 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29360 }
29361 };
29362
29363 -static const struct hpt_info hpt36x __devinitdata = {
29364 +static const struct hpt_info hpt36x __devinitconst = {
29365 .chip_name = "HPT36x",
29366 .chip_type = HPT36x,
29367 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29368 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29369 .timings = &hpt36x_timings
29370 };
29371
29372 -static const struct hpt_info hpt370 __devinitdata = {
29373 +static const struct hpt_info hpt370 __devinitconst = {
29374 .chip_name = "HPT370",
29375 .chip_type = HPT370,
29376 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29377 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29378 .timings = &hpt37x_timings
29379 };
29380
29381 -static const struct hpt_info hpt370a __devinitdata = {
29382 +static const struct hpt_info hpt370a __devinitconst = {
29383 .chip_name = "HPT370A",
29384 .chip_type = HPT370A,
29385 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29386 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29387 .timings = &hpt37x_timings
29388 };
29389
29390 -static const struct hpt_info hpt374 __devinitdata = {
29391 +static const struct hpt_info hpt374 __devinitconst = {
29392 .chip_name = "HPT374",
29393 .chip_type = HPT374,
29394 .udma_mask = ATA_UDMA5,
29395 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29396 .timings = &hpt37x_timings
29397 };
29398
29399 -static const struct hpt_info hpt372 __devinitdata = {
29400 +static const struct hpt_info hpt372 __devinitconst = {
29401 .chip_name = "HPT372",
29402 .chip_type = HPT372,
29403 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29404 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29405 .timings = &hpt37x_timings
29406 };
29407
29408 -static const struct hpt_info hpt372a __devinitdata = {
29409 +static const struct hpt_info hpt372a __devinitconst = {
29410 .chip_name = "HPT372A",
29411 .chip_type = HPT372A,
29412 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29413 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29414 .timings = &hpt37x_timings
29415 };
29416
29417 -static const struct hpt_info hpt302 __devinitdata = {
29418 +static const struct hpt_info hpt302 __devinitconst = {
29419 .chip_name = "HPT302",
29420 .chip_type = HPT302,
29421 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29422 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29423 .timings = &hpt37x_timings
29424 };
29425
29426 -static const struct hpt_info hpt371 __devinitdata = {
29427 +static const struct hpt_info hpt371 __devinitconst = {
29428 .chip_name = "HPT371",
29429 .chip_type = HPT371,
29430 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29431 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29432 .timings = &hpt37x_timings
29433 };
29434
29435 -static const struct hpt_info hpt372n __devinitdata = {
29436 +static const struct hpt_info hpt372n __devinitconst = {
29437 .chip_name = "HPT372N",
29438 .chip_type = HPT372N,
29439 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29440 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29441 .timings = &hpt37x_timings
29442 };
29443
29444 -static const struct hpt_info hpt302n __devinitdata = {
29445 +static const struct hpt_info hpt302n __devinitconst = {
29446 .chip_name = "HPT302N",
29447 .chip_type = HPT302N,
29448 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29449 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29450 .timings = &hpt37x_timings
29451 };
29452
29453 -static const struct hpt_info hpt371n __devinitdata = {
29454 +static const struct hpt_info hpt371n __devinitconst = {
29455 .chip_name = "HPT371N",
29456 .chip_type = HPT371N,
29457 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29458 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29459 .dma_sff_read_status = ide_dma_sff_read_status,
29460 };
29461
29462 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29463 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29464 { /* 0: HPT36x */
29465 .name = DRV_NAME,
29466 .init_chipset = init_chipset_hpt366,
29467 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29468 index 04b0956..f5b47dc 100644
29469 --- a/drivers/ide/ide-cd.c
29470 +++ b/drivers/ide/ide-cd.c
29471 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29472 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29473 if ((unsigned long)buf & alignment
29474 || blk_rq_bytes(rq) & q->dma_pad_mask
29475 - || object_is_on_stack(buf))
29476 + || object_starts_on_stack(buf))
29477 drive->dma = 0;
29478 }
29479 }
29480 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
29481 index 61fdf54..2834ea6 100644
29482 --- a/drivers/ide/ide-floppy.c
29483 +++ b/drivers/ide/ide-floppy.c
29484 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
29485 u8 pc_buf[256], header_len, desc_cnt;
29486 int i, rc = 1, blocks, length;
29487
29488 + pax_track_stack();
29489 +
29490 ide_debug_log(IDE_DBG_FUNC, "enter");
29491
29492 drive->bios_cyl = 0;
29493 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29494 index a743e68..1cfd674 100644
29495 --- a/drivers/ide/ide-pci-generic.c
29496 +++ b/drivers/ide/ide-pci-generic.c
29497 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29498 .udma_mask = ATA_UDMA6, \
29499 }
29500
29501 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29502 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29503 /* 0: Unknown */
29504 DECLARE_GENERIC_PCI_DEV(0),
29505
29506 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29507 index 560e66d..d5dd180 100644
29508 --- a/drivers/ide/it8172.c
29509 +++ b/drivers/ide/it8172.c
29510 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29511 .set_dma_mode = it8172_set_dma_mode,
29512 };
29513
29514 -static const struct ide_port_info it8172_port_info __devinitdata = {
29515 +static const struct ide_port_info it8172_port_info __devinitconst = {
29516 .name = DRV_NAME,
29517 .port_ops = &it8172_port_ops,
29518 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29519 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29520 index 46816ba..1847aeb 100644
29521 --- a/drivers/ide/it8213.c
29522 +++ b/drivers/ide/it8213.c
29523 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29524 .cable_detect = it8213_cable_detect,
29525 };
29526
29527 -static const struct ide_port_info it8213_chipset __devinitdata = {
29528 +static const struct ide_port_info it8213_chipset __devinitconst = {
29529 .name = DRV_NAME,
29530 .enablebits = { {0x41, 0x80, 0x80} },
29531 .port_ops = &it8213_port_ops,
29532 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29533 index 2e3169f..c5611db 100644
29534 --- a/drivers/ide/it821x.c
29535 +++ b/drivers/ide/it821x.c
29536 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29537 .cable_detect = it821x_cable_detect,
29538 };
29539
29540 -static const struct ide_port_info it821x_chipset __devinitdata = {
29541 +static const struct ide_port_info it821x_chipset __devinitconst = {
29542 .name = DRV_NAME,
29543 .init_chipset = init_chipset_it821x,
29544 .init_hwif = init_hwif_it821x,
29545 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29546 index 74c2c4a..efddd7d 100644
29547 --- a/drivers/ide/jmicron.c
29548 +++ b/drivers/ide/jmicron.c
29549 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29550 .cable_detect = jmicron_cable_detect,
29551 };
29552
29553 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29554 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29555 .name = DRV_NAME,
29556 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29557 .port_ops = &jmicron_port_ops,
29558 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29559 index 95327a2..73f78d8 100644
29560 --- a/drivers/ide/ns87415.c
29561 +++ b/drivers/ide/ns87415.c
29562 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29563 .dma_sff_read_status = superio_dma_sff_read_status,
29564 };
29565
29566 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29567 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29568 .name = DRV_NAME,
29569 .init_hwif = init_hwif_ns87415,
29570 .tp_ops = &ns87415_tp_ops,
29571 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29572 index 1a53a4c..39edc66 100644
29573 --- a/drivers/ide/opti621.c
29574 +++ b/drivers/ide/opti621.c
29575 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29576 .set_pio_mode = opti621_set_pio_mode,
29577 };
29578
29579 -static const struct ide_port_info opti621_chipset __devinitdata = {
29580 +static const struct ide_port_info opti621_chipset __devinitconst = {
29581 .name = DRV_NAME,
29582 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29583 .port_ops = &opti621_port_ops,
29584 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29585 index 9546fe2..2e5ceb6 100644
29586 --- a/drivers/ide/pdc202xx_new.c
29587 +++ b/drivers/ide/pdc202xx_new.c
29588 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29589 .udma_mask = udma, \
29590 }
29591
29592 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29593 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29594 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29595 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29596 };
29597 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29598 index 3a35ec6..5634510 100644
29599 --- a/drivers/ide/pdc202xx_old.c
29600 +++ b/drivers/ide/pdc202xx_old.c
29601 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29602 .max_sectors = sectors, \
29603 }
29604
29605 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29606 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29607 { /* 0: PDC20246 */
29608 .name = DRV_NAME,
29609 .init_chipset = init_chipset_pdc202xx,
29610 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29611 index b59d04c..368c2a7 100644
29612 --- a/drivers/ide/piix.c
29613 +++ b/drivers/ide/piix.c
29614 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29615 .udma_mask = udma, \
29616 }
29617
29618 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29619 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29620 /* 0: MPIIX */
29621 { /*
29622 * MPIIX actually has only a single IDE channel mapped to
29623 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29624 index a6414a8..c04173e 100644
29625 --- a/drivers/ide/rz1000.c
29626 +++ b/drivers/ide/rz1000.c
29627 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29628 }
29629 }
29630
29631 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29632 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29633 .name = DRV_NAME,
29634 .host_flags = IDE_HFLAG_NO_DMA,
29635 };
29636 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29637 index 356b9b5..d4758eb 100644
29638 --- a/drivers/ide/sc1200.c
29639 +++ b/drivers/ide/sc1200.c
29640 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29641 .dma_sff_read_status = ide_dma_sff_read_status,
29642 };
29643
29644 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29645 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29646 .name = DRV_NAME,
29647 .port_ops = &sc1200_port_ops,
29648 .dma_ops = &sc1200_dma_ops,
29649 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29650 index b7f5b0c..9701038 100644
29651 --- a/drivers/ide/scc_pata.c
29652 +++ b/drivers/ide/scc_pata.c
29653 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29654 .dma_sff_read_status = scc_dma_sff_read_status,
29655 };
29656
29657 -static const struct ide_port_info scc_chipset __devinitdata = {
29658 +static const struct ide_port_info scc_chipset __devinitconst = {
29659 .name = "sccIDE",
29660 .init_iops = init_iops_scc,
29661 .init_dma = scc_init_dma,
29662 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29663 index 35fb8da..24d72ef 100644
29664 --- a/drivers/ide/serverworks.c
29665 +++ b/drivers/ide/serverworks.c
29666 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29667 .cable_detect = svwks_cable_detect,
29668 };
29669
29670 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29671 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29672 { /* 0: OSB4 */
29673 .name = DRV_NAME,
29674 .init_chipset = init_chipset_svwks,
29675 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
29676 index ab3db61..afed580 100644
29677 --- a/drivers/ide/setup-pci.c
29678 +++ b/drivers/ide/setup-pci.c
29679 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
29680 int ret, i, n_ports = dev2 ? 4 : 2;
29681 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29682
29683 + pax_track_stack();
29684 +
29685 for (i = 0; i < n_ports / 2; i++) {
29686 ret = ide_setup_pci_controller(pdev[i], d, !i);
29687 if (ret < 0)
29688 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29689 index ddeda44..46f7e30 100644
29690 --- a/drivers/ide/siimage.c
29691 +++ b/drivers/ide/siimage.c
29692 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29693 .udma_mask = ATA_UDMA6, \
29694 }
29695
29696 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29697 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29698 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29699 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29700 };
29701 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29702 index 4a00225..09e61b4 100644
29703 --- a/drivers/ide/sis5513.c
29704 +++ b/drivers/ide/sis5513.c
29705 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29706 .cable_detect = sis_cable_detect,
29707 };
29708
29709 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29710 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29711 .name = DRV_NAME,
29712 .init_chipset = init_chipset_sis5513,
29713 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29714 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29715 index f21dc2a..d051cd2 100644
29716 --- a/drivers/ide/sl82c105.c
29717 +++ b/drivers/ide/sl82c105.c
29718 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29719 .dma_sff_read_status = ide_dma_sff_read_status,
29720 };
29721
29722 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29723 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29724 .name = DRV_NAME,
29725 .init_chipset = init_chipset_sl82c105,
29726 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29727 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29728 index 864ffe0..863a5e9 100644
29729 --- a/drivers/ide/slc90e66.c
29730 +++ b/drivers/ide/slc90e66.c
29731 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29732 .cable_detect = slc90e66_cable_detect,
29733 };
29734
29735 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29736 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29737 .name = DRV_NAME,
29738 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29739 .port_ops = &slc90e66_port_ops,
29740 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29741 index e444d24..ba577de 100644
29742 --- a/drivers/ide/tc86c001.c
29743 +++ b/drivers/ide/tc86c001.c
29744 @@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29745 .dma_sff_read_status = ide_dma_sff_read_status,
29746 };
29747
29748 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29749 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29750 .name = DRV_NAME,
29751 .init_hwif = init_hwif_tc86c001,
29752 .port_ops = &tc86c001_port_ops,
29753 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29754 index e53a1b7..d11aff7 100644
29755 --- a/drivers/ide/triflex.c
29756 +++ b/drivers/ide/triflex.c
29757 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29758 .set_dma_mode = triflex_set_mode,
29759 };
29760
29761 -static const struct ide_port_info triflex_device __devinitdata = {
29762 +static const struct ide_port_info triflex_device __devinitconst = {
29763 .name = DRV_NAME,
29764 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29765 .port_ops = &triflex_port_ops,
29766 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29767 index 4b42ca0..e494a98 100644
29768 --- a/drivers/ide/trm290.c
29769 +++ b/drivers/ide/trm290.c
29770 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29771 .dma_check = trm290_dma_check,
29772 };
29773
29774 -static const struct ide_port_info trm290_chipset __devinitdata = {
29775 +static const struct ide_port_info trm290_chipset __devinitconst = {
29776 .name = DRV_NAME,
29777 .init_hwif = init_hwif_trm290,
29778 .tp_ops = &trm290_tp_ops,
29779 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29780 index f46f49c..eb77678 100644
29781 --- a/drivers/ide/via82cxxx.c
29782 +++ b/drivers/ide/via82cxxx.c
29783 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29784 .cable_detect = via82cxxx_cable_detect,
29785 };
29786
29787 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29788 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29789 .name = DRV_NAME,
29790 .init_chipset = init_chipset_via82cxxx,
29791 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29792 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29793 index fc0f2bd..ac2f8a5 100644
29794 --- a/drivers/infiniband/core/cm.c
29795 +++ b/drivers/infiniband/core/cm.c
29796 @@ -113,7 +113,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29797
29798 struct cm_counter_group {
29799 struct kobject obj;
29800 - atomic_long_t counter[CM_ATTR_COUNT];
29801 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29802 };
29803
29804 struct cm_counter_attribute {
29805 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29806 struct ib_mad_send_buf *msg = NULL;
29807 int ret;
29808
29809 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29810 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29811 counter[CM_REQ_COUNTER]);
29812
29813 /* Quick state check to discard duplicate REQs. */
29814 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29815 if (!cm_id_priv)
29816 return;
29817
29818 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29819 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29820 counter[CM_REP_COUNTER]);
29821 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29822 if (ret)
29823 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work *work)
29824 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29825 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29826 spin_unlock_irq(&cm_id_priv->lock);
29827 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29828 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29829 counter[CM_RTU_COUNTER]);
29830 goto out;
29831 }
29832 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_work *work)
29833 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29834 dreq_msg->local_comm_id);
29835 if (!cm_id_priv) {
29836 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29837 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29838 counter[CM_DREQ_COUNTER]);
29839 cm_issue_drep(work->port, work->mad_recv_wc);
29840 return -EINVAL;
29841 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_work *work)
29842 case IB_CM_MRA_REP_RCVD:
29843 break;
29844 case IB_CM_TIMEWAIT:
29845 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29846 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29847 counter[CM_DREQ_COUNTER]);
29848 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29849 goto unlock;
29850 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
29851 cm_free_msg(msg);
29852 goto deref;
29853 case IB_CM_DREQ_RCVD:
29854 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29855 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29856 counter[CM_DREQ_COUNTER]);
29857 goto unlock;
29858 default:
29859 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work *work)
29860 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29861 cm_id_priv->msg, timeout)) {
29862 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29863 - atomic_long_inc(&work->port->
29864 + atomic_long_inc_unchecked(&work->port->
29865 counter_group[CM_RECV_DUPLICATES].
29866 counter[CM_MRA_COUNTER]);
29867 goto out;
29868 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work *work)
29869 break;
29870 case IB_CM_MRA_REQ_RCVD:
29871 case IB_CM_MRA_REP_RCVD:
29872 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29873 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29874 counter[CM_MRA_COUNTER]);
29875 /* fall through */
29876 default:
29877 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work *work)
29878 case IB_CM_LAP_IDLE:
29879 break;
29880 case IB_CM_MRA_LAP_SENT:
29881 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29882 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29883 counter[CM_LAP_COUNTER]);
29884 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29885 goto unlock;
29886 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work *work)
29887 cm_free_msg(msg);
29888 goto deref;
29889 case IB_CM_LAP_RCVD:
29890 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29891 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29892 counter[CM_LAP_COUNTER]);
29893 goto unlock;
29894 default:
29895 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29896 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29897 if (cur_cm_id_priv) {
29898 spin_unlock_irq(&cm.lock);
29899 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29900 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29901 counter[CM_SIDR_REQ_COUNTER]);
29902 goto out; /* Duplicate message. */
29903 }
29904 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29905 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29906 msg->retries = 1;
29907
29908 - atomic_long_add(1 + msg->retries,
29909 + atomic_long_add_unchecked(1 + msg->retries,
29910 &port->counter_group[CM_XMIT].counter[attr_index]);
29911 if (msg->retries)
29912 - atomic_long_add(msg->retries,
29913 + atomic_long_add_unchecked(msg->retries,
29914 &port->counter_group[CM_XMIT_RETRIES].
29915 counter[attr_index]);
29916
29917 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29918 }
29919
29920 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29921 - atomic_long_inc(&port->counter_group[CM_RECV].
29922 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29923 counter[attr_id - CM_ATTR_ID_OFFSET]);
29924
29925 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29926 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29927 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29928
29929 return sprintf(buf, "%ld\n",
29930 - atomic_long_read(&group->counter[cm_attr->index]));
29931 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29932 }
29933
29934 static const struct sysfs_ops cm_counter_ops = {
29935 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29936 index 4507043..14ad522 100644
29937 --- a/drivers/infiniband/core/fmr_pool.c
29938 +++ b/drivers/infiniband/core/fmr_pool.c
29939 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29940
29941 struct task_struct *thread;
29942
29943 - atomic_t req_ser;
29944 - atomic_t flush_ser;
29945 + atomic_unchecked_t req_ser;
29946 + atomic_unchecked_t flush_ser;
29947
29948 wait_queue_head_t force_wait;
29949 };
29950 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29951 struct ib_fmr_pool *pool = pool_ptr;
29952
29953 do {
29954 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29955 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29956 ib_fmr_batch_release(pool);
29957
29958 - atomic_inc(&pool->flush_ser);
29959 + atomic_inc_unchecked(&pool->flush_ser);
29960 wake_up_interruptible(&pool->force_wait);
29961
29962 if (pool->flush_function)
29963 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29964 }
29965
29966 set_current_state(TASK_INTERRUPTIBLE);
29967 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29968 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29969 !kthread_should_stop())
29970 schedule();
29971 __set_current_state(TASK_RUNNING);
29972 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29973 pool->dirty_watermark = params->dirty_watermark;
29974 pool->dirty_len = 0;
29975 spin_lock_init(&pool->pool_lock);
29976 - atomic_set(&pool->req_ser, 0);
29977 - atomic_set(&pool->flush_ser, 0);
29978 + atomic_set_unchecked(&pool->req_ser, 0);
29979 + atomic_set_unchecked(&pool->flush_ser, 0);
29980 init_waitqueue_head(&pool->force_wait);
29981
29982 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29983 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29984 }
29985 spin_unlock_irq(&pool->pool_lock);
29986
29987 - serial = atomic_inc_return(&pool->req_ser);
29988 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29989 wake_up_process(pool->thread);
29990
29991 if (wait_event_interruptible(pool->force_wait,
29992 - atomic_read(&pool->flush_ser) - serial >= 0))
29993 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29994 return -EINTR;
29995
29996 return 0;
29997 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
29998 } else {
29999 list_add_tail(&fmr->list, &pool->dirty_list);
30000 if (++pool->dirty_len >= pool->dirty_watermark) {
30001 - atomic_inc(&pool->req_ser);
30002 + atomic_inc_unchecked(&pool->req_ser);
30003 wake_up_process(pool->thread);
30004 }
30005 }
30006 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
30007 index 40c8353..946b0e4 100644
30008 --- a/drivers/infiniband/hw/cxgb4/mem.c
30009 +++ b/drivers/infiniband/hw/cxgb4/mem.c
30010 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30011 int err;
30012 struct fw_ri_tpte tpt;
30013 u32 stag_idx;
30014 - static atomic_t key;
30015 + static atomic_unchecked_t key;
30016
30017 if (c4iw_fatal_error(rdev))
30018 return -EIO;
30019 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
30020 &rdev->resource.tpt_fifo_lock);
30021 if (!stag_idx)
30022 return -ENOMEM;
30023 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
30024 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
30025 }
30026 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
30027 __func__, stag_state, type, pdid, stag_idx);
30028 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
30029 index 31ae1b1..2f5b038 100644
30030 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
30031 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
30032 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
30033 struct infinipath_counters counters;
30034 struct ipath_devdata *dd;
30035
30036 + pax_track_stack();
30037 +
30038 dd = file->f_path.dentry->d_inode->i_private;
30039 dd->ipath_f_read_counters(dd, &counters);
30040
30041 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
30042 index 79b3dbc..96e5fcc 100644
30043 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
30044 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
30045 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30046 struct ib_atomic_eth *ateth;
30047 struct ipath_ack_entry *e;
30048 u64 vaddr;
30049 - atomic64_t *maddr;
30050 + atomic64_unchecked_t *maddr;
30051 u64 sdata;
30052 u32 rkey;
30053 u8 next;
30054 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
30055 IB_ACCESS_REMOTE_ATOMIC)))
30056 goto nack_acc_unlck;
30057 /* Perform atomic OP and save result. */
30058 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30059 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30060 sdata = be64_to_cpu(ateth->swap_data);
30061 e = &qp->s_ack_queue[qp->r_head_ack_queue];
30062 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
30063 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30064 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30065 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30066 be64_to_cpu(ateth->compare_data),
30067 sdata);
30068 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
30069 index 1f95bba..9530f87 100644
30070 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
30071 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
30072 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
30073 unsigned long flags;
30074 struct ib_wc wc;
30075 u64 sdata;
30076 - atomic64_t *maddr;
30077 + atomic64_unchecked_t *maddr;
30078 enum ib_wc_status send_status;
30079
30080 /*
30081 @@ -382,11 +382,11 @@ again:
30082 IB_ACCESS_REMOTE_ATOMIC)))
30083 goto acc_err;
30084 /* Perform atomic OP and save result. */
30085 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
30086 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
30087 sdata = wqe->wr.wr.atomic.compare_add;
30088 *(u64 *) sqp->s_sge.sge.vaddr =
30089 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
30090 - (u64) atomic64_add_return(sdata, maddr) - sdata :
30091 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
30092 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
30093 sdata, wqe->wr.wr.atomic.swap);
30094 goto send_comp;
30095 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
30096 index 2d668c6..3312bb7 100644
30097 --- a/drivers/infiniband/hw/nes/nes.c
30098 +++ b/drivers/infiniband/hw/nes/nes.c
30099 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
30100 LIST_HEAD(nes_adapter_list);
30101 static LIST_HEAD(nes_dev_list);
30102
30103 -atomic_t qps_destroyed;
30104 +atomic_unchecked_t qps_destroyed;
30105
30106 static unsigned int ee_flsh_adapter;
30107 static unsigned int sysfs_nonidx_addr;
30108 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
30109 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
30110 struct nes_adapter *nesadapter = nesdev->nesadapter;
30111
30112 - atomic_inc(&qps_destroyed);
30113 + atomic_inc_unchecked(&qps_destroyed);
30114
30115 /* Free the control structures */
30116
30117 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
30118 index 6fe7987..68637b5 100644
30119 --- a/drivers/infiniband/hw/nes/nes.h
30120 +++ b/drivers/infiniband/hw/nes/nes.h
30121 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
30122 extern unsigned int wqm_quanta;
30123 extern struct list_head nes_adapter_list;
30124
30125 -extern atomic_t cm_connects;
30126 -extern atomic_t cm_accepts;
30127 -extern atomic_t cm_disconnects;
30128 -extern atomic_t cm_closes;
30129 -extern atomic_t cm_connecteds;
30130 -extern atomic_t cm_connect_reqs;
30131 -extern atomic_t cm_rejects;
30132 -extern atomic_t mod_qp_timouts;
30133 -extern atomic_t qps_created;
30134 -extern atomic_t qps_destroyed;
30135 -extern atomic_t sw_qps_destroyed;
30136 +extern atomic_unchecked_t cm_connects;
30137 +extern atomic_unchecked_t cm_accepts;
30138 +extern atomic_unchecked_t cm_disconnects;
30139 +extern atomic_unchecked_t cm_closes;
30140 +extern atomic_unchecked_t cm_connecteds;
30141 +extern atomic_unchecked_t cm_connect_reqs;
30142 +extern atomic_unchecked_t cm_rejects;
30143 +extern atomic_unchecked_t mod_qp_timouts;
30144 +extern atomic_unchecked_t qps_created;
30145 +extern atomic_unchecked_t qps_destroyed;
30146 +extern atomic_unchecked_t sw_qps_destroyed;
30147 extern u32 mh_detected;
30148 extern u32 mh_pauses_sent;
30149 extern u32 cm_packets_sent;
30150 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
30151 extern u32 cm_packets_received;
30152 extern u32 cm_packets_dropped;
30153 extern u32 cm_packets_retrans;
30154 -extern atomic_t cm_listens_created;
30155 -extern atomic_t cm_listens_destroyed;
30156 +extern atomic_unchecked_t cm_listens_created;
30157 +extern atomic_unchecked_t cm_listens_destroyed;
30158 extern u32 cm_backlog_drops;
30159 -extern atomic_t cm_loopbacks;
30160 -extern atomic_t cm_nodes_created;
30161 -extern atomic_t cm_nodes_destroyed;
30162 -extern atomic_t cm_accel_dropped_pkts;
30163 -extern atomic_t cm_resets_recvd;
30164 +extern atomic_unchecked_t cm_loopbacks;
30165 +extern atomic_unchecked_t cm_nodes_created;
30166 +extern atomic_unchecked_t cm_nodes_destroyed;
30167 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30168 +extern atomic_unchecked_t cm_resets_recvd;
30169
30170 extern u32 int_mod_timer_init;
30171 extern u32 int_mod_cq_depth_256;
30172 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30173 index c118663..049a3ab 100644
30174 --- a/drivers/infiniband/hw/nes/nes_cm.c
30175 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30176 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30177 u32 cm_packets_retrans;
30178 u32 cm_packets_created;
30179 u32 cm_packets_received;
30180 -atomic_t cm_listens_created;
30181 -atomic_t cm_listens_destroyed;
30182 +atomic_unchecked_t cm_listens_created;
30183 +atomic_unchecked_t cm_listens_destroyed;
30184 u32 cm_backlog_drops;
30185 -atomic_t cm_loopbacks;
30186 -atomic_t cm_nodes_created;
30187 -atomic_t cm_nodes_destroyed;
30188 -atomic_t cm_accel_dropped_pkts;
30189 -atomic_t cm_resets_recvd;
30190 +atomic_unchecked_t cm_loopbacks;
30191 +atomic_unchecked_t cm_nodes_created;
30192 +atomic_unchecked_t cm_nodes_destroyed;
30193 +atomic_unchecked_t cm_accel_dropped_pkts;
30194 +atomic_unchecked_t cm_resets_recvd;
30195
30196 static inline int mini_cm_accelerated(struct nes_cm_core *,
30197 struct nes_cm_node *);
30198 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
30199
30200 static struct nes_cm_core *g_cm_core;
30201
30202 -atomic_t cm_connects;
30203 -atomic_t cm_accepts;
30204 -atomic_t cm_disconnects;
30205 -atomic_t cm_closes;
30206 -atomic_t cm_connecteds;
30207 -atomic_t cm_connect_reqs;
30208 -atomic_t cm_rejects;
30209 +atomic_unchecked_t cm_connects;
30210 +atomic_unchecked_t cm_accepts;
30211 +atomic_unchecked_t cm_disconnects;
30212 +atomic_unchecked_t cm_closes;
30213 +atomic_unchecked_t cm_connecteds;
30214 +atomic_unchecked_t cm_connect_reqs;
30215 +atomic_unchecked_t cm_rejects;
30216
30217
30218 /**
30219 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30220 kfree(listener);
30221 listener = NULL;
30222 ret = 0;
30223 - atomic_inc(&cm_listens_destroyed);
30224 + atomic_inc_unchecked(&cm_listens_destroyed);
30225 } else {
30226 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30227 }
30228 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30229 cm_node->rem_mac);
30230
30231 add_hte_node(cm_core, cm_node);
30232 - atomic_inc(&cm_nodes_created);
30233 + atomic_inc_unchecked(&cm_nodes_created);
30234
30235 return cm_node;
30236 }
30237 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30238 }
30239
30240 atomic_dec(&cm_core->node_cnt);
30241 - atomic_inc(&cm_nodes_destroyed);
30242 + atomic_inc_unchecked(&cm_nodes_destroyed);
30243 nesqp = cm_node->nesqp;
30244 if (nesqp) {
30245 nesqp->cm_node = NULL;
30246 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30247
30248 static void drop_packet(struct sk_buff *skb)
30249 {
30250 - atomic_inc(&cm_accel_dropped_pkts);
30251 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30252 dev_kfree_skb_any(skb);
30253 }
30254
30255 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30256 {
30257
30258 int reset = 0; /* whether to send reset in case of err.. */
30259 - atomic_inc(&cm_resets_recvd);
30260 + atomic_inc_unchecked(&cm_resets_recvd);
30261 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30262 " refcnt=%d\n", cm_node, cm_node->state,
30263 atomic_read(&cm_node->ref_count));
30264 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30265 rem_ref_cm_node(cm_node->cm_core, cm_node);
30266 return NULL;
30267 }
30268 - atomic_inc(&cm_loopbacks);
30269 + atomic_inc_unchecked(&cm_loopbacks);
30270 loopbackremotenode->loopbackpartner = cm_node;
30271 loopbackremotenode->tcp_cntxt.rcv_wscale =
30272 NES_CM_DEFAULT_RCV_WND_SCALE;
30273 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30274 add_ref_cm_node(cm_node);
30275 } else if (cm_node->state == NES_CM_STATE_TSA) {
30276 rem_ref_cm_node(cm_core, cm_node);
30277 - atomic_inc(&cm_accel_dropped_pkts);
30278 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30279 dev_kfree_skb_any(skb);
30280 break;
30281 }
30282 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30283
30284 if ((cm_id) && (cm_id->event_handler)) {
30285 if (issue_disconn) {
30286 - atomic_inc(&cm_disconnects);
30287 + atomic_inc_unchecked(&cm_disconnects);
30288 cm_event.event = IW_CM_EVENT_DISCONNECT;
30289 cm_event.status = disconn_status;
30290 cm_event.local_addr = cm_id->local_addr;
30291 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30292 }
30293
30294 if (issue_close) {
30295 - atomic_inc(&cm_closes);
30296 + atomic_inc_unchecked(&cm_closes);
30297 nes_disconnect(nesqp, 1);
30298
30299 cm_id->provider_data = nesqp;
30300 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30301
30302 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30303 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30304 - atomic_inc(&cm_accepts);
30305 + atomic_inc_unchecked(&cm_accepts);
30306
30307 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30308 netdev_refcnt_read(nesvnic->netdev));
30309 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30310
30311 struct nes_cm_core *cm_core;
30312
30313 - atomic_inc(&cm_rejects);
30314 + atomic_inc_unchecked(&cm_rejects);
30315 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30316 loopback = cm_node->loopbackpartner;
30317 cm_core = cm_node->cm_core;
30318 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30319 ntohl(cm_id->local_addr.sin_addr.s_addr),
30320 ntohs(cm_id->local_addr.sin_port));
30321
30322 - atomic_inc(&cm_connects);
30323 + atomic_inc_unchecked(&cm_connects);
30324 nesqp->active_conn = 1;
30325
30326 /* cache the cm_id in the qp */
30327 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30328 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30329 return err;
30330 }
30331 - atomic_inc(&cm_listens_created);
30332 + atomic_inc_unchecked(&cm_listens_created);
30333 }
30334
30335 cm_id->add_ref(cm_id);
30336 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30337 if (nesqp->destroyed) {
30338 return;
30339 }
30340 - atomic_inc(&cm_connecteds);
30341 + atomic_inc_unchecked(&cm_connecteds);
30342 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30343 " local port 0x%04X. jiffies = %lu.\n",
30344 nesqp->hwqp.qp_id,
30345 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30346
30347 cm_id->add_ref(cm_id);
30348 ret = cm_id->event_handler(cm_id, &cm_event);
30349 - atomic_inc(&cm_closes);
30350 + atomic_inc_unchecked(&cm_closes);
30351 cm_event.event = IW_CM_EVENT_CLOSE;
30352 cm_event.status = 0;
30353 cm_event.provider_data = cm_id->provider_data;
30354 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30355 return;
30356 cm_id = cm_node->cm_id;
30357
30358 - atomic_inc(&cm_connect_reqs);
30359 + atomic_inc_unchecked(&cm_connect_reqs);
30360 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30361 cm_node, cm_id, jiffies);
30362
30363 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30364 return;
30365 cm_id = cm_node->cm_id;
30366
30367 - atomic_inc(&cm_connect_reqs);
30368 + atomic_inc_unchecked(&cm_connect_reqs);
30369 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30370 cm_node, cm_id, jiffies);
30371
30372 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30373 index 9d7ffeb..a95dd7d 100644
30374 --- a/drivers/infiniband/hw/nes/nes_nic.c
30375 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30376 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30377 target_stat_values[++index] = mh_detected;
30378 target_stat_values[++index] = mh_pauses_sent;
30379 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30380 - target_stat_values[++index] = atomic_read(&cm_connects);
30381 - target_stat_values[++index] = atomic_read(&cm_accepts);
30382 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30383 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30384 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30385 - target_stat_values[++index] = atomic_read(&cm_rejects);
30386 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30387 - target_stat_values[++index] = atomic_read(&qps_created);
30388 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30389 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30390 - target_stat_values[++index] = atomic_read(&cm_closes);
30391 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30392 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30393 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30394 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30395 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30396 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30397 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30398 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30399 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30400 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30401 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30402 target_stat_values[++index] = cm_packets_sent;
30403 target_stat_values[++index] = cm_packets_bounced;
30404 target_stat_values[++index] = cm_packets_created;
30405 target_stat_values[++index] = cm_packets_received;
30406 target_stat_values[++index] = cm_packets_dropped;
30407 target_stat_values[++index] = cm_packets_retrans;
30408 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30409 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30410 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30411 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30412 target_stat_values[++index] = cm_backlog_drops;
30413 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30414 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30415 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30416 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30417 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30418 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30419 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30420 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30421 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30422 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30423 target_stat_values[++index] = nesadapter->free_4kpbl;
30424 target_stat_values[++index] = nesadapter->free_256pbl;
30425 target_stat_values[++index] = int_mod_timer_init;
30426 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30427 index 9f2f7d4..6d2fee2 100644
30428 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30429 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30430 @@ -46,9 +46,9 @@
30431
30432 #include <rdma/ib_umem.h>
30433
30434 -atomic_t mod_qp_timouts;
30435 -atomic_t qps_created;
30436 -atomic_t sw_qps_destroyed;
30437 +atomic_unchecked_t mod_qp_timouts;
30438 +atomic_unchecked_t qps_created;
30439 +atomic_unchecked_t sw_qps_destroyed;
30440
30441 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30442
30443 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30444 if (init_attr->create_flags)
30445 return ERR_PTR(-EINVAL);
30446
30447 - atomic_inc(&qps_created);
30448 + atomic_inc_unchecked(&qps_created);
30449 switch (init_attr->qp_type) {
30450 case IB_QPT_RC:
30451 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30452 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30453 struct iw_cm_event cm_event;
30454 int ret;
30455
30456 - atomic_inc(&sw_qps_destroyed);
30457 + atomic_inc_unchecked(&sw_qps_destroyed);
30458 nesqp->destroyed = 1;
30459
30460 /* Blow away the connection if it exists. */
30461 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30462 index c9624ea..e025b66 100644
30463 --- a/drivers/infiniband/hw/qib/qib.h
30464 +++ b/drivers/infiniband/hw/qib/qib.h
30465 @@ -51,6 +51,7 @@
30466 #include <linux/completion.h>
30467 #include <linux/kref.h>
30468 #include <linux/sched.h>
30469 +#include <linux/slab.h>
30470
30471 #include "qib_common.h"
30472 #include "qib_verbs.h"
30473 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30474 index c351aa4..e6967c2 100644
30475 --- a/drivers/input/gameport/gameport.c
30476 +++ b/drivers/input/gameport/gameport.c
30477 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30478 */
30479 static void gameport_init_port(struct gameport *gameport)
30480 {
30481 - static atomic_t gameport_no = ATOMIC_INIT(0);
30482 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30483
30484 __module_get(THIS_MODULE);
30485
30486 mutex_init(&gameport->drv_mutex);
30487 device_initialize(&gameport->dev);
30488 dev_set_name(&gameport->dev, "gameport%lu",
30489 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30490 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30491 gameport->dev.bus = &gameport_bus;
30492 gameport->dev.release = gameport_release_port;
30493 if (gameport->parent)
30494 diff --git a/drivers/input/input.c b/drivers/input/input.c
30495 index da38d97..2aa0b79 100644
30496 --- a/drivers/input/input.c
30497 +++ b/drivers/input/input.c
30498 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30499 */
30500 int input_register_device(struct input_dev *dev)
30501 {
30502 - static atomic_t input_no = ATOMIC_INIT(0);
30503 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30504 struct input_handler *handler;
30505 const char *path;
30506 int error;
30507 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30508 dev->setkeycode = input_default_setkeycode;
30509
30510 dev_set_name(&dev->dev, "input%ld",
30511 - (unsigned long) atomic_inc_return(&input_no) - 1);
30512 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30513
30514 error = device_add(&dev->dev);
30515 if (error)
30516 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30517 index b8d8611..15f8d2c 100644
30518 --- a/drivers/input/joystick/sidewinder.c
30519 +++ b/drivers/input/joystick/sidewinder.c
30520 @@ -30,6 +30,7 @@
30521 #include <linux/kernel.h>
30522 #include <linux/module.h>
30523 #include <linux/slab.h>
30524 +#include <linux/sched.h>
30525 #include <linux/init.h>
30526 #include <linux/input.h>
30527 #include <linux/gameport.h>
30528 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30529 unsigned char buf[SW_LENGTH];
30530 int i;
30531
30532 + pax_track_stack();
30533 +
30534 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30535
30536 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30537 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30538 index d728875..844c89b 100644
30539 --- a/drivers/input/joystick/xpad.c
30540 +++ b/drivers/input/joystick/xpad.c
30541 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30542
30543 static int xpad_led_probe(struct usb_xpad *xpad)
30544 {
30545 - static atomic_t led_seq = ATOMIC_INIT(0);
30546 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30547 long led_no;
30548 struct xpad_led *led;
30549 struct led_classdev *led_cdev;
30550 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30551 if (!led)
30552 return -ENOMEM;
30553
30554 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30555 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30556
30557 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30558 led->xpad = xpad;
30559 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30560 index 0110b5a..d3ad144 100644
30561 --- a/drivers/input/mousedev.c
30562 +++ b/drivers/input/mousedev.c
30563 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30564
30565 spin_unlock_irq(&client->packet_lock);
30566
30567 - if (copy_to_user(buffer, data, count))
30568 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30569 return -EFAULT;
30570
30571 return count;
30572 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30573 index ba70058..571d25d 100644
30574 --- a/drivers/input/serio/serio.c
30575 +++ b/drivers/input/serio/serio.c
30576 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30577 */
30578 static void serio_init_port(struct serio *serio)
30579 {
30580 - static atomic_t serio_no = ATOMIC_INIT(0);
30581 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30582
30583 __module_get(THIS_MODULE);
30584
30585 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30586 mutex_init(&serio->drv_mutex);
30587 device_initialize(&serio->dev);
30588 dev_set_name(&serio->dev, "serio%ld",
30589 - (long)atomic_inc_return(&serio_no) - 1);
30590 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30591 serio->dev.bus = &serio_bus;
30592 serio->dev.release = serio_release_port;
30593 serio->dev.groups = serio_device_attr_groups;
30594 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30595 index e44933d..9ba484a 100644
30596 --- a/drivers/isdn/capi/capi.c
30597 +++ b/drivers/isdn/capi/capi.c
30598 @@ -83,8 +83,8 @@ struct capiminor {
30599
30600 struct capi20_appl *ap;
30601 u32 ncci;
30602 - atomic_t datahandle;
30603 - atomic_t msgid;
30604 + atomic_unchecked_t datahandle;
30605 + atomic_unchecked_t msgid;
30606
30607 struct tty_port port;
30608 int ttyinstop;
30609 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30610 capimsg_setu16(s, 2, mp->ap->applid);
30611 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30612 capimsg_setu8 (s, 5, CAPI_RESP);
30613 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30614 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30615 capimsg_setu32(s, 8, mp->ncci);
30616 capimsg_setu16(s, 12, datahandle);
30617 }
30618 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30619 mp->outbytes -= len;
30620 spin_unlock_bh(&mp->outlock);
30621
30622 - datahandle = atomic_inc_return(&mp->datahandle);
30623 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30624 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30625 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30626 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30627 capimsg_setu16(skb->data, 2, mp->ap->applid);
30628 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30629 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30630 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30631 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30632 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30633 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30634 capimsg_setu16(skb->data, 16, len); /* Data length */
30635 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30636 index db621db..825ea1a 100644
30637 --- a/drivers/isdn/gigaset/common.c
30638 +++ b/drivers/isdn/gigaset/common.c
30639 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30640 cs->commands_pending = 0;
30641 cs->cur_at_seq = 0;
30642 cs->gotfwver = -1;
30643 - cs->open_count = 0;
30644 + local_set(&cs->open_count, 0);
30645 cs->dev = NULL;
30646 cs->tty = NULL;
30647 cs->tty_dev = NULL;
30648 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30649 index 212efaf..f187c6b 100644
30650 --- a/drivers/isdn/gigaset/gigaset.h
30651 +++ b/drivers/isdn/gigaset/gigaset.h
30652 @@ -35,6 +35,7 @@
30653 #include <linux/tty_driver.h>
30654 #include <linux/list.h>
30655 #include <linux/atomic.h>
30656 +#include <asm/local.h>
30657
30658 #define GIG_VERSION {0, 5, 0, 0}
30659 #define GIG_COMPAT {0, 4, 0, 0}
30660 @@ -433,7 +434,7 @@ struct cardstate {
30661 spinlock_t cmdlock;
30662 unsigned curlen, cmdbytes;
30663
30664 - unsigned open_count;
30665 + local_t open_count;
30666 struct tty_struct *tty;
30667 struct tasklet_struct if_wake_tasklet;
30668 unsigned control_state;
30669 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30670 index e35058b..5898a8b 100644
30671 --- a/drivers/isdn/gigaset/interface.c
30672 +++ b/drivers/isdn/gigaset/interface.c
30673 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30674 }
30675 tty->driver_data = cs;
30676
30677 - ++cs->open_count;
30678 -
30679 - if (cs->open_count == 1) {
30680 + if (local_inc_return(&cs->open_count) == 1) {
30681 spin_lock_irqsave(&cs->lock, flags);
30682 cs->tty = tty;
30683 spin_unlock_irqrestore(&cs->lock, flags);
30684 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30685
30686 if (!cs->connected)
30687 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30688 - else if (!cs->open_count)
30689 + else if (!local_read(&cs->open_count))
30690 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30691 else {
30692 - if (!--cs->open_count) {
30693 + if (!local_dec_return(&cs->open_count)) {
30694 spin_lock_irqsave(&cs->lock, flags);
30695 cs->tty = NULL;
30696 spin_unlock_irqrestore(&cs->lock, flags);
30697 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty,
30698 if (!cs->connected) {
30699 gig_dbg(DEBUG_IF, "not connected");
30700 retval = -ENODEV;
30701 - } else if (!cs->open_count)
30702 + } else if (!local_read(&cs->open_count))
30703 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30704 else {
30705 retval = 0;
30706 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30707 retval = -ENODEV;
30708 goto done;
30709 }
30710 - if (!cs->open_count) {
30711 + if (!local_read(&cs->open_count)) {
30712 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30713 retval = -ENODEV;
30714 goto done;
30715 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_struct *tty)
30716 if (!cs->connected) {
30717 gig_dbg(DEBUG_IF, "not connected");
30718 retval = -ENODEV;
30719 - } else if (!cs->open_count)
30720 + } else if (!local_read(&cs->open_count))
30721 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30722 else if (cs->mstate != MS_LOCKED) {
30723 dev_warn(cs->dev, "can't write to unlocked device\n");
30724 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30725
30726 if (!cs->connected)
30727 gig_dbg(DEBUG_IF, "not connected");
30728 - else if (!cs->open_count)
30729 + else if (!local_read(&cs->open_count))
30730 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30731 else if (cs->mstate != MS_LOCKED)
30732 dev_warn(cs->dev, "can't write to unlocked device\n");
30733 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struct *tty)
30734
30735 if (!cs->connected)
30736 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30737 - else if (!cs->open_count)
30738 + else if (!local_read(&cs->open_count))
30739 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30740 else
30741 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30742 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_struct *tty)
30743
30744 if (!cs->connected)
30745 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30746 - else if (!cs->open_count)
30747 + else if (!local_read(&cs->open_count))
30748 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30749 else
30750 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30751 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30752 goto out;
30753 }
30754
30755 - if (!cs->open_count) {
30756 + if (!local_read(&cs->open_count)) {
30757 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30758 goto out;
30759 }
30760 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30761 index 2a57da59..e7a12ed 100644
30762 --- a/drivers/isdn/hardware/avm/b1.c
30763 +++ b/drivers/isdn/hardware/avm/b1.c
30764 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30765 }
30766 if (left) {
30767 if (t4file->user) {
30768 - if (copy_from_user(buf, dp, left))
30769 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30770 return -EFAULT;
30771 } else {
30772 memcpy(buf, dp, left);
30773 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30774 }
30775 if (left) {
30776 if (config->user) {
30777 - if (copy_from_user(buf, dp, left))
30778 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30779 return -EFAULT;
30780 } else {
30781 memcpy(buf, dp, left);
30782 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
30783 index f130724..c373c68 100644
30784 --- a/drivers/isdn/hardware/eicon/capidtmf.c
30785 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
30786 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
30787 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30788 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30789
30790 + pax_track_stack();
30791
30792 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30793 {
30794 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
30795 index 4d425c6..a9be6c4 100644
30796 --- a/drivers/isdn/hardware/eicon/capifunc.c
30797 +++ b/drivers/isdn/hardware/eicon/capifunc.c
30798 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30799 IDI_SYNC_REQ req;
30800 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30801
30802 + pax_track_stack();
30803 +
30804 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30805
30806 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30807 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
30808 index 3029234..ef0d9e2 100644
30809 --- a/drivers/isdn/hardware/eicon/diddfunc.c
30810 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
30811 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30812 IDI_SYNC_REQ req;
30813 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30814
30815 + pax_track_stack();
30816 +
30817 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30818
30819 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30820 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
30821 index 0bbee78..a0d0a01 100644
30822 --- a/drivers/isdn/hardware/eicon/divasfunc.c
30823 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
30824 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30825 IDI_SYNC_REQ req;
30826 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30827
30828 + pax_track_stack();
30829 +
30830 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30831
30832 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30833 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30834 index 85784a7..a19ca98 100644
30835 --- a/drivers/isdn/hardware/eicon/divasync.h
30836 +++ b/drivers/isdn/hardware/eicon/divasync.h
30837 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30838 } diva_didd_add_adapter_t;
30839 typedef struct _diva_didd_remove_adapter {
30840 IDI_CALL p_request;
30841 -} diva_didd_remove_adapter_t;
30842 +} __no_const diva_didd_remove_adapter_t;
30843 typedef struct _diva_didd_read_adapter_array {
30844 void * buffer;
30845 dword length;
30846 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
30847 index db87d51..7d09acf 100644
30848 --- a/drivers/isdn/hardware/eicon/idifunc.c
30849 +++ b/drivers/isdn/hardware/eicon/idifunc.c
30850 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30851 IDI_SYNC_REQ req;
30852 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30853
30854 + pax_track_stack();
30855 +
30856 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30857
30858 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30859 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
30860 index a339598..b6a8bfc 100644
30861 --- a/drivers/isdn/hardware/eicon/message.c
30862 +++ b/drivers/isdn/hardware/eicon/message.c
30863 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
30864 dword d;
30865 word w;
30866
30867 + pax_track_stack();
30868 +
30869 a = plci->adapter;
30870 Id = ((word)plci->Id<<8)|a->Id;
30871 PUT_WORD(&SS_Ind[4],0x0000);
30872 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
30873 word j, n, w;
30874 dword d;
30875
30876 + pax_track_stack();
30877 +
30878
30879 for(i=0;i<8;i++) bp_parms[i].length = 0;
30880 for(i=0;i<2;i++) global_config[i].length = 0;
30881 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
30882 const byte llc3[] = {4,3,2,2,6,6,0};
30883 const byte header[] = {0,2,3,3,0,0,0};
30884
30885 + pax_track_stack();
30886 +
30887 for(i=0;i<8;i++) bp_parms[i].length = 0;
30888 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30889 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30890 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
30891 word appl_number_group_type[MAX_APPL];
30892 PLCI *auxplci;
30893
30894 + pax_track_stack();
30895 +
30896 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30897
30898 if(!a->group_optimization_enabled)
30899 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
30900 index a564b75..f3cf8b5 100644
30901 --- a/drivers/isdn/hardware/eicon/mntfunc.c
30902 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
30903 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30904 IDI_SYNC_REQ req;
30905 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30906
30907 + pax_track_stack();
30908 +
30909 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30910
30911 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30912 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30913 index a3bd163..8956575 100644
30914 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30915 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30916 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30917 typedef struct _diva_os_idi_adapter_interface {
30918 diva_init_card_proc_t cleanup_adapter_proc;
30919 diva_cmd_card_proc_t cmd_proc;
30920 -} diva_os_idi_adapter_interface_t;
30921 +} __no_const diva_os_idi_adapter_interface_t;
30922
30923 typedef struct _diva_os_xdi_adapter {
30924 struct list_head link;
30925 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
30926 index 6ed82ad..b05ac05 100644
30927 --- a/drivers/isdn/i4l/isdn_common.c
30928 +++ b/drivers/isdn/i4l/isdn_common.c
30929 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
30930 } iocpar;
30931 void __user *argp = (void __user *)arg;
30932
30933 + pax_track_stack();
30934 +
30935 #define name iocpar.name
30936 #define bname iocpar.bname
30937 #define iocts iocpar.iocts
30938 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30939 index 1f355bb..43f1fea 100644
30940 --- a/drivers/isdn/icn/icn.c
30941 +++ b/drivers/isdn/icn/icn.c
30942 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30943 if (count > len)
30944 count = len;
30945 if (user) {
30946 - if (copy_from_user(msg, buf, count))
30947 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30948 return -EFAULT;
30949 } else
30950 memcpy(msg, buf, count);
30951 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30952 index 2535933..09a8e86 100644
30953 --- a/drivers/lguest/core.c
30954 +++ b/drivers/lguest/core.c
30955 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
30956 * it's worked so far. The end address needs +1 because __get_vm_area
30957 * allocates an extra guard page, so we need space for that.
30958 */
30959 +
30960 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30961 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30962 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30963 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30964 +#else
30965 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30966 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30967 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30968 +#endif
30969 +
30970 if (!switcher_vma) {
30971 err = -ENOMEM;
30972 printk("lguest: could not map switcher pages high\n");
30973 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
30974 * Now the Switcher is mapped at the right address, we can't fail!
30975 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30976 */
30977 - memcpy(switcher_vma->addr, start_switcher_text,
30978 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30979 end_switcher_text - start_switcher_text);
30980
30981 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30982 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30983 index 65af42f..530c87a 100644
30984 --- a/drivers/lguest/x86/core.c
30985 +++ b/drivers/lguest/x86/core.c
30986 @@ -59,7 +59,7 @@ static struct {
30987 /* Offset from where switcher.S was compiled to where we've copied it */
30988 static unsigned long switcher_offset(void)
30989 {
30990 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30991 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30992 }
30993
30994 /* This cpu's struct lguest_pages. */
30995 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30996 * These copies are pretty cheap, so we do them unconditionally: */
30997 /* Save the current Host top-level page directory.
30998 */
30999 +
31000 +#ifdef CONFIG_PAX_PER_CPU_PGD
31001 + pages->state.host_cr3 = read_cr3();
31002 +#else
31003 pages->state.host_cr3 = __pa(current->mm->pgd);
31004 +#endif
31005 +
31006 /*
31007 * Set up the Guest's page tables to see this CPU's pages (and no
31008 * other CPU's pages).
31009 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
31010 * compiled-in switcher code and the high-mapped copy we just made.
31011 */
31012 for (i = 0; i < IDT_ENTRIES; i++)
31013 - default_idt_entries[i] += switcher_offset();
31014 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31015
31016 /*
31017 * Set up the Switcher's per-cpu areas.
31018 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
31019 * it will be undisturbed when we switch. To change %cs and jump we
31020 * need this structure to feed to Intel's "lcall" instruction.
31021 */
31022 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31023 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31024 lguest_entry.segment = LGUEST_CS;
31025
31026 /*
31027 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
31028 index 40634b0..4f5855e 100644
31029 --- a/drivers/lguest/x86/switcher_32.S
31030 +++ b/drivers/lguest/x86/switcher_32.S
31031 @@ -87,6 +87,7 @@
31032 #include <asm/page.h>
31033 #include <asm/segment.h>
31034 #include <asm/lguest.h>
31035 +#include <asm/processor-flags.h>
31036
31037 // We mark the start of the code to copy
31038 // It's placed in .text tho it's never run here
31039 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31040 // Changes type when we load it: damn Intel!
31041 // For after we switch over our page tables
31042 // That entry will be read-only: we'd crash.
31043 +
31044 +#ifdef CONFIG_PAX_KERNEXEC
31045 + mov %cr0, %edx
31046 + xor $X86_CR0_WP, %edx
31047 + mov %edx, %cr0
31048 +#endif
31049 +
31050 movl $(GDT_ENTRY_TSS*8), %edx
31051 ltr %dx
31052
31053 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31054 // Let's clear it again for our return.
31055 // The GDT descriptor of the Host
31056 // Points to the table after two "size" bytes
31057 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31058 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31059 // Clear "used" from type field (byte 5, bit 2)
31060 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31061 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31062 +
31063 +#ifdef CONFIG_PAX_KERNEXEC
31064 + mov %cr0, %eax
31065 + xor $X86_CR0_WP, %eax
31066 + mov %eax, %cr0
31067 +#endif
31068
31069 // Once our page table's switched, the Guest is live!
31070 // The Host fades as we run this final step.
31071 @@ -295,13 +309,12 @@ deliver_to_host:
31072 // I consulted gcc, and it gave
31073 // These instructions, which I gladly credit:
31074 leal (%edx,%ebx,8), %eax
31075 - movzwl (%eax),%edx
31076 - movl 4(%eax), %eax
31077 - xorw %ax, %ax
31078 - orl %eax, %edx
31079 + movl 4(%eax), %edx
31080 + movw (%eax), %dx
31081 // Now the address of the handler's in %edx
31082 // We call it now: its "iret" drops us home.
31083 - jmp *%edx
31084 + ljmp $__KERNEL_CS, $1f
31085 +1: jmp *%edx
31086
31087 // Every interrupt can come to us here
31088 // But we must truly tell each apart.
31089 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
31090 index 4daf9e5..b8d1d0f 100644
31091 --- a/drivers/macintosh/macio_asic.c
31092 +++ b/drivers/macintosh/macio_asic.c
31093 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
31094 * MacIO is matched against any Apple ID, it's probe() function
31095 * will then decide wether it applies or not
31096 */
31097 -static const struct pci_device_id __devinitdata pci_ids [] = { {
31098 +static const struct pci_device_id __devinitconst pci_ids [] = { {
31099 .vendor = PCI_VENDOR_ID_APPLE,
31100 .device = PCI_ANY_ID,
31101 .subvendor = PCI_ANY_ID,
31102 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
31103 index 2e9a3ca..c2fb229 100644
31104 --- a/drivers/md/dm-ioctl.c
31105 +++ b/drivers/md/dm-ioctl.c
31106 @@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
31107 cmd == DM_LIST_VERSIONS_CMD)
31108 return 0;
31109
31110 - if ((cmd == DM_DEV_CREATE_CMD)) {
31111 + if (cmd == DM_DEV_CREATE_CMD) {
31112 if (!*param->name) {
31113 DMWARN("name not supplied when creating device");
31114 return -EINVAL;
31115 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
31116 index 9bfd057..01180bc 100644
31117 --- a/drivers/md/dm-raid1.c
31118 +++ b/drivers/md/dm-raid1.c
31119 @@ -40,7 +40,7 @@ enum dm_raid1_error {
31120
31121 struct mirror {
31122 struct mirror_set *ms;
31123 - atomic_t error_count;
31124 + atomic_unchecked_t error_count;
31125 unsigned long error_type;
31126 struct dm_dev *dev;
31127 sector_t offset;
31128 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
31129 struct mirror *m;
31130
31131 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
31132 - if (!atomic_read(&m->error_count))
31133 + if (!atomic_read_unchecked(&m->error_count))
31134 return m;
31135
31136 return NULL;
31137 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31138 * simple way to tell if a device has encountered
31139 * errors.
31140 */
31141 - atomic_inc(&m->error_count);
31142 + atomic_inc_unchecked(&m->error_count);
31143
31144 if (test_and_set_bit(error_type, &m->error_type))
31145 return;
31146 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31147 struct mirror *m = get_default_mirror(ms);
31148
31149 do {
31150 - if (likely(!atomic_read(&m->error_count)))
31151 + if (likely(!atomic_read_unchecked(&m->error_count)))
31152 return m;
31153
31154 if (m-- == ms->mirror)
31155 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31156 {
31157 struct mirror *default_mirror = get_default_mirror(m->ms);
31158
31159 - return !atomic_read(&default_mirror->error_count);
31160 + return !atomic_read_unchecked(&default_mirror->error_count);
31161 }
31162
31163 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31164 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31165 */
31166 if (likely(region_in_sync(ms, region, 1)))
31167 m = choose_mirror(ms, bio->bi_sector);
31168 - else if (m && atomic_read(&m->error_count))
31169 + else if (m && atomic_read_unchecked(&m->error_count))
31170 m = NULL;
31171
31172 if (likely(m))
31173 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31174 }
31175
31176 ms->mirror[mirror].ms = ms;
31177 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31178 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31179 ms->mirror[mirror].error_type = 0;
31180 ms->mirror[mirror].offset = offset;
31181
31182 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31183 */
31184 static char device_status_char(struct mirror *m)
31185 {
31186 - if (!atomic_read(&(m->error_count)))
31187 + if (!atomic_read_unchecked(&(m->error_count)))
31188 return 'A';
31189
31190 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31191 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31192 index 3d80cf0..b77cc47 100644
31193 --- a/drivers/md/dm-stripe.c
31194 +++ b/drivers/md/dm-stripe.c
31195 @@ -20,7 +20,7 @@ struct stripe {
31196 struct dm_dev *dev;
31197 sector_t physical_start;
31198
31199 - atomic_t error_count;
31200 + atomic_unchecked_t error_count;
31201 };
31202
31203 struct stripe_c {
31204 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31205 kfree(sc);
31206 return r;
31207 }
31208 - atomic_set(&(sc->stripe[i].error_count), 0);
31209 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31210 }
31211
31212 ti->private = sc;
31213 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31214 DMEMIT("%d ", sc->stripes);
31215 for (i = 0; i < sc->stripes; i++) {
31216 DMEMIT("%s ", sc->stripe[i].dev->name);
31217 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31218 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31219 'D' : 'A';
31220 }
31221 buffer[i] = '\0';
31222 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31223 */
31224 for (i = 0; i < sc->stripes; i++)
31225 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31226 - atomic_inc(&(sc->stripe[i].error_count));
31227 - if (atomic_read(&(sc->stripe[i].error_count)) <
31228 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31229 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31230 DM_IO_ERROR_THRESHOLD)
31231 schedule_work(&sc->trigger_event);
31232 }
31233 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31234 index bc04518..7a83b81 100644
31235 --- a/drivers/md/dm-table.c
31236 +++ b/drivers/md/dm-table.c
31237 @@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31238 if (!dev_size)
31239 return 0;
31240
31241 - if ((start >= dev_size) || (start + len > dev_size)) {
31242 + if ((start >= dev_size) || (len > dev_size - start)) {
31243 DMWARN("%s: %s too small for target: "
31244 "start=%llu, len=%llu, dev_size=%llu",
31245 dm_device_name(ti->table->md), bdevname(bdev, b),
31246 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31247 index 52b39f3..83a8b6b 100644
31248 --- a/drivers/md/dm.c
31249 +++ b/drivers/md/dm.c
31250 @@ -165,9 +165,9 @@ struct mapped_device {
31251 /*
31252 * Event handling.
31253 */
31254 - atomic_t event_nr;
31255 + atomic_unchecked_t event_nr;
31256 wait_queue_head_t eventq;
31257 - atomic_t uevent_seq;
31258 + atomic_unchecked_t uevent_seq;
31259 struct list_head uevent_list;
31260 spinlock_t uevent_lock; /* Protect access to uevent_list */
31261
31262 @@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(int minor)
31263 rwlock_init(&md->map_lock);
31264 atomic_set(&md->holders, 1);
31265 atomic_set(&md->open_count, 0);
31266 - atomic_set(&md->event_nr, 0);
31267 - atomic_set(&md->uevent_seq, 0);
31268 + atomic_set_unchecked(&md->event_nr, 0);
31269 + atomic_set_unchecked(&md->uevent_seq, 0);
31270 INIT_LIST_HEAD(&md->uevent_list);
31271 spin_lock_init(&md->uevent_lock);
31272
31273 @@ -1978,7 +1978,7 @@ static void event_callback(void *context)
31274
31275 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31276
31277 - atomic_inc(&md->event_nr);
31278 + atomic_inc_unchecked(&md->event_nr);
31279 wake_up(&md->eventq);
31280 }
31281
31282 @@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31283
31284 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31285 {
31286 - return atomic_add_return(1, &md->uevent_seq);
31287 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31288 }
31289
31290 uint32_t dm_get_event_nr(struct mapped_device *md)
31291 {
31292 - return atomic_read(&md->event_nr);
31293 + return atomic_read_unchecked(&md->event_nr);
31294 }
31295
31296 int dm_wait_event(struct mapped_device *md, int event_nr)
31297 {
31298 return wait_event_interruptible(md->eventq,
31299 - (event_nr != atomic_read(&md->event_nr)));
31300 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31301 }
31302
31303 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31304 diff --git a/drivers/md/md.c b/drivers/md/md.c
31305 index 5c95ccb..217fa57 100644
31306 --- a/drivers/md/md.c
31307 +++ b/drivers/md/md.c
31308 @@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31309 * start build, activate spare
31310 */
31311 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31312 -static atomic_t md_event_count;
31313 +static atomic_unchecked_t md_event_count;
31314 void md_new_event(mddev_t *mddev)
31315 {
31316 - atomic_inc(&md_event_count);
31317 + atomic_inc_unchecked(&md_event_count);
31318 wake_up(&md_event_waiters);
31319 }
31320 EXPORT_SYMBOL_GPL(md_new_event);
31321 @@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31322 */
31323 static void md_new_event_inintr(mddev_t *mddev)
31324 {
31325 - atomic_inc(&md_event_count);
31326 + atomic_inc_unchecked(&md_event_count);
31327 wake_up(&md_event_waiters);
31328 }
31329
31330 @@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
31331
31332 rdev->preferred_minor = 0xffff;
31333 rdev->data_offset = le64_to_cpu(sb->data_offset);
31334 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31335 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31336
31337 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31338 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31339 @@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
31340 else
31341 sb->resync_offset = cpu_to_le64(0);
31342
31343 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31344 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31345
31346 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31347 sb->size = cpu_to_le64(mddev->dev_sectors);
31348 @@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31349 static ssize_t
31350 errors_show(mdk_rdev_t *rdev, char *page)
31351 {
31352 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31353 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31354 }
31355
31356 static ssize_t
31357 @@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
31358 char *e;
31359 unsigned long n = simple_strtoul(buf, &e, 10);
31360 if (*buf && (*e == 0 || *e == '\n')) {
31361 - atomic_set(&rdev->corrected_errors, n);
31362 + atomic_set_unchecked(&rdev->corrected_errors, n);
31363 return len;
31364 }
31365 return -EINVAL;
31366 @@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
31367 rdev->sb_loaded = 0;
31368 rdev->bb_page = NULL;
31369 atomic_set(&rdev->nr_pending, 0);
31370 - atomic_set(&rdev->read_errors, 0);
31371 - atomic_set(&rdev->corrected_errors, 0);
31372 + atomic_set_unchecked(&rdev->read_errors, 0);
31373 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31374
31375 INIT_LIST_HEAD(&rdev->same_set);
31376 init_waitqueue_head(&rdev->blocked_wait);
31377 @@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31378
31379 spin_unlock(&pers_lock);
31380 seq_printf(seq, "\n");
31381 - seq->poll_event = atomic_read(&md_event_count);
31382 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31383 return 0;
31384 }
31385 if (v == (void*)2) {
31386 @@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31387 chunk_kb ? "KB" : "B");
31388 if (bitmap->file) {
31389 seq_printf(seq, ", file: ");
31390 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31391 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31392 }
31393
31394 seq_printf(seq, "\n");
31395 @@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31396 return error;
31397
31398 seq = file->private_data;
31399 - seq->poll_event = atomic_read(&md_event_count);
31400 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31401 return error;
31402 }
31403
31404 @@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31405 /* always allow read */
31406 mask = POLLIN | POLLRDNORM;
31407
31408 - if (seq->poll_event != atomic_read(&md_event_count))
31409 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31410 mask |= POLLERR | POLLPRI;
31411 return mask;
31412 }
31413 @@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
31414 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31415 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31416 (int)part_stat_read(&disk->part0, sectors[1]) -
31417 - atomic_read(&disk->sync_io);
31418 + atomic_read_unchecked(&disk->sync_io);
31419 /* sync IO will cause sync_io to increase before the disk_stats
31420 * as sync_io is counted when a request starts, and
31421 * disk_stats is counted when it completes.
31422 diff --git a/drivers/md/md.h b/drivers/md/md.h
31423 index 0a309dc..7e01d7f 100644
31424 --- a/drivers/md/md.h
31425 +++ b/drivers/md/md.h
31426 @@ -124,13 +124,13 @@ struct mdk_rdev_s
31427 * only maintained for arrays that
31428 * support hot removal
31429 */
31430 - atomic_t read_errors; /* number of consecutive read errors that
31431 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31432 * we have tried to ignore.
31433 */
31434 struct timespec last_read_error; /* monotonic time since our
31435 * last read error
31436 */
31437 - atomic_t corrected_errors; /* number of corrected read errors,
31438 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31439 * for reporting to userspace and storing
31440 * in superblock.
31441 */
31442 @@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
31443
31444 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31445 {
31446 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31447 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31448 }
31449
31450 struct mdk_personality
31451 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31452 index d9587df..83a0dc3 100644
31453 --- a/drivers/md/raid1.c
31454 +++ b/drivers/md/raid1.c
31455 @@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
31456 if (r1_sync_page_io(rdev, sect, s,
31457 bio->bi_io_vec[idx].bv_page,
31458 READ) != 0)
31459 - atomic_add(s, &rdev->corrected_errors);
31460 + atomic_add_unchecked(s, &rdev->corrected_errors);
31461 }
31462 sectors -= s;
31463 sect += s;
31464 @@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
31465 test_bit(In_sync, &rdev->flags)) {
31466 if (r1_sync_page_io(rdev, sect, s,
31467 conf->tmppage, READ)) {
31468 - atomic_add(s, &rdev->corrected_errors);
31469 + atomic_add_unchecked(s, &rdev->corrected_errors);
31470 printk(KERN_INFO
31471 "md/raid1:%s: read error corrected "
31472 "(%d sectors at %llu on %s)\n",
31473 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31474 index 1d44228..98db57d 100644
31475 --- a/drivers/md/raid10.c
31476 +++ b/drivers/md/raid10.c
31477 @@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bio, int error)
31478 /* The write handler will notice the lack of
31479 * R10BIO_Uptodate and record any errors etc
31480 */
31481 - atomic_add(r10_bio->sectors,
31482 + atomic_add_unchecked(r10_bio->sectors,
31483 &conf->mirrors[d].rdev->corrected_errors);
31484
31485 /* for reconstruct, we always reschedule after a read.
31486 @@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31487 {
31488 struct timespec cur_time_mon;
31489 unsigned long hours_since_last;
31490 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31491 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31492
31493 ktime_get_ts(&cur_time_mon);
31494
31495 @@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31496 * overflowing the shift of read_errors by hours_since_last.
31497 */
31498 if (hours_since_last >= 8 * sizeof(read_errors))
31499 - atomic_set(&rdev->read_errors, 0);
31500 + atomic_set_unchecked(&rdev->read_errors, 0);
31501 else
31502 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31503 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31504 }
31505
31506 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
31507 @@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31508 return;
31509
31510 check_decay_read_errors(mddev, rdev);
31511 - atomic_inc(&rdev->read_errors);
31512 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31513 + atomic_inc_unchecked(&rdev->read_errors);
31514 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31515 char b[BDEVNAME_SIZE];
31516 bdevname(rdev->bdev, b);
31517
31518 @@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31519 "md/raid10:%s: %s: Raid device exceeded "
31520 "read_error threshold [cur %d:max %d]\n",
31521 mdname(mddev), b,
31522 - atomic_read(&rdev->read_errors), max_read_errors);
31523 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31524 printk(KERN_NOTICE
31525 "md/raid10:%s: %s: Failing raid device\n",
31526 mdname(mddev), b);
31527 @@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31528 (unsigned long long)(
31529 sect + rdev->data_offset),
31530 bdevname(rdev->bdev, b));
31531 - atomic_add(s, &rdev->corrected_errors);
31532 + atomic_add_unchecked(s, &rdev->corrected_errors);
31533 }
31534
31535 rdev_dec_pending(rdev, mddev);
31536 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31537 index b6200c3..02e8702 100644
31538 --- a/drivers/md/raid5.c
31539 +++ b/drivers/md/raid5.c
31540 @@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31541 (unsigned long long)(sh->sector
31542 + rdev->data_offset),
31543 bdevname(rdev->bdev, b));
31544 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31545 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31546 clear_bit(R5_ReadError, &sh->dev[i].flags);
31547 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31548 }
31549 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31550 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31551 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31552 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31553 } else {
31554 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31555 int retry = 0;
31556 rdev = conf->disks[i].rdev;
31557
31558 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31559 - atomic_inc(&rdev->read_errors);
31560 + atomic_inc_unchecked(&rdev->read_errors);
31561 if (conf->mddev->degraded >= conf->max_degraded)
31562 printk_ratelimited(
31563 KERN_WARNING
31564 @@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31565 (unsigned long long)(sh->sector
31566 + rdev->data_offset),
31567 bdn);
31568 - else if (atomic_read(&rdev->read_errors)
31569 + else if (atomic_read_unchecked(&rdev->read_errors)
31570 > conf->max_nr_stripes)
31571 printk(KERN_WARNING
31572 "md/raid:%s: Too many read errors, failing device %s.\n",
31573 @@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
31574 sector_t r_sector;
31575 struct stripe_head sh2;
31576
31577 + pax_track_stack();
31578
31579 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31580 stripe = new_sector;
31581 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
31582 index 1d1d8d2..6c6837a 100644
31583 --- a/drivers/media/common/saa7146_hlp.c
31584 +++ b/drivers/media/common/saa7146_hlp.c
31585 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
31586
31587 int x[32], y[32], w[32], h[32];
31588
31589 + pax_track_stack();
31590 +
31591 /* clear out memory */
31592 memset(&line_list[0], 0x00, sizeof(u32)*32);
31593 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31594 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31595 index 573d540..16f78f3 100644
31596 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31597 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31598 @@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
31599 .subvendor = _subvend, .subdevice = _subdev, \
31600 .driver_data = (unsigned long)&_driverdata }
31601
31602 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31603 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31604 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31605 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31606 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31607 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31608 index 7ea517b..252fe54 100644
31609 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31610 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31611 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
31612 u8 buf[HOST_LINK_BUF_SIZE];
31613 int i;
31614
31615 + pax_track_stack();
31616 +
31617 dprintk("%s\n", __func__);
31618
31619 /* check if we have space for a link buf in the rx_buffer */
31620 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
31621 unsigned long timeout;
31622 int written;
31623
31624 + pax_track_stack();
31625 +
31626 dprintk("%s\n", __func__);
31627
31628 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31629 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31630 index a7d876f..8c21b61 100644
31631 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31632 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31633 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31634 union {
31635 dmx_ts_cb ts;
31636 dmx_section_cb sec;
31637 - } cb;
31638 + } __no_const cb;
31639
31640 struct dvb_demux *demux;
31641 void *priv;
31642 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31643 index f732877..d38c35a 100644
31644 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31645 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31646 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31647 const struct dvb_device *template, void *priv, int type)
31648 {
31649 struct dvb_device *dvbdev;
31650 - struct file_operations *dvbdevfops;
31651 + file_operations_no_const *dvbdevfops;
31652 struct device *clsdev;
31653 int minor;
31654 int id;
31655 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31656 index acb5fb2..2413f1d 100644
31657 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31658 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31659 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31660 struct dib0700_adapter_state {
31661 int (*set_param_save) (struct dvb_frontend *,
31662 struct dvb_frontend_parameters *);
31663 -};
31664 +} __no_const;
31665
31666 static int dib7070_set_param_override(struct dvb_frontend *fe,
31667 struct dvb_frontend_parameters *fep)
31668 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
31669 index a224e94..503b76a 100644
31670 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
31671 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
31672 @@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
31673 if (!buf)
31674 return -ENOMEM;
31675
31676 + pax_track_stack();
31677 +
31678 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31679 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
31680 hx.addr, hx.len, hx.chk);
31681 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31682 index 058b231..183d2b3 100644
31683 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31684 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31685 @@ -95,7 +95,7 @@ struct su3000_state {
31686
31687 struct s6x0_state {
31688 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31689 -};
31690 +} __no_const;
31691
31692 /* debug */
31693 static int dvb_usb_dw2102_debug;
31694 diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
31695 index 37b1469..28a6f6f 100644
31696 --- a/drivers/media/dvb/dvb-usb/lmedm04.c
31697 +++ b/drivers/media/dvb/dvb-usb/lmedm04.c
31698 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
31699 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
31700 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
31701
31702 + pax_track_stack();
31703
31704 data[0] = 0x8a;
31705 len_in = 1;
31706 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_device *dev)
31707 int ret = 0, len_in;
31708 u8 data[512] = {0};
31709
31710 + pax_track_stack();
31711 +
31712 data[0] = 0x0a;
31713 len_in = 1;
31714 info("FRM Firmware Cold Reset");
31715 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31716 index ba91735..4261d84 100644
31717 --- a/drivers/media/dvb/frontends/dib3000.h
31718 +++ b/drivers/media/dvb/frontends/dib3000.h
31719 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31720 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31721 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31722 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31723 -};
31724 +} __no_const;
31725
31726 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31727 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31728 diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
31729 index c283112..7f367a7 100644
31730 --- a/drivers/media/dvb/frontends/mb86a16.c
31731 +++ b/drivers/media/dvb/frontends/mb86a16.c
31732 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16_state *state)
31733 int ret = -1;
31734 int sync;
31735
31736 + pax_track_stack();
31737 +
31738 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
31739
31740 fcp = 3000;
31741 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
31742 index c709ce6..b3fe620 100644
31743 --- a/drivers/media/dvb/frontends/or51211.c
31744 +++ b/drivers/media/dvb/frontends/or51211.c
31745 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
31746 u8 tudata[585];
31747 int i;
31748
31749 + pax_track_stack();
31750 +
31751 dprintk("Firmware is %zd bytes\n",fw->size);
31752
31753 /* Get eprom data */
31754 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31755 index 0564192..75b16f5 100644
31756 --- a/drivers/media/dvb/ngene/ngene-cards.c
31757 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31758 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31759
31760 /****************************************************************************/
31761
31762 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31763 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31764 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31765 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31766 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31767 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31768 index 16a089f..ab1667d 100644
31769 --- a/drivers/media/radio/radio-cadet.c
31770 +++ b/drivers/media/radio/radio-cadet.c
31771 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31772 unsigned char readbuf[RDS_BUFFER];
31773 int i = 0;
31774
31775 + if (count > RDS_BUFFER)
31776 + return -EFAULT;
31777 mutex_lock(&dev->lock);
31778 if (dev->rdsstat == 0) {
31779 dev->rdsstat = 1;
31780 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31781 index 9cde353..8c6a1c3 100644
31782 --- a/drivers/media/video/au0828/au0828.h
31783 +++ b/drivers/media/video/au0828/au0828.h
31784 @@ -191,7 +191,7 @@ struct au0828_dev {
31785
31786 /* I2C */
31787 struct i2c_adapter i2c_adap;
31788 - struct i2c_algorithm i2c_algo;
31789 + i2c_algorithm_no_const i2c_algo;
31790 struct i2c_client i2c_client;
31791 u32 i2c_rc;
31792
31793 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
31794 index 9e2f870..22e3a08 100644
31795 --- a/drivers/media/video/cx18/cx18-driver.c
31796 +++ b/drivers/media/video/cx18/cx18-driver.c
31797 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
31798 struct i2c_client c;
31799 u8 eedata[256];
31800
31801 + pax_track_stack();
31802 +
31803 memset(&c, 0, sizeof(c));
31804 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31805 c.adapter = &cx->i2c_adap[0];
31806 diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
31807 index ce765e3..f9e1b04 100644
31808 --- a/drivers/media/video/cx23885/cx23885-input.c
31809 +++ b/drivers/media/video/cx23885/cx23885-input.c
31810 @@ -53,6 +53,8 @@ static void cx23885_input_process_measurements(struct cx23885_dev *dev,
31811 bool handle = false;
31812 struct ir_raw_event ir_core_event[64];
31813
31814 + pax_track_stack();
31815 +
31816 do {
31817 num = 0;
31818 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
31819 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31820 index 68d1240..46b32eb 100644
31821 --- a/drivers/media/video/cx88/cx88-alsa.c
31822 +++ b/drivers/media/video/cx88/cx88-alsa.c
31823 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31824 * Only boards with eeprom and byte 1 at eeprom=1 have it
31825 */
31826
31827 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31828 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31829 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31830 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31831 {0, }
31832 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31833 index 9515f3a..c9ecb85 100644
31834 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31835 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31836 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
31837 u8 *eeprom;
31838 struct tveeprom tvdata;
31839
31840 + pax_track_stack();
31841 +
31842 memset(&tvdata,0,sizeof(tvdata));
31843
31844 eeprom = pvr2_eeprom_fetch(hdw);
31845 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31846 index 305e6aa..0143317 100644
31847 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31848 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31849 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31850
31851 /* I2C stuff */
31852 struct i2c_adapter i2c_adap;
31853 - struct i2c_algorithm i2c_algo;
31854 + i2c_algorithm_no_const i2c_algo;
31855 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31856 int i2c_cx25840_hack_state;
31857 int i2c_linked;
31858 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
31859 index f9f29cc..5a2e330 100644
31860 --- a/drivers/media/video/saa7134/saa6752hs.c
31861 +++ b/drivers/media/video/saa7134/saa6752hs.c
31862 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
31863 unsigned char localPAT[256];
31864 unsigned char localPMT[256];
31865
31866 + pax_track_stack();
31867 +
31868 /* Set video format - must be done first as it resets other settings */
31869 set_reg8(client, 0x41, h->video_format);
31870
31871 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
31872 index 62fac7f..f29e0b9 100644
31873 --- a/drivers/media/video/saa7164/saa7164-cmd.c
31874 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
31875 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
31876 u8 tmp[512];
31877 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31878
31879 + pax_track_stack();
31880 +
31881 /* While any outstand message on the bus exists... */
31882 do {
31883
31884 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
31885 u8 tmp[512];
31886 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31887
31888 + pax_track_stack();
31889 +
31890 while (loop) {
31891
31892 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
31893 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31894 index 84cd1b6..f741e07 100644
31895 --- a/drivers/media/video/timblogiw.c
31896 +++ b/drivers/media/video/timblogiw.c
31897 @@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31898
31899 /* Platform device functions */
31900
31901 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31902 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31903 .vidioc_querycap = timblogiw_querycap,
31904 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31905 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31906 @@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31907 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31908 };
31909
31910 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31911 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31912 .owner = THIS_MODULE,
31913 .open = timblogiw_open,
31914 .release = timblogiw_close,
31915 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
31916 index f344411..6ae9974 100644
31917 --- a/drivers/media/video/usbvision/usbvision-core.c
31918 +++ b/drivers/media/video/usbvision/usbvision-core.c
31919 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
31920 unsigned char rv, gv, bv;
31921 static unsigned char *Y, *U, *V;
31922
31923 + pax_track_stack();
31924 +
31925 frame = usbvision->cur_frame;
31926 image_size = frame->frmwidth * frame->frmheight;
31927 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31928 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
31929 index f300dea..04834ba 100644
31930 --- a/drivers/media/video/videobuf-dma-sg.c
31931 +++ b/drivers/media/video/videobuf-dma-sg.c
31932 @@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
31933 {
31934 struct videobuf_queue q;
31935
31936 + pax_track_stack();
31937 +
31938 /* Required to make generic handler to call __videobuf_alloc */
31939 q.int_ops = &sg_ops;
31940
31941 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31942 index 7956a10..f39232f 100644
31943 --- a/drivers/message/fusion/mptbase.c
31944 +++ b/drivers/message/fusion/mptbase.c
31945 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31946 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31947 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31948
31949 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31950 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31951 +#else
31952 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31953 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31954 +#endif
31955 +
31956 /*
31957 * Rounding UP to nearest 4-kB boundary here...
31958 */
31959 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31960 index 7596aec..f7ae9aa 100644
31961 --- a/drivers/message/fusion/mptsas.c
31962 +++ b/drivers/message/fusion/mptsas.c
31963 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31964 return 0;
31965 }
31966
31967 +static inline void
31968 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31969 +{
31970 + if (phy_info->port_details) {
31971 + phy_info->port_details->rphy = rphy;
31972 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31973 + ioc->name, rphy));
31974 + }
31975 +
31976 + if (rphy) {
31977 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31978 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31979 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31980 + ioc->name, rphy, rphy->dev.release));
31981 + }
31982 +}
31983 +
31984 /* no mutex */
31985 static void
31986 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31987 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31988 return NULL;
31989 }
31990
31991 -static inline void
31992 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31993 -{
31994 - if (phy_info->port_details) {
31995 - phy_info->port_details->rphy = rphy;
31996 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31997 - ioc->name, rphy));
31998 - }
31999 -
32000 - if (rphy) {
32001 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
32002 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
32003 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
32004 - ioc->name, rphy, rphy->dev.release));
32005 - }
32006 -}
32007 -
32008 static inline struct sas_port *
32009 mptsas_get_port(struct mptsas_phyinfo *phy_info)
32010 {
32011 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
32012 index ce61a57..3da8862 100644
32013 --- a/drivers/message/fusion/mptscsih.c
32014 +++ b/drivers/message/fusion/mptscsih.c
32015 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
32016
32017 h = shost_priv(SChost);
32018
32019 - if (h) {
32020 - if (h->info_kbuf == NULL)
32021 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32022 - return h->info_kbuf;
32023 - h->info_kbuf[0] = '\0';
32024 + if (!h)
32025 + return NULL;
32026
32027 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32028 - h->info_kbuf[size-1] = '\0';
32029 - }
32030 + if (h->info_kbuf == NULL)
32031 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
32032 + return h->info_kbuf;
32033 + h->info_kbuf[0] = '\0';
32034 +
32035 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
32036 + h->info_kbuf[size-1] = '\0';
32037
32038 return h->info_kbuf;
32039 }
32040 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
32041 index 098de2b..fbb922c 100644
32042 --- a/drivers/message/i2o/i2o_config.c
32043 +++ b/drivers/message/i2o/i2o_config.c
32044 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned long arg)
32045 struct i2o_message *msg;
32046 unsigned int iop;
32047
32048 + pax_track_stack();
32049 +
32050 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
32051 return -EFAULT;
32052
32053 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
32054 index 07dbeaf..5533142 100644
32055 --- a/drivers/message/i2o/i2o_proc.c
32056 +++ b/drivers/message/i2o/i2o_proc.c
32057 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
32058 "Array Controller Device"
32059 };
32060
32061 -static char *chtostr(u8 * chars, int n)
32062 -{
32063 - char tmp[256];
32064 - tmp[0] = 0;
32065 - return strncat(tmp, (char *)chars, n);
32066 -}
32067 -
32068 static int i2o_report_query_status(struct seq_file *seq, int block_status,
32069 char *group)
32070 {
32071 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
32072
32073 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
32074 seq_printf(seq, "%-#8x", ddm_table.module_id);
32075 - seq_printf(seq, "%-29s",
32076 - chtostr(ddm_table.module_name_version, 28));
32077 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
32078 seq_printf(seq, "%9d ", ddm_table.data_size);
32079 seq_printf(seq, "%8d", ddm_table.code_size);
32080
32081 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
32082
32083 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
32084 seq_printf(seq, "%-#8x", dst->module_id);
32085 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
32086 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
32087 + seq_printf(seq, "%-.28s", dst->module_name_version);
32088 + seq_printf(seq, "%-.8s", dst->date);
32089 seq_printf(seq, "%8d ", dst->module_size);
32090 seq_printf(seq, "%8d ", dst->mpb_size);
32091 seq_printf(seq, "0x%04x", dst->module_flags);
32092 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
32093 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
32094 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
32095 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
32096 - seq_printf(seq, "Vendor info : %s\n",
32097 - chtostr((u8 *) (work32 + 2), 16));
32098 - seq_printf(seq, "Product info : %s\n",
32099 - chtostr((u8 *) (work32 + 6), 16));
32100 - seq_printf(seq, "Description : %s\n",
32101 - chtostr((u8 *) (work32 + 10), 16));
32102 - seq_printf(seq, "Product rev. : %s\n",
32103 - chtostr((u8 *) (work32 + 14), 8));
32104 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
32105 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
32106 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
32107 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
32108
32109 seq_printf(seq, "Serial number : ");
32110 print_serial_number(seq, (u8 *) (work32 + 16),
32111 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
32112 }
32113
32114 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
32115 - seq_printf(seq, "Module name : %s\n",
32116 - chtostr(result.module_name, 24));
32117 - seq_printf(seq, "Module revision : %s\n",
32118 - chtostr(result.module_rev, 8));
32119 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
32120 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
32121
32122 seq_printf(seq, "Serial number : ");
32123 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
32124 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
32125 return 0;
32126 }
32127
32128 - seq_printf(seq, "Device name : %s\n",
32129 - chtostr(result.device_name, 64));
32130 - seq_printf(seq, "Service name : %s\n",
32131 - chtostr(result.service_name, 64));
32132 - seq_printf(seq, "Physical name : %s\n",
32133 - chtostr(result.physical_location, 64));
32134 - seq_printf(seq, "Instance number : %s\n",
32135 - chtostr(result.instance_number, 4));
32136 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32137 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32138 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32139 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32140
32141 return 0;
32142 }
32143 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32144 index a8c08f3..155fe3d 100644
32145 --- a/drivers/message/i2o/iop.c
32146 +++ b/drivers/message/i2o/iop.c
32147 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32148
32149 spin_lock_irqsave(&c->context_list_lock, flags);
32150
32151 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32152 - atomic_inc(&c->context_list_counter);
32153 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32154 + atomic_inc_unchecked(&c->context_list_counter);
32155
32156 - entry->context = atomic_read(&c->context_list_counter);
32157 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32158
32159 list_add(&entry->list, &c->context_list);
32160
32161 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32162
32163 #if BITS_PER_LONG == 64
32164 spin_lock_init(&c->context_list_lock);
32165 - atomic_set(&c->context_list_counter, 0);
32166 + atomic_set_unchecked(&c->context_list_counter, 0);
32167 INIT_LIST_HEAD(&c->context_list);
32168 #endif
32169
32170 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
32171 index a20e1c4..4f57255 100644
32172 --- a/drivers/mfd/ab3100-core.c
32173 +++ b/drivers/mfd/ab3100-core.c
32174 @@ -809,7 +809,7 @@ struct ab_family_id {
32175 char *name;
32176 };
32177
32178 -static const struct ab_family_id ids[] __devinitdata = {
32179 +static const struct ab_family_id ids[] __devinitconst = {
32180 /* AB3100 */
32181 {
32182 .id = 0xc0,
32183 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32184 index f12720d..3c251fd 100644
32185 --- a/drivers/mfd/abx500-core.c
32186 +++ b/drivers/mfd/abx500-core.c
32187 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
32188
32189 struct abx500_device_entry {
32190 struct list_head list;
32191 - struct abx500_ops ops;
32192 + abx500_ops_no_const ops;
32193 struct device *dev;
32194 };
32195
32196 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32197 index 5c2a06a..8fa077c 100644
32198 --- a/drivers/mfd/janz-cmodio.c
32199 +++ b/drivers/mfd/janz-cmodio.c
32200 @@ -13,6 +13,7 @@
32201
32202 #include <linux/kernel.h>
32203 #include <linux/module.h>
32204 +#include <linux/slab.h>
32205 #include <linux/init.h>
32206 #include <linux/pci.h>
32207 #include <linux/interrupt.h>
32208 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
32209 index 5fe5de1..af64f53 100644
32210 --- a/drivers/mfd/wm8350-i2c.c
32211 +++ b/drivers/mfd/wm8350-i2c.c
32212 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
32213 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32214 int ret;
32215
32216 + pax_track_stack();
32217 +
32218 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32219 return -EINVAL;
32220
32221 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32222 index 8b51cd6..f628f8d 100644
32223 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
32224 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32225 @@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
32226 * the lid is closed. This leads to interrupts as soon as a little move
32227 * is done.
32228 */
32229 - atomic_inc(&lis3_dev.count);
32230 + atomic_inc_unchecked(&lis3_dev.count);
32231
32232 wake_up_interruptible(&lis3_dev.misc_wait);
32233 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
32234 @@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32235 if (lis3_dev.pm_dev)
32236 pm_runtime_get_sync(lis3_dev.pm_dev);
32237
32238 - atomic_set(&lis3_dev.count, 0);
32239 + atomic_set_unchecked(&lis3_dev.count, 0);
32240 return 0;
32241 }
32242
32243 @@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32244 add_wait_queue(&lis3_dev.misc_wait, &wait);
32245 while (true) {
32246 set_current_state(TASK_INTERRUPTIBLE);
32247 - data = atomic_xchg(&lis3_dev.count, 0);
32248 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
32249 if (data)
32250 break;
32251
32252 @@ -585,7 +585,7 @@ out:
32253 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32254 {
32255 poll_wait(file, &lis3_dev.misc_wait, wait);
32256 - if (atomic_read(&lis3_dev.count))
32257 + if (atomic_read_unchecked(&lis3_dev.count))
32258 return POLLIN | POLLRDNORM;
32259 return 0;
32260 }
32261 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32262 index a193958..4d7ecd2 100644
32263 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32264 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32265 @@ -265,7 +265,7 @@ struct lis3lv02d {
32266 struct input_polled_dev *idev; /* input device */
32267 struct platform_device *pdev; /* platform device */
32268 struct regulator_bulk_data regulators[2];
32269 - atomic_t count; /* interrupt count after last read */
32270 + atomic_unchecked_t count; /* interrupt count after last read */
32271 union axis_conversion ac; /* hw -> logical axis */
32272 int mapped_btns[3];
32273
32274 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32275 index 2f30bad..c4c13d0 100644
32276 --- a/drivers/misc/sgi-gru/gruhandles.c
32277 +++ b/drivers/misc/sgi-gru/gruhandles.c
32278 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32279 unsigned long nsec;
32280
32281 nsec = CLKS2NSEC(clks);
32282 - atomic_long_inc(&mcs_op_statistics[op].count);
32283 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32284 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32285 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32286 if (mcs_op_statistics[op].max < nsec)
32287 mcs_op_statistics[op].max = nsec;
32288 }
32289 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32290 index 7768b87..f8aac38 100644
32291 --- a/drivers/misc/sgi-gru/gruprocfs.c
32292 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32293 @@ -32,9 +32,9 @@
32294
32295 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32296
32297 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32298 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32299 {
32300 - unsigned long val = atomic_long_read(v);
32301 + unsigned long val = atomic_long_read_unchecked(v);
32302
32303 seq_printf(s, "%16lu %s\n", val, id);
32304 }
32305 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32306
32307 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32308 for (op = 0; op < mcsop_last; op++) {
32309 - count = atomic_long_read(&mcs_op_statistics[op].count);
32310 - total = atomic_long_read(&mcs_op_statistics[op].total);
32311 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32312 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32313 max = mcs_op_statistics[op].max;
32314 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32315 count ? total / count : 0, max);
32316 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32317 index 5c3ce24..4915ccb 100644
32318 --- a/drivers/misc/sgi-gru/grutables.h
32319 +++ b/drivers/misc/sgi-gru/grutables.h
32320 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32321 * GRU statistics.
32322 */
32323 struct gru_stats_s {
32324 - atomic_long_t vdata_alloc;
32325 - atomic_long_t vdata_free;
32326 - atomic_long_t gts_alloc;
32327 - atomic_long_t gts_free;
32328 - atomic_long_t gms_alloc;
32329 - atomic_long_t gms_free;
32330 - atomic_long_t gts_double_allocate;
32331 - atomic_long_t assign_context;
32332 - atomic_long_t assign_context_failed;
32333 - atomic_long_t free_context;
32334 - atomic_long_t load_user_context;
32335 - atomic_long_t load_kernel_context;
32336 - atomic_long_t lock_kernel_context;
32337 - atomic_long_t unlock_kernel_context;
32338 - atomic_long_t steal_user_context;
32339 - atomic_long_t steal_kernel_context;
32340 - atomic_long_t steal_context_failed;
32341 - atomic_long_t nopfn;
32342 - atomic_long_t asid_new;
32343 - atomic_long_t asid_next;
32344 - atomic_long_t asid_wrap;
32345 - atomic_long_t asid_reuse;
32346 - atomic_long_t intr;
32347 - atomic_long_t intr_cbr;
32348 - atomic_long_t intr_tfh;
32349 - atomic_long_t intr_spurious;
32350 - atomic_long_t intr_mm_lock_failed;
32351 - atomic_long_t call_os;
32352 - atomic_long_t call_os_wait_queue;
32353 - atomic_long_t user_flush_tlb;
32354 - atomic_long_t user_unload_context;
32355 - atomic_long_t user_exception;
32356 - atomic_long_t set_context_option;
32357 - atomic_long_t check_context_retarget_intr;
32358 - atomic_long_t check_context_unload;
32359 - atomic_long_t tlb_dropin;
32360 - atomic_long_t tlb_preload_page;
32361 - atomic_long_t tlb_dropin_fail_no_asid;
32362 - atomic_long_t tlb_dropin_fail_upm;
32363 - atomic_long_t tlb_dropin_fail_invalid;
32364 - atomic_long_t tlb_dropin_fail_range_active;
32365 - atomic_long_t tlb_dropin_fail_idle;
32366 - atomic_long_t tlb_dropin_fail_fmm;
32367 - atomic_long_t tlb_dropin_fail_no_exception;
32368 - atomic_long_t tfh_stale_on_fault;
32369 - atomic_long_t mmu_invalidate_range;
32370 - atomic_long_t mmu_invalidate_page;
32371 - atomic_long_t flush_tlb;
32372 - atomic_long_t flush_tlb_gru;
32373 - atomic_long_t flush_tlb_gru_tgh;
32374 - atomic_long_t flush_tlb_gru_zero_asid;
32375 -
32376 - atomic_long_t copy_gpa;
32377 - atomic_long_t read_gpa;
32378 -
32379 - atomic_long_t mesq_receive;
32380 - atomic_long_t mesq_receive_none;
32381 - atomic_long_t mesq_send;
32382 - atomic_long_t mesq_send_failed;
32383 - atomic_long_t mesq_noop;
32384 - atomic_long_t mesq_send_unexpected_error;
32385 - atomic_long_t mesq_send_lb_overflow;
32386 - atomic_long_t mesq_send_qlimit_reached;
32387 - atomic_long_t mesq_send_amo_nacked;
32388 - atomic_long_t mesq_send_put_nacked;
32389 - atomic_long_t mesq_page_overflow;
32390 - atomic_long_t mesq_qf_locked;
32391 - atomic_long_t mesq_qf_noop_not_full;
32392 - atomic_long_t mesq_qf_switch_head_failed;
32393 - atomic_long_t mesq_qf_unexpected_error;
32394 - atomic_long_t mesq_noop_unexpected_error;
32395 - atomic_long_t mesq_noop_lb_overflow;
32396 - atomic_long_t mesq_noop_qlimit_reached;
32397 - atomic_long_t mesq_noop_amo_nacked;
32398 - atomic_long_t mesq_noop_put_nacked;
32399 - atomic_long_t mesq_noop_page_overflow;
32400 + atomic_long_unchecked_t vdata_alloc;
32401 + atomic_long_unchecked_t vdata_free;
32402 + atomic_long_unchecked_t gts_alloc;
32403 + atomic_long_unchecked_t gts_free;
32404 + atomic_long_unchecked_t gms_alloc;
32405 + atomic_long_unchecked_t gms_free;
32406 + atomic_long_unchecked_t gts_double_allocate;
32407 + atomic_long_unchecked_t assign_context;
32408 + atomic_long_unchecked_t assign_context_failed;
32409 + atomic_long_unchecked_t free_context;
32410 + atomic_long_unchecked_t load_user_context;
32411 + atomic_long_unchecked_t load_kernel_context;
32412 + atomic_long_unchecked_t lock_kernel_context;
32413 + atomic_long_unchecked_t unlock_kernel_context;
32414 + atomic_long_unchecked_t steal_user_context;
32415 + atomic_long_unchecked_t steal_kernel_context;
32416 + atomic_long_unchecked_t steal_context_failed;
32417 + atomic_long_unchecked_t nopfn;
32418 + atomic_long_unchecked_t asid_new;
32419 + atomic_long_unchecked_t asid_next;
32420 + atomic_long_unchecked_t asid_wrap;
32421 + atomic_long_unchecked_t asid_reuse;
32422 + atomic_long_unchecked_t intr;
32423 + atomic_long_unchecked_t intr_cbr;
32424 + atomic_long_unchecked_t intr_tfh;
32425 + atomic_long_unchecked_t intr_spurious;
32426 + atomic_long_unchecked_t intr_mm_lock_failed;
32427 + atomic_long_unchecked_t call_os;
32428 + atomic_long_unchecked_t call_os_wait_queue;
32429 + atomic_long_unchecked_t user_flush_tlb;
32430 + atomic_long_unchecked_t user_unload_context;
32431 + atomic_long_unchecked_t user_exception;
32432 + atomic_long_unchecked_t set_context_option;
32433 + atomic_long_unchecked_t check_context_retarget_intr;
32434 + atomic_long_unchecked_t check_context_unload;
32435 + atomic_long_unchecked_t tlb_dropin;
32436 + atomic_long_unchecked_t tlb_preload_page;
32437 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32438 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32439 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32440 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32441 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32442 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32443 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32444 + atomic_long_unchecked_t tfh_stale_on_fault;
32445 + atomic_long_unchecked_t mmu_invalidate_range;
32446 + atomic_long_unchecked_t mmu_invalidate_page;
32447 + atomic_long_unchecked_t flush_tlb;
32448 + atomic_long_unchecked_t flush_tlb_gru;
32449 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32450 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32451 +
32452 + atomic_long_unchecked_t copy_gpa;
32453 + atomic_long_unchecked_t read_gpa;
32454 +
32455 + atomic_long_unchecked_t mesq_receive;
32456 + atomic_long_unchecked_t mesq_receive_none;
32457 + atomic_long_unchecked_t mesq_send;
32458 + atomic_long_unchecked_t mesq_send_failed;
32459 + atomic_long_unchecked_t mesq_noop;
32460 + atomic_long_unchecked_t mesq_send_unexpected_error;
32461 + atomic_long_unchecked_t mesq_send_lb_overflow;
32462 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32463 + atomic_long_unchecked_t mesq_send_amo_nacked;
32464 + atomic_long_unchecked_t mesq_send_put_nacked;
32465 + atomic_long_unchecked_t mesq_page_overflow;
32466 + atomic_long_unchecked_t mesq_qf_locked;
32467 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32468 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32469 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32470 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32471 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32472 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32473 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32474 + atomic_long_unchecked_t mesq_noop_put_nacked;
32475 + atomic_long_unchecked_t mesq_noop_page_overflow;
32476
32477 };
32478
32479 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32480 tghop_invalidate, mcsop_last};
32481
32482 struct mcs_op_statistic {
32483 - atomic_long_t count;
32484 - atomic_long_t total;
32485 + atomic_long_unchecked_t count;
32486 + atomic_long_unchecked_t total;
32487 unsigned long max;
32488 };
32489
32490 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32491
32492 #define STAT(id) do { \
32493 if (gru_options & OPT_STATS) \
32494 - atomic_long_inc(&gru_stats.id); \
32495 + atomic_long_inc_unchecked(&gru_stats.id); \
32496 } while (0)
32497
32498 #ifdef CONFIG_SGI_GRU_DEBUG
32499 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32500 index 851b2f2..a4ec097 100644
32501 --- a/drivers/misc/sgi-xp/xp.h
32502 +++ b/drivers/misc/sgi-xp/xp.h
32503 @@ -289,7 +289,7 @@ struct xpc_interface {
32504 xpc_notify_func, void *);
32505 void (*received) (short, int, void *);
32506 enum xp_retval (*partid_to_nasids) (short, void *);
32507 -};
32508 +} __no_const;
32509
32510 extern struct xpc_interface xpc_interface;
32511
32512 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32513 index b94d5f7..7f494c5 100644
32514 --- a/drivers/misc/sgi-xp/xpc.h
32515 +++ b/drivers/misc/sgi-xp/xpc.h
32516 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32517 void (*received_payload) (struct xpc_channel *, void *);
32518 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32519 };
32520 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32521
32522 /* struct xpc_partition act_state values (for XPC HB) */
32523
32524 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32525 /* found in xpc_main.c */
32526 extern struct device *xpc_part;
32527 extern struct device *xpc_chan;
32528 -extern struct xpc_arch_operations xpc_arch_ops;
32529 +extern xpc_arch_operations_no_const xpc_arch_ops;
32530 extern int xpc_disengage_timelimit;
32531 extern int xpc_disengage_timedout;
32532 extern int xpc_activate_IRQ_rcvd;
32533 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32534 index 8d082b4..aa749ae 100644
32535 --- a/drivers/misc/sgi-xp/xpc_main.c
32536 +++ b/drivers/misc/sgi-xp/xpc_main.c
32537 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32538 .notifier_call = xpc_system_die,
32539 };
32540
32541 -struct xpc_arch_operations xpc_arch_ops;
32542 +xpc_arch_operations_no_const xpc_arch_ops;
32543
32544 /*
32545 * Timer function to enforce the timelimit on the partition disengage.
32546 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32547 index 26c5286..292d261 100644
32548 --- a/drivers/mmc/host/sdhci-pci.c
32549 +++ b/drivers/mmc/host/sdhci-pci.c
32550 @@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32551 .probe = via_probe,
32552 };
32553
32554 -static const struct pci_device_id pci_ids[] __devinitdata = {
32555 +static const struct pci_device_id pci_ids[] __devinitconst = {
32556 {
32557 .vendor = PCI_VENDOR_ID_RICOH,
32558 .device = PCI_DEVICE_ID_RICOH_R5C822,
32559 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
32560 index e1e122f..d99a6ea 100644
32561 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
32562 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
32563 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
32564 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32565 unsigned long timeo = jiffies + HZ;
32566
32567 + pax_track_stack();
32568 +
32569 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32570 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32571 goto sleep;
32572 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
32573 unsigned long initial_adr;
32574 int initial_len = len;
32575
32576 + pax_track_stack();
32577 +
32578 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32579 adr += chip->start;
32580 initial_adr = adr;
32581 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
32582 int retries = 3;
32583 int ret;
32584
32585 + pax_track_stack();
32586 +
32587 adr += chip->start;
32588
32589 retry:
32590 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
32591 index 179814a..abe9d60 100644
32592 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
32593 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
32594 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
32595 unsigned long cmd_addr;
32596 struct cfi_private *cfi = map->fldrv_priv;
32597
32598 + pax_track_stack();
32599 +
32600 adr += chip->start;
32601
32602 /* Ensure cmd read/writes are aligned. */
32603 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
32604 DECLARE_WAITQUEUE(wait, current);
32605 int wbufsize, z;
32606
32607 + pax_track_stack();
32608 +
32609 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32610 if (adr & (map_bankwidth(map)-1))
32611 return -EINVAL;
32612 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
32613 DECLARE_WAITQUEUE(wait, current);
32614 int ret = 0;
32615
32616 + pax_track_stack();
32617 +
32618 adr += chip->start;
32619
32620 /* Let's determine this according to the interleave only once */
32621 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
32622 unsigned long timeo = jiffies + HZ;
32623 DECLARE_WAITQUEUE(wait, current);
32624
32625 + pax_track_stack();
32626 +
32627 adr += chip->start;
32628
32629 /* Let's determine this according to the interleave only once */
32630 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
32631 unsigned long timeo = jiffies + HZ;
32632 DECLARE_WAITQUEUE(wait, current);
32633
32634 + pax_track_stack();
32635 +
32636 adr += chip->start;
32637
32638 /* Let's determine this according to the interleave only once */
32639 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32640 index f7fbf60..9866457 100644
32641 --- a/drivers/mtd/devices/doc2000.c
32642 +++ b/drivers/mtd/devices/doc2000.c
32643 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32644
32645 /* The ECC will not be calculated correctly if less than 512 is written */
32646 /* DBB-
32647 - if (len != 0x200 && eccbuf)
32648 + if (len != 0x200)
32649 printk(KERN_WARNING
32650 "ECC needs a full sector write (adr: %lx size %lx)\n",
32651 (long) to, (long) len);
32652 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32653 index 241192f..d0c35a3 100644
32654 --- a/drivers/mtd/devices/doc2001.c
32655 +++ b/drivers/mtd/devices/doc2001.c
32656 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32657 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32658
32659 /* Don't allow read past end of device */
32660 - if (from >= this->totlen)
32661 + if (from >= this->totlen || !len)
32662 return -EINVAL;
32663
32664 /* Don't allow a single read to cross a 512-byte block boundary */
32665 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
32666 index 037b399..225a71d 100644
32667 --- a/drivers/mtd/ftl.c
32668 +++ b/drivers/mtd/ftl.c
32669 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
32670 loff_t offset;
32671 uint16_t srcunitswap = cpu_to_le16(srcunit);
32672
32673 + pax_track_stack();
32674 +
32675 eun = &part->EUNInfo[srcunit];
32676 xfer = &part->XferInfo[xferunit];
32677 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32678 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
32679 index d7592e6..31c505c 100644
32680 --- a/drivers/mtd/inftlcore.c
32681 +++ b/drivers/mtd/inftlcore.c
32682 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
32683 struct inftl_oob oob;
32684 size_t retlen;
32685
32686 + pax_track_stack();
32687 +
32688 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32689 "pending=%d)\n", inftl, thisVUC, pendingblock);
32690
32691 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
32692 index 104052e..6232be5 100644
32693 --- a/drivers/mtd/inftlmount.c
32694 +++ b/drivers/mtd/inftlmount.c
32695 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
32696 struct INFTLPartition *ip;
32697 size_t retlen;
32698
32699 + pax_track_stack();
32700 +
32701 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32702
32703 /*
32704 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
32705 index dbfe17b..c7b0918 100644
32706 --- a/drivers/mtd/lpddr/qinfo_probe.c
32707 +++ b/drivers/mtd/lpddr/qinfo_probe.c
32708 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
32709 {
32710 map_word pfow_val[4];
32711
32712 + pax_track_stack();
32713 +
32714 /* Check identification string */
32715 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32716 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
32717 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
32718 index 49e20a4..60fbfa5 100644
32719 --- a/drivers/mtd/mtdchar.c
32720 +++ b/drivers/mtd/mtdchar.c
32721 @@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
32722 u_long size;
32723 struct mtd_info_user info;
32724
32725 + pax_track_stack();
32726 +
32727 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32728
32729 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
32730 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32731 index d527621..2491fab 100644
32732 --- a/drivers/mtd/nand/denali.c
32733 +++ b/drivers/mtd/nand/denali.c
32734 @@ -26,6 +26,7 @@
32735 #include <linux/pci.h>
32736 #include <linux/mtd/mtd.h>
32737 #include <linux/module.h>
32738 +#include <linux/slab.h>
32739
32740 #include "denali.h"
32741
32742 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
32743 index b155666..611b801 100644
32744 --- a/drivers/mtd/nftlcore.c
32745 +++ b/drivers/mtd/nftlcore.c
32746 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
32747 int inplace = 1;
32748 size_t retlen;
32749
32750 + pax_track_stack();
32751 +
32752 memset(BlockMap, 0xff, sizeof(BlockMap));
32753 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32754
32755 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32756 index e3cd1ff..0ea79a3 100644
32757 --- a/drivers/mtd/nftlmount.c
32758 +++ b/drivers/mtd/nftlmount.c
32759 @@ -24,6 +24,7 @@
32760 #include <asm/errno.h>
32761 #include <linux/delay.h>
32762 #include <linux/slab.h>
32763 +#include <linux/sched.h>
32764 #include <linux/mtd/mtd.h>
32765 #include <linux/mtd/nand.h>
32766 #include <linux/mtd/nftl.h>
32767 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
32768 struct mtd_info *mtd = nftl->mbd.mtd;
32769 unsigned int i;
32770
32771 + pax_track_stack();
32772 +
32773 /* Assume logical EraseSize == physical erasesize for starting the scan.
32774 We'll sort it out later if we find a MediaHeader which says otherwise */
32775 /* Actually, we won't. The new DiskOnChip driver has already scanned
32776 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32777 index 6c3fb5a..c542a81 100644
32778 --- a/drivers/mtd/ubi/build.c
32779 +++ b/drivers/mtd/ubi/build.c
32780 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32781 static int __init bytes_str_to_int(const char *str)
32782 {
32783 char *endp;
32784 - unsigned long result;
32785 + unsigned long result, scale = 1;
32786
32787 result = simple_strtoul(str, &endp, 0);
32788 if (str == endp || result >= INT_MAX) {
32789 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32790
32791 switch (*endp) {
32792 case 'G':
32793 - result *= 1024;
32794 + scale *= 1024;
32795 case 'M':
32796 - result *= 1024;
32797 + scale *= 1024;
32798 case 'K':
32799 - result *= 1024;
32800 + scale *= 1024;
32801 if (endp[1] == 'i' && endp[2] == 'B')
32802 endp += 2;
32803 case '\0':
32804 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32805 return -EINVAL;
32806 }
32807
32808 - return result;
32809 + if ((intoverflow_t)result*scale >= INT_MAX) {
32810 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32811 + str);
32812 + return -EINVAL;
32813 + }
32814 +
32815 + return result*scale;
32816 }
32817
32818 /**
32819 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
32820 index d4f7dda..d627d46 100644
32821 --- a/drivers/net/atlx/atl2.c
32822 +++ b/drivers/net/atlx/atl2.c
32823 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32824 */
32825
32826 #define ATL2_PARAM(X, desc) \
32827 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32828 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32829 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32830 MODULE_PARM_DESC(X, desc);
32831 #else
32832 diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
32833 index 87aecdf..ec23470 100644
32834 --- a/drivers/net/bna/bfa_ioc_ct.c
32835 +++ b/drivers/net/bna/bfa_ioc_ct.c
32836 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
32837 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
32838 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
32839
32840 -static struct bfa_ioc_hwif nw_hwif_ct;
32841 +static struct bfa_ioc_hwif nw_hwif_ct = {
32842 + .ioc_pll_init = bfa_ioc_ct_pll_init,
32843 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
32844 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
32845 + .ioc_reg_init = bfa_ioc_ct_reg_init,
32846 + .ioc_map_port = bfa_ioc_ct_map_port,
32847 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
32848 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
32849 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
32850 + .ioc_sync_start = bfa_ioc_ct_sync_start,
32851 + .ioc_sync_join = bfa_ioc_ct_sync_join,
32852 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
32853 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
32854 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
32855 +};
32856
32857 /**
32858 * Called from bfa_ioc_attach() to map asic specific calls.
32859 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
32860 void
32861 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
32862 {
32863 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
32864 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
32865 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
32866 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
32867 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
32868 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
32869 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
32870 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
32871 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
32872 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
32873 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
32874 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
32875 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
32876 -
32877 ioc->ioc_hwif = &nw_hwif_ct;
32878 }
32879
32880 diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
32881 index 8e35b25..c39f205 100644
32882 --- a/drivers/net/bna/bnad.c
32883 +++ b/drivers/net/bna/bnad.c
32884 @@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
32885 struct bna_intr_info *intr_info =
32886 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
32887 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
32888 - struct bna_tx_event_cbfn tx_cbfn;
32889 + static struct bna_tx_event_cbfn tx_cbfn = {
32890 + /* Initialize the tx event handlers */
32891 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
32892 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
32893 + .tx_stall_cbfn = bnad_cb_tx_stall,
32894 + .tx_resume_cbfn = bnad_cb_tx_resume,
32895 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
32896 + };
32897 struct bna_tx *tx;
32898 unsigned long flags;
32899
32900 @@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
32901 tx_config->txq_depth = bnad->txq_depth;
32902 tx_config->tx_type = BNA_TX_T_REGULAR;
32903
32904 - /* Initialize the tx event handlers */
32905 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
32906 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
32907 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
32908 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
32909 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
32910 -
32911 /* Get BNA's resource requirement for one tx object */
32912 spin_lock_irqsave(&bnad->bna_lock, flags);
32913 bna_tx_res_req(bnad->num_txq_per_tx,
32914 @@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
32915 struct bna_intr_info *intr_info =
32916 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
32917 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
32918 - struct bna_rx_event_cbfn rx_cbfn;
32919 + static struct bna_rx_event_cbfn rx_cbfn = {
32920 + /* Initialize the Rx event handlers */
32921 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
32922 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
32923 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
32924 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
32925 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
32926 + .rx_post_cbfn = bnad_cb_rx_post
32927 + };
32928 struct bna_rx *rx;
32929 unsigned long flags;
32930
32931 /* Initialize the Rx object configuration */
32932 bnad_init_rx_config(bnad, rx_config);
32933
32934 - /* Initialize the Rx event handlers */
32935 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
32936 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
32937 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
32938 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
32939 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
32940 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
32941 -
32942 /* Get BNA's resource requirement for one Rx object */
32943 spin_lock_irqsave(&bnad->bna_lock, flags);
32944 bna_rx_res_req(rx_config, res_info);
32945 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
32946 index 4b2b570..31033f4 100644
32947 --- a/drivers/net/bnx2.c
32948 +++ b/drivers/net/bnx2.c
32949 @@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
32950 int rc = 0;
32951 u32 magic, csum;
32952
32953 + pax_track_stack();
32954 +
32955 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
32956 goto test_nvram_done;
32957
32958 diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
32959 index cf3e479..5dc0ecc 100644
32960 --- a/drivers/net/bnx2x/bnx2x_ethtool.c
32961 +++ b/drivers/net/bnx2x/bnx2x_ethtool.c
32962 @@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
32963 int i, rc;
32964 u32 magic, crc;
32965
32966 + pax_track_stack();
32967 +
32968 if (BP_NOMCP(bp))
32969 return 0;
32970
32971 diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
32972 index 9a517c2..a50cfcb 100644
32973 --- a/drivers/net/bnx2x/bnx2x_sp.h
32974 +++ b/drivers/net/bnx2x/bnx2x_sp.h
32975 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32976
32977 int (*wait_comp)(struct bnx2x *bp,
32978 struct bnx2x_rx_mode_ramrod_params *p);
32979 -};
32980 +} __no_const;
32981
32982 /********************** Set multicast group ***********************************/
32983
32984 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
32985 index c5f5479..2e8c260 100644
32986 --- a/drivers/net/cxgb3/l2t.h
32987 +++ b/drivers/net/cxgb3/l2t.h
32988 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32989 */
32990 struct l2t_skb_cb {
32991 arp_failure_handler_func arp_failure_handler;
32992 -};
32993 +} __no_const;
32994
32995 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32996
32997 diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
32998 index b4efa29..c5f2703 100644
32999 --- a/drivers/net/cxgb4/cxgb4_main.c
33000 +++ b/drivers/net/cxgb4/cxgb4_main.c
33001 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct adapter *adap)
33002 unsigned int nchan = adap->params.nports;
33003 struct msix_entry entries[MAX_INGQ + 1];
33004
33005 + pax_track_stack();
33006 +
33007 for (i = 0; i < ARRAY_SIZE(entries); ++i)
33008 entries[i].entry = i;
33009
33010 diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
33011 index d1ec111..12735bc 100644
33012 --- a/drivers/net/cxgb4/t4_hw.c
33013 +++ b/drivers/net/cxgb4/t4_hw.c
33014 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
33015 u8 vpd[VPD_LEN], csum;
33016 unsigned int vpdr_len, kw_offset, id_len;
33017
33018 + pax_track_stack();
33019 +
33020 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
33021 if (ret < 0)
33022 return ret;
33023 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
33024 index 536b3a5..e6f8dcc 100644
33025 --- a/drivers/net/e1000e/82571.c
33026 +++ b/drivers/net/e1000e/82571.c
33027 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
33028 {
33029 struct e1000_hw *hw = &adapter->hw;
33030 struct e1000_mac_info *mac = &hw->mac;
33031 - struct e1000_mac_operations *func = &mac->ops;
33032 + e1000_mac_operations_no_const *func = &mac->ops;
33033 u32 swsm = 0;
33034 u32 swsm2 = 0;
33035 bool force_clear_smbi = false;
33036 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
33037 index e4f4225..24da2ea 100644
33038 --- a/drivers/net/e1000e/es2lan.c
33039 +++ b/drivers/net/e1000e/es2lan.c
33040 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
33041 {
33042 struct e1000_hw *hw = &adapter->hw;
33043 struct e1000_mac_info *mac = &hw->mac;
33044 - struct e1000_mac_operations *func = &mac->ops;
33045 + e1000_mac_operations_no_const *func = &mac->ops;
33046
33047 /* Set media type */
33048 switch (adapter->pdev->device) {
33049 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
33050 index 2967039..ca8c40c 100644
33051 --- a/drivers/net/e1000e/hw.h
33052 +++ b/drivers/net/e1000e/hw.h
33053 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
33054 void (*write_vfta)(struct e1000_hw *, u32, u32);
33055 s32 (*read_mac_addr)(struct e1000_hw *);
33056 };
33057 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33058
33059 /*
33060 * When to use various PHY register access functions:
33061 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
33062 void (*power_up)(struct e1000_hw *);
33063 void (*power_down)(struct e1000_hw *);
33064 };
33065 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33066
33067 /* Function pointers for the NVM. */
33068 struct e1000_nvm_operations {
33069 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
33070 s32 (*validate)(struct e1000_hw *);
33071 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
33072 };
33073 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33074
33075 struct e1000_mac_info {
33076 - struct e1000_mac_operations ops;
33077 + e1000_mac_operations_no_const ops;
33078 u8 addr[ETH_ALEN];
33079 u8 perm_addr[ETH_ALEN];
33080
33081 @@ -872,7 +875,7 @@ struct e1000_mac_info {
33082 };
33083
33084 struct e1000_phy_info {
33085 - struct e1000_phy_operations ops;
33086 + e1000_phy_operations_no_const ops;
33087
33088 enum e1000_phy_type type;
33089
33090 @@ -906,7 +909,7 @@ struct e1000_phy_info {
33091 };
33092
33093 struct e1000_nvm_info {
33094 - struct e1000_nvm_operations ops;
33095 + e1000_nvm_operations_no_const ops;
33096
33097 enum e1000_nvm_type type;
33098 enum e1000_nvm_override override;
33099 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
33100 index fa8677c..196356f 100644
33101 --- a/drivers/net/fealnx.c
33102 +++ b/drivers/net/fealnx.c
33103 @@ -150,7 +150,7 @@ struct chip_info {
33104 int flags;
33105 };
33106
33107 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
33108 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
33109 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33110 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
33111 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
33112 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
33113 index 2a5a34d..be871cc 100644
33114 --- a/drivers/net/hamradio/6pack.c
33115 +++ b/drivers/net/hamradio/6pack.c
33116 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
33117 unsigned char buf[512];
33118 int count1;
33119
33120 + pax_track_stack();
33121 +
33122 if (!count)
33123 return;
33124
33125 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
33126 index 4519a13..f97fcd0 100644
33127 --- a/drivers/net/igb/e1000_hw.h
33128 +++ b/drivers/net/igb/e1000_hw.h
33129 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
33130 s32 (*read_mac_addr)(struct e1000_hw *);
33131 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
33132 };
33133 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33134
33135 struct e1000_phy_operations {
33136 s32 (*acquire)(struct e1000_hw *);
33137 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
33138 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33139 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33140 };
33141 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33142
33143 struct e1000_nvm_operations {
33144 s32 (*acquire)(struct e1000_hw *);
33145 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
33146 s32 (*update)(struct e1000_hw *);
33147 s32 (*validate)(struct e1000_hw *);
33148 };
33149 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33150
33151 struct e1000_info {
33152 s32 (*get_invariants)(struct e1000_hw *);
33153 @@ -350,7 +353,7 @@ struct e1000_info {
33154 extern const struct e1000_info e1000_82575_info;
33155
33156 struct e1000_mac_info {
33157 - struct e1000_mac_operations ops;
33158 + e1000_mac_operations_no_const ops;
33159
33160 u8 addr[6];
33161 u8 perm_addr[6];
33162 @@ -388,7 +391,7 @@ struct e1000_mac_info {
33163 };
33164
33165 struct e1000_phy_info {
33166 - struct e1000_phy_operations ops;
33167 + e1000_phy_operations_no_const ops;
33168
33169 enum e1000_phy_type type;
33170
33171 @@ -423,7 +426,7 @@ struct e1000_phy_info {
33172 };
33173
33174 struct e1000_nvm_info {
33175 - struct e1000_nvm_operations ops;
33176 + e1000_nvm_operations_no_const ops;
33177 enum e1000_nvm_type type;
33178 enum e1000_nvm_override override;
33179
33180 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
33181 s32 (*check_for_ack)(struct e1000_hw *, u16);
33182 s32 (*check_for_rst)(struct e1000_hw *, u16);
33183 };
33184 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33185
33186 struct e1000_mbx_stats {
33187 u32 msgs_tx;
33188 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
33189 };
33190
33191 struct e1000_mbx_info {
33192 - struct e1000_mbx_operations ops;
33193 + e1000_mbx_operations_no_const ops;
33194 struct e1000_mbx_stats stats;
33195 u32 timeout;
33196 u32 usec_delay;
33197 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
33198 index d7ed58f..64cde36 100644
33199 --- a/drivers/net/igbvf/vf.h
33200 +++ b/drivers/net/igbvf/vf.h
33201 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
33202 s32 (*read_mac_addr)(struct e1000_hw *);
33203 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33204 };
33205 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33206
33207 struct e1000_mac_info {
33208 - struct e1000_mac_operations ops;
33209 + e1000_mac_operations_no_const ops;
33210 u8 addr[6];
33211 u8 perm_addr[6];
33212
33213 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
33214 s32 (*check_for_ack)(struct e1000_hw *);
33215 s32 (*check_for_rst)(struct e1000_hw *);
33216 };
33217 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33218
33219 struct e1000_mbx_stats {
33220 u32 msgs_tx;
33221 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
33222 };
33223
33224 struct e1000_mbx_info {
33225 - struct e1000_mbx_operations ops;
33226 + e1000_mbx_operations_no_const ops;
33227 struct e1000_mbx_stats stats;
33228 u32 timeout;
33229 u32 usec_delay;
33230 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
33231 index 6a130eb..1aeb9e4 100644
33232 --- a/drivers/net/ixgb/ixgb_main.c
33233 +++ b/drivers/net/ixgb/ixgb_main.c
33234 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev)
33235 u32 rctl;
33236 int i;
33237
33238 + pax_track_stack();
33239 +
33240 /* Check for Promiscuous and All Multicast modes */
33241
33242 rctl = IXGB_READ_REG(hw, RCTL);
33243 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
33244 index dd7fbeb..44b9bbf 100644
33245 --- a/drivers/net/ixgb/ixgb_param.c
33246 +++ b/drivers/net/ixgb/ixgb_param.c
33247 @@ -261,6 +261,9 @@ void __devinit
33248 ixgb_check_options(struct ixgb_adapter *adapter)
33249 {
33250 int bd = adapter->bd_number;
33251 +
33252 + pax_track_stack();
33253 +
33254 if (bd >= IXGB_MAX_NIC) {
33255 pr_notice("Warning: no configuration for board #%i\n", bd);
33256 pr_notice("Using defaults for all values\n");
33257 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
33258 index e0d970e..1cfdea5 100644
33259 --- a/drivers/net/ixgbe/ixgbe_type.h
33260 +++ b/drivers/net/ixgbe/ixgbe_type.h
33261 @@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
33262 s32 (*update_checksum)(struct ixgbe_hw *);
33263 u16 (*calc_checksum)(struct ixgbe_hw *);
33264 };
33265 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33266
33267 struct ixgbe_mac_operations {
33268 s32 (*init_hw)(struct ixgbe_hw *);
33269 @@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
33270 /* Manageability interface */
33271 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
33272 };
33273 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33274
33275 struct ixgbe_phy_operations {
33276 s32 (*identify)(struct ixgbe_hw *);
33277 @@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
33278 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33279 s32 (*check_overtemp)(struct ixgbe_hw *);
33280 };
33281 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33282
33283 struct ixgbe_eeprom_info {
33284 - struct ixgbe_eeprom_operations ops;
33285 + ixgbe_eeprom_operations_no_const ops;
33286 enum ixgbe_eeprom_type type;
33287 u32 semaphore_delay;
33288 u16 word_size;
33289 @@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
33290
33291 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
33292 struct ixgbe_mac_info {
33293 - struct ixgbe_mac_operations ops;
33294 + ixgbe_mac_operations_no_const ops;
33295 enum ixgbe_mac_type type;
33296 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33297 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33298 @@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
33299 };
33300
33301 struct ixgbe_phy_info {
33302 - struct ixgbe_phy_operations ops;
33303 + ixgbe_phy_operations_no_const ops;
33304 struct mdio_if_info mdio;
33305 enum ixgbe_phy_type type;
33306 u32 id;
33307 @@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
33308 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
33309 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
33310 };
33311 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33312
33313 struct ixgbe_mbx_stats {
33314 u32 msgs_tx;
33315 @@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
33316 };
33317
33318 struct ixgbe_mbx_info {
33319 - struct ixgbe_mbx_operations ops;
33320 + ixgbe_mbx_operations_no_const ops;
33321 struct ixgbe_mbx_stats stats;
33322 u32 timeout;
33323 u32 usec_delay;
33324 diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
33325 index 10306b4..28df758 100644
33326 --- a/drivers/net/ixgbevf/vf.h
33327 +++ b/drivers/net/ixgbevf/vf.h
33328 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
33329 s32 (*clear_vfta)(struct ixgbe_hw *);
33330 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
33331 };
33332 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33333
33334 enum ixgbe_mac_type {
33335 ixgbe_mac_unknown = 0,
33336 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
33337 };
33338
33339 struct ixgbe_mac_info {
33340 - struct ixgbe_mac_operations ops;
33341 + ixgbe_mac_operations_no_const ops;
33342 u8 addr[6];
33343 u8 perm_addr[6];
33344
33345 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33346 s32 (*check_for_ack)(struct ixgbe_hw *);
33347 s32 (*check_for_rst)(struct ixgbe_hw *);
33348 };
33349 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33350
33351 struct ixgbe_mbx_stats {
33352 u32 msgs_tx;
33353 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33354 };
33355
33356 struct ixgbe_mbx_info {
33357 - struct ixgbe_mbx_operations ops;
33358 + ixgbe_mbx_operations_no_const ops;
33359 struct ixgbe_mbx_stats stats;
33360 u32 timeout;
33361 u32 udelay;
33362 diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
33363 index 27418d3..adf15bb 100644
33364 --- a/drivers/net/ksz884x.c
33365 +++ b/drivers/net/ksz884x.c
33366 @@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
33367 int rc;
33368 u64 counter[TOTAL_PORT_COUNTER_NUM];
33369
33370 + pax_track_stack();
33371 +
33372 mutex_lock(&hw_priv->lock);
33373 n = SWITCH_PORT_NUM;
33374 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
33375 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
33376 index f0ee35d..3831c8a 100644
33377 --- a/drivers/net/mlx4/main.c
33378 +++ b/drivers/net/mlx4/main.c
33379 @@ -40,6 +40,7 @@
33380 #include <linux/dma-mapping.h>
33381 #include <linux/slab.h>
33382 #include <linux/io-mapping.h>
33383 +#include <linux/sched.h>
33384
33385 #include <linux/mlx4/device.h>
33386 #include <linux/mlx4/doorbell.h>
33387 @@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
33388 u64 icm_size;
33389 int err;
33390
33391 + pax_track_stack();
33392 +
33393 err = mlx4_QUERY_FW(dev);
33394 if (err) {
33395 if (err == -EACCES)
33396 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
33397 index ed47585..5e5be8f 100644
33398 --- a/drivers/net/niu.c
33399 +++ b/drivers/net/niu.c
33400 @@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
33401 int i, num_irqs, err;
33402 u8 first_ldg;
33403
33404 + pax_track_stack();
33405 +
33406 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
33407 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
33408 ldg_num_map[i] = first_ldg + i;
33409 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
33410 index 80b6f36..5cd8938 100644
33411 --- a/drivers/net/pcnet32.c
33412 +++ b/drivers/net/pcnet32.c
33413 @@ -270,7 +270,7 @@ struct pcnet32_private {
33414 struct sk_buff **rx_skbuff;
33415 dma_addr_t *tx_dma_addr;
33416 dma_addr_t *rx_dma_addr;
33417 - struct pcnet32_access a;
33418 + struct pcnet32_access *a;
33419 spinlock_t lock; /* Guard lock */
33420 unsigned int cur_rx, cur_tx; /* The next free ring entry */
33421 unsigned int rx_ring_size; /* current rx ring size */
33422 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct net_device *dev)
33423 u16 val;
33424
33425 netif_wake_queue(dev);
33426 - val = lp->a.read_csr(ioaddr, CSR3);
33427 + val = lp->a->read_csr(ioaddr, CSR3);
33428 val &= 0x00ff;
33429 - lp->a.write_csr(ioaddr, CSR3, val);
33430 + lp->a->write_csr(ioaddr, CSR3, val);
33431 napi_enable(&lp->napi);
33432 }
33433
33434 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
33435 r = mii_link_ok(&lp->mii_if);
33436 } else if (lp->chip_version >= PCNET32_79C970A) {
33437 ulong ioaddr = dev->base_addr; /* card base I/O address */
33438 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
33439 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
33440 } else { /* can not detect link on really old chips */
33441 r = 1;
33442 }
33443 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
33444 pcnet32_netif_stop(dev);
33445
33446 spin_lock_irqsave(&lp->lock, flags);
33447 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33448 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33449
33450 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
33451
33452 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
33453 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33454 {
33455 struct pcnet32_private *lp = netdev_priv(dev);
33456 - struct pcnet32_access *a = &lp->a; /* access to registers */
33457 + struct pcnet32_access *a = lp->a; /* access to registers */
33458 ulong ioaddr = dev->base_addr; /* card base I/O address */
33459 struct sk_buff *skb; /* sk buff */
33460 int x, i; /* counters */
33461 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33462 pcnet32_netif_stop(dev);
33463
33464 spin_lock_irqsave(&lp->lock, flags);
33465 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33466 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33467
33468 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
33469
33470 /* Reset the PCNET32 */
33471 - lp->a.reset(ioaddr);
33472 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33473 + lp->a->reset(ioaddr);
33474 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33475
33476 /* switch pcnet32 to 32bit mode */
33477 - lp->a.write_bcr(ioaddr, 20, 2);
33478 + lp->a->write_bcr(ioaddr, 20, 2);
33479
33480 /* purge & init rings but don't actually restart */
33481 pcnet32_restart(dev, 0x0000);
33482
33483 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33484 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33485
33486 /* Initialize Transmit buffers. */
33487 size = data_len + 15;
33488 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33489
33490 /* set int loopback in CSR15 */
33491 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
33492 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
33493 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
33494
33495 teststatus = cpu_to_le16(0x8000);
33496 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33497 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33498
33499 /* Check status of descriptors */
33500 for (x = 0; x < numbuffs; x++) {
33501 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33502 }
33503 }
33504
33505 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33506 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33507 wmb();
33508 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
33509 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
33510 @@ -1015,7 +1015,7 @@ clean_up:
33511 pcnet32_restart(dev, CSR0_NORMAL);
33512 } else {
33513 pcnet32_purge_rx_ring(dev);
33514 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33515 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33516 }
33517 spin_unlock_irqrestore(&lp->lock, flags);
33518
33519 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct net_device *dev,
33520 enum ethtool_phys_id_state state)
33521 {
33522 struct pcnet32_private *lp = netdev_priv(dev);
33523 - struct pcnet32_access *a = &lp->a;
33524 + struct pcnet32_access *a = lp->a;
33525 ulong ioaddr = dev->base_addr;
33526 unsigned long flags;
33527 int i;
33528 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
33529 {
33530 int csr5;
33531 struct pcnet32_private *lp = netdev_priv(dev);
33532 - struct pcnet32_access *a = &lp->a;
33533 + struct pcnet32_access *a = lp->a;
33534 ulong ioaddr = dev->base_addr;
33535 int ticks;
33536
33537 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33538 spin_lock_irqsave(&lp->lock, flags);
33539 if (pcnet32_tx(dev)) {
33540 /* reset the chip to clear the error condition, then restart */
33541 - lp->a.reset(ioaddr);
33542 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33543 + lp->a->reset(ioaddr);
33544 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33545 pcnet32_restart(dev, CSR0_START);
33546 netif_wake_queue(dev);
33547 }
33548 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33549 __napi_complete(napi);
33550
33551 /* clear interrupt masks */
33552 - val = lp->a.read_csr(ioaddr, CSR3);
33553 + val = lp->a->read_csr(ioaddr, CSR3);
33554 val &= 0x00ff;
33555 - lp->a.write_csr(ioaddr, CSR3, val);
33556 + lp->a->write_csr(ioaddr, CSR3, val);
33557
33558 /* Set interrupt enable. */
33559 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
33560 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
33561
33562 spin_unlock_irqrestore(&lp->lock, flags);
33563 }
33564 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33565 int i, csr0;
33566 u16 *buff = ptr;
33567 struct pcnet32_private *lp = netdev_priv(dev);
33568 - struct pcnet32_access *a = &lp->a;
33569 + struct pcnet32_access *a = lp->a;
33570 ulong ioaddr = dev->base_addr;
33571 unsigned long flags;
33572
33573 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33574 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
33575 if (lp->phymask & (1 << j)) {
33576 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
33577 - lp->a.write_bcr(ioaddr, 33,
33578 + lp->a->write_bcr(ioaddr, 33,
33579 (j << 5) | i);
33580 - *buff++ = lp->a.read_bcr(ioaddr, 34);
33581 + *buff++ = lp->a->read_bcr(ioaddr, 34);
33582 }
33583 }
33584 }
33585 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33586 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
33587 lp->options |= PCNET32_PORT_FD;
33588
33589 - lp->a = *a;
33590 + lp->a = a;
33591
33592 /* prior to register_netdev, dev->name is not yet correct */
33593 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
33594 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33595 if (lp->mii) {
33596 /* lp->phycount and lp->phymask are set to 0 by memset above */
33597
33598 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33599 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33600 /* scan for PHYs */
33601 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33602 unsigned short id1, id2;
33603 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33604 pr_info("Found PHY %04x:%04x at address %d\n",
33605 id1, id2, i);
33606 }
33607 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33608 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33609 if (lp->phycount > 1)
33610 lp->options |= PCNET32_PORT_MII;
33611 }
33612 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_device *dev)
33613 }
33614
33615 /* Reset the PCNET32 */
33616 - lp->a.reset(ioaddr);
33617 + lp->a->reset(ioaddr);
33618
33619 /* switch pcnet32 to 32bit mode */
33620 - lp->a.write_bcr(ioaddr, 20, 2);
33621 + lp->a->write_bcr(ioaddr, 20, 2);
33622
33623 netif_printk(lp, ifup, KERN_DEBUG, dev,
33624 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
33625 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_device *dev)
33626 (u32) (lp->init_dma_addr));
33627
33628 /* set/reset autoselect bit */
33629 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
33630 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
33631 if (lp->options & PCNET32_PORT_ASEL)
33632 val |= 2;
33633 - lp->a.write_bcr(ioaddr, 2, val);
33634 + lp->a->write_bcr(ioaddr, 2, val);
33635
33636 /* handle full duplex setting */
33637 if (lp->mii_if.full_duplex) {
33638 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
33639 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
33640 if (lp->options & PCNET32_PORT_FD) {
33641 val |= 1;
33642 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
33643 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_device *dev)
33644 if (lp->chip_version == 0x2627)
33645 val |= 3;
33646 }
33647 - lp->a.write_bcr(ioaddr, 9, val);
33648 + lp->a->write_bcr(ioaddr, 9, val);
33649 }
33650
33651 /* set/reset GPSI bit in test register */
33652 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
33653 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
33654 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
33655 val |= 0x10;
33656 - lp->a.write_csr(ioaddr, 124, val);
33657 + lp->a->write_csr(ioaddr, 124, val);
33658
33659 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
33660 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
33661 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_device *dev)
33662 * duplex, and/or enable auto negotiation, and clear DANAS
33663 */
33664 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
33665 - lp->a.write_bcr(ioaddr, 32,
33666 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
33667 + lp->a->write_bcr(ioaddr, 32,
33668 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
33669 /* disable Auto Negotiation, set 10Mpbs, HD */
33670 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
33671 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
33672 if (lp->options & PCNET32_PORT_FD)
33673 val |= 0x10;
33674 if (lp->options & PCNET32_PORT_100)
33675 val |= 0x08;
33676 - lp->a.write_bcr(ioaddr, 32, val);
33677 + lp->a->write_bcr(ioaddr, 32, val);
33678 } else {
33679 if (lp->options & PCNET32_PORT_ASEL) {
33680 - lp->a.write_bcr(ioaddr, 32,
33681 - lp->a.read_bcr(ioaddr,
33682 + lp->a->write_bcr(ioaddr, 32,
33683 + lp->a->read_bcr(ioaddr,
33684 32) | 0x0080);
33685 /* enable auto negotiate, setup, disable fd */
33686 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
33687 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
33688 val |= 0x20;
33689 - lp->a.write_bcr(ioaddr, 32, val);
33690 + lp->a->write_bcr(ioaddr, 32, val);
33691 }
33692 }
33693 } else {
33694 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_device *dev)
33695 * There is really no good other way to handle multiple PHYs
33696 * other than turning off all automatics
33697 */
33698 - val = lp->a.read_bcr(ioaddr, 2);
33699 - lp->a.write_bcr(ioaddr, 2, val & ~2);
33700 - val = lp->a.read_bcr(ioaddr, 32);
33701 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33702 + val = lp->a->read_bcr(ioaddr, 2);
33703 + lp->a->write_bcr(ioaddr, 2, val & ~2);
33704 + val = lp->a->read_bcr(ioaddr, 32);
33705 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33706
33707 if (!(lp->options & PCNET32_PORT_ASEL)) {
33708 /* setup ecmd */
33709 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_device *dev)
33710 ethtool_cmd_speed_set(&ecmd,
33711 (lp->options & PCNET32_PORT_100) ?
33712 SPEED_100 : SPEED_10);
33713 - bcr9 = lp->a.read_bcr(ioaddr, 9);
33714 + bcr9 = lp->a->read_bcr(ioaddr, 9);
33715
33716 if (lp->options & PCNET32_PORT_FD) {
33717 ecmd.duplex = DUPLEX_FULL;
33718 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_device *dev)
33719 ecmd.duplex = DUPLEX_HALF;
33720 bcr9 |= ~(1 << 0);
33721 }
33722 - lp->a.write_bcr(ioaddr, 9, bcr9);
33723 + lp->a->write_bcr(ioaddr, 9, bcr9);
33724 }
33725
33726 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33727 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_device *dev)
33728
33729 #ifdef DO_DXSUFLO
33730 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
33731 - val = lp->a.read_csr(ioaddr, CSR3);
33732 + val = lp->a->read_csr(ioaddr, CSR3);
33733 val |= 0x40;
33734 - lp->a.write_csr(ioaddr, CSR3, val);
33735 + lp->a->write_csr(ioaddr, CSR3, val);
33736 }
33737 #endif
33738
33739 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_device *dev)
33740 napi_enable(&lp->napi);
33741
33742 /* Re-initialize the PCNET32, and start it when done. */
33743 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33744 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
33745 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33746 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
33747
33748 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33749 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33750 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33751 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
33752
33753 netif_start_queue(dev);
33754
33755 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_device *dev)
33756
33757 i = 0;
33758 while (i++ < 100)
33759 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33760 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33761 break;
33762 /*
33763 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
33764 * reports that doing so triggers a bug in the '974.
33765 */
33766 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
33767 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
33768
33769 netif_printk(lp, ifup, KERN_DEBUG, dev,
33770 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
33771 i,
33772 (u32) (lp->init_dma_addr),
33773 - lp->a.read_csr(ioaddr, CSR0));
33774 + lp->a->read_csr(ioaddr, CSR0));
33775
33776 spin_unlock_irqrestore(&lp->lock, flags);
33777
33778 @@ -2218,7 +2218,7 @@ err_free_ring:
33779 * Switch back to 16bit mode to avoid problems with dumb
33780 * DOS packet driver after a warm reboot
33781 */
33782 - lp->a.write_bcr(ioaddr, 20, 4);
33783 + lp->a->write_bcr(ioaddr, 20, 4);
33784
33785 err_free_irq:
33786 spin_unlock_irqrestore(&lp->lock, flags);
33787 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
33788
33789 /* wait for stop */
33790 for (i = 0; i < 100; i++)
33791 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
33792 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
33793 break;
33794
33795 if (i >= 100)
33796 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
33797 return;
33798
33799 /* ReInit Ring */
33800 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33801 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
33802 i = 0;
33803 while (i++ < 1000)
33804 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33805 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33806 break;
33807
33808 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
33809 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
33810 }
33811
33812 static void pcnet32_tx_timeout(struct net_device *dev)
33813 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
33814 /* Transmitter timeout, serious problems. */
33815 if (pcnet32_debug & NETIF_MSG_DRV)
33816 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
33817 - dev->name, lp->a.read_csr(ioaddr, CSR0));
33818 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33819 + dev->name, lp->a->read_csr(ioaddr, CSR0));
33820 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33821 dev->stats.tx_errors++;
33822 if (netif_msg_tx_err(lp)) {
33823 int i;
33824 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
33825
33826 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
33827 "%s() called, csr0 %4.4x\n",
33828 - __func__, lp->a.read_csr(ioaddr, CSR0));
33829 + __func__, lp->a->read_csr(ioaddr, CSR0));
33830
33831 /* Default status -- will not enable Successful-TxDone
33832 * interrupt when that option is available to us.
33833 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
33834 dev->stats.tx_bytes += skb->len;
33835
33836 /* Trigger an immediate send poll. */
33837 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33838 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33839
33840 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
33841 lp->tx_full = 1;
33842 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
33843
33844 spin_lock(&lp->lock);
33845
33846 - csr0 = lp->a.read_csr(ioaddr, CSR0);
33847 + csr0 = lp->a->read_csr(ioaddr, CSR0);
33848 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
33849 if (csr0 == 0xffff)
33850 break; /* PCMCIA remove happened */
33851 /* Acknowledge all of the current interrupt sources ASAP. */
33852 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33853 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33854
33855 netif_printk(lp, intr, KERN_DEBUG, dev,
33856 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
33857 - csr0, lp->a.read_csr(ioaddr, CSR0));
33858 + csr0, lp->a->read_csr(ioaddr, CSR0));
33859
33860 /* Log misc errors. */
33861 if (csr0 & 0x4000)
33862 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
33863 if (napi_schedule_prep(&lp->napi)) {
33864 u16 val;
33865 /* set interrupt masks */
33866 - val = lp->a.read_csr(ioaddr, CSR3);
33867 + val = lp->a->read_csr(ioaddr, CSR3);
33868 val |= 0x5f00;
33869 - lp->a.write_csr(ioaddr, CSR3, val);
33870 + lp->a->write_csr(ioaddr, CSR3, val);
33871
33872 __napi_schedule(&lp->napi);
33873 break;
33874 }
33875 - csr0 = lp->a.read_csr(ioaddr, CSR0);
33876 + csr0 = lp->a->read_csr(ioaddr, CSR0);
33877 }
33878
33879 netif_printk(lp, intr, KERN_DEBUG, dev,
33880 "exiting interrupt, csr0=%#4.4x\n",
33881 - lp->a.read_csr(ioaddr, CSR0));
33882 + lp->a->read_csr(ioaddr, CSR0));
33883
33884 spin_unlock(&lp->lock);
33885
33886 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_device *dev)
33887
33888 spin_lock_irqsave(&lp->lock, flags);
33889
33890 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33891 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
33892
33893 netif_printk(lp, ifdown, KERN_DEBUG, dev,
33894 "Shutting down ethercard, status was %2.2x\n",
33895 - lp->a.read_csr(ioaddr, CSR0));
33896 + lp->a->read_csr(ioaddr, CSR0));
33897
33898 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
33899 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33900 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33901
33902 /*
33903 * Switch back to 16bit mode to avoid problems with dumb
33904 * DOS packet driver after a warm reboot
33905 */
33906 - lp->a.write_bcr(ioaddr, 20, 4);
33907 + lp->a->write_bcr(ioaddr, 20, 4);
33908
33909 spin_unlock_irqrestore(&lp->lock, flags);
33910
33911 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
33912 unsigned long flags;
33913
33914 spin_lock_irqsave(&lp->lock, flags);
33915 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33916 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
33917 spin_unlock_irqrestore(&lp->lock, flags);
33918
33919 return &dev->stats;
33920 @@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
33921 if (dev->flags & IFF_ALLMULTI) {
33922 ib->filter[0] = cpu_to_le32(~0U);
33923 ib->filter[1] = cpu_to_le32(~0U);
33924 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
33925 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
33926 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
33927 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
33928 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
33929 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
33930 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
33931 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
33932 return;
33933 }
33934 /* clear the multicast filter */
33935 @@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
33936 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
33937 }
33938 for (i = 0; i < 4; i++)
33939 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
33940 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
33941 le16_to_cpu(mcast_table[i]));
33942 }
33943
33944 @@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
33945
33946 spin_lock_irqsave(&lp->lock, flags);
33947 suspended = pcnet32_suspend(dev, &flags, 0);
33948 - csr15 = lp->a.read_csr(ioaddr, CSR15);
33949 + csr15 = lp->a->read_csr(ioaddr, CSR15);
33950 if (dev->flags & IFF_PROMISC) {
33951 /* Log any net taps. */
33952 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
33953 lp->init_block->mode =
33954 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
33955 7);
33956 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
33957 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
33958 } else {
33959 lp->init_block->mode =
33960 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
33961 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
33962 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
33963 pcnet32_load_multicast(dev);
33964 }
33965
33966 if (suspended) {
33967 int csr5;
33968 /* clear SUSPEND (SPND) - CSR5 bit 0 */
33969 - csr5 = lp->a.read_csr(ioaddr, CSR5);
33970 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
33971 + csr5 = lp->a->read_csr(ioaddr, CSR5);
33972 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
33973 } else {
33974 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33975 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33976 pcnet32_restart(dev, CSR0_NORMAL);
33977 netif_wake_queue(dev);
33978 }
33979 @@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
33980 if (!lp->mii)
33981 return 0;
33982
33983 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33984 - val_out = lp->a.read_bcr(ioaddr, 34);
33985 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33986 + val_out = lp->a->read_bcr(ioaddr, 34);
33987
33988 return val_out;
33989 }
33990 @@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
33991 if (!lp->mii)
33992 return;
33993
33994 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33995 - lp->a.write_bcr(ioaddr, 34, val);
33996 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33997 + lp->a->write_bcr(ioaddr, 34, val);
33998 }
33999
34000 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34001 @@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
34002 curr_link = mii_link_ok(&lp->mii_if);
34003 } else {
34004 ulong ioaddr = dev->base_addr; /* card base I/O address */
34005 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34006 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34007 }
34008 if (!curr_link) {
34009 if (prev_link || verbose) {
34010 @@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
34011 (ecmd.duplex == DUPLEX_FULL)
34012 ? "full" : "half");
34013 }
34014 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
34015 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
34016 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
34017 if (lp->mii_if.full_duplex)
34018 bcr9 |= (1 << 0);
34019 else
34020 bcr9 &= ~(1 << 0);
34021 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
34022 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
34023 }
34024 } else {
34025 netif_info(lp, link, dev, "link up\n");
34026 diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
34027 index edfa15d..002bfa9 100644
34028 --- a/drivers/net/ppp_generic.c
34029 +++ b/drivers/net/ppp_generic.c
34030 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34031 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
34032 struct ppp_stats stats;
34033 struct ppp_comp_stats cstats;
34034 - char *vers;
34035
34036 switch (cmd) {
34037 case SIOCGPPPSTATS:
34038 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34039 break;
34040
34041 case SIOCGPPPVER:
34042 - vers = PPP_VERSION;
34043 - if (copy_to_user(addr, vers, strlen(vers) + 1))
34044 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
34045 break;
34046 err = 0;
34047 break;
34048 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
34049 index 6d657ca..d1be94b 100644
34050 --- a/drivers/net/r8169.c
34051 +++ b/drivers/net/r8169.c
34052 @@ -663,12 +663,12 @@ struct rtl8169_private {
34053 struct mdio_ops {
34054 void (*write)(void __iomem *, int, int);
34055 int (*read)(void __iomem *, int);
34056 - } mdio_ops;
34057 + } __no_const mdio_ops;
34058
34059 struct pll_power_ops {
34060 void (*down)(struct rtl8169_private *);
34061 void (*up)(struct rtl8169_private *);
34062 - } pll_power_ops;
34063 + } __no_const pll_power_ops;
34064
34065 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
34066 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
34067 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
34068 index 3c0f131..17f8b02 100644
34069 --- a/drivers/net/sis190.c
34070 +++ b/drivers/net/sis190.c
34071 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
34072 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
34073 struct net_device *dev)
34074 {
34075 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
34076 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
34077 struct sis190_private *tp = netdev_priv(dev);
34078 struct pci_dev *isa_bridge;
34079 u8 reg, tmp8;
34080 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
34081 index 4793df8..44c9849 100644
34082 --- a/drivers/net/sundance.c
34083 +++ b/drivers/net/sundance.c
34084 @@ -218,7 +218,7 @@ enum {
34085 struct pci_id_info {
34086 const char *name;
34087 };
34088 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34089 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34090 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34091 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34092 {"D-Link DFE-580TX 4 port Server Adapter"},
34093 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
34094 index 2ea456d..3ad9523 100644
34095 --- a/drivers/net/tg3.h
34096 +++ b/drivers/net/tg3.h
34097 @@ -134,6 +134,7 @@
34098 #define CHIPREV_ID_5750_A0 0x4000
34099 #define CHIPREV_ID_5750_A1 0x4001
34100 #define CHIPREV_ID_5750_A3 0x4003
34101 +#define CHIPREV_ID_5750_C1 0x4201
34102 #define CHIPREV_ID_5750_C2 0x4202
34103 #define CHIPREV_ID_5752_A0_HW 0x5000
34104 #define CHIPREV_ID_5752_A0 0x6000
34105 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
34106 index 515f122..41dd273 100644
34107 --- a/drivers/net/tokenring/abyss.c
34108 +++ b/drivers/net/tokenring/abyss.c
34109 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
34110
34111 static int __init abyss_init (void)
34112 {
34113 - abyss_netdev_ops = tms380tr_netdev_ops;
34114 + pax_open_kernel();
34115 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34116
34117 - abyss_netdev_ops.ndo_open = abyss_open;
34118 - abyss_netdev_ops.ndo_stop = abyss_close;
34119 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
34120 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
34121 + pax_close_kernel();
34122
34123 return pci_register_driver(&abyss_driver);
34124 }
34125 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
34126 index 6153cfd..cf69c1c 100644
34127 --- a/drivers/net/tokenring/madgemc.c
34128 +++ b/drivers/net/tokenring/madgemc.c
34129 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
34130
34131 static int __init madgemc_init (void)
34132 {
34133 - madgemc_netdev_ops = tms380tr_netdev_ops;
34134 - madgemc_netdev_ops.ndo_open = madgemc_open;
34135 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34136 + pax_open_kernel();
34137 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34138 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34139 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34140 + pax_close_kernel();
34141
34142 return mca_register_driver (&madgemc_driver);
34143 }
34144 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34145 index 8d362e6..f91cc52 100644
34146 --- a/drivers/net/tokenring/proteon.c
34147 +++ b/drivers/net/tokenring/proteon.c
34148 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34149 struct platform_device *pdev;
34150 int i, num = 0, err = 0;
34151
34152 - proteon_netdev_ops = tms380tr_netdev_ops;
34153 - proteon_netdev_ops.ndo_open = proteon_open;
34154 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34155 + pax_open_kernel();
34156 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34157 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34158 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34159 + pax_close_kernel();
34160
34161 err = platform_driver_register(&proteon_driver);
34162 if (err)
34163 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34164 index 46db5c5..37c1536 100644
34165 --- a/drivers/net/tokenring/skisa.c
34166 +++ b/drivers/net/tokenring/skisa.c
34167 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34168 struct platform_device *pdev;
34169 int i, num = 0, err = 0;
34170
34171 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34172 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34173 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34174 + pax_open_kernel();
34175 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34176 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34177 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34178 + pax_close_kernel();
34179
34180 err = platform_driver_register(&sk_isa_driver);
34181 if (err)
34182 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
34183 index ce90efc..2676f89 100644
34184 --- a/drivers/net/tulip/de2104x.c
34185 +++ b/drivers/net/tulip/de2104x.c
34186 @@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
34187 struct de_srom_info_leaf *il;
34188 void *bufp;
34189
34190 + pax_track_stack();
34191 +
34192 /* download entire eeprom */
34193 for (i = 0; i < DE_EEPROM_WORDS; i++)
34194 ((__le16 *)ee_data)[i] =
34195 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
34196 index 959b410..c97fac2 100644
34197 --- a/drivers/net/tulip/de4x5.c
34198 +++ b/drivers/net/tulip/de4x5.c
34199 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34200 for (i=0; i<ETH_ALEN; i++) {
34201 tmp.addr[i] = dev->dev_addr[i];
34202 }
34203 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34204 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34205 break;
34206
34207 case DE4X5_SET_HWADDR: /* Set the hardware address */
34208 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34209 spin_lock_irqsave(&lp->lock, flags);
34210 memcpy(&statbuf, &lp->pktStats, ioc->len);
34211 spin_unlock_irqrestore(&lp->lock, flags);
34212 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34213 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34214 return -EFAULT;
34215 break;
34216 }
34217 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
34218 index fa5eee9..e074432 100644
34219 --- a/drivers/net/tulip/eeprom.c
34220 +++ b/drivers/net/tulip/eeprom.c
34221 @@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34222 {NULL}};
34223
34224
34225 -static const char *block_name[] __devinitdata = {
34226 +static const char *block_name[] __devinitconst = {
34227 "21140 non-MII",
34228 "21140 MII PHY",
34229 "21142 Serial PHY",
34230 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
34231 index 862eadf..3eee1e6 100644
34232 --- a/drivers/net/tulip/winbond-840.c
34233 +++ b/drivers/net/tulip/winbond-840.c
34234 @@ -236,7 +236,7 @@ struct pci_id_info {
34235 int drv_flags; /* Driver use, intended as capability flags. */
34236 };
34237
34238 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34239 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34240 { /* Sometime a Level-One switch card. */
34241 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34242 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34243 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34244 index 304fe78..db112fa 100644
34245 --- a/drivers/net/usb/hso.c
34246 +++ b/drivers/net/usb/hso.c
34247 @@ -71,7 +71,7 @@
34248 #include <asm/byteorder.h>
34249 #include <linux/serial_core.h>
34250 #include <linux/serial.h>
34251 -
34252 +#include <asm/local.h>
34253
34254 #define MOD_AUTHOR "Option Wireless"
34255 #define MOD_DESCRIPTION "USB High Speed Option driver"
34256 @@ -257,7 +257,7 @@ struct hso_serial {
34257
34258 /* from usb_serial_port */
34259 struct tty_struct *tty;
34260 - int open_count;
34261 + local_t open_count;
34262 spinlock_t serial_lock;
34263
34264 int (*write_data) (struct hso_serial *serial);
34265 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34266 struct urb *urb;
34267
34268 urb = serial->rx_urb[0];
34269 - if (serial->open_count > 0) {
34270 + if (local_read(&serial->open_count) > 0) {
34271 count = put_rxbuf_data(urb, serial);
34272 if (count == -1)
34273 return;
34274 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34275 DUMP1(urb->transfer_buffer, urb->actual_length);
34276
34277 /* Anyone listening? */
34278 - if (serial->open_count == 0)
34279 + if (local_read(&serial->open_count) == 0)
34280 return;
34281
34282 if (status == 0) {
34283 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34284 spin_unlock_irq(&serial->serial_lock);
34285
34286 /* check for port already opened, if not set the termios */
34287 - serial->open_count++;
34288 - if (serial->open_count == 1) {
34289 + if (local_inc_return(&serial->open_count) == 1) {
34290 serial->rx_state = RX_IDLE;
34291 /* Force default termio settings */
34292 _hso_serial_set_termios(tty, NULL);
34293 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34294 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34295 if (result) {
34296 hso_stop_serial_device(serial->parent);
34297 - serial->open_count--;
34298 + local_dec(&serial->open_count);
34299 kref_put(&serial->parent->ref, hso_serial_ref_free);
34300 }
34301 } else {
34302 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34303
34304 /* reset the rts and dtr */
34305 /* do the actual close */
34306 - serial->open_count--;
34307 + local_dec(&serial->open_count);
34308
34309 - if (serial->open_count <= 0) {
34310 - serial->open_count = 0;
34311 + if (local_read(&serial->open_count) <= 0) {
34312 + local_set(&serial->open_count, 0);
34313 spin_lock_irq(&serial->serial_lock);
34314 if (serial->tty == tty) {
34315 serial->tty->driver_data = NULL;
34316 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34317
34318 /* the actual setup */
34319 spin_lock_irqsave(&serial->serial_lock, flags);
34320 - if (serial->open_count)
34321 + if (local_read(&serial->open_count))
34322 _hso_serial_set_termios(tty, old);
34323 else
34324 tty->termios = old;
34325 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34326 D1("Pending read interrupt on port %d\n", i);
34327 spin_lock(&serial->serial_lock);
34328 if (serial->rx_state == RX_IDLE &&
34329 - serial->open_count > 0) {
34330 + local_read(&serial->open_count) > 0) {
34331 /* Setup and send a ctrl req read on
34332 * port i */
34333 if (!serial->rx_urb_filled[0]) {
34334 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34335 /* Start all serial ports */
34336 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34337 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34338 - if (dev2ser(serial_table[i])->open_count) {
34339 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34340 result =
34341 hso_start_serial_device(serial_table[i], GFP_NOIO);
34342 hso_kick_transmit(dev2ser(serial_table[i]));
34343 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34344 index 27400ed..c796e05 100644
34345 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
34346 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34347 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
34348 * Return with error code if any of the queue indices
34349 * is out of range
34350 */
34351 - if (p->ring_index[i] < 0 ||
34352 - p->ring_index[i] >= adapter->num_rx_queues)
34353 + if (p->ring_index[i] >= adapter->num_rx_queues)
34354 return -EINVAL;
34355 }
34356
34357 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
34358 index dd36258..e47fd31 100644
34359 --- a/drivers/net/vxge/vxge-config.h
34360 +++ b/drivers/net/vxge/vxge-config.h
34361 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34362 void (*link_down)(struct __vxge_hw_device *devh);
34363 void (*crit_err)(struct __vxge_hw_device *devh,
34364 enum vxge_hw_event type, u64 ext_data);
34365 -};
34366 +} __no_const;
34367
34368 /*
34369 * struct __vxge_hw_blockpool_entry - Block private data structure
34370 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
34371 index 178348a2..18bb433 100644
34372 --- a/drivers/net/vxge/vxge-main.c
34373 +++ b/drivers/net/vxge/vxge-main.c
34374 @@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
34375 struct sk_buff *completed[NR_SKB_COMPLETED];
34376 int more;
34377
34378 + pax_track_stack();
34379 +
34380 do {
34381 more = 0;
34382 skb_ptr = completed;
34383 @@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
34384 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34385 int index;
34386
34387 + pax_track_stack();
34388 +
34389 /*
34390 * Filling
34391 * - itable with bucket numbers
34392 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
34393 index 4a518a3..936b334 100644
34394 --- a/drivers/net/vxge/vxge-traffic.h
34395 +++ b/drivers/net/vxge/vxge-traffic.h
34396 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34397 struct vxge_hw_mempool_dma *dma_object,
34398 u32 index,
34399 u32 is_last);
34400 -};
34401 +} __no_const;
34402
34403 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34404 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34405 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
34406 index 56aeb01..547f71f 100644
34407 --- a/drivers/net/wan/hdlc_x25.c
34408 +++ b/drivers/net/wan/hdlc_x25.c
34409 @@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
34410
34411 static int x25_open(struct net_device *dev)
34412 {
34413 - struct lapb_register_struct cb;
34414 + static struct lapb_register_struct cb = {
34415 + .connect_confirmation = x25_connected,
34416 + .connect_indication = x25_connected,
34417 + .disconnect_confirmation = x25_disconnected,
34418 + .disconnect_indication = x25_disconnected,
34419 + .data_indication = x25_data_indication,
34420 + .data_transmit = x25_data_transmit
34421 + };
34422 int result;
34423
34424 - cb.connect_confirmation = x25_connected;
34425 - cb.connect_indication = x25_connected;
34426 - cb.disconnect_confirmation = x25_disconnected;
34427 - cb.disconnect_indication = x25_disconnected;
34428 - cb.data_indication = x25_data_indication;
34429 - cb.data_transmit = x25_data_transmit;
34430 -
34431 result = lapb_register(dev, &cb);
34432 if (result != LAPB_OK)
34433 return result;
34434 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
34435 index 1fda46c..f2858f2 100644
34436 --- a/drivers/net/wimax/i2400m/usb-fw.c
34437 +++ b/drivers/net/wimax/i2400m/usb-fw.c
34438 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
34439 int do_autopm = 1;
34440 DECLARE_COMPLETION_ONSTACK(notif_completion);
34441
34442 + pax_track_stack();
34443 +
34444 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34445 i2400m, ack, ack_size);
34446 BUG_ON(_ack == i2400m->bm_ack_buf);
34447 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
34448 index e1b3e3c..e413f18 100644
34449 --- a/drivers/net/wireless/airo.c
34450 +++ b/drivers/net/wireless/airo.c
34451 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
34452 BSSListElement * loop_net;
34453 BSSListElement * tmp_net;
34454
34455 + pax_track_stack();
34456 +
34457 /* Blow away current list of scan results */
34458 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34459 list_move_tail (&loop_net->list, &ai->network_free_list);
34460 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
34461 WepKeyRid wkr;
34462 int rc;
34463
34464 + pax_track_stack();
34465 +
34466 memset( &mySsid, 0, sizeof( mySsid ) );
34467 kfree (ai->flash);
34468 ai->flash = NULL;
34469 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct inode *inode,
34470 __le32 *vals = stats.vals;
34471 int len;
34472
34473 + pax_track_stack();
34474 +
34475 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34476 return -ENOMEM;
34477 data = file->private_data;
34478 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
34479 /* If doLoseSync is not 1, we won't do a Lose Sync */
34480 int doLoseSync = -1;
34481
34482 + pax_track_stack();
34483 +
34484 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34485 return -ENOMEM;
34486 data = file->private_data;
34487 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_device *dev,
34488 int i;
34489 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34490
34491 + pax_track_stack();
34492 +
34493 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34494 if (!qual)
34495 return -ENOMEM;
34496 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
34497 CapabilityRid cap_rid;
34498 __le32 *vals = stats_rid.vals;
34499
34500 + pax_track_stack();
34501 +
34502 /* Get stats out of the card */
34503 clear_bit(JOB_WSTATS, &local->jobs);
34504 if (local->power.event) {
34505 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34506 index 17c4b56..00d836f 100644
34507 --- a/drivers/net/wireless/ath/ath.h
34508 +++ b/drivers/net/wireless/ath/ath.h
34509 @@ -121,6 +121,7 @@ struct ath_ops {
34510 void (*write_flush) (void *);
34511 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34512 };
34513 +typedef struct ath_ops __no_const ath_ops_no_const;
34514
34515 struct ath_common;
34516 struct ath_bus_ops;
34517 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
34518 index ccca724..7afbadc 100644
34519 --- a/drivers/net/wireless/ath/ath5k/debug.c
34520 +++ b/drivers/net/wireless/ath/ath5k/debug.c
34521 @@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
34522 unsigned int v;
34523 u64 tsf;
34524
34525 + pax_track_stack();
34526 +
34527 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
34528 len += snprintf(buf + len, sizeof(buf) - len,
34529 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
34530 @@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
34531 unsigned int len = 0;
34532 unsigned int i;
34533
34534 + pax_track_stack();
34535 +
34536 len += snprintf(buf + len, sizeof(buf) - len,
34537 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
34538
34539 @@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
34540 unsigned int len = 0;
34541 u32 filt = ath5k_hw_get_rx_filter(ah);
34542
34543 + pax_track_stack();
34544 +
34545 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
34546 ah->bssidmask);
34547 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
34548 @@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
34549 unsigned int len = 0;
34550 int i;
34551
34552 + pax_track_stack();
34553 +
34554 len += snprintf(buf + len, sizeof(buf) - len,
34555 "RX\n---------------------\n");
34556 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
34557 @@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
34558 char buf[700];
34559 unsigned int len = 0;
34560
34561 + pax_track_stack();
34562 +
34563 len += snprintf(buf + len, sizeof(buf) - len,
34564 "HW has PHY error counters:\t%s\n",
34565 ah->ah_capabilities.cap_has_phyerr_counters ?
34566 @@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34567 struct ath5k_buf *bf, *bf0;
34568 int i, n;
34569
34570 + pax_track_stack();
34571 +
34572 len += snprintf(buf + len, sizeof(buf) - len,
34573 "available txbuffers: %d\n", ah->txbuf_len);
34574
34575 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34576 index 7c2aaad..ad14dee 100644
34577 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34578 +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34579 @@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
34580 int i, im, j;
34581 int nmeasurement;
34582
34583 + pax_track_stack();
34584 +
34585 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
34586 if (ah->txchainmask & (1 << i))
34587 num_chains++;
34588 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34589 index f80d1d6..08b773d 100644
34590 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34591 +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34592 @@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
34593 int theta_low_bin = 0;
34594 int i;
34595
34596 + pax_track_stack();
34597 +
34598 /* disregard any bin that contains <= 16 samples */
34599 thresh_accum_cnt = 16;
34600 scale_factor = 5;
34601 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
34602 index d1eb896..8b67cd4 100644
34603 --- a/drivers/net/wireless/ath/ath9k/debug.c
34604 +++ b/drivers/net/wireless/ath/ath9k/debug.c
34605 @@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
34606 char buf[512];
34607 unsigned int len = 0;
34608
34609 + pax_track_stack();
34610 +
34611 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
34612 len += snprintf(buf + len, sizeof(buf) - len,
34613 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
34614 @@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
34615 u8 addr[ETH_ALEN];
34616 u32 tmp;
34617
34618 + pax_track_stack();
34619 +
34620 len += snprintf(buf + len, sizeof(buf) - len,
34621 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
34622 wiphy_name(sc->hw->wiphy),
34623 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34624 index d3ff33c..309398e 100644
34625 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34626 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34627 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
34628 unsigned int len = 0;
34629 int ret = 0;
34630
34631 + pax_track_stack();
34632 +
34633 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34634
34635 ath9k_htc_ps_wakeup(priv);
34636 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
34637 unsigned int len = 0;
34638 int ret = 0;
34639
34640 + pax_track_stack();
34641 +
34642 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34643
34644 ath9k_htc_ps_wakeup(priv);
34645 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
34646 unsigned int len = 0;
34647 int ret = 0;
34648
34649 + pax_track_stack();
34650 +
34651 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34652
34653 ath9k_htc_ps_wakeup(priv);
34654 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
34655 char buf[512];
34656 unsigned int len = 0;
34657
34658 + pax_track_stack();
34659 +
34660 len += snprintf(buf + len, sizeof(buf) - len,
34661 "%20s : %10u\n", "Buffers queued",
34662 priv->debug.tx_stats.buf_queued);
34663 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
34664 char buf[512];
34665 unsigned int len = 0;
34666
34667 + pax_track_stack();
34668 +
34669 spin_lock_bh(&priv->tx.tx_lock);
34670
34671 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
34672 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34673 char buf[512];
34674 unsigned int len = 0;
34675
34676 + pax_track_stack();
34677 +
34678 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
34679 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
34680
34681 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34682 index c798890..c19a8fb 100644
34683 --- a/drivers/net/wireless/ath/ath9k/hw.h
34684 +++ b/drivers/net/wireless/ath/ath9k/hw.h
34685 @@ -588,7 +588,7 @@ struct ath_hw_private_ops {
34686
34687 /* ANI */
34688 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34689 -};
34690 +} __no_const;
34691
34692 /**
34693 * struct ath_hw_ops - callbacks used by hardware code and driver code
34694 @@ -639,7 +639,7 @@ struct ath_hw_ops {
34695 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34696 struct ath_hw_antcomb_conf *antconf);
34697
34698 -};
34699 +} __no_const;
34700
34701 struct ath_nf_limits {
34702 s16 max;
34703 @@ -652,7 +652,7 @@ struct ath_nf_limits {
34704 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
34705
34706 struct ath_hw {
34707 - struct ath_ops reg_ops;
34708 + ath_ops_no_const reg_ops;
34709
34710 struct ieee80211_hw *hw;
34711 struct ath_common common;
34712 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
34713 index ef9ad79..f5f8d80 100644
34714 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
34715 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
34716 @@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
34717 int err;
34718 DECLARE_SSID_BUF(ssid);
34719
34720 + pax_track_stack();
34721 +
34722 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
34723
34724 if (ssid_len)
34725 @@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
34726 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
34727 int err;
34728
34729 + pax_track_stack();
34730 +
34731 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
34732 idx, keylen, len);
34733
34734 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
34735 index 32a9966..de69787 100644
34736 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
34737 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
34738 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_response(struct libipw_device
34739 unsigned long flags;
34740 DECLARE_SSID_BUF(ssid);
34741
34742 + pax_track_stack();
34743 +
34744 LIBIPW_DEBUG_SCAN("'%s' (%pM"
34745 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
34746 print_ssid(ssid, info_element->data, info_element->len),
34747 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34748 index 66ee1562..b90412b 100644
34749 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
34750 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34751 @@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
34752 */
34753 if (iwl3945_mod_params.disable_hw_scan) {
34754 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
34755 - iwl3945_hw_ops.hw_scan = NULL;
34756 + pax_open_kernel();
34757 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
34758 + pax_close_kernel();
34759 }
34760
34761 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
34762 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34763 index 3789ff4..22ab151 100644
34764 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34765 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34766 @@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
34767 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
34768 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
34769
34770 + pax_track_stack();
34771 +
34772 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
34773
34774 /* Treat uninitialized rate scaling data same as non-existing. */
34775 @@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
34776 container_of(lq_sta, struct iwl_station_priv, lq_sta);
34777 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
34778
34779 + pax_track_stack();
34780 +
34781 /* Override starting rate (index 0) if needed for debug purposes */
34782 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
34783
34784 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34785 index f9a407e..a6f2bb7 100644
34786 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34787 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34788 @@ -68,8 +68,8 @@ do { \
34789 } while (0)
34790
34791 #else
34792 -#define IWL_DEBUG(__priv, level, fmt, args...)
34793 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
34794 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
34795 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
34796 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
34797 const void *p, u32 len)
34798 {}
34799 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34800 index ec1485b..900c3bd 100644
34801 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34802 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34803 @@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
34804 int pos = 0;
34805 const size_t bufsz = sizeof(buf);
34806
34807 + pax_track_stack();
34808 +
34809 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
34810 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
34811 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
34812 @@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
34813 char buf[256 * NUM_IWL_RXON_CTX];
34814 const size_t bufsz = sizeof(buf);
34815
34816 + pax_track_stack();
34817 +
34818 for_each_context(priv, ctx) {
34819 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
34820 ctx->ctxid);
34821 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34822 index 0a0cc96..fd49ad8 100644
34823 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
34824 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34825 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
34826 int buf_len = 512;
34827 size_t len = 0;
34828
34829 + pax_track_stack();
34830 +
34831 if (*ppos != 0)
34832 return 0;
34833 if (count < sizeof(buf))
34834 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34835 index 031cd89..bdc8435 100644
34836 --- a/drivers/net/wireless/mac80211_hwsim.c
34837 +++ b/drivers/net/wireless/mac80211_hwsim.c
34838 @@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(void)
34839 return -EINVAL;
34840
34841 if (fake_hw_scan) {
34842 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34843 - mac80211_hwsim_ops.sw_scan_start = NULL;
34844 - mac80211_hwsim_ops.sw_scan_complete = NULL;
34845 + pax_open_kernel();
34846 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34847 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34848 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34849 + pax_close_kernel();
34850 }
34851
34852 spin_lock_init(&hwsim_radio_lock);
34853 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34854 index 2215c3c..64e6a47 100644
34855 --- a/drivers/net/wireless/mwifiex/main.h
34856 +++ b/drivers/net/wireless/mwifiex/main.h
34857 @@ -560,7 +560,7 @@ struct mwifiex_if_ops {
34858
34859 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
34860 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34861 -};
34862 +} __no_const;
34863
34864 struct mwifiex_adapter {
34865 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
34866 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34867 index 29f9389..f6d2ce0 100644
34868 --- a/drivers/net/wireless/rndis_wlan.c
34869 +++ b/drivers/net/wireless/rndis_wlan.c
34870 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
34871
34872 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34873
34874 - if (rts_threshold < 0 || rts_threshold > 2347)
34875 + if (rts_threshold > 2347)
34876 rts_threshold = 2347;
34877
34878 tmp = cpu_to_le32(rts_threshold);
34879 diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34880 index 3b11642..d6bb049 100644
34881 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34882 +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34883 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
34884 u8 rfpath;
34885 u8 num_total_rfpath = rtlphy->num_total_rfpath;
34886
34887 + pax_track_stack();
34888 +
34889 precommoncmdcnt = 0;
34890 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
34891 MAX_PRECMD_CNT,
34892 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34893 index a77f1bb..c608b2b 100644
34894 --- a/drivers/net/wireless/wl1251/wl1251.h
34895 +++ b/drivers/net/wireless/wl1251/wl1251.h
34896 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
34897 void (*reset)(struct wl1251 *wl);
34898 void (*enable_irq)(struct wl1251 *wl);
34899 void (*disable_irq)(struct wl1251 *wl);
34900 -};
34901 +} __no_const;
34902
34903 struct wl1251 {
34904 struct ieee80211_hw *hw;
34905 diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
34906 index e0b3736..4b466e6 100644
34907 --- a/drivers/net/wireless/wl12xx/spi.c
34908 +++ b/drivers/net/wireless/wl12xx/spi.c
34909 @@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
34910 u32 chunk_len;
34911 int i;
34912
34913 + pax_track_stack();
34914 +
34915 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
34916
34917 spi_message_init(&m);
34918 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34919 index f34b5b2..b5abb9f 100644
34920 --- a/drivers/oprofile/buffer_sync.c
34921 +++ b/drivers/oprofile/buffer_sync.c
34922 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
34923 if (cookie == NO_COOKIE)
34924 offset = pc;
34925 if (cookie == INVALID_COOKIE) {
34926 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34927 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34928 offset = pc;
34929 }
34930 if (cookie != last_cookie) {
34931 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
34932 /* add userspace sample */
34933
34934 if (!mm) {
34935 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
34936 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
34937 return 0;
34938 }
34939
34940 cookie = lookup_dcookie(mm, s->eip, &offset);
34941
34942 if (cookie == INVALID_COOKIE) {
34943 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34944 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34945 return 0;
34946 }
34947
34948 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
34949 /* ignore backtraces if failed to add a sample */
34950 if (state == sb_bt_start) {
34951 state = sb_bt_ignore;
34952 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
34953 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
34954 }
34955 }
34956 release_mm(mm);
34957 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
34958 index dd87e86..bc0148c 100644
34959 --- a/drivers/oprofile/event_buffer.c
34960 +++ b/drivers/oprofile/event_buffer.c
34961 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
34962 }
34963
34964 if (buffer_pos == buffer_size) {
34965 - atomic_inc(&oprofile_stats.event_lost_overflow);
34966 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
34967 return;
34968 }
34969
34970 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
34971 index dccd863..8d35669 100644
34972 --- a/drivers/oprofile/oprof.c
34973 +++ b/drivers/oprofile/oprof.c
34974 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
34975 if (oprofile_ops.switch_events())
34976 return;
34977
34978 - atomic_inc(&oprofile_stats.multiplex_counter);
34979 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
34980 start_switch_worker();
34981 }
34982
34983 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
34984 index 917d28e..d62d981 100644
34985 --- a/drivers/oprofile/oprofile_stats.c
34986 +++ b/drivers/oprofile/oprofile_stats.c
34987 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
34988 cpu_buf->sample_invalid_eip = 0;
34989 }
34990
34991 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
34992 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
34993 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
34994 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
34995 - atomic_set(&oprofile_stats.multiplex_counter, 0);
34996 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
34997 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
34998 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
34999 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35000 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35001 }
35002
35003
35004 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35005 index 38b6fc0..b5cbfce 100644
35006 --- a/drivers/oprofile/oprofile_stats.h
35007 +++ b/drivers/oprofile/oprofile_stats.h
35008 @@ -13,11 +13,11 @@
35009 #include <linux/atomic.h>
35010
35011 struct oprofile_stat_struct {
35012 - atomic_t sample_lost_no_mm;
35013 - atomic_t sample_lost_no_mapping;
35014 - atomic_t bt_lost_no_mapping;
35015 - atomic_t event_lost_overflow;
35016 - atomic_t multiplex_counter;
35017 + atomic_unchecked_t sample_lost_no_mm;
35018 + atomic_unchecked_t sample_lost_no_mapping;
35019 + atomic_unchecked_t bt_lost_no_mapping;
35020 + atomic_unchecked_t event_lost_overflow;
35021 + atomic_unchecked_t multiplex_counter;
35022 };
35023
35024 extern struct oprofile_stat_struct oprofile_stats;
35025 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35026 index e9ff6f7..28e259a 100644
35027 --- a/drivers/oprofile/oprofilefs.c
35028 +++ b/drivers/oprofile/oprofilefs.c
35029 @@ -186,7 +186,7 @@ static const struct file_operations atomic_ro_fops = {
35030
35031
35032 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35033 - char const *name, atomic_t *val)
35034 + char const *name, atomic_unchecked_t *val)
35035 {
35036 return __oprofilefs_create_file(sb, root, name,
35037 &atomic_ro_fops, 0444, val);
35038 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35039 index 3f56bc0..707d642 100644
35040 --- a/drivers/parport/procfs.c
35041 +++ b/drivers/parport/procfs.c
35042 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35043
35044 *ppos += len;
35045
35046 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35047 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35048 }
35049
35050 #ifdef CONFIG_PARPORT_1284
35051 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35052
35053 *ppos += len;
35054
35055 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35056 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35057 }
35058 #endif /* IEEE1284.3 support. */
35059
35060 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35061 index 9fff878..ad0ad53 100644
35062 --- a/drivers/pci/hotplug/cpci_hotplug.h
35063 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35064 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35065 int (*hardware_test) (struct slot* slot, u32 value);
35066 u8 (*get_power) (struct slot* slot);
35067 int (*set_power) (struct slot* slot, int value);
35068 -};
35069 +} __no_const;
35070
35071 struct cpci_hp_controller {
35072 unsigned int irq;
35073 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35074 index 76ba8a1..20ca857 100644
35075 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35076 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35077 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35078
35079 void compaq_nvram_init (void __iomem *rom_start)
35080 {
35081 +
35082 +#ifndef CONFIG_PAX_KERNEXEC
35083 if (rom_start) {
35084 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35085 }
35086 +#endif
35087 +
35088 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35089
35090 /* initialize our int15 lock */
35091 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35092 index cbfbab1..6a9fced 100644
35093 --- a/drivers/pci/pcie/aspm.c
35094 +++ b/drivers/pci/pcie/aspm.c
35095 @@ -27,9 +27,9 @@
35096 #define MODULE_PARAM_PREFIX "pcie_aspm."
35097
35098 /* Note: those are not register definitions */
35099 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35100 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35101 -#define ASPM_STATE_L1 (4) /* L1 state */
35102 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35103 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35104 +#define ASPM_STATE_L1 (4U) /* L1 state */
35105 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35106 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35107
35108 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35109 index 6ab6bd3..72bdc69 100644
35110 --- a/drivers/pci/probe.c
35111 +++ b/drivers/pci/probe.c
35112 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35113 u32 l, sz, mask;
35114 u16 orig_cmd;
35115
35116 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35117 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35118
35119 if (!dev->mmio_always_on) {
35120 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35121 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35122 index 27911b5..5b6db88 100644
35123 --- a/drivers/pci/proc.c
35124 +++ b/drivers/pci/proc.c
35125 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35126 static int __init pci_proc_init(void)
35127 {
35128 struct pci_dev *dev = NULL;
35129 +
35130 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
35131 +#ifdef CONFIG_GRKERNSEC_PROC_USER
35132 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35133 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35134 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35135 +#endif
35136 +#else
35137 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35138 +#endif
35139 proc_create("devices", 0, proc_bus_pci_dir,
35140 &proc_bus_pci_dev_operations);
35141 proc_initialized = 1;
35142 diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
35143 index 90832a9..419089a 100644
35144 --- a/drivers/pci/xen-pcifront.c
35145 +++ b/drivers/pci/xen-pcifront.c
35146 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
35147 struct pcifront_sd *sd = bus->sysdata;
35148 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35149
35150 + pax_track_stack();
35151 +
35152 if (verbose_request)
35153 dev_info(&pdev->xdev->dev,
35154 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
35155 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
35156 struct pcifront_sd *sd = bus->sysdata;
35157 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35158
35159 + pax_track_stack();
35160 +
35161 if (verbose_request)
35162 dev_info(&pdev->xdev->dev,
35163 "write dev=%04x:%02x:%02x.%01x - "
35164 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
35165 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35166 struct msi_desc *entry;
35167
35168 + pax_track_stack();
35169 +
35170 if (nvec > SH_INFO_MAX_VEC) {
35171 dev_err(&dev->dev, "too much vector for pci frontend: %x."
35172 " Increase SH_INFO_MAX_VEC.\n", nvec);
35173 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
35174 struct pcifront_sd *sd = dev->bus->sysdata;
35175 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35176
35177 + pax_track_stack();
35178 +
35179 err = do_pci_op(pdev, &op);
35180
35181 /* What should do for error ? */
35182 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
35183 struct pcifront_sd *sd = dev->bus->sysdata;
35184 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35185
35186 + pax_track_stack();
35187 +
35188 err = do_pci_op(pdev, &op);
35189 if (likely(!err)) {
35190 vector[0] = op.value;
35191 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35192 index 7bd829f..a3237ad 100644
35193 --- a/drivers/platform/x86/thinkpad_acpi.c
35194 +++ b/drivers/platform/x86/thinkpad_acpi.c
35195 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35196 return 0;
35197 }
35198
35199 -void static hotkey_mask_warn_incomplete_mask(void)
35200 +static void hotkey_mask_warn_incomplete_mask(void)
35201 {
35202 /* log only what the user can fix... */
35203 const u32 wantedmask = hotkey_driver_mask &
35204 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35205 }
35206 }
35207
35208 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35209 - struct tp_nvram_state *newn,
35210 - const u32 event_mask)
35211 -{
35212 -
35213 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35214 do { \
35215 if ((event_mask & (1 << __scancode)) && \
35216 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35217 tpacpi_hotkey_send_key(__scancode); \
35218 } while (0)
35219
35220 - void issue_volchange(const unsigned int oldvol,
35221 - const unsigned int newvol)
35222 - {
35223 - unsigned int i = oldvol;
35224 +static void issue_volchange(const unsigned int oldvol,
35225 + const unsigned int newvol,
35226 + const u32 event_mask)
35227 +{
35228 + unsigned int i = oldvol;
35229
35230 - while (i > newvol) {
35231 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35232 - i--;
35233 - }
35234 - while (i < newvol) {
35235 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35236 - i++;
35237 - }
35238 + while (i > newvol) {
35239 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35240 + i--;
35241 }
35242 + while (i < newvol) {
35243 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35244 + i++;
35245 + }
35246 +}
35247
35248 - void issue_brightnesschange(const unsigned int oldbrt,
35249 - const unsigned int newbrt)
35250 - {
35251 - unsigned int i = oldbrt;
35252 +static void issue_brightnesschange(const unsigned int oldbrt,
35253 + const unsigned int newbrt,
35254 + const u32 event_mask)
35255 +{
35256 + unsigned int i = oldbrt;
35257
35258 - while (i > newbrt) {
35259 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35260 - i--;
35261 - }
35262 - while (i < newbrt) {
35263 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35264 - i++;
35265 - }
35266 + while (i > newbrt) {
35267 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35268 + i--;
35269 + }
35270 + while (i < newbrt) {
35271 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35272 + i++;
35273 }
35274 +}
35275
35276 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35277 + struct tp_nvram_state *newn,
35278 + const u32 event_mask)
35279 +{
35280 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35281 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35282 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35283 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35284 oldn->volume_level != newn->volume_level) {
35285 /* recently muted, or repeated mute keypress, or
35286 * multiple presses ending in mute */
35287 - issue_volchange(oldn->volume_level, newn->volume_level);
35288 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35289 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35290 }
35291 } else {
35292 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35293 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35294 }
35295 if (oldn->volume_level != newn->volume_level) {
35296 - issue_volchange(oldn->volume_level, newn->volume_level);
35297 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35298 } else if (oldn->volume_toggle != newn->volume_toggle) {
35299 /* repeated vol up/down keypress at end of scale ? */
35300 if (newn->volume_level == 0)
35301 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35302 /* handle brightness */
35303 if (oldn->brightness_level != newn->brightness_level) {
35304 issue_brightnesschange(oldn->brightness_level,
35305 - newn->brightness_level);
35306 + newn->brightness_level,
35307 + event_mask);
35308 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35309 /* repeated key presses that didn't change state */
35310 if (newn->brightness_level == 0)
35311 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35312 && !tp_features.bright_unkfw)
35313 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35314 }
35315 +}
35316
35317 #undef TPACPI_COMPARE_KEY
35318 #undef TPACPI_MAY_SEND_KEY
35319 -}
35320
35321 /*
35322 * Polling driver
35323 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35324 index b859d16..5cc6b1a 100644
35325 --- a/drivers/pnp/pnpbios/bioscalls.c
35326 +++ b/drivers/pnp/pnpbios/bioscalls.c
35327 @@ -59,7 +59,7 @@ do { \
35328 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35329 } while(0)
35330
35331 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35332 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35333 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35334
35335 /*
35336 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35337
35338 cpu = get_cpu();
35339 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35340 +
35341 + pax_open_kernel();
35342 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35343 + pax_close_kernel();
35344
35345 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35346 spin_lock_irqsave(&pnp_bios_lock, flags);
35347 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35348 :"memory");
35349 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35350
35351 + pax_open_kernel();
35352 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35353 + pax_close_kernel();
35354 +
35355 put_cpu();
35356
35357 /* If we get here and this is set then the PnP BIOS faulted on us. */
35358 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35359 return status;
35360 }
35361
35362 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35363 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35364 {
35365 int i;
35366
35367 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35368 pnp_bios_callpoint.offset = header->fields.pm16offset;
35369 pnp_bios_callpoint.segment = PNP_CS16;
35370
35371 + pax_open_kernel();
35372 +
35373 for_each_possible_cpu(i) {
35374 struct desc_struct *gdt = get_cpu_gdt_table(i);
35375 if (!gdt)
35376 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35377 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35378 (unsigned long)__va(header->fields.pm16dseg));
35379 }
35380 +
35381 + pax_close_kernel();
35382 }
35383 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35384 index b0ecacb..7c9da2e 100644
35385 --- a/drivers/pnp/resource.c
35386 +++ b/drivers/pnp/resource.c
35387 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35388 return 1;
35389
35390 /* check if the resource is valid */
35391 - if (*irq < 0 || *irq > 15)
35392 + if (*irq > 15)
35393 return 0;
35394
35395 /* check if the resource is reserved */
35396 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35397 return 1;
35398
35399 /* check if the resource is valid */
35400 - if (*dma < 0 || *dma == 4 || *dma > 7)
35401 + if (*dma == 4 || *dma > 7)
35402 return 0;
35403
35404 /* check if the resource is reserved */
35405 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35406 index bb16f5b..c751eef 100644
35407 --- a/drivers/power/bq27x00_battery.c
35408 +++ b/drivers/power/bq27x00_battery.c
35409 @@ -67,7 +67,7 @@
35410 struct bq27x00_device_info;
35411 struct bq27x00_access_methods {
35412 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35413 -};
35414 +} __no_const;
35415
35416 enum bq27x00_chip { BQ27000, BQ27500 };
35417
35418 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35419 index 33f5d9a..d957d3f 100644
35420 --- a/drivers/regulator/max8660.c
35421 +++ b/drivers/regulator/max8660.c
35422 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35423 max8660->shadow_regs[MAX8660_OVER1] = 5;
35424 } else {
35425 /* Otherwise devices can be toggled via software */
35426 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35427 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35428 + pax_open_kernel();
35429 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35430 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35431 + pax_close_kernel();
35432 }
35433
35434 /*
35435 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35436 index 3285d41..ab7c22a 100644
35437 --- a/drivers/regulator/mc13892-regulator.c
35438 +++ b/drivers/regulator/mc13892-regulator.c
35439 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35440 }
35441 mc13xxx_unlock(mc13892);
35442
35443 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35444 + pax_open_kernel();
35445 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35446 = mc13892_vcam_set_mode;
35447 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35448 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35449 = mc13892_vcam_get_mode;
35450 + pax_close_kernel();
35451 for (i = 0; i < pdata->num_regulators; i++) {
35452 init_data = &pdata->regulators[i];
35453 priv->regulators[i] = regulator_register(
35454 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35455 index cace6d3..f623fda 100644
35456 --- a/drivers/rtc/rtc-dev.c
35457 +++ b/drivers/rtc/rtc-dev.c
35458 @@ -14,6 +14,7 @@
35459 #include <linux/module.h>
35460 #include <linux/rtc.h>
35461 #include <linux/sched.h>
35462 +#include <linux/grsecurity.h>
35463 #include "rtc-core.h"
35464
35465 static dev_t rtc_devt;
35466 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35467 if (copy_from_user(&tm, uarg, sizeof(tm)))
35468 return -EFAULT;
35469
35470 + gr_log_timechange();
35471 +
35472 return rtc_set_time(rtc, &tm);
35473
35474 case RTC_PIE_ON:
35475 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
35476 index f66c33b..7ae5823 100644
35477 --- a/drivers/scsi/BusLogic.c
35478 +++ b/drivers/scsi/BusLogic.c
35479 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
35480 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
35481 *PrototypeHostAdapter)
35482 {
35483 + pax_track_stack();
35484 +
35485 /*
35486 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
35487 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
35488 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35489 index ffb5878..e6d785c 100644
35490 --- a/drivers/scsi/aacraid/aacraid.h
35491 +++ b/drivers/scsi/aacraid/aacraid.h
35492 @@ -492,7 +492,7 @@ struct adapter_ops
35493 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35494 /* Administrative operations */
35495 int (*adapter_comm)(struct aac_dev * dev, int comm);
35496 -};
35497 +} __no_const;
35498
35499 /*
35500 * Define which interrupt handler needs to be installed
35501 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
35502 index 8a0b330..b4286de 100644
35503 --- a/drivers/scsi/aacraid/commctrl.c
35504 +++ b/drivers/scsi/aacraid/commctrl.c
35505 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
35506 u32 actual_fibsize64, actual_fibsize = 0;
35507 int i;
35508
35509 + pax_track_stack();
35510
35511 if (dev->in_reset) {
35512 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
35513 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35514 index c7b6fed..4db0569 100644
35515 --- a/drivers/scsi/aacraid/linit.c
35516 +++ b/drivers/scsi/aacraid/linit.c
35517 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35518 #elif defined(__devinitconst)
35519 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35520 #else
35521 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35522 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35523 #endif
35524 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35525 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35526 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35527 index d5ff142..49c0ebb 100644
35528 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35529 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35530 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35531 .lldd_control_phy = asd_control_phy,
35532 };
35533
35534 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35535 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35536 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35537 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35538 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35539 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35540 index a796de9..1ef20e1 100644
35541 --- a/drivers/scsi/bfa/bfa.h
35542 +++ b/drivers/scsi/bfa/bfa.h
35543 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35544 u32 *end);
35545 int cpe_vec_q0;
35546 int rme_vec_q0;
35547 -};
35548 +} __no_const;
35549 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35550
35551 struct bfa_faa_cbfn_s {
35552 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35553 index e07bd47..dbd260a 100644
35554 --- a/drivers/scsi/bfa/bfa_fcpim.c
35555 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35556 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35557 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35558 {
35559 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35560 - struct bfa_itn_s *itn;
35561 + bfa_itn_s_no_const *itn;
35562
35563 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35564 itn->isr = isr;
35565 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35566 index 1080bcb..a3b39e3 100644
35567 --- a/drivers/scsi/bfa/bfa_fcpim.h
35568 +++ b/drivers/scsi/bfa/bfa_fcpim.h
35569 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
35570 struct bfa_itn_s {
35571 bfa_isr_func_t isr;
35572 };
35573 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35574
35575 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35576 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35577 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
35578 struct list_head iotag_tio_free_q; /* free IO resources */
35579 struct list_head iotag_unused_q; /* unused IO resources*/
35580 struct bfa_iotag_s *iotag_arr;
35581 - struct bfa_itn_s *itn_arr;
35582 + bfa_itn_s_no_const *itn_arr;
35583 int num_ioim_reqs;
35584 int num_fwtio_reqs;
35585 int num_itns;
35586 diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
35587 index d4f951f..197c350 100644
35588 --- a/drivers/scsi/bfa/bfa_fcs_lport.c
35589 +++ b/drivers/scsi/bfa/bfa_fcs_lport.c
35590 @@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
35591 u16 len, count;
35592 u16 templen;
35593
35594 + pax_track_stack();
35595 +
35596 /*
35597 * get hba attributes
35598 */
35599 @@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
35600 u8 count = 0;
35601 u16 templen;
35602
35603 + pax_track_stack();
35604 +
35605 /*
35606 * get port attributes
35607 */
35608 diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
35609 index 52628d5..f89d033 100644
35610 --- a/drivers/scsi/bfa/bfa_fcs_rport.c
35611 +++ b/drivers/scsi/bfa/bfa_fcs_rport.c
35612 @@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
35613 struct fc_rpsc_speed_info_s speeds;
35614 struct bfa_port_attr_s pport_attr;
35615
35616 + pax_track_stack();
35617 +
35618 bfa_trc(port->fcs, rx_fchs->s_id);
35619 bfa_trc(port->fcs, rx_fchs->d_id);
35620
35621 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35622 index 546d46b..642fa5b 100644
35623 --- a/drivers/scsi/bfa/bfa_ioc.h
35624 +++ b/drivers/scsi/bfa/bfa_ioc.h
35625 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35626 bfa_ioc_disable_cbfn_t disable_cbfn;
35627 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35628 bfa_ioc_reset_cbfn_t reset_cbfn;
35629 -};
35630 +} __no_const;
35631
35632 /*
35633 * IOC event notification mechanism.
35634 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35635 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35636 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35637 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35638 -};
35639 +} __no_const;
35640
35641 /*
35642 * Queue element to wait for room in request queue. FIFO order is
35643 diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
35644 index 66fb725..0fe05ab 100644
35645 --- a/drivers/scsi/bfa/bfad.c
35646 +++ b/drivers/scsi/bfa/bfad.c
35647 @@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
35648 struct bfad_vport_s *vport, *vport_new;
35649 struct bfa_fcs_driver_info_s driver_info;
35650
35651 + pax_track_stack();
35652 +
35653 /* Limit min/max. xfer size to [64k-32MB] */
35654 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
35655 max_xfer_size = BFAD_MIN_SECTORS >> 1;
35656 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
35657 index b4f6c9a..0eb1938 100644
35658 --- a/drivers/scsi/dpt_i2o.c
35659 +++ b/drivers/scsi/dpt_i2o.c
35660 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
35661 dma_addr_t addr;
35662 ulong flags = 0;
35663
35664 + pax_track_stack();
35665 +
35666 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
35667 // get user msg size in u32s
35668 if(get_user(size, &user_msg[0])){
35669 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
35670 s32 rcode;
35671 dma_addr_t addr;
35672
35673 + pax_track_stack();
35674 +
35675 memset(msg, 0 , sizeof(msg));
35676 len = scsi_bufflen(cmd);
35677 direction = 0x00000000;
35678 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
35679 index 94de889..ca4f0cf 100644
35680 --- a/drivers/scsi/eata.c
35681 +++ b/drivers/scsi/eata.c
35682 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
35683 struct hostdata *ha;
35684 char name[16];
35685
35686 + pax_track_stack();
35687 +
35688 sprintf(name, "%s%d", driver_name, j);
35689
35690 if (!request_region(port_base, REGION_SIZE, driver_name)) {
35691 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
35692 index c74c4b8..c41ca3f 100644
35693 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
35694 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
35695 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
35696 } buf;
35697 int rc;
35698
35699 + pax_track_stack();
35700 +
35701 fiph = (struct fip_header *)skb->data;
35702 sub = fiph->fip_subcode;
35703
35704 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
35705 index 3242bca..45a83e7 100644
35706 --- a/drivers/scsi/gdth.c
35707 +++ b/drivers/scsi/gdth.c
35708 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
35709 unsigned long flags;
35710 gdth_ha_str *ha;
35711
35712 + pax_track_stack();
35713 +
35714 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
35715 return -EFAULT;
35716 ha = gdth_find_ha(ldrv.ionode);
35717 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
35718 gdth_ha_str *ha;
35719 int rval;
35720
35721 + pax_track_stack();
35722 +
35723 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
35724 res.number >= MAX_HDRIVES)
35725 return -EFAULT;
35726 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg, char *cmnd)
35727 gdth_ha_str *ha;
35728 int rval;
35729
35730 + pax_track_stack();
35731 +
35732 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
35733 return -EFAULT;
35734 ha = gdth_find_ha(gen.ionode);
35735 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
35736 int i;
35737 gdth_cmd_str gdtcmd;
35738 char cmnd[MAX_COMMAND_SIZE];
35739 +
35740 + pax_track_stack();
35741 +
35742 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
35743
35744 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
35745 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
35746 index 6527543..81e4fe2 100644
35747 --- a/drivers/scsi/gdth_proc.c
35748 +++ b/drivers/scsi/gdth_proc.c
35749 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
35750 u64 paddr;
35751
35752 char cmnd[MAX_COMMAND_SIZE];
35753 +
35754 + pax_track_stack();
35755 +
35756 memset(cmnd, 0xff, 12);
35757 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
35758
35759 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
35760 gdth_hget_str *phg;
35761 char cmnd[MAX_COMMAND_SIZE];
35762
35763 + pax_track_stack();
35764 +
35765 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
35766 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
35767 if (!gdtcmd || !estr)
35768 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35769 index 351dc0b..951dc32 100644
35770 --- a/drivers/scsi/hosts.c
35771 +++ b/drivers/scsi/hosts.c
35772 @@ -42,7 +42,7 @@
35773 #include "scsi_logging.h"
35774
35775
35776 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
35777 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35778
35779
35780 static void scsi_host_cls_release(struct device *dev)
35781 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35782 * subtract one because we increment first then return, but we need to
35783 * know what the next host number was before increment
35784 */
35785 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35786 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35787 shost->dma_channel = 0xff;
35788
35789 /* These three are default values which can be overridden */
35790 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35791 index 418ce83..7ee1225 100644
35792 --- a/drivers/scsi/hpsa.c
35793 +++ b/drivers/scsi/hpsa.c
35794 @@ -499,7 +499,7 @@ static inline u32 next_command(struct ctlr_info *h)
35795 u32 a;
35796
35797 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35798 - return h->access.command_completed(h);
35799 + return h->access->command_completed(h);
35800
35801 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35802 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35803 @@ -2956,7 +2956,7 @@ static void start_io(struct ctlr_info *h)
35804 while (!list_empty(&h->reqQ)) {
35805 c = list_entry(h->reqQ.next, struct CommandList, list);
35806 /* can't do anything if fifo is full */
35807 - if ((h->access.fifo_full(h))) {
35808 + if ((h->access->fifo_full(h))) {
35809 dev_warn(&h->pdev->dev, "fifo full\n");
35810 break;
35811 }
35812 @@ -2966,7 +2966,7 @@ static void start_io(struct ctlr_info *h)
35813 h->Qdepth--;
35814
35815 /* Tell the controller execute command */
35816 - h->access.submit_command(h, c);
35817 + h->access->submit_command(h, c);
35818
35819 /* Put job onto the completed Q */
35820 addQ(&h->cmpQ, c);
35821 @@ -2975,17 +2975,17 @@ static void start_io(struct ctlr_info *h)
35822
35823 static inline unsigned long get_next_completion(struct ctlr_info *h)
35824 {
35825 - return h->access.command_completed(h);
35826 + return h->access->command_completed(h);
35827 }
35828
35829 static inline bool interrupt_pending(struct ctlr_info *h)
35830 {
35831 - return h->access.intr_pending(h);
35832 + return h->access->intr_pending(h);
35833 }
35834
35835 static inline long interrupt_not_for_us(struct ctlr_info *h)
35836 {
35837 - return (h->access.intr_pending(h) == 0) ||
35838 + return (h->access->intr_pending(h) == 0) ||
35839 (h->interrupts_enabled == 0);
35840 }
35841
35842 @@ -3882,7 +3882,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35843 if (prod_index < 0)
35844 return -ENODEV;
35845 h->product_name = products[prod_index].product_name;
35846 - h->access = *(products[prod_index].access);
35847 + h->access = products[prod_index].access;
35848
35849 if (hpsa_board_disabled(h->pdev)) {
35850 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35851 @@ -4163,7 +4163,7 @@ reinit_after_soft_reset:
35852 }
35853
35854 /* make sure the board interrupts are off */
35855 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35856 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35857
35858 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35859 goto clean2;
35860 @@ -4197,7 +4197,7 @@ reinit_after_soft_reset:
35861 * fake ones to scoop up any residual completions.
35862 */
35863 spin_lock_irqsave(&h->lock, flags);
35864 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35865 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35866 spin_unlock_irqrestore(&h->lock, flags);
35867 free_irq(h->intr[h->intr_mode], h);
35868 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35869 @@ -4216,9 +4216,9 @@ reinit_after_soft_reset:
35870 dev_info(&h->pdev->dev, "Board READY.\n");
35871 dev_info(&h->pdev->dev,
35872 "Waiting for stale completions to drain.\n");
35873 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35874 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35875 msleep(10000);
35876 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35877 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35878
35879 rc = controller_reset_failed(h->cfgtable);
35880 if (rc)
35881 @@ -4239,7 +4239,7 @@ reinit_after_soft_reset:
35882 }
35883
35884 /* Turn the interrupts on so we can service requests */
35885 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35886 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35887
35888 hpsa_hba_inquiry(h);
35889 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35890 @@ -4292,7 +4292,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35891 * To write all data in the battery backed cache to disks
35892 */
35893 hpsa_flush_cache(h);
35894 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35895 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35896 free_irq(h->intr[h->intr_mode], h);
35897 #ifdef CONFIG_PCI_MSI
35898 if (h->msix_vector)
35899 @@ -4455,7 +4455,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35900 return;
35901 }
35902 /* Change the access methods to the performant access methods */
35903 - h->access = SA5_performant_access;
35904 + h->access = &SA5_performant_access;
35905 h->transMethod = CFGTBL_Trans_Performant;
35906 }
35907
35908 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35909 index 7f53cea..a8c7188 100644
35910 --- a/drivers/scsi/hpsa.h
35911 +++ b/drivers/scsi/hpsa.h
35912 @@ -73,7 +73,7 @@ struct ctlr_info {
35913 unsigned int msix_vector;
35914 unsigned int msi_vector;
35915 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35916 - struct access_method access;
35917 + struct access_method *access;
35918
35919 /* queue and queue Info */
35920 struct list_head reqQ;
35921 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35922 index f2df059..a3a9930 100644
35923 --- a/drivers/scsi/ips.h
35924 +++ b/drivers/scsi/ips.h
35925 @@ -1027,7 +1027,7 @@ typedef struct {
35926 int (*intr)(struct ips_ha *);
35927 void (*enableint)(struct ips_ha *);
35928 uint32_t (*statupd)(struct ips_ha *);
35929 -} ips_hw_func_t;
35930 +} __no_const ips_hw_func_t;
35931
35932 typedef struct ips_ha {
35933 uint8_t ha_id[IPS_MAX_CHANNELS+1];
35934 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35935 index d261e98..1e00f35 100644
35936 --- a/drivers/scsi/libfc/fc_exch.c
35937 +++ b/drivers/scsi/libfc/fc_exch.c
35938 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
35939 * all together if not used XXX
35940 */
35941 struct {
35942 - atomic_t no_free_exch;
35943 - atomic_t no_free_exch_xid;
35944 - atomic_t xid_not_found;
35945 - atomic_t xid_busy;
35946 - atomic_t seq_not_found;
35947 - atomic_t non_bls_resp;
35948 + atomic_unchecked_t no_free_exch;
35949 + atomic_unchecked_t no_free_exch_xid;
35950 + atomic_unchecked_t xid_not_found;
35951 + atomic_unchecked_t xid_busy;
35952 + atomic_unchecked_t seq_not_found;
35953 + atomic_unchecked_t non_bls_resp;
35954 } stats;
35955 };
35956
35957 @@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
35958 /* allocate memory for exchange */
35959 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35960 if (!ep) {
35961 - atomic_inc(&mp->stats.no_free_exch);
35962 + atomic_inc_unchecked(&mp->stats.no_free_exch);
35963 goto out;
35964 }
35965 memset(ep, 0, sizeof(*ep));
35966 @@ -779,7 +779,7 @@ out:
35967 return ep;
35968 err:
35969 spin_unlock_bh(&pool->lock);
35970 - atomic_inc(&mp->stats.no_free_exch_xid);
35971 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35972 mempool_free(ep, mp->ep_pool);
35973 return NULL;
35974 }
35975 @@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35976 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35977 ep = fc_exch_find(mp, xid);
35978 if (!ep) {
35979 - atomic_inc(&mp->stats.xid_not_found);
35980 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35981 reject = FC_RJT_OX_ID;
35982 goto out;
35983 }
35984 @@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35985 ep = fc_exch_find(mp, xid);
35986 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
35987 if (ep) {
35988 - atomic_inc(&mp->stats.xid_busy);
35989 + atomic_inc_unchecked(&mp->stats.xid_busy);
35990 reject = FC_RJT_RX_ID;
35991 goto rel;
35992 }
35993 @@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35994 }
35995 xid = ep->xid; /* get our XID */
35996 } else if (!ep) {
35997 - atomic_inc(&mp->stats.xid_not_found);
35998 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35999 reject = FC_RJT_RX_ID; /* XID not found */
36000 goto out;
36001 }
36002 @@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36003 } else {
36004 sp = &ep->seq;
36005 if (sp->id != fh->fh_seq_id) {
36006 - atomic_inc(&mp->stats.seq_not_found);
36007 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36008 if (f_ctl & FC_FC_END_SEQ) {
36009 /*
36010 * Update sequence_id based on incoming last
36011 @@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36012
36013 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36014 if (!ep) {
36015 - atomic_inc(&mp->stats.xid_not_found);
36016 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36017 goto out;
36018 }
36019 if (ep->esb_stat & ESB_ST_COMPLETE) {
36020 - atomic_inc(&mp->stats.xid_not_found);
36021 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36022 goto rel;
36023 }
36024 if (ep->rxid == FC_XID_UNKNOWN)
36025 ep->rxid = ntohs(fh->fh_rx_id);
36026 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36027 - atomic_inc(&mp->stats.xid_not_found);
36028 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36029 goto rel;
36030 }
36031 if (ep->did != ntoh24(fh->fh_s_id) &&
36032 ep->did != FC_FID_FLOGI) {
36033 - atomic_inc(&mp->stats.xid_not_found);
36034 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36035 goto rel;
36036 }
36037 sof = fr_sof(fp);
36038 @@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36039 sp->ssb_stat |= SSB_ST_RESP;
36040 sp->id = fh->fh_seq_id;
36041 } else if (sp->id != fh->fh_seq_id) {
36042 - atomic_inc(&mp->stats.seq_not_found);
36043 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36044 goto rel;
36045 }
36046
36047 @@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36048 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36049
36050 if (!sp)
36051 - atomic_inc(&mp->stats.xid_not_found);
36052 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36053 else
36054 - atomic_inc(&mp->stats.non_bls_resp);
36055 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36056
36057 fc_frame_free(fp);
36058 }
36059 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36060 index db9238f..4378ed2 100644
36061 --- a/drivers/scsi/libsas/sas_ata.c
36062 +++ b/drivers/scsi/libsas/sas_ata.c
36063 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
36064 .postreset = ata_std_postreset,
36065 .error_handler = ata_std_error_handler,
36066 .post_internal_cmd = sas_ata_post_internal,
36067 - .qc_defer = ata_std_qc_defer,
36068 + .qc_defer = ata_std_qc_defer,
36069 .qc_prep = ata_noop_qc_prep,
36070 .qc_issue = sas_ata_qc_issue,
36071 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36072 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36073 index c088a36..01c73b0 100644
36074 --- a/drivers/scsi/lpfc/lpfc.h
36075 +++ b/drivers/scsi/lpfc/lpfc.h
36076 @@ -425,7 +425,7 @@ struct lpfc_vport {
36077 struct dentry *debug_nodelist;
36078 struct dentry *vport_debugfs_root;
36079 struct lpfc_debugfs_trc *disc_trc;
36080 - atomic_t disc_trc_cnt;
36081 + atomic_unchecked_t disc_trc_cnt;
36082 #endif
36083 uint8_t stat_data_enabled;
36084 uint8_t stat_data_blocked;
36085 @@ -835,8 +835,8 @@ struct lpfc_hba {
36086 struct timer_list fabric_block_timer;
36087 unsigned long bit_flags;
36088 #define FABRIC_COMANDS_BLOCKED 0
36089 - atomic_t num_rsrc_err;
36090 - atomic_t num_cmd_success;
36091 + atomic_unchecked_t num_rsrc_err;
36092 + atomic_unchecked_t num_cmd_success;
36093 unsigned long last_rsrc_error_time;
36094 unsigned long last_ramp_down_time;
36095 unsigned long last_ramp_up_time;
36096 @@ -850,7 +850,7 @@ struct lpfc_hba {
36097 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
36098 struct dentry *debug_slow_ring_trc;
36099 struct lpfc_debugfs_trc *slow_ring_trc;
36100 - atomic_t slow_ring_trc_cnt;
36101 + atomic_unchecked_t slow_ring_trc_cnt;
36102 /* iDiag debugfs sub-directory */
36103 struct dentry *idiag_root;
36104 struct dentry *idiag_pci_cfg;
36105 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36106 index a0424dd..2499b6b 100644
36107 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36108 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36109 @@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36110
36111 #include <linux/debugfs.h>
36112
36113 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36114 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36115 static unsigned long lpfc_debugfs_start_time = 0L;
36116
36117 /* iDiag */
36118 @@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36119 lpfc_debugfs_enable = 0;
36120
36121 len = 0;
36122 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36123 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36124 (lpfc_debugfs_max_disc_trc - 1);
36125 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36126 dtp = vport->disc_trc + i;
36127 @@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36128 lpfc_debugfs_enable = 0;
36129
36130 len = 0;
36131 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36132 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36133 (lpfc_debugfs_max_slow_ring_trc - 1);
36134 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36135 dtp = phba->slow_ring_trc + i;
36136 @@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36137 !vport || !vport->disc_trc)
36138 return;
36139
36140 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36141 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36142 (lpfc_debugfs_max_disc_trc - 1);
36143 dtp = vport->disc_trc + index;
36144 dtp->fmt = fmt;
36145 dtp->data1 = data1;
36146 dtp->data2 = data2;
36147 dtp->data3 = data3;
36148 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36149 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36150 dtp->jif = jiffies;
36151 #endif
36152 return;
36153 @@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36154 !phba || !phba->slow_ring_trc)
36155 return;
36156
36157 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36158 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36159 (lpfc_debugfs_max_slow_ring_trc - 1);
36160 dtp = phba->slow_ring_trc + index;
36161 dtp->fmt = fmt;
36162 dtp->data1 = data1;
36163 dtp->data2 = data2;
36164 dtp->data3 = data3;
36165 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36166 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36167 dtp->jif = jiffies;
36168 #endif
36169 return;
36170 @@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36171 "slow_ring buffer\n");
36172 goto debug_failed;
36173 }
36174 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36175 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36176 memset(phba->slow_ring_trc, 0,
36177 (sizeof(struct lpfc_debugfs_trc) *
36178 lpfc_debugfs_max_slow_ring_trc));
36179 @@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36180 "buffer\n");
36181 goto debug_failed;
36182 }
36183 - atomic_set(&vport->disc_trc_cnt, 0);
36184 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36185
36186 snprintf(name, sizeof(name), "discovery_trace");
36187 vport->debug_disc_trc =
36188 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36189 index a3c8200..31e562e 100644
36190 --- a/drivers/scsi/lpfc/lpfc_init.c
36191 +++ b/drivers/scsi/lpfc/lpfc_init.c
36192 @@ -9969,8 +9969,10 @@ lpfc_init(void)
36193 printk(LPFC_COPYRIGHT "\n");
36194
36195 if (lpfc_enable_npiv) {
36196 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36197 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36198 + pax_open_kernel();
36199 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36200 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36201 + pax_close_kernel();
36202 }
36203 lpfc_transport_template =
36204 fc_attach_transport(&lpfc_transport_functions);
36205 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36206 index eadd241..26c8e0f 100644
36207 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36208 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36209 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36210 uint32_t evt_posted;
36211
36212 spin_lock_irqsave(&phba->hbalock, flags);
36213 - atomic_inc(&phba->num_rsrc_err);
36214 + atomic_inc_unchecked(&phba->num_rsrc_err);
36215 phba->last_rsrc_error_time = jiffies;
36216
36217 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36218 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36219 unsigned long flags;
36220 struct lpfc_hba *phba = vport->phba;
36221 uint32_t evt_posted;
36222 - atomic_inc(&phba->num_cmd_success);
36223 + atomic_inc_unchecked(&phba->num_cmd_success);
36224
36225 if (vport->cfg_lun_queue_depth <= queue_depth)
36226 return;
36227 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36228 unsigned long num_rsrc_err, num_cmd_success;
36229 int i;
36230
36231 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36232 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36233 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36234 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36235
36236 vports = lpfc_create_vport_work_array(phba);
36237 if (vports != NULL)
36238 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36239 }
36240 }
36241 lpfc_destroy_vport_work_array(phba, vports);
36242 - atomic_set(&phba->num_rsrc_err, 0);
36243 - atomic_set(&phba->num_cmd_success, 0);
36244 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36245 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36246 }
36247
36248 /**
36249 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36250 }
36251 }
36252 lpfc_destroy_vport_work_array(phba, vports);
36253 - atomic_set(&phba->num_rsrc_err, 0);
36254 - atomic_set(&phba->num_cmd_success, 0);
36255 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36256 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36257 }
36258
36259 /**
36260 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
36261 index 2e6619e..fa64494 100644
36262 --- a/drivers/scsi/megaraid/megaraid_mbox.c
36263 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
36264 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
36265 int rval;
36266 int i;
36267
36268 + pax_track_stack();
36269 +
36270 // Allocate memory for the base list of scb for management module.
36271 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36272
36273 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
36274 index 86afb13f..c912398 100644
36275 --- a/drivers/scsi/osd/osd_initiator.c
36276 +++ b/drivers/scsi/osd/osd_initiator.c
36277 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(struct osd_dev *od,
36278 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36279 int ret;
36280
36281 + pax_track_stack();
36282 +
36283 or = osd_start_request(od, GFP_KERNEL);
36284 if (!or)
36285 return -ENOMEM;
36286 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36287 index d079f9a..d26072c 100644
36288 --- a/drivers/scsi/pmcraid.c
36289 +++ b/drivers/scsi/pmcraid.c
36290 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36291 res->scsi_dev = scsi_dev;
36292 scsi_dev->hostdata = res;
36293 res->change_detected = 0;
36294 - atomic_set(&res->read_failures, 0);
36295 - atomic_set(&res->write_failures, 0);
36296 + atomic_set_unchecked(&res->read_failures, 0);
36297 + atomic_set_unchecked(&res->write_failures, 0);
36298 rc = 0;
36299 }
36300 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36301 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36302
36303 /* If this was a SCSI read/write command keep count of errors */
36304 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36305 - atomic_inc(&res->read_failures);
36306 + atomic_inc_unchecked(&res->read_failures);
36307 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36308 - atomic_inc(&res->write_failures);
36309 + atomic_inc_unchecked(&res->write_failures);
36310
36311 if (!RES_IS_GSCSI(res->cfg_entry) &&
36312 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36313 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
36314 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36315 * hrrq_id assigned here in queuecommand
36316 */
36317 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36318 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36319 pinstance->num_hrrq;
36320 cmd->cmd_done = pmcraid_io_done;
36321
36322 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
36323 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36324 * hrrq_id assigned here in queuecommand
36325 */
36326 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36327 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36328 pinstance->num_hrrq;
36329
36330 if (request_size) {
36331 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36332
36333 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36334 /* add resources only after host is added into system */
36335 - if (!atomic_read(&pinstance->expose_resources))
36336 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36337 return;
36338
36339 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36340 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instance(
36341 init_waitqueue_head(&pinstance->reset_wait_q);
36342
36343 atomic_set(&pinstance->outstanding_cmds, 0);
36344 - atomic_set(&pinstance->last_message_id, 0);
36345 - atomic_set(&pinstance->expose_resources, 0);
36346 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36347 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36348
36349 INIT_LIST_HEAD(&pinstance->free_res_q);
36350 INIT_LIST_HEAD(&pinstance->used_res_q);
36351 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
36352 /* Schedule worker thread to handle CCN and take care of adding and
36353 * removing devices to OS
36354 */
36355 - atomic_set(&pinstance->expose_resources, 1);
36356 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36357 schedule_work(&pinstance->worker_q);
36358 return rc;
36359
36360 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36361 index f920baf..4417389 100644
36362 --- a/drivers/scsi/pmcraid.h
36363 +++ b/drivers/scsi/pmcraid.h
36364 @@ -749,7 +749,7 @@ struct pmcraid_instance {
36365 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36366
36367 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36368 - atomic_t last_message_id;
36369 + atomic_unchecked_t last_message_id;
36370
36371 /* configuration table */
36372 struct pmcraid_config_table *cfg_table;
36373 @@ -778,7 +778,7 @@ struct pmcraid_instance {
36374 atomic_t outstanding_cmds;
36375
36376 /* should add/delete resources to mid-layer now ?*/
36377 - atomic_t expose_resources;
36378 + atomic_unchecked_t expose_resources;
36379
36380
36381
36382 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
36383 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36384 };
36385 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36386 - atomic_t read_failures; /* count of failed READ commands */
36387 - atomic_t write_failures; /* count of failed WRITE commands */
36388 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36389 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36390
36391 /* To indicate add/delete/modify during CCN */
36392 u8 change_detected;
36393 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36394 index a03eaf4..a6b3fd9 100644
36395 --- a/drivers/scsi/qla2xxx/qla_def.h
36396 +++ b/drivers/scsi/qla2xxx/qla_def.h
36397 @@ -2244,7 +2244,7 @@ struct isp_operations {
36398 int (*get_flash_version) (struct scsi_qla_host *, void *);
36399 int (*start_scsi) (srb_t *);
36400 int (*abort_isp) (struct scsi_qla_host *);
36401 -};
36402 +} __no_const;
36403
36404 /* MSI-X Support *************************************************************/
36405
36406 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36407 index 473c5c8..4e2f24a 100644
36408 --- a/drivers/scsi/qla4xxx/ql4_def.h
36409 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36410 @@ -256,7 +256,7 @@ struct ddb_entry {
36411 atomic_t retry_relogin_timer; /* Min Time between relogins
36412 * (4000 only) */
36413 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36414 - atomic_t relogin_retry_count; /* Num of times relogin has been
36415 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36416 * retried */
36417
36418 uint16_t port;
36419 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
36420 index 42ed5db..0262f9e 100644
36421 --- a/drivers/scsi/qla4xxx/ql4_init.c
36422 +++ b/drivers/scsi/qla4xxx/ql4_init.c
36423 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
36424 ddb_entry->fw_ddb_index = fw_ddb_index;
36425 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36426 atomic_set(&ddb_entry->relogin_timer, 0);
36427 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36428 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36429 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36430 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36431 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36432 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
36433 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
36434 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
36435 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36436 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36437 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36438 atomic_set(&ddb_entry->relogin_timer, 0);
36439 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36440 iscsi_unblock_session(ddb_entry->sess);
36441 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36442 index f2364ec..44c42b1 100644
36443 --- a/drivers/scsi/qla4xxx/ql4_os.c
36444 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36445 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
36446 ddb_entry->fw_ddb_device_state ==
36447 DDB_DS_SESSION_FAILED) {
36448 /* Reset retry relogin timer */
36449 - atomic_inc(&ddb_entry->relogin_retry_count);
36450 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36451 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
36452 " timed out-retrying"
36453 " relogin (%d)\n",
36454 ha->host_no,
36455 ddb_entry->fw_ddb_index,
36456 - atomic_read(&ddb_entry->
36457 + atomic_read_unchecked(&ddb_entry->
36458 relogin_retry_count))
36459 );
36460 start_dpc++;
36461 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36462 index 2aeb2e9..46e3925 100644
36463 --- a/drivers/scsi/scsi.c
36464 +++ b/drivers/scsi/scsi.c
36465 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36466 unsigned long timeout;
36467 int rtn = 0;
36468
36469 - atomic_inc(&cmd->device->iorequest_cnt);
36470 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36471
36472 /* check if the device is still usable */
36473 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36474 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
36475 index 6888b2c..45befa1 100644
36476 --- a/drivers/scsi/scsi_debug.c
36477 +++ b/drivers/scsi/scsi_debug.c
36478 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
36479 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36480 unsigned char *cmd = (unsigned char *)scp->cmnd;
36481
36482 + pax_track_stack();
36483 +
36484 if ((errsts = check_readiness(scp, 1, devip)))
36485 return errsts;
36486 memset(arr, 0, sizeof(arr));
36487 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
36488 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36489 unsigned char *cmd = (unsigned char *)scp->cmnd;
36490
36491 + pax_track_stack();
36492 +
36493 if ((errsts = check_readiness(scp, 1, devip)))
36494 return errsts;
36495 memset(arr, 0, sizeof(arr));
36496 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36497 index b4d43ae..26edd69 100644
36498 --- a/drivers/scsi/scsi_lib.c
36499 +++ b/drivers/scsi/scsi_lib.c
36500 @@ -1413,7 +1413,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36501 shost = sdev->host;
36502 scsi_init_cmd_errh(cmd);
36503 cmd->result = DID_NO_CONNECT << 16;
36504 - atomic_inc(&cmd->device->iorequest_cnt);
36505 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36506
36507 /*
36508 * SCSI request completion path will do scsi_device_unbusy(),
36509 @@ -1439,9 +1439,9 @@ static void scsi_softirq_done(struct request *rq)
36510
36511 INIT_LIST_HEAD(&cmd->eh_entry);
36512
36513 - atomic_inc(&cmd->device->iodone_cnt);
36514 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36515 if (cmd->result)
36516 - atomic_inc(&cmd->device->ioerr_cnt);
36517 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36518
36519 disposition = scsi_decide_disposition(cmd);
36520 if (disposition != SUCCESS &&
36521 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36522 index e0bd3f7..816b8a6 100644
36523 --- a/drivers/scsi/scsi_sysfs.c
36524 +++ b/drivers/scsi/scsi_sysfs.c
36525 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36526 char *buf) \
36527 { \
36528 struct scsi_device *sdev = to_scsi_device(dev); \
36529 - unsigned long long count = atomic_read(&sdev->field); \
36530 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36531 return snprintf(buf, 20, "0x%llx\n", count); \
36532 } \
36533 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36534 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36535 index 84a1fdf..693b0d6 100644
36536 --- a/drivers/scsi/scsi_tgt_lib.c
36537 +++ b/drivers/scsi/scsi_tgt_lib.c
36538 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36539 int err;
36540
36541 dprintk("%lx %u\n", uaddr, len);
36542 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36543 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36544 if (err) {
36545 /*
36546 * TODO: need to fixup sg_tablesize, max_segment_size,
36547 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36548 index 1b21491..1b7f60e 100644
36549 --- a/drivers/scsi/scsi_transport_fc.c
36550 +++ b/drivers/scsi/scsi_transport_fc.c
36551 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36552 * Netlink Infrastructure
36553 */
36554
36555 -static atomic_t fc_event_seq;
36556 +static atomic_unchecked_t fc_event_seq;
36557
36558 /**
36559 * fc_get_event_number - Obtain the next sequential FC event number
36560 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36561 u32
36562 fc_get_event_number(void)
36563 {
36564 - return atomic_add_return(1, &fc_event_seq);
36565 + return atomic_add_return_unchecked(1, &fc_event_seq);
36566 }
36567 EXPORT_SYMBOL(fc_get_event_number);
36568
36569 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36570 {
36571 int error;
36572
36573 - atomic_set(&fc_event_seq, 0);
36574 + atomic_set_unchecked(&fc_event_seq, 0);
36575
36576 error = transport_class_register(&fc_host_class);
36577 if (error)
36578 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36579 char *cp;
36580
36581 *val = simple_strtoul(buf, &cp, 0);
36582 - if ((*cp && (*cp != '\n')) || (*val < 0))
36583 + if (*cp && (*cp != '\n'))
36584 return -EINVAL;
36585 /*
36586 * Check for overflow; dev_loss_tmo is u32
36587 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36588 index 3fd16d7..ba0871f 100644
36589 --- a/drivers/scsi/scsi_transport_iscsi.c
36590 +++ b/drivers/scsi/scsi_transport_iscsi.c
36591 @@ -83,7 +83,7 @@ struct iscsi_internal {
36592 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36593 };
36594
36595 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36596 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36597 static struct workqueue_struct *iscsi_eh_timer_workq;
36598
36599 /*
36600 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36601 int err;
36602
36603 ihost = shost->shost_data;
36604 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36605 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36606
36607 if (id == ISCSI_MAX_TARGET) {
36608 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36609 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(void)
36610 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36611 ISCSI_TRANSPORT_VERSION);
36612
36613 - atomic_set(&iscsi_session_nr, 0);
36614 + atomic_set_unchecked(&iscsi_session_nr, 0);
36615
36616 err = class_register(&iscsi_transport_class);
36617 if (err)
36618 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36619 index 21a045e..ec89e03 100644
36620 --- a/drivers/scsi/scsi_transport_srp.c
36621 +++ b/drivers/scsi/scsi_transport_srp.c
36622 @@ -33,7 +33,7 @@
36623 #include "scsi_transport_srp_internal.h"
36624
36625 struct srp_host_attrs {
36626 - atomic_t next_port_id;
36627 + atomic_unchecked_t next_port_id;
36628 };
36629 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36630
36631 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36632 struct Scsi_Host *shost = dev_to_shost(dev);
36633 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36634
36635 - atomic_set(&srp_host->next_port_id, 0);
36636 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36637 return 0;
36638 }
36639
36640 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36641 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36642 rport->roles = ids->roles;
36643
36644 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36645 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36646 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36647
36648 transport_setup_device(&rport->dev);
36649 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36650 index 909ed9e..1ae290a 100644
36651 --- a/drivers/scsi/sg.c
36652 +++ b/drivers/scsi/sg.c
36653 @@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36654 sdp->disk->disk_name,
36655 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36656 NULL,
36657 - (char *)arg);
36658 + (char __user *)arg);
36659 case BLKTRACESTART:
36660 return blk_trace_startstop(sdp->device->request_queue, 1);
36661 case BLKTRACESTOP:
36662 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
36663 const struct file_operations * fops;
36664 };
36665
36666 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36667 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36668 {"allow_dio", &adio_fops},
36669 {"debug", &debug_fops},
36670 {"def_reserved_size", &dressz_fops},
36671 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
36672 {
36673 int k, mask;
36674 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36675 - struct sg_proc_leaf * leaf;
36676 + const struct sg_proc_leaf * leaf;
36677
36678 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36679 if (!sg_proc_sgp)
36680 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
36681 index b4543f5..e1b34b8 100644
36682 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
36683 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
36684 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
36685 int do_iounmap = 0;
36686 int do_disable_device = 1;
36687
36688 + pax_track_stack();
36689 +
36690 memset(&sym_dev, 0, sizeof(sym_dev));
36691 memset(&nvram, 0, sizeof(nvram));
36692 sym_dev.pdev = pdev;
36693 diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
36694 index a18996d..fe993cb 100644
36695 --- a/drivers/scsi/vmw_pvscsi.c
36696 +++ b/drivers/scsi/vmw_pvscsi.c
36697 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
36698 dma_addr_t base;
36699 unsigned i;
36700
36701 + pax_track_stack();
36702 +
36703 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
36704 cmd.reqRingNumPages = adapter->req_pages;
36705 cmd.cmpRingNumPages = adapter->cmp_pages;
36706 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36707 index c5f37f0..898d202 100644
36708 --- a/drivers/spi/spi-dw-pci.c
36709 +++ b/drivers/spi/spi-dw-pci.c
36710 @@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pdev)
36711 #define spi_resume NULL
36712 #endif
36713
36714 -static const struct pci_device_id pci_ids[] __devinitdata = {
36715 +static const struct pci_device_id pci_ids[] __devinitconst = {
36716 /* Intel MID platform SPI controller 0 */
36717 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36718 {},
36719 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36720 index 4d1b9f5..8408fe3 100644
36721 --- a/drivers/spi/spi.c
36722 +++ b/drivers/spi/spi.c
36723 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *master)
36724 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36725
36726 /* portable code must never pass more than 32 bytes */
36727 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36728 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36729
36730 static u8 *buf;
36731
36732 diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36733 index 32ee39a..3004c3d 100644
36734 --- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36735 +++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36736 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];
36737 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
36738
36739
36740 -static struct net_device_ops ar6000_netdev_ops = {
36741 +static net_device_ops_no_const ar6000_netdev_ops = {
36742 .ndo_init = NULL,
36743 .ndo_open = ar6000_open,
36744 .ndo_stop = ar6000_close,
36745 diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36746 index 39e0873..0925710 100644
36747 --- a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36748 +++ b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36749 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb);
36750 typedef struct ar6k_pal_config_s
36751 {
36752 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
36753 -}ar6k_pal_config_t;
36754 +} __no_const ar6k_pal_config_t;
36755
36756 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
36757 #endif /* _AR6K_PAL_H_ */
36758 diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36759 index 05dada9..96171c6 100644
36760 --- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36761 +++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36762 @@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if *ifp)
36763 free_netdev(ifp->net);
36764 }
36765 /* Allocate etherdev, including space for private structure */
36766 - ifp->net = alloc_etherdev(sizeof(drvr_priv));
36767 + ifp->net = alloc_etherdev(sizeof(*drvr_priv));
36768 if (!ifp->net) {
36769 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36770 ret = -ENOMEM;
36771 }
36772 if (ret == 0) {
36773 strcpy(ifp->net->name, ifp->name);
36774 - memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
36775 + memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
36776 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
36777 if (err != 0) {
36778 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
36779 @@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36780 BRCMF_TRACE(("%s: Enter\n", __func__));
36781
36782 /* Allocate etherdev, including space for private structure */
36783 - net = alloc_etherdev(sizeof(drvr_priv));
36784 + net = alloc_etherdev(sizeof(*drvr_priv));
36785 if (!net) {
36786 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36787 goto fail;
36788 @@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36789 /*
36790 * Save the brcmf_info into the priv
36791 */
36792 - memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36793 + memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36794
36795 /* Set network interface name if it was provided as module parameter */
36796 if (iface_name[0]) {
36797 @@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36798 /*
36799 * Save the brcmf_info into the priv
36800 */
36801 - memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36802 + memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36803
36804 #if defined(CONFIG_PM_SLEEP)
36805 atomic_set(&brcmf_mmc_suspend, false);
36806 diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36807 index d345472..cedb19e 100644
36808 --- a/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36809 +++ b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36810 @@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
36811 u16 func, uint bustype, u32 regsva, void *param);
36812 /* detach from device */
36813 void (*detach) (void *ch);
36814 -};
36815 +} __no_const;
36816
36817 struct sdioh_info;
36818
36819 diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36820 index a01b01c..b3f721c 100644
36821 --- a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36822 +++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36823 @@ -591,7 +591,7 @@ struct phy_func_ptr {
36824 initfn_t carrsuppr;
36825 rxsigpwrfn_t rxsigpwr;
36826 detachfn_t detach;
36827 -};
36828 +} __no_const;
36829
36830 struct brcms_phy {
36831 struct brcms_phy_pub pubpi_ro;
36832 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
36833 index 8fb3051..a8b6c67 100644
36834 --- a/drivers/staging/et131x/et1310_tx.c
36835 +++ b/drivers/staging/et131x/et1310_tx.c
36836 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
36837 struct net_device_stats *stats = &etdev->net_stats;
36838
36839 if (tcb->flags & fMP_DEST_BROAD)
36840 - atomic_inc(&etdev->stats.brdcstxmt);
36841 + atomic_inc_unchecked(&etdev->stats.brdcstxmt);
36842 else if (tcb->flags & fMP_DEST_MULTI)
36843 - atomic_inc(&etdev->stats.multixmt);
36844 + atomic_inc_unchecked(&etdev->stats.multixmt);
36845 else
36846 - atomic_inc(&etdev->stats.unixmt);
36847 + atomic_inc_unchecked(&etdev->stats.unixmt);
36848
36849 if (tcb->skb) {
36850 stats->tx_bytes += tcb->skb->len;
36851 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
36852 index 408c50b..fd65e9f 100644
36853 --- a/drivers/staging/et131x/et131x_adapter.h
36854 +++ b/drivers/staging/et131x/et131x_adapter.h
36855 @@ -106,11 +106,11 @@ struct ce_stats {
36856 * operations
36857 */
36858 u32 unircv; /* # multicast packets received */
36859 - atomic_t unixmt; /* # multicast packets for Tx */
36860 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
36861 u32 multircv; /* # multicast packets received */
36862 - atomic_t multixmt; /* # multicast packets for Tx */
36863 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
36864 u32 brdcstrcv; /* # broadcast packets received */
36865 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
36866 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
36867 u32 norcvbuf; /* # Rx packets discarded */
36868 u32 noxmtbuf; /* # Tx packets discarded */
36869
36870 diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
36871 index 455f47a..86205ff 100644
36872 --- a/drivers/staging/hv/channel.c
36873 +++ b/drivers/staging/hv/channel.c
36874 @@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36875 int ret = 0;
36876 int t;
36877
36878 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36879 - atomic_inc(&vmbus_connection.next_gpadl_handle);
36880 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36881 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36882
36883 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36884 if (ret)
36885 diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
36886 index 824f816..a800af7 100644
36887 --- a/drivers/staging/hv/hv.c
36888 +++ b/drivers/staging/hv/hv.c
36889 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36890 u64 output_address = (output) ? virt_to_phys(output) : 0;
36891 u32 output_address_hi = output_address >> 32;
36892 u32 output_address_lo = output_address & 0xFFFFFFFF;
36893 - volatile void *hypercall_page = hv_context.hypercall_page;
36894 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36895
36896 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36897 "=a"(hv_status_lo) : "d" (control_hi),
36898 diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
36899 index d957fc2..43cedd9 100644
36900 --- a/drivers/staging/hv/hv_mouse.c
36901 +++ b/drivers/staging/hv/hv_mouse.c
36902 @@ -878,8 +878,10 @@ static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
36903 if (hid_dev) {
36904 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
36905
36906 - hid_dev->ll_driver->open = mousevsc_hid_open;
36907 - hid_dev->ll_driver->close = mousevsc_hid_close;
36908 + pax_open_kernel();
36909 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
36910 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
36911 + pax_close_kernel();
36912
36913 hid_dev->bus = BUS_VIRTUAL;
36914 hid_dev->vendor = input_device_ctx->device_info.vendor;
36915 diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/staging/hv/hyperv_vmbus.h
36916 index 349ad80..3f75719 100644
36917 --- a/drivers/staging/hv/hyperv_vmbus.h
36918 +++ b/drivers/staging/hv/hyperv_vmbus.h
36919 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
36920 struct vmbus_connection {
36921 enum vmbus_connect_state conn_state;
36922
36923 - atomic_t next_gpadl_handle;
36924 + atomic_unchecked_t next_gpadl_handle;
36925
36926 /*
36927 * Represents channel interrupts. Each bit position represents a
36928 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
36929 index dbb5201..d6047c6 100644
36930 --- a/drivers/staging/hv/rndis_filter.c
36931 +++ b/drivers/staging/hv/rndis_filter.c
36932 @@ -43,7 +43,7 @@ struct rndis_device {
36933
36934 enum rndis_device_state state;
36935 u32 link_stat;
36936 - atomic_t new_req_id;
36937 + atomic_unchecked_t new_req_id;
36938
36939 spinlock_t request_lock;
36940 struct list_head req_list;
36941 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36942 * template
36943 */
36944 set = &rndis_msg->msg.set_req;
36945 - set->req_id = atomic_inc_return(&dev->new_req_id);
36946 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36947
36948 /* Add to the request list */
36949 spin_lock_irqsave(&dev->request_lock, flags);
36950 @@ -622,7 +622,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36951
36952 /* Setup the rndis set */
36953 halt = &request->request_msg.msg.halt_req;
36954 - halt->req_id = atomic_inc_return(&dev->new_req_id);
36955 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36956
36957 /* Ignore return since this msg is optional. */
36958 rndis_filter_send_request(dev, request);
36959 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
36960 index 1c949f5..7a8b104 100644
36961 --- a/drivers/staging/hv/vmbus_drv.c
36962 +++ b/drivers/staging/hv/vmbus_drv.c
36963 @@ -660,11 +660,11 @@ int vmbus_child_device_register(struct hv_device *child_device_obj)
36964 {
36965 int ret = 0;
36966
36967 - static atomic_t device_num = ATOMIC_INIT(0);
36968 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36969
36970 /* Set the device name. Otherwise, device_register() will fail. */
36971 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36972 - atomic_inc_return(&device_num));
36973 + atomic_inc_return_unchecked(&device_num));
36974
36975 /* The new device belongs to this bus */
36976 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
36977 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
36978 index 3f26f71..fb5c787 100644
36979 --- a/drivers/staging/iio/ring_generic.h
36980 +++ b/drivers/staging/iio/ring_generic.h
36981 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
36982
36983 int (*is_enabled)(struct iio_ring_buffer *ring);
36984 int (*enable)(struct iio_ring_buffer *ring);
36985 -};
36986 +} __no_const;
36987
36988 struct iio_ring_setup_ops {
36989 int (*preenable)(struct iio_dev *);
36990 diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
36991 index cfec92d..a65dacf 100644
36992 --- a/drivers/staging/mei/interface.c
36993 +++ b/drivers/staging/mei/interface.c
36994 @@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
36995 mei_hdr->reserved = 0;
36996
36997 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
36998 - memset(mei_flow_control, 0, sizeof(mei_flow_control));
36999 + memset(mei_flow_control, 0, sizeof(*mei_flow_control));
37000 mei_flow_control->host_addr = cl->host_client_id;
37001 mei_flow_control->me_addr = cl->me_client_id;
37002 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
37003 @@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
37004
37005 mei_cli_disconnect =
37006 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
37007 - memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
37008 + memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
37009 mei_cli_disconnect->host_addr = cl->host_client_id;
37010 mei_cli_disconnect->me_addr = cl->me_client_id;
37011 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
37012 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37013 index 8b307b4..a97ac91 100644
37014 --- a/drivers/staging/octeon/ethernet-rx.c
37015 +++ b/drivers/staging/octeon/ethernet-rx.c
37016 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37017 /* Increment RX stats for virtual ports */
37018 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37019 #ifdef CONFIG_64BIT
37020 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37021 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37022 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37023 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37024 #else
37025 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37026 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37027 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37028 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37029 #endif
37030 }
37031 netif_receive_skb(skb);
37032 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37033 dev->name);
37034 */
37035 #ifdef CONFIG_64BIT
37036 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37037 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37038 #else
37039 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37040 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37041 #endif
37042 dev_kfree_skb_irq(skb);
37043 }
37044 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37045 index a8f780e..aef1098 100644
37046 --- a/drivers/staging/octeon/ethernet.c
37047 +++ b/drivers/staging/octeon/ethernet.c
37048 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37049 * since the RX tasklet also increments it.
37050 */
37051 #ifdef CONFIG_64BIT
37052 - atomic64_add(rx_status.dropped_packets,
37053 - (atomic64_t *)&priv->stats.rx_dropped);
37054 + atomic64_add_unchecked(rx_status.dropped_packets,
37055 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37056 #else
37057 - atomic_add(rx_status.dropped_packets,
37058 - (atomic_t *)&priv->stats.rx_dropped);
37059 + atomic_add_unchecked(rx_status.dropped_packets,
37060 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37061 #endif
37062 }
37063
37064 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
37065 index f3c6060..56bf826 100644
37066 --- a/drivers/staging/pohmelfs/inode.c
37067 +++ b/drivers/staging/pohmelfs/inode.c
37068 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37069 mutex_init(&psb->mcache_lock);
37070 psb->mcache_root = RB_ROOT;
37071 psb->mcache_timeout = msecs_to_jiffies(5000);
37072 - atomic_long_set(&psb->mcache_gen, 0);
37073 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
37074
37075 psb->trans_max_pages = 100;
37076
37077 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
37078 INIT_LIST_HEAD(&psb->crypto_ready_list);
37079 INIT_LIST_HEAD(&psb->crypto_active_list);
37080
37081 - atomic_set(&psb->trans_gen, 1);
37082 + atomic_set_unchecked(&psb->trans_gen, 1);
37083 atomic_long_set(&psb->total_inodes, 0);
37084
37085 mutex_init(&psb->state_lock);
37086 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
37087 index e22665c..a2a9390 100644
37088 --- a/drivers/staging/pohmelfs/mcache.c
37089 +++ b/drivers/staging/pohmelfs/mcache.c
37090 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
37091 m->data = data;
37092 m->start = start;
37093 m->size = size;
37094 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
37095 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
37096
37097 mutex_lock(&psb->mcache_lock);
37098 err = pohmelfs_mcache_insert(psb, m);
37099 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
37100 index 985b6b7..7699e05 100644
37101 --- a/drivers/staging/pohmelfs/netfs.h
37102 +++ b/drivers/staging/pohmelfs/netfs.h
37103 @@ -571,14 +571,14 @@ struct pohmelfs_config;
37104 struct pohmelfs_sb {
37105 struct rb_root mcache_root;
37106 struct mutex mcache_lock;
37107 - atomic_long_t mcache_gen;
37108 + atomic_long_unchecked_t mcache_gen;
37109 unsigned long mcache_timeout;
37110
37111 unsigned int idx;
37112
37113 unsigned int trans_retries;
37114
37115 - atomic_t trans_gen;
37116 + atomic_unchecked_t trans_gen;
37117
37118 unsigned int crypto_attached_size;
37119 unsigned int crypto_align_size;
37120 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
37121 index 36a2535..0591bf4 100644
37122 --- a/drivers/staging/pohmelfs/trans.c
37123 +++ b/drivers/staging/pohmelfs/trans.c
37124 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
37125 int err;
37126 struct netfs_cmd *cmd = t->iovec.iov_base;
37127
37128 - t->gen = atomic_inc_return(&psb->trans_gen);
37129 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
37130
37131 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
37132 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
37133 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37134 index b70cb2b..4db41a7 100644
37135 --- a/drivers/staging/rtl8712/rtl871x_io.h
37136 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37137 @@ -83,7 +83,7 @@ struct _io_ops {
37138 u8 *pmem);
37139 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37140 u8 *pmem);
37141 -};
37142 +} __no_const;
37143
37144 struct io_req {
37145 struct list_head list;
37146 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37147 index c7b5e8b..783d6cb 100644
37148 --- a/drivers/staging/sbe-2t3e3/netdev.c
37149 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37150 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37151 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37152
37153 if (rlen)
37154 - if (copy_to_user(data, &resp, rlen))
37155 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37156 return -EFAULT;
37157
37158 return 0;
37159 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37160 index be21617..0954e45 100644
37161 --- a/drivers/staging/usbip/usbip_common.h
37162 +++ b/drivers/staging/usbip/usbip_common.h
37163 @@ -289,7 +289,7 @@ struct usbip_device {
37164 void (*shutdown)(struct usbip_device *);
37165 void (*reset)(struct usbip_device *);
37166 void (*unusable)(struct usbip_device *);
37167 - } eh_ops;
37168 + } __no_const eh_ops;
37169 };
37170
37171 #if 0
37172 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37173 index 71a586e..4d8a91a 100644
37174 --- a/drivers/staging/usbip/vhci.h
37175 +++ b/drivers/staging/usbip/vhci.h
37176 @@ -85,7 +85,7 @@ struct vhci_hcd {
37177 unsigned resuming:1;
37178 unsigned long re_timeout;
37179
37180 - atomic_t seqnum;
37181 + atomic_unchecked_t seqnum;
37182
37183 /*
37184 * NOTE:
37185 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37186 index 2ee97e2..0420b86 100644
37187 --- a/drivers/staging/usbip/vhci_hcd.c
37188 +++ b/drivers/staging/usbip/vhci_hcd.c
37189 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37190 return;
37191 }
37192
37193 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37194 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37195 if (priv->seqnum == 0xffff)
37196 dev_info(&urb->dev->dev, "seqnum max\n");
37197
37198 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37199 return -ENOMEM;
37200 }
37201
37202 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37203 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37204 if (unlink->seqnum == 0xffff)
37205 pr_info("seqnum max\n");
37206
37207 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37208 vdev->rhport = rhport;
37209 }
37210
37211 - atomic_set(&vhci->seqnum, 0);
37212 + atomic_set_unchecked(&vhci->seqnum, 0);
37213 spin_lock_init(&vhci->lock);
37214
37215 hcd->power_budget = 0; /* no limit */
37216 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37217 index 09c44ab..6692d83 100644
37218 --- a/drivers/staging/usbip/vhci_rx.c
37219 +++ b/drivers/staging/usbip/vhci_rx.c
37220 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37221 if (!urb) {
37222 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37223 pr_info("max seqnum %d\n",
37224 - atomic_read(&the_controller->seqnum));
37225 + atomic_read_unchecked(&the_controller->seqnum));
37226 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37227 return;
37228 }
37229 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37230 index 7735027..30eed13 100644
37231 --- a/drivers/staging/vt6655/hostap.c
37232 +++ b/drivers/staging/vt6655/hostap.c
37233 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37234 *
37235 */
37236
37237 +static net_device_ops_no_const apdev_netdev_ops;
37238 +
37239 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37240 {
37241 PSDevice apdev_priv;
37242 struct net_device *dev = pDevice->dev;
37243 int ret;
37244 - const struct net_device_ops apdev_netdev_ops = {
37245 - .ndo_start_xmit = pDevice->tx_80211,
37246 - };
37247
37248 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37249
37250 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37251 *apdev_priv = *pDevice;
37252 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37253
37254 + /* only half broken now */
37255 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37256 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37257
37258 pDevice->apdev->type = ARPHRD_IEEE80211;
37259 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37260 index 51b5adf..098e320 100644
37261 --- a/drivers/staging/vt6656/hostap.c
37262 +++ b/drivers/staging/vt6656/hostap.c
37263 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37264 *
37265 */
37266
37267 +static net_device_ops_no_const apdev_netdev_ops;
37268 +
37269 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37270 {
37271 PSDevice apdev_priv;
37272 struct net_device *dev = pDevice->dev;
37273 int ret;
37274 - const struct net_device_ops apdev_netdev_ops = {
37275 - .ndo_start_xmit = pDevice->tx_80211,
37276 - };
37277
37278 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37279
37280 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37281 *apdev_priv = *pDevice;
37282 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37283
37284 + /* only half broken now */
37285 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37286 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37287
37288 pDevice->apdev->type = ARPHRD_IEEE80211;
37289 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37290 index 7843dfd..3db105f 100644
37291 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37292 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37293 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37294
37295 struct usbctlx_completor {
37296 int (*complete) (struct usbctlx_completor *);
37297 -};
37298 +} __no_const;
37299
37300 static int
37301 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37302 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37303 index 1ca66ea..76f1343 100644
37304 --- a/drivers/staging/zcache/tmem.c
37305 +++ b/drivers/staging/zcache/tmem.c
37306 @@ -39,7 +39,7 @@
37307 * A tmem host implementation must use this function to register callbacks
37308 * for memory allocation.
37309 */
37310 -static struct tmem_hostops tmem_hostops;
37311 +static tmem_hostops_no_const tmem_hostops;
37312
37313 static void tmem_objnode_tree_init(void);
37314
37315 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37316 * A tmem host implementation must use this function to register
37317 * callbacks for a page-accessible memory (PAM) implementation
37318 */
37319 -static struct tmem_pamops tmem_pamops;
37320 +static tmem_pamops_no_const tmem_pamops;
37321
37322 void tmem_register_pamops(struct tmem_pamops *m)
37323 {
37324 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37325 index ed147c4..94fc3c6 100644
37326 --- a/drivers/staging/zcache/tmem.h
37327 +++ b/drivers/staging/zcache/tmem.h
37328 @@ -180,6 +180,7 @@ struct tmem_pamops {
37329 void (*new_obj)(struct tmem_obj *);
37330 int (*replace_in_obj)(void *, struct tmem_obj *);
37331 };
37332 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37333 extern void tmem_register_pamops(struct tmem_pamops *m);
37334
37335 /* memory allocation methods provided by the host implementation */
37336 @@ -189,6 +190,7 @@ struct tmem_hostops {
37337 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37338 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37339 };
37340 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37341 extern void tmem_register_hostops(struct tmem_hostops *m);
37342
37343 /* core tmem accessor functions */
37344 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37345 index 26a5d8b..74434f8 100644
37346 --- a/drivers/target/iscsi/iscsi_target.c
37347 +++ b/drivers/target/iscsi/iscsi_target.c
37348 @@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37349 * outstanding_r2ts reaches zero, go ahead and send the delayed
37350 * TASK_ABORTED status.
37351 */
37352 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37353 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37354 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37355 if (--cmd->outstanding_r2ts < 1) {
37356 iscsit_stop_dataout_timer(cmd);
37357 diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
37358 index 8badcb4..94c9ac6 100644
37359 --- a/drivers/target/target_core_alua.c
37360 +++ b/drivers/target/target_core_alua.c
37361 @@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_metadata(
37362 char path[ALUA_METADATA_PATH_LEN];
37363 int len;
37364
37365 + pax_track_stack();
37366 +
37367 memset(path, 0, ALUA_METADATA_PATH_LEN);
37368
37369 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
37370 @@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondary_metadata(
37371 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
37372 int len;
37373
37374 + pax_track_stack();
37375 +
37376 memset(path, 0, ALUA_METADATA_PATH_LEN);
37377 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
37378
37379 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37380 index f04d4ef..7de212b 100644
37381 --- a/drivers/target/target_core_cdb.c
37382 +++ b/drivers/target/target_core_cdb.c
37383 @@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
37384 int length = 0;
37385 unsigned char buf[SE_MODE_PAGE_BUF];
37386
37387 + pax_track_stack();
37388 +
37389 memset(buf, 0, SE_MODE_PAGE_BUF);
37390
37391 switch (cdb[2] & 0x3f) {
37392 diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
37393 index b2575d8..b6b28fd 100644
37394 --- a/drivers/target/target_core_configfs.c
37395 +++ b/drivers/target/target_core_configfs.c
37396 @@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
37397 ssize_t len = 0;
37398 int reg_count = 0, prf_isid;
37399
37400 + pax_track_stack();
37401 +
37402 if (!su_dev->se_dev_ptr)
37403 return -ENODEV;
37404
37405 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37406 index 7fd3a16..bc2fb3e 100644
37407 --- a/drivers/target/target_core_pr.c
37408 +++ b/drivers/target/target_core_pr.c
37409 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_registration(
37410 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
37411 u16 tpgt;
37412
37413 + pax_track_stack();
37414 +
37415 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
37416 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
37417 /*
37418 @@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf(
37419 ssize_t len = 0;
37420 int reg_count = 0;
37421
37422 + pax_track_stack();
37423 +
37424 memset(buf, 0, pr_aptpl_buf_len);
37425 /*
37426 * Called to clear metadata once APTPL has been deactivated.
37427 @@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_file(
37428 char path[512];
37429 int ret;
37430
37431 + pax_track_stack();
37432 +
37433 memset(iov, 0, sizeof(struct iovec));
37434 memset(path, 0, 512);
37435
37436 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37437 index 5c1b8c5..0cb7d0e 100644
37438 --- a/drivers/target/target_core_tmr.c
37439 +++ b/drivers/target/target_core_tmr.c
37440 @@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
37441 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37442 cmd->t_task_list_num,
37443 atomic_read(&cmd->t_task_cdbs_left),
37444 - atomic_read(&cmd->t_task_cdbs_sent),
37445 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37446 atomic_read(&cmd->t_transport_active),
37447 atomic_read(&cmd->t_transport_stop),
37448 atomic_read(&cmd->t_transport_sent));
37449 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37450 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37451 " task: %p, t_fe_count: %d dev: %p\n", task,
37452 fe_count, dev);
37453 - atomic_set(&cmd->t_transport_aborted, 1);
37454 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37455 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37456
37457 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37458 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
37459 }
37460 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
37461 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
37462 - atomic_set(&cmd->t_transport_aborted, 1);
37463 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37464 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37465
37466 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37467 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37468 index 013c100..8fd2e57 100644
37469 --- a/drivers/target/target_core_transport.c
37470 +++ b/drivers/target/target_core_transport.c
37471 @@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_to_core_hba(
37472
37473 dev->queue_depth = dev_limits->queue_depth;
37474 atomic_set(&dev->depth_left, dev->queue_depth);
37475 - atomic_set(&dev->dev_ordered_id, 0);
37476 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37477
37478 se_dev_set_default_attribs(dev, dev_limits);
37479
37480 @@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37481 * Used to determine when ORDERED commands should go from
37482 * Dormant to Active status.
37483 */
37484 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37485 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37486 smp_mb__after_atomic_inc();
37487 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37488 cmd->se_ordered_id, cmd->sam_task_attr,
37489 @@ -1960,7 +1960,7 @@ static void transport_generic_request_failure(
37490 " t_transport_active: %d t_transport_stop: %d"
37491 " t_transport_sent: %d\n", cmd->t_task_list_num,
37492 atomic_read(&cmd->t_task_cdbs_left),
37493 - atomic_read(&cmd->t_task_cdbs_sent),
37494 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37495 atomic_read(&cmd->t_task_cdbs_ex_left),
37496 atomic_read(&cmd->t_transport_active),
37497 atomic_read(&cmd->t_transport_stop),
37498 @@ -2460,9 +2460,9 @@ check_depth:
37499 spin_lock_irqsave(&cmd->t_state_lock, flags);
37500 atomic_set(&task->task_active, 1);
37501 atomic_set(&task->task_sent, 1);
37502 - atomic_inc(&cmd->t_task_cdbs_sent);
37503 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37504
37505 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37506 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37507 cmd->t_task_list_num)
37508 atomic_set(&cmd->transport_sent, 1);
37509
37510 @@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_tasks(
37511 atomic_set(&cmd->transport_lun_stop, 0);
37512 }
37513 if (!atomic_read(&cmd->t_transport_active) ||
37514 - atomic_read(&cmd->t_transport_aborted))
37515 + atomic_read_unchecked(&cmd->t_transport_aborted))
37516 goto remove;
37517
37518 atomic_set(&cmd->t_transport_stop, 1);
37519 @@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37520 {
37521 int ret = 0;
37522
37523 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
37524 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37525 if (!send_status ||
37526 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37527 return 1;
37528 @@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37529 */
37530 if (cmd->data_direction == DMA_TO_DEVICE) {
37531 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37532 - atomic_inc(&cmd->t_transport_aborted);
37533 + atomic_inc_unchecked(&cmd->t_transport_aborted);
37534 smp_mb__after_atomic_inc();
37535 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
37536 transport_new_cmd_failure(cmd);
37537 @@ -5051,7 +5051,7 @@ static void transport_processing_shutdown(struct se_device *dev)
37538 cmd->se_tfo->get_task_tag(cmd),
37539 cmd->t_task_list_num,
37540 atomic_read(&cmd->t_task_cdbs_left),
37541 - atomic_read(&cmd->t_task_cdbs_sent),
37542 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37543 atomic_read(&cmd->t_transport_active),
37544 atomic_read(&cmd->t_transport_stop),
37545 atomic_read(&cmd->t_transport_sent));
37546 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
37547 index d5f923b..9c78228 100644
37548 --- a/drivers/telephony/ixj.c
37549 +++ b/drivers/telephony/ixj.c
37550 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37551 bool mContinue;
37552 char *pIn, *pOut;
37553
37554 + pax_track_stack();
37555 +
37556 if (!SCI_Prepare(j))
37557 return 0;
37558
37559 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37560 index 4c8b665..1d931eb 100644
37561 --- a/drivers/tty/hvc/hvcs.c
37562 +++ b/drivers/tty/hvc/hvcs.c
37563 @@ -83,6 +83,7 @@
37564 #include <asm/hvcserver.h>
37565 #include <asm/uaccess.h>
37566 #include <asm/vio.h>
37567 +#include <asm/local.h>
37568
37569 /*
37570 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37571 @@ -270,7 +271,7 @@ struct hvcs_struct {
37572 unsigned int index;
37573
37574 struct tty_struct *tty;
37575 - int open_count;
37576 + local_t open_count;
37577
37578 /*
37579 * Used to tell the driver kernel_thread what operations need to take
37580 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37581
37582 spin_lock_irqsave(&hvcsd->lock, flags);
37583
37584 - if (hvcsd->open_count > 0) {
37585 + if (local_read(&hvcsd->open_count) > 0) {
37586 spin_unlock_irqrestore(&hvcsd->lock, flags);
37587 printk(KERN_INFO "HVCS: vterm state unchanged. "
37588 "The hvcs device node is still in use.\n");
37589 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37590 if ((retval = hvcs_partner_connect(hvcsd)))
37591 goto error_release;
37592
37593 - hvcsd->open_count = 1;
37594 + local_set(&hvcsd->open_count, 1);
37595 hvcsd->tty = tty;
37596 tty->driver_data = hvcsd;
37597
37598 @@ -1179,7 +1180,7 @@ fast_open:
37599
37600 spin_lock_irqsave(&hvcsd->lock, flags);
37601 kref_get(&hvcsd->kref);
37602 - hvcsd->open_count++;
37603 + local_inc(&hvcsd->open_count);
37604 hvcsd->todo_mask |= HVCS_SCHED_READ;
37605 spin_unlock_irqrestore(&hvcsd->lock, flags);
37606
37607 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37608 hvcsd = tty->driver_data;
37609
37610 spin_lock_irqsave(&hvcsd->lock, flags);
37611 - if (--hvcsd->open_count == 0) {
37612 + if (local_dec_and_test(&hvcsd->open_count)) {
37613
37614 vio_disable_interrupts(hvcsd->vdev);
37615
37616 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37617 free_irq(irq, hvcsd);
37618 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37619 return;
37620 - } else if (hvcsd->open_count < 0) {
37621 + } else if (local_read(&hvcsd->open_count) < 0) {
37622 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37623 " is missmanaged.\n",
37624 - hvcsd->vdev->unit_address, hvcsd->open_count);
37625 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37626 }
37627
37628 spin_unlock_irqrestore(&hvcsd->lock, flags);
37629 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37630
37631 spin_lock_irqsave(&hvcsd->lock, flags);
37632 /* Preserve this so that we know how many kref refs to put */
37633 - temp_open_count = hvcsd->open_count;
37634 + temp_open_count = local_read(&hvcsd->open_count);
37635
37636 /*
37637 * Don't kref put inside the spinlock because the destruction
37638 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37639 hvcsd->tty->driver_data = NULL;
37640 hvcsd->tty = NULL;
37641
37642 - hvcsd->open_count = 0;
37643 + local_set(&hvcsd->open_count, 0);
37644
37645 /* This will drop any buffered data on the floor which is OK in a hangup
37646 * scenario. */
37647 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37648 * the middle of a write operation? This is a crummy place to do this
37649 * but we want to keep it all in the spinlock.
37650 */
37651 - if (hvcsd->open_count <= 0) {
37652 + if (local_read(&hvcsd->open_count) <= 0) {
37653 spin_unlock_irqrestore(&hvcsd->lock, flags);
37654 return -ENODEV;
37655 }
37656 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37657 {
37658 struct hvcs_struct *hvcsd = tty->driver_data;
37659
37660 - if (!hvcsd || hvcsd->open_count <= 0)
37661 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37662 return 0;
37663
37664 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37665 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37666 index ef92869..f4ebd88 100644
37667 --- a/drivers/tty/ipwireless/tty.c
37668 +++ b/drivers/tty/ipwireless/tty.c
37669 @@ -29,6 +29,7 @@
37670 #include <linux/tty_driver.h>
37671 #include <linux/tty_flip.h>
37672 #include <linux/uaccess.h>
37673 +#include <asm/local.h>
37674
37675 #include "tty.h"
37676 #include "network.h"
37677 @@ -51,7 +52,7 @@ struct ipw_tty {
37678 int tty_type;
37679 struct ipw_network *network;
37680 struct tty_struct *linux_tty;
37681 - int open_count;
37682 + local_t open_count;
37683 unsigned int control_lines;
37684 struct mutex ipw_tty_mutex;
37685 int tx_bytes_queued;
37686 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37687 mutex_unlock(&tty->ipw_tty_mutex);
37688 return -ENODEV;
37689 }
37690 - if (tty->open_count == 0)
37691 + if (local_read(&tty->open_count) == 0)
37692 tty->tx_bytes_queued = 0;
37693
37694 - tty->open_count++;
37695 + local_inc(&tty->open_count);
37696
37697 tty->linux_tty = linux_tty;
37698 linux_tty->driver_data = tty;
37699 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37700
37701 static void do_ipw_close(struct ipw_tty *tty)
37702 {
37703 - tty->open_count--;
37704 -
37705 - if (tty->open_count == 0) {
37706 + if (local_dec_return(&tty->open_count) == 0) {
37707 struct tty_struct *linux_tty = tty->linux_tty;
37708
37709 if (linux_tty != NULL) {
37710 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37711 return;
37712
37713 mutex_lock(&tty->ipw_tty_mutex);
37714 - if (tty->open_count == 0) {
37715 + if (local_read(&tty->open_count) == 0) {
37716 mutex_unlock(&tty->ipw_tty_mutex);
37717 return;
37718 }
37719 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37720 return;
37721 }
37722
37723 - if (!tty->open_count) {
37724 + if (!local_read(&tty->open_count)) {
37725 mutex_unlock(&tty->ipw_tty_mutex);
37726 return;
37727 }
37728 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37729 return -ENODEV;
37730
37731 mutex_lock(&tty->ipw_tty_mutex);
37732 - if (!tty->open_count) {
37733 + if (!local_read(&tty->open_count)) {
37734 mutex_unlock(&tty->ipw_tty_mutex);
37735 return -EINVAL;
37736 }
37737 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37738 if (!tty)
37739 return -ENODEV;
37740
37741 - if (!tty->open_count)
37742 + if (!local_read(&tty->open_count))
37743 return -EINVAL;
37744
37745 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37746 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37747 if (!tty)
37748 return 0;
37749
37750 - if (!tty->open_count)
37751 + if (!local_read(&tty->open_count))
37752 return 0;
37753
37754 return tty->tx_bytes_queued;
37755 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37756 if (!tty)
37757 return -ENODEV;
37758
37759 - if (!tty->open_count)
37760 + if (!local_read(&tty->open_count))
37761 return -EINVAL;
37762
37763 return get_control_lines(tty);
37764 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37765 if (!tty)
37766 return -ENODEV;
37767
37768 - if (!tty->open_count)
37769 + if (!local_read(&tty->open_count))
37770 return -EINVAL;
37771
37772 return set_control_lines(tty, set, clear);
37773 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37774 if (!tty)
37775 return -ENODEV;
37776
37777 - if (!tty->open_count)
37778 + if (!local_read(&tty->open_count))
37779 return -EINVAL;
37780
37781 /* FIXME: Exactly how is the tty object locked here .. */
37782 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37783 against a parallel ioctl etc */
37784 mutex_lock(&ttyj->ipw_tty_mutex);
37785 }
37786 - while (ttyj->open_count)
37787 + while (local_read(&ttyj->open_count))
37788 do_ipw_close(ttyj);
37789 ipwireless_disassociate_network_ttys(network,
37790 ttyj->channel_idx);
37791 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37792 index 8a50e4e..7d9ca3d 100644
37793 --- a/drivers/tty/n_gsm.c
37794 +++ b/drivers/tty/n_gsm.c
37795 @@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37796 kref_init(&dlci->ref);
37797 mutex_init(&dlci->mutex);
37798 dlci->fifo = &dlci->_fifo;
37799 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37800 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37801 kfree(dlci);
37802 return NULL;
37803 }
37804 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37805 index 39d6ab6..eb97f41 100644
37806 --- a/drivers/tty/n_tty.c
37807 +++ b/drivers/tty/n_tty.c
37808 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37809 {
37810 *ops = tty_ldisc_N_TTY;
37811 ops->owner = NULL;
37812 - ops->refcount = ops->flags = 0;
37813 + atomic_set(&ops->refcount, 0);
37814 + ops->flags = 0;
37815 }
37816 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37817 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37818 index e18604b..a7d5a11 100644
37819 --- a/drivers/tty/pty.c
37820 +++ b/drivers/tty/pty.c
37821 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
37822 register_sysctl_table(pty_root_table);
37823
37824 /* Now create the /dev/ptmx special device */
37825 + pax_open_kernel();
37826 tty_default_fops(&ptmx_fops);
37827 - ptmx_fops.open = ptmx_open;
37828 + *(void **)&ptmx_fops.open = ptmx_open;
37829 + pax_close_kernel();
37830
37831 cdev_init(&ptmx_cdev, &ptmx_fops);
37832 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37833 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
37834 index 6a1241c..d04ab0d 100644
37835 --- a/drivers/tty/rocket.c
37836 +++ b/drivers/tty/rocket.c
37837 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
37838 struct rocket_ports tmp;
37839 int board;
37840
37841 + pax_track_stack();
37842 +
37843 if (!retports)
37844 return -EFAULT;
37845 memset(&tmp, 0, sizeof (tmp));
37846 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37847 index 87e7e6c..89744e0 100644
37848 --- a/drivers/tty/serial/kgdboc.c
37849 +++ b/drivers/tty/serial/kgdboc.c
37850 @@ -23,8 +23,9 @@
37851 #define MAX_CONFIG_LEN 40
37852
37853 static struct kgdb_io kgdboc_io_ops;
37854 +static struct kgdb_io kgdboc_io_ops_console;
37855
37856 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37857 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37858 static int configured = -1;
37859
37860 static char config[MAX_CONFIG_LEN];
37861 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
37862 kgdboc_unregister_kbd();
37863 if (configured == 1)
37864 kgdb_unregister_io_module(&kgdboc_io_ops);
37865 + else if (configured == 2)
37866 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37867 }
37868
37869 static int configure_kgdboc(void)
37870 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
37871 int err;
37872 char *cptr = config;
37873 struct console *cons;
37874 + int is_console = 0;
37875
37876 err = kgdboc_option_setup(config);
37877 if (err || !strlen(config) || isspace(config[0]))
37878 goto noconfig;
37879
37880 err = -ENODEV;
37881 - kgdboc_io_ops.is_console = 0;
37882 kgdb_tty_driver = NULL;
37883
37884 kgdboc_use_kms = 0;
37885 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
37886 int idx;
37887 if (cons->device && cons->device(cons, &idx) == p &&
37888 idx == tty_line) {
37889 - kgdboc_io_ops.is_console = 1;
37890 + is_console = 1;
37891 break;
37892 }
37893 cons = cons->next;
37894 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
37895 kgdb_tty_line = tty_line;
37896
37897 do_register:
37898 - err = kgdb_register_io_module(&kgdboc_io_ops);
37899 + if (is_console) {
37900 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37901 + configured = 2;
37902 + } else {
37903 + err = kgdb_register_io_module(&kgdboc_io_ops);
37904 + configured = 1;
37905 + }
37906 if (err)
37907 goto noconfig;
37908
37909 - configured = 1;
37910 -
37911 return 0;
37912
37913 noconfig:
37914 @@ -212,7 +219,7 @@ noconfig:
37915 static int __init init_kgdboc(void)
37916 {
37917 /* Already configured? */
37918 - if (configured == 1)
37919 + if (configured >= 1)
37920 return 0;
37921
37922 return configure_kgdboc();
37923 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37924 if (config[len - 1] == '\n')
37925 config[len - 1] = '\0';
37926
37927 - if (configured == 1)
37928 + if (configured >= 1)
37929 cleanup_kgdboc();
37930
37931 /* Go and configure with the new params. */
37932 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
37933 .post_exception = kgdboc_post_exp_handler,
37934 };
37935
37936 +static struct kgdb_io kgdboc_io_ops_console = {
37937 + .name = "kgdboc",
37938 + .read_char = kgdboc_get_char,
37939 + .write_char = kgdboc_put_char,
37940 + .pre_exception = kgdboc_pre_exp_handler,
37941 + .post_exception = kgdboc_post_exp_handler,
37942 + .is_console = 1
37943 +};
37944 +
37945 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37946 /* This is only available if kgdboc is a built in for early debugging */
37947 static int __init kgdboc_early_init(char *opt)
37948 diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
37949 index cab52f4..29fc6aa 100644
37950 --- a/drivers/tty/serial/mfd.c
37951 +++ b/drivers/tty/serial/mfd.c
37952 @@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci_dev *pdev)
37953 }
37954
37955 /* First 3 are UART ports, and the 4th is the DMA */
37956 -static const struct pci_device_id pci_ids[] __devinitdata = {
37957 +static const struct pci_device_id pci_ids[] __devinitconst = {
37958 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
37959 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
37960 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
37961 diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
37962 index 23bc743..d425c07 100644
37963 --- a/drivers/tty/serial/mrst_max3110.c
37964 +++ b/drivers/tty/serial/mrst_max3110.c
37965 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct uart_max3110 *max)
37966 int loop = 1, num, total = 0;
37967 u8 recv_buf[512], *pbuf;
37968
37969 + pax_track_stack();
37970 +
37971 pbuf = recv_buf;
37972 do {
37973 num = max3110_read_multi(max, pbuf);
37974 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37975 index 1a890e2..1d8139c 100644
37976 --- a/drivers/tty/tty_io.c
37977 +++ b/drivers/tty/tty_io.c
37978 @@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37979
37980 void tty_default_fops(struct file_operations *fops)
37981 {
37982 - *fops = tty_fops;
37983 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37984 }
37985
37986 /*
37987 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37988 index a76c808..ecbc743 100644
37989 --- a/drivers/tty/tty_ldisc.c
37990 +++ b/drivers/tty/tty_ldisc.c
37991 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37992 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37993 struct tty_ldisc_ops *ldo = ld->ops;
37994
37995 - ldo->refcount--;
37996 + atomic_dec(&ldo->refcount);
37997 module_put(ldo->owner);
37998 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37999
38000 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38001 spin_lock_irqsave(&tty_ldisc_lock, flags);
38002 tty_ldiscs[disc] = new_ldisc;
38003 new_ldisc->num = disc;
38004 - new_ldisc->refcount = 0;
38005 + atomic_set(&new_ldisc->refcount, 0);
38006 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38007
38008 return ret;
38009 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
38010 return -EINVAL;
38011
38012 spin_lock_irqsave(&tty_ldisc_lock, flags);
38013 - if (tty_ldiscs[disc]->refcount)
38014 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38015 ret = -EBUSY;
38016 else
38017 tty_ldiscs[disc] = NULL;
38018 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38019 if (ldops) {
38020 ret = ERR_PTR(-EAGAIN);
38021 if (try_module_get(ldops->owner)) {
38022 - ldops->refcount++;
38023 + atomic_inc(&ldops->refcount);
38024 ret = ldops;
38025 }
38026 }
38027 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38028 unsigned long flags;
38029
38030 spin_lock_irqsave(&tty_ldisc_lock, flags);
38031 - ldops->refcount--;
38032 + atomic_dec(&ldops->refcount);
38033 module_put(ldops->owner);
38034 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38035 }
38036 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38037 index 3761ccf..2c613b3 100644
38038 --- a/drivers/tty/vt/keyboard.c
38039 +++ b/drivers/tty/vt/keyboard.c
38040 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38041 kbd->kbdmode == VC_OFF) &&
38042 value != KVAL(K_SAK))
38043 return; /* SAK is allowed even in raw mode */
38044 +
38045 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38046 + {
38047 + void *func = fn_handler[value];
38048 + if (func == fn_show_state || func == fn_show_ptregs ||
38049 + func == fn_show_mem)
38050 + return;
38051 + }
38052 +#endif
38053 +
38054 fn_handler[value](vc);
38055 }
38056
38057 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
38058 index b3915b7..e716839 100644
38059 --- a/drivers/tty/vt/vt.c
38060 +++ b/drivers/tty/vt/vt.c
38061 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
38062
38063 static void notify_write(struct vc_data *vc, unsigned int unicode)
38064 {
38065 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
38066 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
38067 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
38068 }
38069
38070 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38071 index 5e096f4..0da1363 100644
38072 --- a/drivers/tty/vt/vt_ioctl.c
38073 +++ b/drivers/tty/vt/vt_ioctl.c
38074 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38075 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38076 return -EFAULT;
38077
38078 - if (!capable(CAP_SYS_TTY_CONFIG))
38079 - perm = 0;
38080 -
38081 switch (cmd) {
38082 case KDGKBENT:
38083 key_map = key_maps[s];
38084 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38085 val = (i ? K_HOLE : K_NOSUCHMAP);
38086 return put_user(val, &user_kbe->kb_value);
38087 case KDSKBENT:
38088 + if (!capable(CAP_SYS_TTY_CONFIG))
38089 + perm = 0;
38090 +
38091 if (!perm)
38092 return -EPERM;
38093 if (!i && v == K_NOSUCHMAP) {
38094 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38095 int i, j, k;
38096 int ret;
38097
38098 - if (!capable(CAP_SYS_TTY_CONFIG))
38099 - perm = 0;
38100 -
38101 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38102 if (!kbs) {
38103 ret = -ENOMEM;
38104 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38105 kfree(kbs);
38106 return ((p && *p) ? -EOVERFLOW : 0);
38107 case KDSKBSENT:
38108 + if (!capable(CAP_SYS_TTY_CONFIG))
38109 + perm = 0;
38110 +
38111 if (!perm) {
38112 ret = -EPERM;
38113 goto reterr;
38114 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38115 index d2efe82..9440ab6 100644
38116 --- a/drivers/uio/uio.c
38117 +++ b/drivers/uio/uio.c
38118 @@ -25,6 +25,7 @@
38119 #include <linux/kobject.h>
38120 #include <linux/cdev.h>
38121 #include <linux/uio_driver.h>
38122 +#include <asm/local.h>
38123
38124 #define UIO_MAX_DEVICES (1U << MINORBITS)
38125
38126 @@ -32,10 +33,10 @@ struct uio_device {
38127 struct module *owner;
38128 struct device *dev;
38129 int minor;
38130 - atomic_t event;
38131 + atomic_unchecked_t event;
38132 struct fasync_struct *async_queue;
38133 wait_queue_head_t wait;
38134 - int vma_count;
38135 + local_t vma_count;
38136 struct uio_info *info;
38137 struct kobject *map_dir;
38138 struct kobject *portio_dir;
38139 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38140 struct device_attribute *attr, char *buf)
38141 {
38142 struct uio_device *idev = dev_get_drvdata(dev);
38143 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38144 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38145 }
38146
38147 static struct device_attribute uio_class_attributes[] = {
38148 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38149 {
38150 struct uio_device *idev = info->uio_dev;
38151
38152 - atomic_inc(&idev->event);
38153 + atomic_inc_unchecked(&idev->event);
38154 wake_up_interruptible(&idev->wait);
38155 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38156 }
38157 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38158 }
38159
38160 listener->dev = idev;
38161 - listener->event_count = atomic_read(&idev->event);
38162 + listener->event_count = atomic_read_unchecked(&idev->event);
38163 filep->private_data = listener;
38164
38165 if (idev->info->open) {
38166 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38167 return -EIO;
38168
38169 poll_wait(filep, &idev->wait, wait);
38170 - if (listener->event_count != atomic_read(&idev->event))
38171 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38172 return POLLIN | POLLRDNORM;
38173 return 0;
38174 }
38175 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38176 do {
38177 set_current_state(TASK_INTERRUPTIBLE);
38178
38179 - event_count = atomic_read(&idev->event);
38180 + event_count = atomic_read_unchecked(&idev->event);
38181 if (event_count != listener->event_count) {
38182 if (copy_to_user(buf, &event_count, count))
38183 retval = -EFAULT;
38184 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38185 static void uio_vma_open(struct vm_area_struct *vma)
38186 {
38187 struct uio_device *idev = vma->vm_private_data;
38188 - idev->vma_count++;
38189 + local_inc(&idev->vma_count);
38190 }
38191
38192 static void uio_vma_close(struct vm_area_struct *vma)
38193 {
38194 struct uio_device *idev = vma->vm_private_data;
38195 - idev->vma_count--;
38196 + local_dec(&idev->vma_count);
38197 }
38198
38199 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38200 @@ -823,7 +824,7 @@ int __uio_register_device(struct module *owner,
38201 idev->owner = owner;
38202 idev->info = info;
38203 init_waitqueue_head(&idev->wait);
38204 - atomic_set(&idev->event, 0);
38205 + atomic_set_unchecked(&idev->event, 0);
38206
38207 ret = uio_get_minor(idev);
38208 if (ret)
38209 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38210 index a845f8b..4f54072 100644
38211 --- a/drivers/usb/atm/cxacru.c
38212 +++ b/drivers/usb/atm/cxacru.c
38213 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38214 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38215 if (ret < 2)
38216 return -EINVAL;
38217 - if (index < 0 || index > 0x7f)
38218 + if (index > 0x7f)
38219 return -EINVAL;
38220 pos += tmp;
38221
38222 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38223 index d3448ca..d2864ca 100644
38224 --- a/drivers/usb/atm/usbatm.c
38225 +++ b/drivers/usb/atm/usbatm.c
38226 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38227 if (printk_ratelimit())
38228 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38229 __func__, vpi, vci);
38230 - atomic_inc(&vcc->stats->rx_err);
38231 + atomic_inc_unchecked(&vcc->stats->rx_err);
38232 return;
38233 }
38234
38235 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38236 if (length > ATM_MAX_AAL5_PDU) {
38237 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38238 __func__, length, vcc);
38239 - atomic_inc(&vcc->stats->rx_err);
38240 + atomic_inc_unchecked(&vcc->stats->rx_err);
38241 goto out;
38242 }
38243
38244 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38245 if (sarb->len < pdu_length) {
38246 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38247 __func__, pdu_length, sarb->len, vcc);
38248 - atomic_inc(&vcc->stats->rx_err);
38249 + atomic_inc_unchecked(&vcc->stats->rx_err);
38250 goto out;
38251 }
38252
38253 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38254 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38255 __func__, vcc);
38256 - atomic_inc(&vcc->stats->rx_err);
38257 + atomic_inc_unchecked(&vcc->stats->rx_err);
38258 goto out;
38259 }
38260
38261 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38262 if (printk_ratelimit())
38263 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38264 __func__, length);
38265 - atomic_inc(&vcc->stats->rx_drop);
38266 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38267 goto out;
38268 }
38269
38270 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38271
38272 vcc->push(vcc, skb);
38273
38274 - atomic_inc(&vcc->stats->rx);
38275 + atomic_inc_unchecked(&vcc->stats->rx);
38276 out:
38277 skb_trim(sarb, 0);
38278 }
38279 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38280 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38281
38282 usbatm_pop(vcc, skb);
38283 - atomic_inc(&vcc->stats->tx);
38284 + atomic_inc_unchecked(&vcc->stats->tx);
38285
38286 skb = skb_dequeue(&instance->sndqueue);
38287 }
38288 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38289 if (!left--)
38290 return sprintf(page,
38291 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38292 - atomic_read(&atm_dev->stats.aal5.tx),
38293 - atomic_read(&atm_dev->stats.aal5.tx_err),
38294 - atomic_read(&atm_dev->stats.aal5.rx),
38295 - atomic_read(&atm_dev->stats.aal5.rx_err),
38296 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38297 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38298 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38299 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38300 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38301 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38302
38303 if (!left--) {
38304 if (instance->disconnected)
38305 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38306 index 0149c09..f108812 100644
38307 --- a/drivers/usb/core/devices.c
38308 +++ b/drivers/usb/core/devices.c
38309 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38310 * time it gets called.
38311 */
38312 static struct device_connect_event {
38313 - atomic_t count;
38314 + atomic_unchecked_t count;
38315 wait_queue_head_t wait;
38316 } device_event = {
38317 .count = ATOMIC_INIT(1),
38318 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38319
38320 void usbfs_conn_disc_event(void)
38321 {
38322 - atomic_add(2, &device_event.count);
38323 + atomic_add_unchecked(2, &device_event.count);
38324 wake_up(&device_event.wait);
38325 }
38326
38327 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38328
38329 poll_wait(file, &device_event.wait, wait);
38330
38331 - event_count = atomic_read(&device_event.count);
38332 + event_count = atomic_read_unchecked(&device_event.count);
38333 if (file->f_version != event_count) {
38334 file->f_version = event_count;
38335 return POLLIN | POLLRDNORM;
38336 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
38337 index 0b5ec23..0da3d76 100644
38338 --- a/drivers/usb/core/message.c
38339 +++ b/drivers/usb/core/message.c
38340 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
38341 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38342 if (buf) {
38343 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38344 - if (len > 0) {
38345 - smallbuf = kmalloc(++len, GFP_NOIO);
38346 + if (len++ > 0) {
38347 + smallbuf = kmalloc(len, GFP_NOIO);
38348 if (!smallbuf)
38349 return buf;
38350 memcpy(smallbuf, buf, len);
38351 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38352 index 1fc8f12..20647c1 100644
38353 --- a/drivers/usb/early/ehci-dbgp.c
38354 +++ b/drivers/usb/early/ehci-dbgp.c
38355 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38356
38357 #ifdef CONFIG_KGDB
38358 static struct kgdb_io kgdbdbgp_io_ops;
38359 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38360 +static struct kgdb_io kgdbdbgp_io_ops_console;
38361 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38362 #else
38363 #define dbgp_kgdb_mode (0)
38364 #endif
38365 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38366 .write_char = kgdbdbgp_write_char,
38367 };
38368
38369 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38370 + .name = "kgdbdbgp",
38371 + .read_char = kgdbdbgp_read_char,
38372 + .write_char = kgdbdbgp_write_char,
38373 + .is_console = 1
38374 +};
38375 +
38376 static int kgdbdbgp_wait_time;
38377
38378 static int __init kgdbdbgp_parse_config(char *str)
38379 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38380 ptr++;
38381 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38382 }
38383 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38384 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38385 + if (early_dbgp_console.index != -1)
38386 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38387 + else
38388 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38389
38390 return 0;
38391 }
38392 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
38393 index d718033..6075579 100644
38394 --- a/drivers/usb/host/xhci-mem.c
38395 +++ b/drivers/usb/host/xhci-mem.c
38396 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
38397 unsigned int num_tests;
38398 int i, ret;
38399
38400 + pax_track_stack();
38401 +
38402 num_tests = ARRAY_SIZE(simple_test_vector);
38403 for (i = 0; i < num_tests; i++) {
38404 ret = xhci_test_trb_in_td(xhci,
38405 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38406 index d6bea3e..60b250e 100644
38407 --- a/drivers/usb/wusbcore/wa-hc.h
38408 +++ b/drivers/usb/wusbcore/wa-hc.h
38409 @@ -192,7 +192,7 @@ struct wahc {
38410 struct list_head xfer_delayed_list;
38411 spinlock_t xfer_list_lock;
38412 struct work_struct xfer_work;
38413 - atomic_t xfer_id_count;
38414 + atomic_unchecked_t xfer_id_count;
38415 };
38416
38417
38418 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38419 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38420 spin_lock_init(&wa->xfer_list_lock);
38421 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38422 - atomic_set(&wa->xfer_id_count, 1);
38423 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38424 }
38425
38426 /**
38427 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38428 index 4193345..49ae93d 100644
38429 --- a/drivers/usb/wusbcore/wa-xfer.c
38430 +++ b/drivers/usb/wusbcore/wa-xfer.c
38431 @@ -295,7 +295,7 @@ out:
38432 */
38433 static void wa_xfer_id_init(struct wa_xfer *xfer)
38434 {
38435 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38436 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38437 }
38438
38439 /*
38440 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38441 index c14c42b..f955cc2 100644
38442 --- a/drivers/vhost/vhost.c
38443 +++ b/drivers/vhost/vhost.c
38444 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38445 return 0;
38446 }
38447
38448 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38449 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38450 {
38451 struct file *eventfp, *filep = NULL,
38452 *pollstart = NULL, *pollstop = NULL;
38453 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38454 index b0b2ac3..89a4399 100644
38455 --- a/drivers/video/aty/aty128fb.c
38456 +++ b/drivers/video/aty/aty128fb.c
38457 @@ -148,7 +148,7 @@ enum {
38458 };
38459
38460 /* Must match above enum */
38461 -static const char *r128_family[] __devinitdata = {
38462 +static const char *r128_family[] __devinitconst = {
38463 "AGP",
38464 "PCI",
38465 "PRO AGP",
38466 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38467 index 5c3960d..15cf8fc 100644
38468 --- a/drivers/video/fbcmap.c
38469 +++ b/drivers/video/fbcmap.c
38470 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38471 rc = -ENODEV;
38472 goto out;
38473 }
38474 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38475 - !info->fbops->fb_setcmap)) {
38476 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38477 rc = -EINVAL;
38478 goto out1;
38479 }
38480 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38481 index ad93629..ca6a218 100644
38482 --- a/drivers/video/fbmem.c
38483 +++ b/drivers/video/fbmem.c
38484 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38485 image->dx += image->width + 8;
38486 }
38487 } else if (rotate == FB_ROTATE_UD) {
38488 - for (x = 0; x < num && image->dx >= 0; x++) {
38489 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38490 info->fbops->fb_imageblit(info, image);
38491 image->dx -= image->width + 8;
38492 }
38493 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38494 image->dy += image->height + 8;
38495 }
38496 } else if (rotate == FB_ROTATE_CCW) {
38497 - for (x = 0; x < num && image->dy >= 0; x++) {
38498 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38499 info->fbops->fb_imageblit(info, image);
38500 image->dy -= image->height + 8;
38501 }
38502 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
38503 int flags = info->flags;
38504 int ret = 0;
38505
38506 + pax_track_stack();
38507 +
38508 if (var->activate & FB_ACTIVATE_INV_MODE) {
38509 struct fb_videomode mode1, mode2;
38510
38511 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38512 void __user *argp = (void __user *)arg;
38513 long ret = 0;
38514
38515 + pax_track_stack();
38516 +
38517 switch (cmd) {
38518 case FBIOGET_VSCREENINFO:
38519 if (!lock_fb_info(info))
38520 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38521 return -EFAULT;
38522 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38523 return -EINVAL;
38524 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38525 + if (con2fb.framebuffer >= FB_MAX)
38526 return -EINVAL;
38527 if (!registered_fb[con2fb.framebuffer])
38528 request_module("fb%d", con2fb.framebuffer);
38529 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38530 index 5a5d092..265c5ed 100644
38531 --- a/drivers/video/geode/gx1fb_core.c
38532 +++ b/drivers/video/geode/gx1fb_core.c
38533 @@ -29,7 +29,7 @@ static int crt_option = 1;
38534 static char panel_option[32] = "";
38535
38536 /* Modes relevant to the GX1 (taken from modedb.c) */
38537 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38538 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38539 /* 640x480-60 VESA */
38540 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38541 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38542 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38543 index 896e53d..4d87d0b 100644
38544 --- a/drivers/video/gxt4500.c
38545 +++ b/drivers/video/gxt4500.c
38546 @@ -156,7 +156,7 @@ struct gxt4500_par {
38547 static char *mode_option;
38548
38549 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38550 -static const struct fb_videomode defaultmode __devinitdata = {
38551 +static const struct fb_videomode defaultmode __devinitconst = {
38552 .refresh = 60,
38553 .xres = 1280,
38554 .yres = 1024,
38555 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38556 return 0;
38557 }
38558
38559 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38560 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38561 .id = "IBM GXT4500P",
38562 .type = FB_TYPE_PACKED_PIXELS,
38563 .visual = FB_VISUAL_PSEUDOCOLOR,
38564 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38565 index 7672d2e..b56437f 100644
38566 --- a/drivers/video/i810/i810_accel.c
38567 +++ b/drivers/video/i810/i810_accel.c
38568 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38569 }
38570 }
38571 printk("ringbuffer lockup!!!\n");
38572 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38573 i810_report_error(mmio);
38574 par->dev_flags |= LOCKUP;
38575 info->pixmap.scan_align = 1;
38576 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38577 index 318f6fb..9a389c1 100644
38578 --- a/drivers/video/i810/i810_main.c
38579 +++ b/drivers/video/i810/i810_main.c
38580 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38581 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38582
38583 /* PCI */
38584 -static const char *i810_pci_list[] __devinitdata = {
38585 +static const char *i810_pci_list[] __devinitconst = {
38586 "Intel(R) 810 Framebuffer Device" ,
38587 "Intel(R) 810-DC100 Framebuffer Device" ,
38588 "Intel(R) 810E Framebuffer Device" ,
38589 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38590 index de36693..3c63fc2 100644
38591 --- a/drivers/video/jz4740_fb.c
38592 +++ b/drivers/video/jz4740_fb.c
38593 @@ -136,7 +136,7 @@ struct jzfb {
38594 uint32_t pseudo_palette[16];
38595 };
38596
38597 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38598 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38599 .id = "JZ4740 FB",
38600 .type = FB_TYPE_PACKED_PIXELS,
38601 .visual = FB_VISUAL_TRUECOLOR,
38602 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38603 index 3c14e43..eafa544 100644
38604 --- a/drivers/video/logo/logo_linux_clut224.ppm
38605 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38606 @@ -1,1604 +1,1123 @@
38607 P3
38608 -# Standard 224-color Linux logo
38609 80 80
38610 255
38611 - 0 0 0 0 0 0 0 0 0 0 0 0
38612 - 0 0 0 0 0 0 0 0 0 0 0 0
38613 - 0 0 0 0 0 0 0 0 0 0 0 0
38614 - 0 0 0 0 0 0 0 0 0 0 0 0
38615 - 0 0 0 0 0 0 0 0 0 0 0 0
38616 - 0 0 0 0 0 0 0 0 0 0 0 0
38617 - 0 0 0 0 0 0 0 0 0 0 0 0
38618 - 0 0 0 0 0 0 0 0 0 0 0 0
38619 - 0 0 0 0 0 0 0 0 0 0 0 0
38620 - 6 6 6 6 6 6 10 10 10 10 10 10
38621 - 10 10 10 6 6 6 6 6 6 6 6 6
38622 - 0 0 0 0 0 0 0 0 0 0 0 0
38623 - 0 0 0 0 0 0 0 0 0 0 0 0
38624 - 0 0 0 0 0 0 0 0 0 0 0 0
38625 - 0 0 0 0 0 0 0 0 0 0 0 0
38626 - 0 0 0 0 0 0 0 0 0 0 0 0
38627 - 0 0 0 0 0 0 0 0 0 0 0 0
38628 - 0 0 0 0 0 0 0 0 0 0 0 0
38629 - 0 0 0 0 0 0 0 0 0 0 0 0
38630 - 0 0 0 0 0 0 0 0 0 0 0 0
38631 - 0 0 0 0 0 0 0 0 0 0 0 0
38632 - 0 0 0 0 0 0 0 0 0 0 0 0
38633 - 0 0 0 0 0 0 0 0 0 0 0 0
38634 - 0 0 0 0 0 0 0 0 0 0 0 0
38635 - 0 0 0 0 0 0 0 0 0 0 0 0
38636 - 0 0 0 0 0 0 0 0 0 0 0 0
38637 - 0 0 0 0 0 0 0 0 0 0 0 0
38638 - 0 0 0 0 0 0 0 0 0 0 0 0
38639 - 0 0 0 6 6 6 10 10 10 14 14 14
38640 - 22 22 22 26 26 26 30 30 30 34 34 34
38641 - 30 30 30 30 30 30 26 26 26 18 18 18
38642 - 14 14 14 10 10 10 6 6 6 0 0 0
38643 - 0 0 0 0 0 0 0 0 0 0 0 0
38644 - 0 0 0 0 0 0 0 0 0 0 0 0
38645 - 0 0 0 0 0 0 0 0 0 0 0 0
38646 - 0 0 0 0 0 0 0 0 0 0 0 0
38647 - 0 0 0 0 0 0 0 0 0 0 0 0
38648 - 0 0 0 0 0 0 0 0 0 0 0 0
38649 - 0 0 0 0 0 0 0 0 0 0 0 0
38650 - 0 0 0 0 0 0 0 0 0 0 0 0
38651 - 0 0 0 0 0 0 0 0 0 0 0 0
38652 - 0 0 0 0 0 1 0 0 1 0 0 0
38653 - 0 0 0 0 0 0 0 0 0 0 0 0
38654 - 0 0 0 0 0 0 0 0 0 0 0 0
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 0 0 0 0 0 0 0 0 0 0 0 0
38657 - 0 0 0 0 0 0 0 0 0 0 0 0
38658 - 0 0 0 0 0 0 0 0 0 0 0 0
38659 - 6 6 6 14 14 14 26 26 26 42 42 42
38660 - 54 54 54 66 66 66 78 78 78 78 78 78
38661 - 78 78 78 74 74 74 66 66 66 54 54 54
38662 - 42 42 42 26 26 26 18 18 18 10 10 10
38663 - 6 6 6 0 0 0 0 0 0 0 0 0
38664 - 0 0 0 0 0 0 0 0 0 0 0 0
38665 - 0 0 0 0 0 0 0 0 0 0 0 0
38666 - 0 0 0 0 0 0 0 0 0 0 0 0
38667 - 0 0 0 0 0 0 0 0 0 0 0 0
38668 - 0 0 0 0 0 0 0 0 0 0 0 0
38669 - 0 0 0 0 0 0 0 0 0 0 0 0
38670 - 0 0 0 0 0 0 0 0 0 0 0 0
38671 - 0 0 0 0 0 0 0 0 0 0 0 0
38672 - 0 0 1 0 0 0 0 0 0 0 0 0
38673 - 0 0 0 0 0 0 0 0 0 0 0 0
38674 - 0 0 0 0 0 0 0 0 0 0 0 0
38675 - 0 0 0 0 0 0 0 0 0 0 0 0
38676 - 0 0 0 0 0 0 0 0 0 0 0 0
38677 - 0 0 0 0 0 0 0 0 0 0 0 0
38678 - 0 0 0 0 0 0 0 0 0 10 10 10
38679 - 22 22 22 42 42 42 66 66 66 86 86 86
38680 - 66 66 66 38 38 38 38 38 38 22 22 22
38681 - 26 26 26 34 34 34 54 54 54 66 66 66
38682 - 86 86 86 70 70 70 46 46 46 26 26 26
38683 - 14 14 14 6 6 6 0 0 0 0 0 0
38684 - 0 0 0 0 0 0 0 0 0 0 0 0
38685 - 0 0 0 0 0 0 0 0 0 0 0 0
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 0 0 0 0 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 0 0 0 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 1 0 0 1 0 0 1 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 0 0 0 0 0 0 0 0 0 0 0 0
38696 - 0 0 0 0 0 0 0 0 0 0 0 0
38697 - 0 0 0 0 0 0 0 0 0 0 0 0
38698 - 0 0 0 0 0 0 10 10 10 26 26 26
38699 - 50 50 50 82 82 82 58 58 58 6 6 6
38700 - 2 2 6 2 2 6 2 2 6 2 2 6
38701 - 2 2 6 2 2 6 2 2 6 2 2 6
38702 - 6 6 6 54 54 54 86 86 86 66 66 66
38703 - 38 38 38 18 18 18 6 6 6 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 0 0 0
38708 - 0 0 0 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 0 0 0 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 0 0 0
38714 - 0 0 0 0 0 0 0 0 0 0 0 0
38715 - 0 0 0 0 0 0 0 0 0 0 0 0
38716 - 0 0 0 0 0 0 0 0 0 0 0 0
38717 - 0 0 0 0 0 0 0 0 0 0 0 0
38718 - 0 0 0 6 6 6 22 22 22 50 50 50
38719 - 78 78 78 34 34 34 2 2 6 2 2 6
38720 - 2 2 6 2 2 6 2 2 6 2 2 6
38721 - 2 2 6 2 2 6 2 2 6 2 2 6
38722 - 2 2 6 2 2 6 6 6 6 70 70 70
38723 - 78 78 78 46 46 46 22 22 22 6 6 6
38724 - 0 0 0 0 0 0 0 0 0 0 0 0
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 0 0 0 0 0 0 0
38728 - 0 0 0 0 0 0 0 0 0 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 0 0 0 0 0 0 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 1 0 0 1 0 0 1 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 0 0 0
38734 - 0 0 0 0 0 0 0 0 0 0 0 0
38735 - 0 0 0 0 0 0 0 0 0 0 0 0
38736 - 0 0 0 0 0 0 0 0 0 0 0 0
38737 - 0 0 0 0 0 0 0 0 0 0 0 0
38738 - 6 6 6 18 18 18 42 42 42 82 82 82
38739 - 26 26 26 2 2 6 2 2 6 2 2 6
38740 - 2 2 6 2 2 6 2 2 6 2 2 6
38741 - 2 2 6 2 2 6 2 2 6 14 14 14
38742 - 46 46 46 34 34 34 6 6 6 2 2 6
38743 - 42 42 42 78 78 78 42 42 42 18 18 18
38744 - 6 6 6 0 0 0 0 0 0 0 0 0
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 0 0 0 0 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 0 0 0 0 0 0 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 0 0 1 0 0 0 0 0 1 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 0 0 0
38754 - 0 0 0 0 0 0 0 0 0 0 0 0
38755 - 0 0 0 0 0 0 0 0 0 0 0 0
38756 - 0 0 0 0 0 0 0 0 0 0 0 0
38757 - 0 0 0 0 0 0 0 0 0 0 0 0
38758 - 10 10 10 30 30 30 66 66 66 58 58 58
38759 - 2 2 6 2 2 6 2 2 6 2 2 6
38760 - 2 2 6 2 2 6 2 2 6 2 2 6
38761 - 2 2 6 2 2 6 2 2 6 26 26 26
38762 - 86 86 86 101 101 101 46 46 46 10 10 10
38763 - 2 2 6 58 58 58 70 70 70 34 34 34
38764 - 10 10 10 0 0 0 0 0 0 0 0 0
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 0 0 0 0 0 0 0 0 0 0
38768 - 0 0 0 0 0 0 0 0 0 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 0 0 0 0 0 0 0 0 0 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 0 0 1 0 0 1 0 0 1 0 0 0
38773 - 0 0 0 0 0 0 0 0 0 0 0 0
38774 - 0 0 0 0 0 0 0 0 0 0 0 0
38775 - 0 0 0 0 0 0 0 0 0 0 0 0
38776 - 0 0 0 0 0 0 0 0 0 0 0 0
38777 - 0 0 0 0 0 0 0 0 0 0 0 0
38778 - 14 14 14 42 42 42 86 86 86 10 10 10
38779 - 2 2 6 2 2 6 2 2 6 2 2 6
38780 - 2 2 6 2 2 6 2 2 6 2 2 6
38781 - 2 2 6 2 2 6 2 2 6 30 30 30
38782 - 94 94 94 94 94 94 58 58 58 26 26 26
38783 - 2 2 6 6 6 6 78 78 78 54 54 54
38784 - 22 22 22 6 6 6 0 0 0 0 0 0
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 0 0 0
38788 - 0 0 0 0 0 0 0 0 0 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 0 0 0 0 0 0 0 0 0 0 0 0
38791 - 0 0 0 0 0 0 0 0 0 0 0 0
38792 - 0 0 0 0 0 0 0 0 0 0 0 0
38793 - 0 0 0 0 0 0 0 0 0 0 0 0
38794 - 0 0 0 0 0 0 0 0 0 0 0 0
38795 - 0 0 0 0 0 0 0 0 0 0 0 0
38796 - 0 0 0 0 0 0 0 0 0 0 0 0
38797 - 0 0 0 0 0 0 0 0 0 6 6 6
38798 - 22 22 22 62 62 62 62 62 62 2 2 6
38799 - 2 2 6 2 2 6 2 2 6 2 2 6
38800 - 2 2 6 2 2 6 2 2 6 2 2 6
38801 - 2 2 6 2 2 6 2 2 6 26 26 26
38802 - 54 54 54 38 38 38 18 18 18 10 10 10
38803 - 2 2 6 2 2 6 34 34 34 82 82 82
38804 - 38 38 38 14 14 14 0 0 0 0 0 0
38805 - 0 0 0 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 0 0 0 0 0 0 0 0 0 0
38808 - 0 0 0 0 0 0 0 0 0 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 0 0 0
38810 - 0 0 0 0 0 0 0 0 0 0 0 0
38811 - 0 0 0 0 0 0 0 0 0 0 0 0
38812 - 0 0 0 0 0 1 0 0 1 0 0 0
38813 - 0 0 0 0 0 0 0 0 0 0 0 0
38814 - 0 0 0 0 0 0 0 0 0 0 0 0
38815 - 0 0 0 0 0 0 0 0 0 0 0 0
38816 - 0 0 0 0 0 0 0 0 0 0 0 0
38817 - 0 0 0 0 0 0 0 0 0 6 6 6
38818 - 30 30 30 78 78 78 30 30 30 2 2 6
38819 - 2 2 6 2 2 6 2 2 6 2 2 6
38820 - 2 2 6 2 2 6 2 2 6 2 2 6
38821 - 2 2 6 2 2 6 2 2 6 10 10 10
38822 - 10 10 10 2 2 6 2 2 6 2 2 6
38823 - 2 2 6 2 2 6 2 2 6 78 78 78
38824 - 50 50 50 18 18 18 6 6 6 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 0 0 0 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 1 0 0 0 0 0 0 0 0 0
38833 - 0 0 0 0 0 0 0 0 0 0 0 0
38834 - 0 0 0 0 0 0 0 0 0 0 0 0
38835 - 0 0 0 0 0 0 0 0 0 0 0 0
38836 - 0 0 0 0 0 0 0 0 0 0 0 0
38837 - 0 0 0 0 0 0 0 0 0 10 10 10
38838 - 38 38 38 86 86 86 14 14 14 2 2 6
38839 - 2 2 6 2 2 6 2 2 6 2 2 6
38840 - 2 2 6 2 2 6 2 2 6 2 2 6
38841 - 2 2 6 2 2 6 2 2 6 2 2 6
38842 - 2 2 6 2 2 6 2 2 6 2 2 6
38843 - 2 2 6 2 2 6 2 2 6 54 54 54
38844 - 66 66 66 26 26 26 6 6 6 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 0 0 0 0 0 0
38848 - 0 0 0 0 0 0 0 0 0 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 1 0 0 1 0 0 0
38853 - 0 0 0 0 0 0 0 0 0 0 0 0
38854 - 0 0 0 0 0 0 0 0 0 0 0 0
38855 - 0 0 0 0 0 0 0 0 0 0 0 0
38856 - 0 0 0 0 0 0 0 0 0 0 0 0
38857 - 0 0 0 0 0 0 0 0 0 14 14 14
38858 - 42 42 42 82 82 82 2 2 6 2 2 6
38859 - 2 2 6 6 6 6 10 10 10 2 2 6
38860 - 2 2 6 2 2 6 2 2 6 2 2 6
38861 - 2 2 6 2 2 6 2 2 6 6 6 6
38862 - 14 14 14 10 10 10 2 2 6 2 2 6
38863 - 2 2 6 2 2 6 2 2 6 18 18 18
38864 - 82 82 82 34 34 34 10 10 10 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 0 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 1 0 0 0 0 0 0 0 0 0
38873 - 0 0 0 0 0 0 0 0 0 0 0 0
38874 - 0 0 0 0 0 0 0 0 0 0 0 0
38875 - 0 0 0 0 0 0 0 0 0 0 0 0
38876 - 0 0 0 0 0 0 0 0 0 0 0 0
38877 - 0 0 0 0 0 0 0 0 0 14 14 14
38878 - 46 46 46 86 86 86 2 2 6 2 2 6
38879 - 6 6 6 6 6 6 22 22 22 34 34 34
38880 - 6 6 6 2 2 6 2 2 6 2 2 6
38881 - 2 2 6 2 2 6 18 18 18 34 34 34
38882 - 10 10 10 50 50 50 22 22 22 2 2 6
38883 - 2 2 6 2 2 6 2 2 6 10 10 10
38884 - 86 86 86 42 42 42 14 14 14 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 0 0 0 0 0 0 0
38888 - 0 0 0 0 0 0 0 0 0 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 1 0 0 1 0 0 1 0 0 0
38893 - 0 0 0 0 0 0 0 0 0 0 0 0
38894 - 0 0 0 0 0 0 0 0 0 0 0 0
38895 - 0 0 0 0 0 0 0 0 0 0 0 0
38896 - 0 0 0 0 0 0 0 0 0 0 0 0
38897 - 0 0 0 0 0 0 0 0 0 14 14 14
38898 - 46 46 46 86 86 86 2 2 6 2 2 6
38899 - 38 38 38 116 116 116 94 94 94 22 22 22
38900 - 22 22 22 2 2 6 2 2 6 2 2 6
38901 - 14 14 14 86 86 86 138 138 138 162 162 162
38902 -154 154 154 38 38 38 26 26 26 6 6 6
38903 - 2 2 6 2 2 6 2 2 6 2 2 6
38904 - 86 86 86 46 46 46 14 14 14 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 0 0 0 0 0 0 0 0 0 0
38908 - 0 0 0 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 0 0 0 0 0 0
38913 - 0 0 0 0 0 0 0 0 0 0 0 0
38914 - 0 0 0 0 0 0 0 0 0 0 0 0
38915 - 0 0 0 0 0 0 0 0 0 0 0 0
38916 - 0 0 0 0 0 0 0 0 0 0 0 0
38917 - 0 0 0 0 0 0 0 0 0 14 14 14
38918 - 46 46 46 86 86 86 2 2 6 14 14 14
38919 -134 134 134 198 198 198 195 195 195 116 116 116
38920 - 10 10 10 2 2 6 2 2 6 6 6 6
38921 -101 98 89 187 187 187 210 210 210 218 218 218
38922 -214 214 214 134 134 134 14 14 14 6 6 6
38923 - 2 2 6 2 2 6 2 2 6 2 2 6
38924 - 86 86 86 50 50 50 18 18 18 6 6 6
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 0 0 0 0 0 0 0
38928 - 0 0 0 0 0 0 0 0 0 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 0 0 0
38931 - 0 0 0 0 0 0 0 0 1 0 0 0
38932 - 0 0 1 0 0 1 0 0 1 0 0 0
38933 - 0 0 0 0 0 0 0 0 0 0 0 0
38934 - 0 0 0 0 0 0 0 0 0 0 0 0
38935 - 0 0 0 0 0 0 0 0 0 0 0 0
38936 - 0 0 0 0 0 0 0 0 0 0 0 0
38937 - 0 0 0 0 0 0 0 0 0 14 14 14
38938 - 46 46 46 86 86 86 2 2 6 54 54 54
38939 -218 218 218 195 195 195 226 226 226 246 246 246
38940 - 58 58 58 2 2 6 2 2 6 30 30 30
38941 -210 210 210 253 253 253 174 174 174 123 123 123
38942 -221 221 221 234 234 234 74 74 74 2 2 6
38943 - 2 2 6 2 2 6 2 2 6 2 2 6
38944 - 70 70 70 58 58 58 22 22 22 6 6 6
38945 - 0 0 0 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 0 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 0 0 0 0 0 0 0 0 0
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 0 0 0 0 0 0 0 0 0 0 0 0
38953 - 0 0 0 0 0 0 0 0 0 0 0 0
38954 - 0 0 0 0 0 0 0 0 0 0 0 0
38955 - 0 0 0 0 0 0 0 0 0 0 0 0
38956 - 0 0 0 0 0 0 0 0 0 0 0 0
38957 - 0 0 0 0 0 0 0 0 0 14 14 14
38958 - 46 46 46 82 82 82 2 2 6 106 106 106
38959 -170 170 170 26 26 26 86 86 86 226 226 226
38960 -123 123 123 10 10 10 14 14 14 46 46 46
38961 -231 231 231 190 190 190 6 6 6 70 70 70
38962 - 90 90 90 238 238 238 158 158 158 2 2 6
38963 - 2 2 6 2 2 6 2 2 6 2 2 6
38964 - 70 70 70 58 58 58 22 22 22 6 6 6
38965 - 0 0 0 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 0 0 0 0 0 0 0 0 0 0
38968 - 0 0 0 0 0 0 0 0 0 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 0 0 0 0 0 0 0 0 0 0 0 0
38971 - 0 0 0 0 0 0 0 0 1 0 0 0
38972 - 0 0 1 0 0 1 0 0 1 0 0 0
38973 - 0 0 0 0 0 0 0 0 0 0 0 0
38974 - 0 0 0 0 0 0 0 0 0 0 0 0
38975 - 0 0 0 0 0 0 0 0 0 0 0 0
38976 - 0 0 0 0 0 0 0 0 0 0 0 0
38977 - 0 0 0 0 0 0 0 0 0 14 14 14
38978 - 42 42 42 86 86 86 6 6 6 116 116 116
38979 -106 106 106 6 6 6 70 70 70 149 149 149
38980 -128 128 128 18 18 18 38 38 38 54 54 54
38981 -221 221 221 106 106 106 2 2 6 14 14 14
38982 - 46 46 46 190 190 190 198 198 198 2 2 6
38983 - 2 2 6 2 2 6 2 2 6 2 2 6
38984 - 74 74 74 62 62 62 22 22 22 6 6 6
38985 - 0 0 0 0 0 0 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 0 0 0 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 0 0 0 0 0 0 0 0 0 0 0 0
38991 - 0 0 0 0 0 0 0 0 1 0 0 0
38992 - 0 0 1 0 0 0 0 0 1 0 0 0
38993 - 0 0 0 0 0 0 0 0 0 0 0 0
38994 - 0 0 0 0 0 0 0 0 0 0 0 0
38995 - 0 0 0 0 0 0 0 0 0 0 0 0
38996 - 0 0 0 0 0 0 0 0 0 0 0 0
38997 - 0 0 0 0 0 0 0 0 0 14 14 14
38998 - 42 42 42 94 94 94 14 14 14 101 101 101
38999 -128 128 128 2 2 6 18 18 18 116 116 116
39000 -118 98 46 121 92 8 121 92 8 98 78 10
39001 -162 162 162 106 106 106 2 2 6 2 2 6
39002 - 2 2 6 195 195 195 195 195 195 6 6 6
39003 - 2 2 6 2 2 6 2 2 6 2 2 6
39004 - 74 74 74 62 62 62 22 22 22 6 6 6
39005 - 0 0 0 0 0 0 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 0 0 0 0
39007 - 0 0 0 0 0 0 0 0 0 0 0 0
39008 - 0 0 0 0 0 0 0 0 0 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 0 0 0
39010 - 0 0 0 0 0 0 0 0 0 0 0 0
39011 - 0 0 0 0 0 0 0 0 1 0 0 1
39012 - 0 0 1 0 0 0 0 0 1 0 0 0
39013 - 0 0 0 0 0 0 0 0 0 0 0 0
39014 - 0 0 0 0 0 0 0 0 0 0 0 0
39015 - 0 0 0 0 0 0 0 0 0 0 0 0
39016 - 0 0 0 0 0 0 0 0 0 0 0 0
39017 - 0 0 0 0 0 0 0 0 0 10 10 10
39018 - 38 38 38 90 90 90 14 14 14 58 58 58
39019 -210 210 210 26 26 26 54 38 6 154 114 10
39020 -226 170 11 236 186 11 225 175 15 184 144 12
39021 -215 174 15 175 146 61 37 26 9 2 2 6
39022 - 70 70 70 246 246 246 138 138 138 2 2 6
39023 - 2 2 6 2 2 6 2 2 6 2 2 6
39024 - 70 70 70 66 66 66 26 26 26 6 6 6
39025 - 0 0 0 0 0 0 0 0 0 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 0 0 0 0
39028 - 0 0 0 0 0 0 0 0 0 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 0 0 0
39030 - 0 0 0 0 0 0 0 0 0 0 0 0
39031 - 0 0 0 0 0 0 0 0 0 0 0 0
39032 - 0 0 0 0 0 0 0 0 0 0 0 0
39033 - 0 0 0 0 0 0 0 0 0 0 0 0
39034 - 0 0 0 0 0 0 0 0 0 0 0 0
39035 - 0 0 0 0 0 0 0 0 0 0 0 0
39036 - 0 0 0 0 0 0 0 0 0 0 0 0
39037 - 0 0 0 0 0 0 0 0 0 10 10 10
39038 - 38 38 38 86 86 86 14 14 14 10 10 10
39039 -195 195 195 188 164 115 192 133 9 225 175 15
39040 -239 182 13 234 190 10 232 195 16 232 200 30
39041 -245 207 45 241 208 19 232 195 16 184 144 12
39042 -218 194 134 211 206 186 42 42 42 2 2 6
39043 - 2 2 6 2 2 6 2 2 6 2 2 6
39044 - 50 50 50 74 74 74 30 30 30 6 6 6
39045 - 0 0 0 0 0 0 0 0 0 0 0 0
39046 - 0 0 0 0 0 0 0 0 0 0 0 0
39047 - 0 0 0 0 0 0 0 0 0 0 0 0
39048 - 0 0 0 0 0 0 0 0 0 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 0 0 0
39050 - 0 0 0 0 0 0 0 0 0 0 0 0
39051 - 0 0 0 0 0 0 0 0 0 0 0 0
39052 - 0 0 0 0 0 0 0 0 0 0 0 0
39053 - 0 0 0 0 0 0 0 0 0 0 0 0
39054 - 0 0 0 0 0 0 0 0 0 0 0 0
39055 - 0 0 0 0 0 0 0 0 0 0 0 0
39056 - 0 0 0 0 0 0 0 0 0 0 0 0
39057 - 0 0 0 0 0 0 0 0 0 10 10 10
39058 - 34 34 34 86 86 86 14 14 14 2 2 6
39059 -121 87 25 192 133 9 219 162 10 239 182 13
39060 -236 186 11 232 195 16 241 208 19 244 214 54
39061 -246 218 60 246 218 38 246 215 20 241 208 19
39062 -241 208 19 226 184 13 121 87 25 2 2 6
39063 - 2 2 6 2 2 6 2 2 6 2 2 6
39064 - 50 50 50 82 82 82 34 34 34 10 10 10
39065 - 0 0 0 0 0 0 0 0 0 0 0 0
39066 - 0 0 0 0 0 0 0 0 0 0 0 0
39067 - 0 0 0 0 0 0 0 0 0 0 0 0
39068 - 0 0 0 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 0 0 0
39070 - 0 0 0 0 0 0 0 0 0 0 0 0
39071 - 0 0 0 0 0 0 0 0 0 0 0 0
39072 - 0 0 0 0 0 0 0 0 0 0 0 0
39073 - 0 0 0 0 0 0 0 0 0 0 0 0
39074 - 0 0 0 0 0 0 0 0 0 0 0 0
39075 - 0 0 0 0 0 0 0 0 0 0 0 0
39076 - 0 0 0 0 0 0 0 0 0 0 0 0
39077 - 0 0 0 0 0 0 0 0 0 10 10 10
39078 - 34 34 34 82 82 82 30 30 30 61 42 6
39079 -180 123 7 206 145 10 230 174 11 239 182 13
39080 -234 190 10 238 202 15 241 208 19 246 218 74
39081 -246 218 38 246 215 20 246 215 20 246 215 20
39082 -226 184 13 215 174 15 184 144 12 6 6 6
39083 - 2 2 6 2 2 6 2 2 6 2 2 6
39084 - 26 26 26 94 94 94 42 42 42 14 14 14
39085 - 0 0 0 0 0 0 0 0 0 0 0 0
39086 - 0 0 0 0 0 0 0 0 0 0 0 0
39087 - 0 0 0 0 0 0 0 0 0 0 0 0
39088 - 0 0 0 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 0 0 0
39090 - 0 0 0 0 0 0 0 0 0 0 0 0
39091 - 0 0 0 0 0 0 0 0 0 0 0 0
39092 - 0 0 0 0 0 0 0 0 0 0 0 0
39093 - 0 0 0 0 0 0 0 0 0 0 0 0
39094 - 0 0 0 0 0 0 0 0 0 0 0 0
39095 - 0 0 0 0 0 0 0 0 0 0 0 0
39096 - 0 0 0 0 0 0 0 0 0 0 0 0
39097 - 0 0 0 0 0 0 0 0 0 10 10 10
39098 - 30 30 30 78 78 78 50 50 50 104 69 6
39099 -192 133 9 216 158 10 236 178 12 236 186 11
39100 -232 195 16 241 208 19 244 214 54 245 215 43
39101 -246 215 20 246 215 20 241 208 19 198 155 10
39102 -200 144 11 216 158 10 156 118 10 2 2 6
39103 - 2 2 6 2 2 6 2 2 6 2 2 6
39104 - 6 6 6 90 90 90 54 54 54 18 18 18
39105 - 6 6 6 0 0 0 0 0 0 0 0 0
39106 - 0 0 0 0 0 0 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 0 0 0
39108 - 0 0 0 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 0 0 0
39110 - 0 0 0 0 0 0 0 0 0 0 0 0
39111 - 0 0 0 0 0 0 0 0 0 0 0 0
39112 - 0 0 0 0 0 0 0 0 0 0 0 0
39113 - 0 0 0 0 0 0 0 0 0 0 0 0
39114 - 0 0 0 0 0 0 0 0 0 0 0 0
39115 - 0 0 0 0 0 0 0 0 0 0 0 0
39116 - 0 0 0 0 0 0 0 0 0 0 0 0
39117 - 0 0 0 0 0 0 0 0 0 10 10 10
39118 - 30 30 30 78 78 78 46 46 46 22 22 22
39119 -137 92 6 210 162 10 239 182 13 238 190 10
39120 -238 202 15 241 208 19 246 215 20 246 215 20
39121 -241 208 19 203 166 17 185 133 11 210 150 10
39122 -216 158 10 210 150 10 102 78 10 2 2 6
39123 - 6 6 6 54 54 54 14 14 14 2 2 6
39124 - 2 2 6 62 62 62 74 74 74 30 30 30
39125 - 10 10 10 0 0 0 0 0 0 0 0 0
39126 - 0 0 0 0 0 0 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 0 0 0
39128 - 0 0 0 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 0 0 0
39130 - 0 0 0 0 0 0 0 0 0 0 0 0
39131 - 0 0 0 0 0 0 0 0 0 0 0 0
39132 - 0 0 0 0 0 0 0 0 0 0 0 0
39133 - 0 0 0 0 0 0 0 0 0 0 0 0
39134 - 0 0 0 0 0 0 0 0 0 0 0 0
39135 - 0 0 0 0 0 0 0 0 0 0 0 0
39136 - 0 0 0 0 0 0 0 0 0 0 0 0
39137 - 0 0 0 0 0 0 0 0 0 10 10 10
39138 - 34 34 34 78 78 78 50 50 50 6 6 6
39139 - 94 70 30 139 102 15 190 146 13 226 184 13
39140 -232 200 30 232 195 16 215 174 15 190 146 13
39141 -168 122 10 192 133 9 210 150 10 213 154 11
39142 -202 150 34 182 157 106 101 98 89 2 2 6
39143 - 2 2 6 78 78 78 116 116 116 58 58 58
39144 - 2 2 6 22 22 22 90 90 90 46 46 46
39145 - 18 18 18 6 6 6 0 0 0 0 0 0
39146 - 0 0 0 0 0 0 0 0 0 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 0 0 0
39148 - 0 0 0 0 0 0 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 0 0 0
39150 - 0 0 0 0 0 0 0 0 0 0 0 0
39151 - 0 0 0 0 0 0 0 0 0 0 0 0
39152 - 0 0 0 0 0 0 0 0 0 0 0 0
39153 - 0 0 0 0 0 0 0 0 0 0 0 0
39154 - 0 0 0 0 0 0 0 0 0 0 0 0
39155 - 0 0 0 0 0 0 0 0 0 0 0 0
39156 - 0 0 0 0 0 0 0 0 0 0 0 0
39157 - 0 0 0 0 0 0 0 0 0 10 10 10
39158 - 38 38 38 86 86 86 50 50 50 6 6 6
39159 -128 128 128 174 154 114 156 107 11 168 122 10
39160 -198 155 10 184 144 12 197 138 11 200 144 11
39161 -206 145 10 206 145 10 197 138 11 188 164 115
39162 -195 195 195 198 198 198 174 174 174 14 14 14
39163 - 2 2 6 22 22 22 116 116 116 116 116 116
39164 - 22 22 22 2 2 6 74 74 74 70 70 70
39165 - 30 30 30 10 10 10 0 0 0 0 0 0
39166 - 0 0 0 0 0 0 0 0 0 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 0 0 0
39168 - 0 0 0 0 0 0 0 0 0 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 0 0 0
39170 - 0 0 0 0 0 0 0 0 0 0 0 0
39171 - 0 0 0 0 0 0 0 0 0 0 0 0
39172 - 0 0 0 0 0 0 0 0 0 0 0 0
39173 - 0 0 0 0 0 0 0 0 0 0 0 0
39174 - 0 0 0 0 0 0 0 0 0 0 0 0
39175 - 0 0 0 0 0 0 0 0 0 0 0 0
39176 - 0 0 0 0 0 0 0 0 0 0 0 0
39177 - 0 0 0 0 0 0 6 6 6 18 18 18
39178 - 50 50 50 101 101 101 26 26 26 10 10 10
39179 -138 138 138 190 190 190 174 154 114 156 107 11
39180 -197 138 11 200 144 11 197 138 11 192 133 9
39181 -180 123 7 190 142 34 190 178 144 187 187 187
39182 -202 202 202 221 221 221 214 214 214 66 66 66
39183 - 2 2 6 2 2 6 50 50 50 62 62 62
39184 - 6 6 6 2 2 6 10 10 10 90 90 90
39185 - 50 50 50 18 18 18 6 6 6 0 0 0
39186 - 0 0 0 0 0 0 0 0 0 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 0 0 0
39188 - 0 0 0 0 0 0 0 0 0 0 0 0
39189 - 0 0 0 0 0 0 0 0 0 0 0 0
39190 - 0 0 0 0 0 0 0 0 0 0 0 0
39191 - 0 0 0 0 0 0 0 0 0 0 0 0
39192 - 0 0 0 0 0 0 0 0 0 0 0 0
39193 - 0 0 0 0 0 0 0 0 0 0 0 0
39194 - 0 0 0 0 0 0 0 0 0 0 0 0
39195 - 0 0 0 0 0 0 0 0 0 0 0 0
39196 - 0 0 0 0 0 0 0 0 0 0 0 0
39197 - 0 0 0 0 0 0 10 10 10 34 34 34
39198 - 74 74 74 74 74 74 2 2 6 6 6 6
39199 -144 144 144 198 198 198 190 190 190 178 166 146
39200 -154 121 60 156 107 11 156 107 11 168 124 44
39201 -174 154 114 187 187 187 190 190 190 210 210 210
39202 -246 246 246 253 253 253 253 253 253 182 182 182
39203 - 6 6 6 2 2 6 2 2 6 2 2 6
39204 - 2 2 6 2 2 6 2 2 6 62 62 62
39205 - 74 74 74 34 34 34 14 14 14 0 0 0
39206 - 0 0 0 0 0 0 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 0 0 0 0 0 0
39208 - 0 0 0 0 0 0 0 0 0 0 0 0
39209 - 0 0 0 0 0 0 0 0 0 0 0 0
39210 - 0 0 0 0 0 0 0 0 0 0 0 0
39211 - 0 0 0 0 0 0 0 0 0 0 0 0
39212 - 0 0 0 0 0 0 0 0 0 0 0 0
39213 - 0 0 0 0 0 0 0 0 0 0 0 0
39214 - 0 0 0 0 0 0 0 0 0 0 0 0
39215 - 0 0 0 0 0 0 0 0 0 0 0 0
39216 - 0 0 0 0 0 0 0 0 0 0 0 0
39217 - 0 0 0 10 10 10 22 22 22 54 54 54
39218 - 94 94 94 18 18 18 2 2 6 46 46 46
39219 -234 234 234 221 221 221 190 190 190 190 190 190
39220 -190 190 190 187 187 187 187 187 187 190 190 190
39221 -190 190 190 195 195 195 214 214 214 242 242 242
39222 -253 253 253 253 253 253 253 253 253 253 253 253
39223 - 82 82 82 2 2 6 2 2 6 2 2 6
39224 - 2 2 6 2 2 6 2 2 6 14 14 14
39225 - 86 86 86 54 54 54 22 22 22 6 6 6
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 0 0 0
39231 - 0 0 0 0 0 0 0 0 0 0 0 0
39232 - 0 0 0 0 0 0 0 0 0 0 0 0
39233 - 0 0 0 0 0 0 0 0 0 0 0 0
39234 - 0 0 0 0 0 0 0 0 0 0 0 0
39235 - 0 0 0 0 0 0 0 0 0 0 0 0
39236 - 0 0 0 0 0 0 0 0 0 0 0 0
39237 - 6 6 6 18 18 18 46 46 46 90 90 90
39238 - 46 46 46 18 18 18 6 6 6 182 182 182
39239 -253 253 253 246 246 246 206 206 206 190 190 190
39240 -190 190 190 190 190 190 190 190 190 190 190 190
39241 -206 206 206 231 231 231 250 250 250 253 253 253
39242 -253 253 253 253 253 253 253 253 253 253 253 253
39243 -202 202 202 14 14 14 2 2 6 2 2 6
39244 - 2 2 6 2 2 6 2 2 6 2 2 6
39245 - 42 42 42 86 86 86 42 42 42 18 18 18
39246 - 6 6 6 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 0 0 0 0 0 0
39251 - 0 0 0 0 0 0 0 0 0 0 0 0
39252 - 0 0 0 0 0 0 0 0 0 0 0 0
39253 - 0 0 0 0 0 0 0 0 0 0 0 0
39254 - 0 0 0 0 0 0 0 0 0 0 0 0
39255 - 0 0 0 0 0 0 0 0 0 0 0 0
39256 - 0 0 0 0 0 0 0 0 0 6 6 6
39257 - 14 14 14 38 38 38 74 74 74 66 66 66
39258 - 2 2 6 6 6 6 90 90 90 250 250 250
39259 -253 253 253 253 253 253 238 238 238 198 198 198
39260 -190 190 190 190 190 190 195 195 195 221 221 221
39261 -246 246 246 253 253 253 253 253 253 253 253 253
39262 -253 253 253 253 253 253 253 253 253 253 253 253
39263 -253 253 253 82 82 82 2 2 6 2 2 6
39264 - 2 2 6 2 2 6 2 2 6 2 2 6
39265 - 2 2 6 78 78 78 70 70 70 34 34 34
39266 - 14 14 14 6 6 6 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 0 0 0
39270 - 0 0 0 0 0 0 0 0 0 0 0 0
39271 - 0 0 0 0 0 0 0 0 0 0 0 0
39272 - 0 0 0 0 0 0 0 0 0 0 0 0
39273 - 0 0 0 0 0 0 0 0 0 0 0 0
39274 - 0 0 0 0 0 0 0 0 0 0 0 0
39275 - 0 0 0 0 0 0 0 0 0 0 0 0
39276 - 0 0 0 0 0 0 0 0 0 14 14 14
39277 - 34 34 34 66 66 66 78 78 78 6 6 6
39278 - 2 2 6 18 18 18 218 218 218 253 253 253
39279 -253 253 253 253 253 253 253 253 253 246 246 246
39280 -226 226 226 231 231 231 246 246 246 253 253 253
39281 -253 253 253 253 253 253 253 253 253 253 253 253
39282 -253 253 253 253 253 253 253 253 253 253 253 253
39283 -253 253 253 178 178 178 2 2 6 2 2 6
39284 - 2 2 6 2 2 6 2 2 6 2 2 6
39285 - 2 2 6 18 18 18 90 90 90 62 62 62
39286 - 30 30 30 10 10 10 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 0 0 0
39290 - 0 0 0 0 0 0 0 0 0 0 0 0
39291 - 0 0 0 0 0 0 0 0 0 0 0 0
39292 - 0 0 0 0 0 0 0 0 0 0 0 0
39293 - 0 0 0 0 0 0 0 0 0 0 0 0
39294 - 0 0 0 0 0 0 0 0 0 0 0 0
39295 - 0 0 0 0 0 0 0 0 0 0 0 0
39296 - 0 0 0 0 0 0 10 10 10 26 26 26
39297 - 58 58 58 90 90 90 18 18 18 2 2 6
39298 - 2 2 6 110 110 110 253 253 253 253 253 253
39299 -253 253 253 253 253 253 253 253 253 253 253 253
39300 -250 250 250 253 253 253 253 253 253 253 253 253
39301 -253 253 253 253 253 253 253 253 253 253 253 253
39302 -253 253 253 253 253 253 253 253 253 253 253 253
39303 -253 253 253 231 231 231 18 18 18 2 2 6
39304 - 2 2 6 2 2 6 2 2 6 2 2 6
39305 - 2 2 6 2 2 6 18 18 18 94 94 94
39306 - 54 54 54 26 26 26 10 10 10 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 0 0 0 0 0 0 0 0 0 0 0 0
39311 - 0 0 0 0 0 0 0 0 0 0 0 0
39312 - 0 0 0 0 0 0 0 0 0 0 0 0
39313 - 0 0 0 0 0 0 0 0 0 0 0 0
39314 - 0 0 0 0 0 0 0 0 0 0 0 0
39315 - 0 0 0 0 0 0 0 0 0 0 0 0
39316 - 0 0 0 6 6 6 22 22 22 50 50 50
39317 - 90 90 90 26 26 26 2 2 6 2 2 6
39318 - 14 14 14 195 195 195 250 250 250 253 253 253
39319 -253 253 253 253 253 253 253 253 253 253 253 253
39320 -253 253 253 253 253 253 253 253 253 253 253 253
39321 -253 253 253 253 253 253 253 253 253 253 253 253
39322 -253 253 253 253 253 253 253 253 253 253 253 253
39323 -250 250 250 242 242 242 54 54 54 2 2 6
39324 - 2 2 6 2 2 6 2 2 6 2 2 6
39325 - 2 2 6 2 2 6 2 2 6 38 38 38
39326 - 86 86 86 50 50 50 22 22 22 6 6 6
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 0 0 0 0 0 0
39331 - 0 0 0 0 0 0 0 0 0 0 0 0
39332 - 0 0 0 0 0 0 0 0 0 0 0 0
39333 - 0 0 0 0 0 0 0 0 0 0 0 0
39334 - 0 0 0 0 0 0 0 0 0 0 0 0
39335 - 0 0 0 0 0 0 0 0 0 0 0 0
39336 - 6 6 6 14 14 14 38 38 38 82 82 82
39337 - 34 34 34 2 2 6 2 2 6 2 2 6
39338 - 42 42 42 195 195 195 246 246 246 253 253 253
39339 -253 253 253 253 253 253 253 253 253 250 250 250
39340 -242 242 242 242 242 242 250 250 250 253 253 253
39341 -253 253 253 253 253 253 253 253 253 253 253 253
39342 -253 253 253 250 250 250 246 246 246 238 238 238
39343 -226 226 226 231 231 231 101 101 101 6 6 6
39344 - 2 2 6 2 2 6 2 2 6 2 2 6
39345 - 2 2 6 2 2 6 2 2 6 2 2 6
39346 - 38 38 38 82 82 82 42 42 42 14 14 14
39347 - 6 6 6 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 0 0 0 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 0 0 0 0 0 0 0 0 0
39352 - 0 0 0 0 0 0 0 0 0 0 0 0
39353 - 0 0 0 0 0 0 0 0 0 0 0 0
39354 - 0 0 0 0 0 0 0 0 0 0 0 0
39355 - 0 0 0 0 0 0 0 0 0 0 0 0
39356 - 10 10 10 26 26 26 62 62 62 66 66 66
39357 - 2 2 6 2 2 6 2 2 6 6 6 6
39358 - 70 70 70 170 170 170 206 206 206 234 234 234
39359 -246 246 246 250 250 250 250 250 250 238 238 238
39360 -226 226 226 231 231 231 238 238 238 250 250 250
39361 -250 250 250 250 250 250 246 246 246 231 231 231
39362 -214 214 214 206 206 206 202 202 202 202 202 202
39363 -198 198 198 202 202 202 182 182 182 18 18 18
39364 - 2 2 6 2 2 6 2 2 6 2 2 6
39365 - 2 2 6 2 2 6 2 2 6 2 2 6
39366 - 2 2 6 62 62 62 66 66 66 30 30 30
39367 - 10 10 10 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 0 0 0 0 0 0
39372 - 0 0 0 0 0 0 0 0 0 0 0 0
39373 - 0 0 0 0 0 0 0 0 0 0 0 0
39374 - 0 0 0 0 0 0 0 0 0 0 0 0
39375 - 0 0 0 0 0 0 0 0 0 0 0 0
39376 - 14 14 14 42 42 42 82 82 82 18 18 18
39377 - 2 2 6 2 2 6 2 2 6 10 10 10
39378 - 94 94 94 182 182 182 218 218 218 242 242 242
39379 -250 250 250 253 253 253 253 253 253 250 250 250
39380 -234 234 234 253 253 253 253 253 253 253 253 253
39381 -253 253 253 253 253 253 253 253 253 246 246 246
39382 -238 238 238 226 226 226 210 210 210 202 202 202
39383 -195 195 195 195 195 195 210 210 210 158 158 158
39384 - 6 6 6 14 14 14 50 50 50 14 14 14
39385 - 2 2 6 2 2 6 2 2 6 2 2 6
39386 - 2 2 6 6 6 6 86 86 86 46 46 46
39387 - 18 18 18 6 6 6 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 0 0 0 0 0 0 0 0 0
39392 - 0 0 0 0 0 0 0 0 0 0 0 0
39393 - 0 0 0 0 0 0 0 0 0 0 0 0
39394 - 0 0 0 0 0 0 0 0 0 0 0 0
39395 - 0 0 0 0 0 0 0 0 0 6 6 6
39396 - 22 22 22 54 54 54 70 70 70 2 2 6
39397 - 2 2 6 10 10 10 2 2 6 22 22 22
39398 -166 166 166 231 231 231 250 250 250 253 253 253
39399 -253 253 253 253 253 253 253 253 253 250 250 250
39400 -242 242 242 253 253 253 253 253 253 253 253 253
39401 -253 253 253 253 253 253 253 253 253 253 253 253
39402 -253 253 253 253 253 253 253 253 253 246 246 246
39403 -231 231 231 206 206 206 198 198 198 226 226 226
39404 - 94 94 94 2 2 6 6 6 6 38 38 38
39405 - 30 30 30 2 2 6 2 2 6 2 2 6
39406 - 2 2 6 2 2 6 62 62 62 66 66 66
39407 - 26 26 26 10 10 10 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 0 0 0 0 0 0 0 0 0 0 0 0
39412 - 0 0 0 0 0 0 0 0 0 0 0 0
39413 - 0 0 0 0 0 0 0 0 0 0 0 0
39414 - 0 0 0 0 0 0 0 0 0 0 0 0
39415 - 0 0 0 0 0 0 0 0 0 10 10 10
39416 - 30 30 30 74 74 74 50 50 50 2 2 6
39417 - 26 26 26 26 26 26 2 2 6 106 106 106
39418 -238 238 238 253 253 253 253 253 253 253 253 253
39419 -253 253 253 253 253 253 253 253 253 253 253 253
39420 -253 253 253 253 253 253 253 253 253 253 253 253
39421 -253 253 253 253 253 253 253 253 253 253 253 253
39422 -253 253 253 253 253 253 253 253 253 253 253 253
39423 -253 253 253 246 246 246 218 218 218 202 202 202
39424 -210 210 210 14 14 14 2 2 6 2 2 6
39425 - 30 30 30 22 22 22 2 2 6 2 2 6
39426 - 2 2 6 2 2 6 18 18 18 86 86 86
39427 - 42 42 42 14 14 14 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 0 0 0 0 0 0 0 0 0 0 0 0
39432 - 0 0 0 0 0 0 0 0 0 0 0 0
39433 - 0 0 0 0 0 0 0 0 0 0 0 0
39434 - 0 0 0 0 0 0 0 0 0 0 0 0
39435 - 0 0 0 0 0 0 0 0 0 14 14 14
39436 - 42 42 42 90 90 90 22 22 22 2 2 6
39437 - 42 42 42 2 2 6 18 18 18 218 218 218
39438 -253 253 253 253 253 253 253 253 253 253 253 253
39439 -253 253 253 253 253 253 253 253 253 253 253 253
39440 -253 253 253 253 253 253 253 253 253 253 253 253
39441 -253 253 253 253 253 253 253 253 253 253 253 253
39442 -253 253 253 253 253 253 253 253 253 253 253 253
39443 -253 253 253 253 253 253 250 250 250 221 221 221
39444 -218 218 218 101 101 101 2 2 6 14 14 14
39445 - 18 18 18 38 38 38 10 10 10 2 2 6
39446 - 2 2 6 2 2 6 2 2 6 78 78 78
39447 - 58 58 58 22 22 22 6 6 6 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 0 0 0
39449 - 0 0 0 0 0 0 0 0 0 0 0 0
39450 - 0 0 0 0 0 0 0 0 0 0 0 0
39451 - 0 0 0 0 0 0 0 0 0 0 0 0
39452 - 0 0 0 0 0 0 0 0 0 0 0 0
39453 - 0 0 0 0 0 0 0 0 0 0 0 0
39454 - 0 0 0 0 0 0 0 0 0 0 0 0
39455 - 0 0 0 0 0 0 6 6 6 18 18 18
39456 - 54 54 54 82 82 82 2 2 6 26 26 26
39457 - 22 22 22 2 2 6 123 123 123 253 253 253
39458 -253 253 253 253 253 253 253 253 253 253 253 253
39459 -253 253 253 253 253 253 253 253 253 253 253 253
39460 -253 253 253 253 253 253 253 253 253 253 253 253
39461 -253 253 253 253 253 253 253 253 253 253 253 253
39462 -253 253 253 253 253 253 253 253 253 253 253 253
39463 -253 253 253 253 253 253 253 253 253 250 250 250
39464 -238 238 238 198 198 198 6 6 6 38 38 38
39465 - 58 58 58 26 26 26 38 38 38 2 2 6
39466 - 2 2 6 2 2 6 2 2 6 46 46 46
39467 - 78 78 78 30 30 30 10 10 10 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 0 0 0
39469 - 0 0 0 0 0 0 0 0 0 0 0 0
39470 - 0 0 0 0 0 0 0 0 0 0 0 0
39471 - 0 0 0 0 0 0 0 0 0 0 0 0
39472 - 0 0 0 0 0 0 0 0 0 0 0 0
39473 - 0 0 0 0 0 0 0 0 0 0 0 0
39474 - 0 0 0 0 0 0 0 0 0 0 0 0
39475 - 0 0 0 0 0 0 10 10 10 30 30 30
39476 - 74 74 74 58 58 58 2 2 6 42 42 42
39477 - 2 2 6 22 22 22 231 231 231 253 253 253
39478 -253 253 253 253 253 253 253 253 253 253 253 253
39479 -253 253 253 253 253 253 253 253 253 250 250 250
39480 -253 253 253 253 253 253 253 253 253 253 253 253
39481 -253 253 253 253 253 253 253 253 253 253 253 253
39482 -253 253 253 253 253 253 253 253 253 253 253 253
39483 -253 253 253 253 253 253 253 253 253 253 253 253
39484 -253 253 253 246 246 246 46 46 46 38 38 38
39485 - 42 42 42 14 14 14 38 38 38 14 14 14
39486 - 2 2 6 2 2 6 2 2 6 6 6 6
39487 - 86 86 86 46 46 46 14 14 14 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 0 0 0
39489 - 0 0 0 0 0 0 0 0 0 0 0 0
39490 - 0 0 0 0 0 0 0 0 0 0 0 0
39491 - 0 0 0 0 0 0 0 0 0 0 0 0
39492 - 0 0 0 0 0 0 0 0 0 0 0 0
39493 - 0 0 0 0 0 0 0 0 0 0 0 0
39494 - 0 0 0 0 0 0 0 0 0 0 0 0
39495 - 0 0 0 6 6 6 14 14 14 42 42 42
39496 - 90 90 90 18 18 18 18 18 18 26 26 26
39497 - 2 2 6 116 116 116 253 253 253 253 253 253
39498 -253 253 253 253 253 253 253 253 253 253 253 253
39499 -253 253 253 253 253 253 250 250 250 238 238 238
39500 -253 253 253 253 253 253 253 253 253 253 253 253
39501 -253 253 253 253 253 253 253 253 253 253 253 253
39502 -253 253 253 253 253 253 253 253 253 253 253 253
39503 -253 253 253 253 253 253 253 253 253 253 253 253
39504 -253 253 253 253 253 253 94 94 94 6 6 6
39505 - 2 2 6 2 2 6 10 10 10 34 34 34
39506 - 2 2 6 2 2 6 2 2 6 2 2 6
39507 - 74 74 74 58 58 58 22 22 22 6 6 6
39508 - 0 0 0 0 0 0 0 0 0 0 0 0
39509 - 0 0 0 0 0 0 0 0 0 0 0 0
39510 - 0 0 0 0 0 0 0 0 0 0 0 0
39511 - 0 0 0 0 0 0 0 0 0 0 0 0
39512 - 0 0 0 0 0 0 0 0 0 0 0 0
39513 - 0 0 0 0 0 0 0 0 0 0 0 0
39514 - 0 0 0 0 0 0 0 0 0 0 0 0
39515 - 0 0 0 10 10 10 26 26 26 66 66 66
39516 - 82 82 82 2 2 6 38 38 38 6 6 6
39517 - 14 14 14 210 210 210 253 253 253 253 253 253
39518 -253 253 253 253 253 253 253 253 253 253 253 253
39519 -253 253 253 253 253 253 246 246 246 242 242 242
39520 -253 253 253 253 253 253 253 253 253 253 253 253
39521 -253 253 253 253 253 253 253 253 253 253 253 253
39522 -253 253 253 253 253 253 253 253 253 253 253 253
39523 -253 253 253 253 253 253 253 253 253 253 253 253
39524 -253 253 253 253 253 253 144 144 144 2 2 6
39525 - 2 2 6 2 2 6 2 2 6 46 46 46
39526 - 2 2 6 2 2 6 2 2 6 2 2 6
39527 - 42 42 42 74 74 74 30 30 30 10 10 10
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 0 0 0 0 0 0
39531 - 0 0 0 0 0 0 0 0 0 0 0 0
39532 - 0 0 0 0 0 0 0 0 0 0 0 0
39533 - 0 0 0 0 0 0 0 0 0 0 0 0
39534 - 0 0 0 0 0 0 0 0 0 0 0 0
39535 - 6 6 6 14 14 14 42 42 42 90 90 90
39536 - 26 26 26 6 6 6 42 42 42 2 2 6
39537 - 74 74 74 250 250 250 253 253 253 253 253 253
39538 -253 253 253 253 253 253 253 253 253 253 253 253
39539 -253 253 253 253 253 253 242 242 242 242 242 242
39540 -253 253 253 253 253 253 253 253 253 253 253 253
39541 -253 253 253 253 253 253 253 253 253 253 253 253
39542 -253 253 253 253 253 253 253 253 253 253 253 253
39543 -253 253 253 253 253 253 253 253 253 253 253 253
39544 -253 253 253 253 253 253 182 182 182 2 2 6
39545 - 2 2 6 2 2 6 2 2 6 46 46 46
39546 - 2 2 6 2 2 6 2 2 6 2 2 6
39547 - 10 10 10 86 86 86 38 38 38 10 10 10
39548 - 0 0 0 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 0 0 0 0 0 0
39551 - 0 0 0 0 0 0 0 0 0 0 0 0
39552 - 0 0 0 0 0 0 0 0 0 0 0 0
39553 - 0 0 0 0 0 0 0 0 0 0 0 0
39554 - 0 0 0 0 0 0 0 0 0 0 0 0
39555 - 10 10 10 26 26 26 66 66 66 82 82 82
39556 - 2 2 6 22 22 22 18 18 18 2 2 6
39557 -149 149 149 253 253 253 253 253 253 253 253 253
39558 -253 253 253 253 253 253 253 253 253 253 253 253
39559 -253 253 253 253 253 253 234 234 234 242 242 242
39560 -253 253 253 253 253 253 253 253 253 253 253 253
39561 -253 253 253 253 253 253 253 253 253 253 253 253
39562 -253 253 253 253 253 253 253 253 253 253 253 253
39563 -253 253 253 253 253 253 253 253 253 253 253 253
39564 -253 253 253 253 253 253 206 206 206 2 2 6
39565 - 2 2 6 2 2 6 2 2 6 38 38 38
39566 - 2 2 6 2 2 6 2 2 6 2 2 6
39567 - 6 6 6 86 86 86 46 46 46 14 14 14
39568 - 0 0 0 0 0 0 0 0 0 0 0 0
39569 - 0 0 0 0 0 0 0 0 0 0 0 0
39570 - 0 0 0 0 0 0 0 0 0 0 0 0
39571 - 0 0 0 0 0 0 0 0 0 0 0 0
39572 - 0 0 0 0 0 0 0 0 0 0 0 0
39573 - 0 0 0 0 0 0 0 0 0 0 0 0
39574 - 0 0 0 0 0 0 0 0 0 6 6 6
39575 - 18 18 18 46 46 46 86 86 86 18 18 18
39576 - 2 2 6 34 34 34 10 10 10 6 6 6
39577 -210 210 210 253 253 253 253 253 253 253 253 253
39578 -253 253 253 253 253 253 253 253 253 253 253 253
39579 -253 253 253 253 253 253 234 234 234 242 242 242
39580 -253 253 253 253 253 253 253 253 253 253 253 253
39581 -253 253 253 253 253 253 253 253 253 253 253 253
39582 -253 253 253 253 253 253 253 253 253 253 253 253
39583 -253 253 253 253 253 253 253 253 253 253 253 253
39584 -253 253 253 253 253 253 221 221 221 6 6 6
39585 - 2 2 6 2 2 6 6 6 6 30 30 30
39586 - 2 2 6 2 2 6 2 2 6 2 2 6
39587 - 2 2 6 82 82 82 54 54 54 18 18 18
39588 - 6 6 6 0 0 0 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 0 0 0 0 0 0 0 0 0 0 0 0
39591 - 0 0 0 0 0 0 0 0 0 0 0 0
39592 - 0 0 0 0 0 0 0 0 0 0 0 0
39593 - 0 0 0 0 0 0 0 0 0 0 0 0
39594 - 0 0 0 0 0 0 0 0 0 10 10 10
39595 - 26 26 26 66 66 66 62 62 62 2 2 6
39596 - 2 2 6 38 38 38 10 10 10 26 26 26
39597 -238 238 238 253 253 253 253 253 253 253 253 253
39598 -253 253 253 253 253 253 253 253 253 253 253 253
39599 -253 253 253 253 253 253 231 231 231 238 238 238
39600 -253 253 253 253 253 253 253 253 253 253 253 253
39601 -253 253 253 253 253 253 253 253 253 253 253 253
39602 -253 253 253 253 253 253 253 253 253 253 253 253
39603 -253 253 253 253 253 253 253 253 253 253 253 253
39604 -253 253 253 253 253 253 231 231 231 6 6 6
39605 - 2 2 6 2 2 6 10 10 10 30 30 30
39606 - 2 2 6 2 2 6 2 2 6 2 2 6
39607 - 2 2 6 66 66 66 58 58 58 22 22 22
39608 - 6 6 6 0 0 0 0 0 0 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 0 0 0 0 0 0 0 0 0 0 0 0
39611 - 0 0 0 0 0 0 0 0 0 0 0 0
39612 - 0 0 0 0 0 0 0 0 0 0 0 0
39613 - 0 0 0 0 0 0 0 0 0 0 0 0
39614 - 0 0 0 0 0 0 0 0 0 10 10 10
39615 - 38 38 38 78 78 78 6 6 6 2 2 6
39616 - 2 2 6 46 46 46 14 14 14 42 42 42
39617 -246 246 246 253 253 253 253 253 253 253 253 253
39618 -253 253 253 253 253 253 253 253 253 253 253 253
39619 -253 253 253 253 253 253 231 231 231 242 242 242
39620 -253 253 253 253 253 253 253 253 253 253 253 253
39621 -253 253 253 253 253 253 253 253 253 253 253 253
39622 -253 253 253 253 253 253 253 253 253 253 253 253
39623 -253 253 253 253 253 253 253 253 253 253 253 253
39624 -253 253 253 253 253 253 234 234 234 10 10 10
39625 - 2 2 6 2 2 6 22 22 22 14 14 14
39626 - 2 2 6 2 2 6 2 2 6 2 2 6
39627 - 2 2 6 66 66 66 62 62 62 22 22 22
39628 - 6 6 6 0 0 0 0 0 0 0 0 0
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 0 0 0 0 0 0 0 0 0 0 0 0
39631 - 0 0 0 0 0 0 0 0 0 0 0 0
39632 - 0 0 0 0 0 0 0 0 0 0 0 0
39633 - 0 0 0 0 0 0 0 0 0 0 0 0
39634 - 0 0 0 0 0 0 6 6 6 18 18 18
39635 - 50 50 50 74 74 74 2 2 6 2 2 6
39636 - 14 14 14 70 70 70 34 34 34 62 62 62
39637 -250 250 250 253 253 253 253 253 253 253 253 253
39638 -253 253 253 253 253 253 253 253 253 253 253 253
39639 -253 253 253 253 253 253 231 231 231 246 246 246
39640 -253 253 253 253 253 253 253 253 253 253 253 253
39641 -253 253 253 253 253 253 253 253 253 253 253 253
39642 -253 253 253 253 253 253 253 253 253 253 253 253
39643 -253 253 253 253 253 253 253 253 253 253 253 253
39644 -253 253 253 253 253 253 234 234 234 14 14 14
39645 - 2 2 6 2 2 6 30 30 30 2 2 6
39646 - 2 2 6 2 2 6 2 2 6 2 2 6
39647 - 2 2 6 66 66 66 62 62 62 22 22 22
39648 - 6 6 6 0 0 0 0 0 0 0 0 0
39649 - 0 0 0 0 0 0 0 0 0 0 0 0
39650 - 0 0 0 0 0 0 0 0 0 0 0 0
39651 - 0 0 0 0 0 0 0 0 0 0 0 0
39652 - 0 0 0 0 0 0 0 0 0 0 0 0
39653 - 0 0 0 0 0 0 0 0 0 0 0 0
39654 - 0 0 0 0 0 0 6 6 6 18 18 18
39655 - 54 54 54 62 62 62 2 2 6 2 2 6
39656 - 2 2 6 30 30 30 46 46 46 70 70 70
39657 -250 250 250 253 253 253 253 253 253 253 253 253
39658 -253 253 253 253 253 253 253 253 253 253 253 253
39659 -253 253 253 253 253 253 231 231 231 246 246 246
39660 -253 253 253 253 253 253 253 253 253 253 253 253
39661 -253 253 253 253 253 253 253 253 253 253 253 253
39662 -253 253 253 253 253 253 253 253 253 253 253 253
39663 -253 253 253 253 253 253 253 253 253 253 253 253
39664 -253 253 253 253 253 253 226 226 226 10 10 10
39665 - 2 2 6 6 6 6 30 30 30 2 2 6
39666 - 2 2 6 2 2 6 2 2 6 2 2 6
39667 - 2 2 6 66 66 66 58 58 58 22 22 22
39668 - 6 6 6 0 0 0 0 0 0 0 0 0
39669 - 0 0 0 0 0 0 0 0 0 0 0 0
39670 - 0 0 0 0 0 0 0 0 0 0 0 0
39671 - 0 0 0 0 0 0 0 0 0 0 0 0
39672 - 0 0 0 0 0 0 0 0 0 0 0 0
39673 - 0 0 0 0 0 0 0 0 0 0 0 0
39674 - 0 0 0 0 0 0 6 6 6 22 22 22
39675 - 58 58 58 62 62 62 2 2 6 2 2 6
39676 - 2 2 6 2 2 6 30 30 30 78 78 78
39677 -250 250 250 253 253 253 253 253 253 253 253 253
39678 -253 253 253 253 253 253 253 253 253 253 253 253
39679 -253 253 253 253 253 253 231 231 231 246 246 246
39680 -253 253 253 253 253 253 253 253 253 253 253 253
39681 -253 253 253 253 253 253 253 253 253 253 253 253
39682 -253 253 253 253 253 253 253 253 253 253 253 253
39683 -253 253 253 253 253 253 253 253 253 253 253 253
39684 -253 253 253 253 253 253 206 206 206 2 2 6
39685 - 22 22 22 34 34 34 18 14 6 22 22 22
39686 - 26 26 26 18 18 18 6 6 6 2 2 6
39687 - 2 2 6 82 82 82 54 54 54 18 18 18
39688 - 6 6 6 0 0 0 0 0 0 0 0 0
39689 - 0 0 0 0 0 0 0 0 0 0 0 0
39690 - 0 0 0 0 0 0 0 0 0 0 0 0
39691 - 0 0 0 0 0 0 0 0 0 0 0 0
39692 - 0 0 0 0 0 0 0 0 0 0 0 0
39693 - 0 0 0 0 0 0 0 0 0 0 0 0
39694 - 0 0 0 0 0 0 6 6 6 26 26 26
39695 - 62 62 62 106 106 106 74 54 14 185 133 11
39696 -210 162 10 121 92 8 6 6 6 62 62 62
39697 -238 238 238 253 253 253 253 253 253 253 253 253
39698 -253 253 253 253 253 253 253 253 253 253 253 253
39699 -253 253 253 253 253 253 231 231 231 246 246 246
39700 -253 253 253 253 253 253 253 253 253 253 253 253
39701 -253 253 253 253 253 253 253 253 253 253 253 253
39702 -253 253 253 253 253 253 253 253 253 253 253 253
39703 -253 253 253 253 253 253 253 253 253 253 253 253
39704 -253 253 253 253 253 253 158 158 158 18 18 18
39705 - 14 14 14 2 2 6 2 2 6 2 2 6
39706 - 6 6 6 18 18 18 66 66 66 38 38 38
39707 - 6 6 6 94 94 94 50 50 50 18 18 18
39708 - 6 6 6 0 0 0 0 0 0 0 0 0
39709 - 0 0 0 0 0 0 0 0 0 0 0 0
39710 - 0 0 0 0 0 0 0 0 0 0 0 0
39711 - 0 0 0 0 0 0 0 0 0 0 0 0
39712 - 0 0 0 0 0 0 0 0 0 0 0 0
39713 - 0 0 0 0 0 0 0 0 0 6 6 6
39714 - 10 10 10 10 10 10 18 18 18 38 38 38
39715 - 78 78 78 142 134 106 216 158 10 242 186 14
39716 -246 190 14 246 190 14 156 118 10 10 10 10
39717 - 90 90 90 238 238 238 253 253 253 253 253 253
39718 -253 253 253 253 253 253 253 253 253 253 253 253
39719 -253 253 253 253 253 253 231 231 231 250 250 250
39720 -253 253 253 253 253 253 253 253 253 253 253 253
39721 -253 253 253 253 253 253 253 253 253 253 253 253
39722 -253 253 253 253 253 253 253 253 253 253 253 253
39723 -253 253 253 253 253 253 253 253 253 246 230 190
39724 -238 204 91 238 204 91 181 142 44 37 26 9
39725 - 2 2 6 2 2 6 2 2 6 2 2 6
39726 - 2 2 6 2 2 6 38 38 38 46 46 46
39727 - 26 26 26 106 106 106 54 54 54 18 18 18
39728 - 6 6 6 0 0 0 0 0 0 0 0 0
39729 - 0 0 0 0 0 0 0 0 0 0 0 0
39730 - 0 0 0 0 0 0 0 0 0 0 0 0
39731 - 0 0 0 0 0 0 0 0 0 0 0 0
39732 - 0 0 0 0 0 0 0 0 0 0 0 0
39733 - 0 0 0 6 6 6 14 14 14 22 22 22
39734 - 30 30 30 38 38 38 50 50 50 70 70 70
39735 -106 106 106 190 142 34 226 170 11 242 186 14
39736 -246 190 14 246 190 14 246 190 14 154 114 10
39737 - 6 6 6 74 74 74 226 226 226 253 253 253
39738 -253 253 253 253 253 253 253 253 253 253 253 253
39739 -253 253 253 253 253 253 231 231 231 250 250 250
39740 -253 253 253 253 253 253 253 253 253 253 253 253
39741 -253 253 253 253 253 253 253 253 253 253 253 253
39742 -253 253 253 253 253 253 253 253 253 253 253 253
39743 -253 253 253 253 253 253 253 253 253 228 184 62
39744 -241 196 14 241 208 19 232 195 16 38 30 10
39745 - 2 2 6 2 2 6 2 2 6 2 2 6
39746 - 2 2 6 6 6 6 30 30 30 26 26 26
39747 -203 166 17 154 142 90 66 66 66 26 26 26
39748 - 6 6 6 0 0 0 0 0 0 0 0 0
39749 - 0 0 0 0 0 0 0 0 0 0 0 0
39750 - 0 0 0 0 0 0 0 0 0 0 0 0
39751 - 0 0 0 0 0 0 0 0 0 0 0 0
39752 - 0 0 0 0 0 0 0 0 0 0 0 0
39753 - 6 6 6 18 18 18 38 38 38 58 58 58
39754 - 78 78 78 86 86 86 101 101 101 123 123 123
39755 -175 146 61 210 150 10 234 174 13 246 186 14
39756 -246 190 14 246 190 14 246 190 14 238 190 10
39757 -102 78 10 2 2 6 46 46 46 198 198 198
39758 -253 253 253 253 253 253 253 253 253 253 253 253
39759 -253 253 253 253 253 253 234 234 234 242 242 242
39760 -253 253 253 253 253 253 253 253 253 253 253 253
39761 -253 253 253 253 253 253 253 253 253 253 253 253
39762 -253 253 253 253 253 253 253 253 253 253 253 253
39763 -253 253 253 253 253 253 253 253 253 224 178 62
39764 -242 186 14 241 196 14 210 166 10 22 18 6
39765 - 2 2 6 2 2 6 2 2 6 2 2 6
39766 - 2 2 6 2 2 6 6 6 6 121 92 8
39767 -238 202 15 232 195 16 82 82 82 34 34 34
39768 - 10 10 10 0 0 0 0 0 0 0 0 0
39769 - 0 0 0 0 0 0 0 0 0 0 0 0
39770 - 0 0 0 0 0 0 0 0 0 0 0 0
39771 - 0 0 0 0 0 0 0 0 0 0 0 0
39772 - 0 0 0 0 0 0 0 0 0 0 0 0
39773 - 14 14 14 38 38 38 70 70 70 154 122 46
39774 -190 142 34 200 144 11 197 138 11 197 138 11
39775 -213 154 11 226 170 11 242 186 14 246 190 14
39776 -246 190 14 246 190 14 246 190 14 246 190 14
39777 -225 175 15 46 32 6 2 2 6 22 22 22
39778 -158 158 158 250 250 250 253 253 253 253 253 253
39779 -253 253 253 253 253 253 253 253 253 253 253 253
39780 -253 253 253 253 253 253 253 253 253 253 253 253
39781 -253 253 253 253 253 253 253 253 253 253 253 253
39782 -253 253 253 253 253 253 253 253 253 253 253 253
39783 -253 253 253 250 250 250 242 242 242 224 178 62
39784 -239 182 13 236 186 11 213 154 11 46 32 6
39785 - 2 2 6 2 2 6 2 2 6 2 2 6
39786 - 2 2 6 2 2 6 61 42 6 225 175 15
39787 -238 190 10 236 186 11 112 100 78 42 42 42
39788 - 14 14 14 0 0 0 0 0 0 0 0 0
39789 - 0 0 0 0 0 0 0 0 0 0 0 0
39790 - 0 0 0 0 0 0 0 0 0 0 0 0
39791 - 0 0 0 0 0 0 0 0 0 0 0 0
39792 - 0 0 0 0 0 0 0 0 0 6 6 6
39793 - 22 22 22 54 54 54 154 122 46 213 154 11
39794 -226 170 11 230 174 11 226 170 11 226 170 11
39795 -236 178 12 242 186 14 246 190 14 246 190 14
39796 -246 190 14 246 190 14 246 190 14 246 190 14
39797 -241 196 14 184 144 12 10 10 10 2 2 6
39798 - 6 6 6 116 116 116 242 242 242 253 253 253
39799 -253 253 253 253 253 253 253 253 253 253 253 253
39800 -253 253 253 253 253 253 253 253 253 253 253 253
39801 -253 253 253 253 253 253 253 253 253 253 253 253
39802 -253 253 253 253 253 253 253 253 253 253 253 253
39803 -253 253 253 231 231 231 198 198 198 214 170 54
39804 -236 178 12 236 178 12 210 150 10 137 92 6
39805 - 18 14 6 2 2 6 2 2 6 2 2 6
39806 - 6 6 6 70 47 6 200 144 11 236 178 12
39807 -239 182 13 239 182 13 124 112 88 58 58 58
39808 - 22 22 22 6 6 6 0 0 0 0 0 0
39809 - 0 0 0 0 0 0 0 0 0 0 0 0
39810 - 0 0 0 0 0 0 0 0 0 0 0 0
39811 - 0 0 0 0 0 0 0 0 0 0 0 0
39812 - 0 0 0 0 0 0 0 0 0 10 10 10
39813 - 30 30 30 70 70 70 180 133 36 226 170 11
39814 -239 182 13 242 186 14 242 186 14 246 186 14
39815 -246 190 14 246 190 14 246 190 14 246 190 14
39816 -246 190 14 246 190 14 246 190 14 246 190 14
39817 -246 190 14 232 195 16 98 70 6 2 2 6
39818 - 2 2 6 2 2 6 66 66 66 221 221 221
39819 -253 253 253 253 253 253 253 253 253 253 253 253
39820 -253 253 253 253 253 253 253 253 253 253 253 253
39821 -253 253 253 253 253 253 253 253 253 253 253 253
39822 -253 253 253 253 253 253 253 253 253 253 253 253
39823 -253 253 253 206 206 206 198 198 198 214 166 58
39824 -230 174 11 230 174 11 216 158 10 192 133 9
39825 -163 110 8 116 81 8 102 78 10 116 81 8
39826 -167 114 7 197 138 11 226 170 11 239 182 13
39827 -242 186 14 242 186 14 162 146 94 78 78 78
39828 - 34 34 34 14 14 14 6 6 6 0 0 0
39829 - 0 0 0 0 0 0 0 0 0 0 0 0
39830 - 0 0 0 0 0 0 0 0 0 0 0 0
39831 - 0 0 0 0 0 0 0 0 0 0 0 0
39832 - 0 0 0 0 0 0 0 0 0 6 6 6
39833 - 30 30 30 78 78 78 190 142 34 226 170 11
39834 -239 182 13 246 190 14 246 190 14 246 190 14
39835 -246 190 14 246 190 14 246 190 14 246 190 14
39836 -246 190 14 246 190 14 246 190 14 246 190 14
39837 -246 190 14 241 196 14 203 166 17 22 18 6
39838 - 2 2 6 2 2 6 2 2 6 38 38 38
39839 -218 218 218 253 253 253 253 253 253 253 253 253
39840 -253 253 253 253 253 253 253 253 253 253 253 253
39841 -253 253 253 253 253 253 253 253 253 253 253 253
39842 -253 253 253 253 253 253 253 253 253 253 253 253
39843 -250 250 250 206 206 206 198 198 198 202 162 69
39844 -226 170 11 236 178 12 224 166 10 210 150 10
39845 -200 144 11 197 138 11 192 133 9 197 138 11
39846 -210 150 10 226 170 11 242 186 14 246 190 14
39847 -246 190 14 246 186 14 225 175 15 124 112 88
39848 - 62 62 62 30 30 30 14 14 14 6 6 6
39849 - 0 0 0 0 0 0 0 0 0 0 0 0
39850 - 0 0 0 0 0 0 0 0 0 0 0 0
39851 - 0 0 0 0 0 0 0 0 0 0 0 0
39852 - 0 0 0 0 0 0 0 0 0 10 10 10
39853 - 30 30 30 78 78 78 174 135 50 224 166 10
39854 -239 182 13 246 190 14 246 190 14 246 190 14
39855 -246 190 14 246 190 14 246 190 14 246 190 14
39856 -246 190 14 246 190 14 246 190 14 246 190 14
39857 -246 190 14 246 190 14 241 196 14 139 102 15
39858 - 2 2 6 2 2 6 2 2 6 2 2 6
39859 - 78 78 78 250 250 250 253 253 253 253 253 253
39860 -253 253 253 253 253 253 253 253 253 253 253 253
39861 -253 253 253 253 253 253 253 253 253 253 253 253
39862 -253 253 253 253 253 253 253 253 253 253 253 253
39863 -250 250 250 214 214 214 198 198 198 190 150 46
39864 -219 162 10 236 178 12 234 174 13 224 166 10
39865 -216 158 10 213 154 11 213 154 11 216 158 10
39866 -226 170 11 239 182 13 246 190 14 246 190 14
39867 -246 190 14 246 190 14 242 186 14 206 162 42
39868 -101 101 101 58 58 58 30 30 30 14 14 14
39869 - 6 6 6 0 0 0 0 0 0 0 0 0
39870 - 0 0 0 0 0 0 0 0 0 0 0 0
39871 - 0 0 0 0 0 0 0 0 0 0 0 0
39872 - 0 0 0 0 0 0 0 0 0 10 10 10
39873 - 30 30 30 74 74 74 174 135 50 216 158 10
39874 -236 178 12 246 190 14 246 190 14 246 190 14
39875 -246 190 14 246 190 14 246 190 14 246 190 14
39876 -246 190 14 246 190 14 246 190 14 246 190 14
39877 -246 190 14 246 190 14 241 196 14 226 184 13
39878 - 61 42 6 2 2 6 2 2 6 2 2 6
39879 - 22 22 22 238 238 238 253 253 253 253 253 253
39880 -253 253 253 253 253 253 253 253 253 253 253 253
39881 -253 253 253 253 253 253 253 253 253 253 253 253
39882 -253 253 253 253 253 253 253 253 253 253 253 253
39883 -253 253 253 226 226 226 187 187 187 180 133 36
39884 -216 158 10 236 178 12 239 182 13 236 178 12
39885 -230 174 11 226 170 11 226 170 11 230 174 11
39886 -236 178 12 242 186 14 246 190 14 246 190 14
39887 -246 190 14 246 190 14 246 186 14 239 182 13
39888 -206 162 42 106 106 106 66 66 66 34 34 34
39889 - 14 14 14 6 6 6 0 0 0 0 0 0
39890 - 0 0 0 0 0 0 0 0 0 0 0 0
39891 - 0 0 0 0 0 0 0 0 0 0 0 0
39892 - 0 0 0 0 0 0 0 0 0 6 6 6
39893 - 26 26 26 70 70 70 163 133 67 213 154 11
39894 -236 178 12 246 190 14 246 190 14 246 190 14
39895 -246 190 14 246 190 14 246 190 14 246 190 14
39896 -246 190 14 246 190 14 246 190 14 246 190 14
39897 -246 190 14 246 190 14 246 190 14 241 196 14
39898 -190 146 13 18 14 6 2 2 6 2 2 6
39899 - 46 46 46 246 246 246 253 253 253 253 253 253
39900 -253 253 253 253 253 253 253 253 253 253 253 253
39901 -253 253 253 253 253 253 253 253 253 253 253 253
39902 -253 253 253 253 253 253 253 253 253 253 253 253
39903 -253 253 253 221 221 221 86 86 86 156 107 11
39904 -216 158 10 236 178 12 242 186 14 246 186 14
39905 -242 186 14 239 182 13 239 182 13 242 186 14
39906 -242 186 14 246 186 14 246 190 14 246 190 14
39907 -246 190 14 246 190 14 246 190 14 246 190 14
39908 -242 186 14 225 175 15 142 122 72 66 66 66
39909 - 30 30 30 10 10 10 0 0 0 0 0 0
39910 - 0 0 0 0 0 0 0 0 0 0 0 0
39911 - 0 0 0 0 0 0 0 0 0 0 0 0
39912 - 0 0 0 0 0 0 0 0 0 6 6 6
39913 - 26 26 26 70 70 70 163 133 67 210 150 10
39914 -236 178 12 246 190 14 246 190 14 246 190 14
39915 -246 190 14 246 190 14 246 190 14 246 190 14
39916 -246 190 14 246 190 14 246 190 14 246 190 14
39917 -246 190 14 246 190 14 246 190 14 246 190 14
39918 -232 195 16 121 92 8 34 34 34 106 106 106
39919 -221 221 221 253 253 253 253 253 253 253 253 253
39920 -253 253 253 253 253 253 253 253 253 253 253 253
39921 -253 253 253 253 253 253 253 253 253 253 253 253
39922 -253 253 253 253 253 253 253 253 253 253 253 253
39923 -242 242 242 82 82 82 18 14 6 163 110 8
39924 -216 158 10 236 178 12 242 186 14 246 190 14
39925 -246 190 14 246 190 14 246 190 14 246 190 14
39926 -246 190 14 246 190 14 246 190 14 246 190 14
39927 -246 190 14 246 190 14 246 190 14 246 190 14
39928 -246 190 14 246 190 14 242 186 14 163 133 67
39929 - 46 46 46 18 18 18 6 6 6 0 0 0
39930 - 0 0 0 0 0 0 0 0 0 0 0 0
39931 - 0 0 0 0 0 0 0 0 0 0 0 0
39932 - 0 0 0 0 0 0 0 0 0 10 10 10
39933 - 30 30 30 78 78 78 163 133 67 210 150 10
39934 -236 178 12 246 186 14 246 190 14 246 190 14
39935 -246 190 14 246 190 14 246 190 14 246 190 14
39936 -246 190 14 246 190 14 246 190 14 246 190 14
39937 -246 190 14 246 190 14 246 190 14 246 190 14
39938 -241 196 14 215 174 15 190 178 144 253 253 253
39939 -253 253 253 253 253 253 253 253 253 253 253 253
39940 -253 253 253 253 253 253 253 253 253 253 253 253
39941 -253 253 253 253 253 253 253 253 253 253 253 253
39942 -253 253 253 253 253 253 253 253 253 218 218 218
39943 - 58 58 58 2 2 6 22 18 6 167 114 7
39944 -216 158 10 236 178 12 246 186 14 246 190 14
39945 -246 190 14 246 190 14 246 190 14 246 190 14
39946 -246 190 14 246 190 14 246 190 14 246 190 14
39947 -246 190 14 246 190 14 246 190 14 246 190 14
39948 -246 190 14 246 186 14 242 186 14 190 150 46
39949 - 54 54 54 22 22 22 6 6 6 0 0 0
39950 - 0 0 0 0 0 0 0 0 0 0 0 0
39951 - 0 0 0 0 0 0 0 0 0 0 0 0
39952 - 0 0 0 0 0 0 0 0 0 14 14 14
39953 - 38 38 38 86 86 86 180 133 36 213 154 11
39954 -236 178 12 246 186 14 246 190 14 246 190 14
39955 -246 190 14 246 190 14 246 190 14 246 190 14
39956 -246 190 14 246 190 14 246 190 14 246 190 14
39957 -246 190 14 246 190 14 246 190 14 246 190 14
39958 -246 190 14 232 195 16 190 146 13 214 214 214
39959 -253 253 253 253 253 253 253 253 253 253 253 253
39960 -253 253 253 253 253 253 253 253 253 253 253 253
39961 -253 253 253 253 253 253 253 253 253 253 253 253
39962 -253 253 253 250 250 250 170 170 170 26 26 26
39963 - 2 2 6 2 2 6 37 26 9 163 110 8
39964 -219 162 10 239 182 13 246 186 14 246 190 14
39965 -246 190 14 246 190 14 246 190 14 246 190 14
39966 -246 190 14 246 190 14 246 190 14 246 190 14
39967 -246 190 14 246 190 14 246 190 14 246 190 14
39968 -246 186 14 236 178 12 224 166 10 142 122 72
39969 - 46 46 46 18 18 18 6 6 6 0 0 0
39970 - 0 0 0 0 0 0 0 0 0 0 0 0
39971 - 0 0 0 0 0 0 0 0 0 0 0 0
39972 - 0 0 0 0 0 0 6 6 6 18 18 18
39973 - 50 50 50 109 106 95 192 133 9 224 166 10
39974 -242 186 14 246 190 14 246 190 14 246 190 14
39975 -246 190 14 246 190 14 246 190 14 246 190 14
39976 -246 190 14 246 190 14 246 190 14 246 190 14
39977 -246 190 14 246 190 14 246 190 14 246 190 14
39978 -242 186 14 226 184 13 210 162 10 142 110 46
39979 -226 226 226 253 253 253 253 253 253 253 253 253
39980 -253 253 253 253 253 253 253 253 253 253 253 253
39981 -253 253 253 253 253 253 253 253 253 253 253 253
39982 -198 198 198 66 66 66 2 2 6 2 2 6
39983 - 2 2 6 2 2 6 50 34 6 156 107 11
39984 -219 162 10 239 182 13 246 186 14 246 190 14
39985 -246 190 14 246 190 14 246 190 14 246 190 14
39986 -246 190 14 246 190 14 246 190 14 246 190 14
39987 -246 190 14 246 190 14 246 190 14 242 186 14
39988 -234 174 13 213 154 11 154 122 46 66 66 66
39989 - 30 30 30 10 10 10 0 0 0 0 0 0
39990 - 0 0 0 0 0 0 0 0 0 0 0 0
39991 - 0 0 0 0 0 0 0 0 0 0 0 0
39992 - 0 0 0 0 0 0 6 6 6 22 22 22
39993 - 58 58 58 154 121 60 206 145 10 234 174 13
39994 -242 186 14 246 186 14 246 190 14 246 190 14
39995 -246 190 14 246 190 14 246 190 14 246 190 14
39996 -246 190 14 246 190 14 246 190 14 246 190 14
39997 -246 190 14 246 190 14 246 190 14 246 190 14
39998 -246 186 14 236 178 12 210 162 10 163 110 8
39999 - 61 42 6 138 138 138 218 218 218 250 250 250
40000 -253 253 253 253 253 253 253 253 253 250 250 250
40001 -242 242 242 210 210 210 144 144 144 66 66 66
40002 - 6 6 6 2 2 6 2 2 6 2 2 6
40003 - 2 2 6 2 2 6 61 42 6 163 110 8
40004 -216 158 10 236 178 12 246 190 14 246 190 14
40005 -246 190 14 246 190 14 246 190 14 246 190 14
40006 -246 190 14 246 190 14 246 190 14 246 190 14
40007 -246 190 14 239 182 13 230 174 11 216 158 10
40008 -190 142 34 124 112 88 70 70 70 38 38 38
40009 - 18 18 18 6 6 6 0 0 0 0 0 0
40010 - 0 0 0 0 0 0 0 0 0 0 0 0
40011 - 0 0 0 0 0 0 0 0 0 0 0 0
40012 - 0 0 0 0 0 0 6 6 6 22 22 22
40013 - 62 62 62 168 124 44 206 145 10 224 166 10
40014 -236 178 12 239 182 13 242 186 14 242 186 14
40015 -246 186 14 246 190 14 246 190 14 246 190 14
40016 -246 190 14 246 190 14 246 190 14 246 190 14
40017 -246 190 14 246 190 14 246 190 14 246 190 14
40018 -246 190 14 236 178 12 216 158 10 175 118 6
40019 - 80 54 7 2 2 6 6 6 6 30 30 30
40020 - 54 54 54 62 62 62 50 50 50 38 38 38
40021 - 14 14 14 2 2 6 2 2 6 2 2 6
40022 - 2 2 6 2 2 6 2 2 6 2 2 6
40023 - 2 2 6 6 6 6 80 54 7 167 114 7
40024 -213 154 11 236 178 12 246 190 14 246 190 14
40025 -246 190 14 246 190 14 246 190 14 246 190 14
40026 -246 190 14 242 186 14 239 182 13 239 182 13
40027 -230 174 11 210 150 10 174 135 50 124 112 88
40028 - 82 82 82 54 54 54 34 34 34 18 18 18
40029 - 6 6 6 0 0 0 0 0 0 0 0 0
40030 - 0 0 0 0 0 0 0 0 0 0 0 0
40031 - 0 0 0 0 0 0 0 0 0 0 0 0
40032 - 0 0 0 0 0 0 6 6 6 18 18 18
40033 - 50 50 50 158 118 36 192 133 9 200 144 11
40034 -216 158 10 219 162 10 224 166 10 226 170 11
40035 -230 174 11 236 178 12 239 182 13 239 182 13
40036 -242 186 14 246 186 14 246 190 14 246 190 14
40037 -246 190 14 246 190 14 246 190 14 246 190 14
40038 -246 186 14 230 174 11 210 150 10 163 110 8
40039 -104 69 6 10 10 10 2 2 6 2 2 6
40040 - 2 2 6 2 2 6 2 2 6 2 2 6
40041 - 2 2 6 2 2 6 2 2 6 2 2 6
40042 - 2 2 6 2 2 6 2 2 6 2 2 6
40043 - 2 2 6 6 6 6 91 60 6 167 114 7
40044 -206 145 10 230 174 11 242 186 14 246 190 14
40045 -246 190 14 246 190 14 246 186 14 242 186 14
40046 -239 182 13 230 174 11 224 166 10 213 154 11
40047 -180 133 36 124 112 88 86 86 86 58 58 58
40048 - 38 38 38 22 22 22 10 10 10 6 6 6
40049 - 0 0 0 0 0 0 0 0 0 0 0 0
40050 - 0 0 0 0 0 0 0 0 0 0 0 0
40051 - 0 0 0 0 0 0 0 0 0 0 0 0
40052 - 0 0 0 0 0 0 0 0 0 14 14 14
40053 - 34 34 34 70 70 70 138 110 50 158 118 36
40054 -167 114 7 180 123 7 192 133 9 197 138 11
40055 -200 144 11 206 145 10 213 154 11 219 162 10
40056 -224 166 10 230 174 11 239 182 13 242 186 14
40057 -246 186 14 246 186 14 246 186 14 246 186 14
40058 -239 182 13 216 158 10 185 133 11 152 99 6
40059 -104 69 6 18 14 6 2 2 6 2 2 6
40060 - 2 2 6 2 2 6 2 2 6 2 2 6
40061 - 2 2 6 2 2 6 2 2 6 2 2 6
40062 - 2 2 6 2 2 6 2 2 6 2 2 6
40063 - 2 2 6 6 6 6 80 54 7 152 99 6
40064 -192 133 9 219 162 10 236 178 12 239 182 13
40065 -246 186 14 242 186 14 239 182 13 236 178 12
40066 -224 166 10 206 145 10 192 133 9 154 121 60
40067 - 94 94 94 62 62 62 42 42 42 22 22 22
40068 - 14 14 14 6 6 6 0 0 0 0 0 0
40069 - 0 0 0 0 0 0 0 0 0 0 0 0
40070 - 0 0 0 0 0 0 0 0 0 0 0 0
40071 - 0 0 0 0 0 0 0 0 0 0 0 0
40072 - 0 0 0 0 0 0 0 0 0 6 6 6
40073 - 18 18 18 34 34 34 58 58 58 78 78 78
40074 -101 98 89 124 112 88 142 110 46 156 107 11
40075 -163 110 8 167 114 7 175 118 6 180 123 7
40076 -185 133 11 197 138 11 210 150 10 219 162 10
40077 -226 170 11 236 178 12 236 178 12 234 174 13
40078 -219 162 10 197 138 11 163 110 8 130 83 6
40079 - 91 60 6 10 10 10 2 2 6 2 2 6
40080 - 18 18 18 38 38 38 38 38 38 38 38 38
40081 - 38 38 38 38 38 38 38 38 38 38 38 38
40082 - 38 38 38 38 38 38 26 26 26 2 2 6
40083 - 2 2 6 6 6 6 70 47 6 137 92 6
40084 -175 118 6 200 144 11 219 162 10 230 174 11
40085 -234 174 13 230 174 11 219 162 10 210 150 10
40086 -192 133 9 163 110 8 124 112 88 82 82 82
40087 - 50 50 50 30 30 30 14 14 14 6 6 6
40088 - 0 0 0 0 0 0 0 0 0 0 0 0
40089 - 0 0 0 0 0 0 0 0 0 0 0 0
40090 - 0 0 0 0 0 0 0 0 0 0 0 0
40091 - 0 0 0 0 0 0 0 0 0 0 0 0
40092 - 0 0 0 0 0 0 0 0 0 0 0 0
40093 - 6 6 6 14 14 14 22 22 22 34 34 34
40094 - 42 42 42 58 58 58 74 74 74 86 86 86
40095 -101 98 89 122 102 70 130 98 46 121 87 25
40096 -137 92 6 152 99 6 163 110 8 180 123 7
40097 -185 133 11 197 138 11 206 145 10 200 144 11
40098 -180 123 7 156 107 11 130 83 6 104 69 6
40099 - 50 34 6 54 54 54 110 110 110 101 98 89
40100 - 86 86 86 82 82 82 78 78 78 78 78 78
40101 - 78 78 78 78 78 78 78 78 78 78 78 78
40102 - 78 78 78 82 82 82 86 86 86 94 94 94
40103 -106 106 106 101 101 101 86 66 34 124 80 6
40104 -156 107 11 180 123 7 192 133 9 200 144 11
40105 -206 145 10 200 144 11 192 133 9 175 118 6
40106 -139 102 15 109 106 95 70 70 70 42 42 42
40107 - 22 22 22 10 10 10 0 0 0 0 0 0
40108 - 0 0 0 0 0 0 0 0 0 0 0 0
40109 - 0 0 0 0 0 0 0 0 0 0 0 0
40110 - 0 0 0 0 0 0 0 0 0 0 0 0
40111 - 0 0 0 0 0 0 0 0 0 0 0 0
40112 - 0 0 0 0 0 0 0 0 0 0 0 0
40113 - 0 0 0 0 0 0 6 6 6 10 10 10
40114 - 14 14 14 22 22 22 30 30 30 38 38 38
40115 - 50 50 50 62 62 62 74 74 74 90 90 90
40116 -101 98 89 112 100 78 121 87 25 124 80 6
40117 -137 92 6 152 99 6 152 99 6 152 99 6
40118 -138 86 6 124 80 6 98 70 6 86 66 30
40119 -101 98 89 82 82 82 58 58 58 46 46 46
40120 - 38 38 38 34 34 34 34 34 34 34 34 34
40121 - 34 34 34 34 34 34 34 34 34 34 34 34
40122 - 34 34 34 34 34 34 38 38 38 42 42 42
40123 - 54 54 54 82 82 82 94 86 76 91 60 6
40124 -134 86 6 156 107 11 167 114 7 175 118 6
40125 -175 118 6 167 114 7 152 99 6 121 87 25
40126 -101 98 89 62 62 62 34 34 34 18 18 18
40127 - 6 6 6 0 0 0 0 0 0 0 0 0
40128 - 0 0 0 0 0 0 0 0 0 0 0 0
40129 - 0 0 0 0 0 0 0 0 0 0 0 0
40130 - 0 0 0 0 0 0 0 0 0 0 0 0
40131 - 0 0 0 0 0 0 0 0 0 0 0 0
40132 - 0 0 0 0 0 0 0 0 0 0 0 0
40133 - 0 0 0 0 0 0 0 0 0 0 0 0
40134 - 0 0 0 6 6 6 6 6 6 10 10 10
40135 - 18 18 18 22 22 22 30 30 30 42 42 42
40136 - 50 50 50 66 66 66 86 86 86 101 98 89
40137 -106 86 58 98 70 6 104 69 6 104 69 6
40138 -104 69 6 91 60 6 82 62 34 90 90 90
40139 - 62 62 62 38 38 38 22 22 22 14 14 14
40140 - 10 10 10 10 10 10 10 10 10 10 10 10
40141 - 10 10 10 10 10 10 6 6 6 10 10 10
40142 - 10 10 10 10 10 10 10 10 10 14 14 14
40143 - 22 22 22 42 42 42 70 70 70 89 81 66
40144 - 80 54 7 104 69 6 124 80 6 137 92 6
40145 -134 86 6 116 81 8 100 82 52 86 86 86
40146 - 58 58 58 30 30 30 14 14 14 6 6 6
40147 - 0 0 0 0 0 0 0 0 0 0 0 0
40148 - 0 0 0 0 0 0 0 0 0 0 0 0
40149 - 0 0 0 0 0 0 0 0 0 0 0 0
40150 - 0 0 0 0 0 0 0 0 0 0 0 0
40151 - 0 0 0 0 0 0 0 0 0 0 0 0
40152 - 0 0 0 0 0 0 0 0 0 0 0 0
40153 - 0 0 0 0 0 0 0 0 0 0 0 0
40154 - 0 0 0 0 0 0 0 0 0 0 0 0
40155 - 0 0 0 6 6 6 10 10 10 14 14 14
40156 - 18 18 18 26 26 26 38 38 38 54 54 54
40157 - 70 70 70 86 86 86 94 86 76 89 81 66
40158 - 89 81 66 86 86 86 74 74 74 50 50 50
40159 - 30 30 30 14 14 14 6 6 6 0 0 0
40160 - 0 0 0 0 0 0 0 0 0 0 0 0
40161 - 0 0 0 0 0 0 0 0 0 0 0 0
40162 - 0 0 0 0 0 0 0 0 0 0 0 0
40163 - 6 6 6 18 18 18 34 34 34 58 58 58
40164 - 82 82 82 89 81 66 89 81 66 89 81 66
40165 - 94 86 66 94 86 76 74 74 74 50 50 50
40166 - 26 26 26 14 14 14 6 6 6 0 0 0
40167 - 0 0 0 0 0 0 0 0 0 0 0 0
40168 - 0 0 0 0 0 0 0 0 0 0 0 0
40169 - 0 0 0 0 0 0 0 0 0 0 0 0
40170 - 0 0 0 0 0 0 0 0 0 0 0 0
40171 - 0 0 0 0 0 0 0 0 0 0 0 0
40172 - 0 0 0 0 0 0 0 0 0 0 0 0
40173 - 0 0 0 0 0 0 0 0 0 0 0 0
40174 - 0 0 0 0 0 0 0 0 0 0 0 0
40175 - 0 0 0 0 0 0 0 0 0 0 0 0
40176 - 6 6 6 6 6 6 14 14 14 18 18 18
40177 - 30 30 30 38 38 38 46 46 46 54 54 54
40178 - 50 50 50 42 42 42 30 30 30 18 18 18
40179 - 10 10 10 0 0 0 0 0 0 0 0 0
40180 - 0 0 0 0 0 0 0 0 0 0 0 0
40181 - 0 0 0 0 0 0 0 0 0 0 0 0
40182 - 0 0 0 0 0 0 0 0 0 0 0 0
40183 - 0 0 0 6 6 6 14 14 14 26 26 26
40184 - 38 38 38 50 50 50 58 58 58 58 58 58
40185 - 54 54 54 42 42 42 30 30 30 18 18 18
40186 - 10 10 10 0 0 0 0 0 0 0 0 0
40187 - 0 0 0 0 0 0 0 0 0 0 0 0
40188 - 0 0 0 0 0 0 0 0 0 0 0 0
40189 - 0 0 0 0 0 0 0 0 0 0 0 0
40190 - 0 0 0 0 0 0 0 0 0 0 0 0
40191 - 0 0 0 0 0 0 0 0 0 0 0 0
40192 - 0 0 0 0 0 0 0 0 0 0 0 0
40193 - 0 0 0 0 0 0 0 0 0 0 0 0
40194 - 0 0 0 0 0 0 0 0 0 0 0 0
40195 - 0 0 0 0 0 0 0 0 0 0 0 0
40196 - 0 0 0 0 0 0 0 0 0 6 6 6
40197 - 6 6 6 10 10 10 14 14 14 18 18 18
40198 - 18 18 18 14 14 14 10 10 10 6 6 6
40199 - 0 0 0 0 0 0 0 0 0 0 0 0
40200 - 0 0 0 0 0 0 0 0 0 0 0 0
40201 - 0 0 0 0 0 0 0 0 0 0 0 0
40202 - 0 0 0 0 0 0 0 0 0 0 0 0
40203 - 0 0 0 0 0 0 0 0 0 6 6 6
40204 - 14 14 14 18 18 18 22 22 22 22 22 22
40205 - 18 18 18 14 14 14 10 10 10 6 6 6
40206 - 0 0 0 0 0 0 0 0 0 0 0 0
40207 - 0 0 0 0 0 0 0 0 0 0 0 0
40208 - 0 0 0 0 0 0 0 0 0 0 0 0
40209 - 0 0 0 0 0 0 0 0 0 0 0 0
40210 - 0 0 0 0 0 0 0 0 0 0 0 0
40211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40224 +4 4 4 4 4 4
40225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40238 +4 4 4 4 4 4
40239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252 +4 4 4 4 4 4
40253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266 +4 4 4 4 4 4
40267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280 +4 4 4 4 4 4
40281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294 +4 4 4 4 4 4
40295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40300 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40304 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40305 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40306 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308 +4 4 4 4 4 4
40309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40314 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40315 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40318 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40319 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40320 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40321 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322 +4 4 4 4 4 4
40323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40328 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40329 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40330 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40333 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40334 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40335 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40336 +4 4 4 4 4 4
40337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40341 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40342 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40343 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40344 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40345 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40346 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40347 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40348 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40349 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40350 +4 4 4 4 4 4
40351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40355 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40356 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40357 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40358 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40359 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40360 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40361 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40362 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40363 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40364 +4 4 4 4 4 4
40365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40368 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40369 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40370 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40371 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40372 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40373 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40374 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40375 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40376 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40377 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40378 +4 4 4 4 4 4
40379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40382 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40383 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40384 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40385 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40386 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40387 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40388 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40389 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40390 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40391 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40392 +4 4 4 4 4 4
40393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40394 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40395 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40396 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40397 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40398 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40399 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40400 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40401 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40402 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40403 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40404 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40405 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40406 +4 4 4 4 4 4
40407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40408 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40409 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40410 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40411 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40412 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40413 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40414 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40415 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40416 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40417 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40418 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40419 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40420 +4 4 4 4 4 4
40421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40422 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40424 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40425 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40426 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40427 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40428 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40429 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40430 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40431 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40432 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40433 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40434 +4 4 4 4 4 4
40435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40436 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40437 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40438 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40439 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40440 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40441 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40442 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40443 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40444 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40445 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40446 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40447 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40448 +4 4 4 4 4 4
40449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40450 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40451 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40452 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40453 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40454 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40455 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40456 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40457 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40458 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40459 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40460 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40461 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40462 +0 0 0 4 4 4
40463 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40464 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40465 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40466 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40467 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40468 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40469 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40470 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40471 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40472 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40473 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40474 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40475 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40476 +2 0 0 0 0 0
40477 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40478 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40479 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40480 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40481 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40482 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40483 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40484 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40485 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40486 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40487 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40488 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40489 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40490 +37 38 37 0 0 0
40491 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40492 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40493 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40494 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40495 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40496 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40497 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40498 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40499 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40500 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40501 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40502 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40503 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40504 +85 115 134 4 0 0
40505 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40506 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40507 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40508 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40509 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40510 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40511 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40512 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40513 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40514 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40515 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40516 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40517 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40518 +60 73 81 4 0 0
40519 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40520 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40521 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40522 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40523 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40524 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40525 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40526 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40527 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40528 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40529 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40530 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40531 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40532 +16 19 21 4 0 0
40533 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40534 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40535 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40536 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40537 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40538 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40539 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40540 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40541 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40542 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40543 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40544 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40545 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40546 +4 0 0 4 3 3
40547 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40548 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40549 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40551 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40552 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40553 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40554 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40555 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40556 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40557 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40558 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40559 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40560 +3 2 2 4 4 4
40561 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40562 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40563 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40564 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40565 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40566 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40567 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40568 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40569 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40570 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40571 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40572 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40573 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40574 +4 4 4 4 4 4
40575 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40576 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40577 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40578 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40579 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40580 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40581 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40582 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40583 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40584 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40585 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40586 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40587 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40588 +4 4 4 4 4 4
40589 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40590 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40591 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40592 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40593 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40594 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40595 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40596 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40597 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40598 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40599 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40600 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40601 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40602 +5 5 5 5 5 5
40603 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40604 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40605 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40606 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40607 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40608 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40609 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40610 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40611 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40612 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40613 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40614 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40615 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40616 +5 5 5 4 4 4
40617 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40618 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40619 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40620 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40621 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40622 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40623 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40624 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40625 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40626 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40627 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40628 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40630 +4 4 4 4 4 4
40631 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40632 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40633 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40634 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40635 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40636 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40637 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40638 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40639 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40640 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40641 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40642 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40644 +4 4 4 4 4 4
40645 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40646 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40647 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40648 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40649 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40650 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40651 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40652 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40653 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40654 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40655 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40658 +4 4 4 4 4 4
40659 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40660 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40661 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40662 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40663 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40664 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40665 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40666 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40667 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40668 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40669 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40672 +4 4 4 4 4 4
40673 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40674 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40675 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40676 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40677 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40678 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40679 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40680 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40681 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40682 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40683 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40686 +4 4 4 4 4 4
40687 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40688 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40689 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40690 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40691 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40692 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40693 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40694 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40695 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40696 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40697 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40700 +4 4 4 4 4 4
40701 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40702 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40703 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40704 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40705 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40706 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40707 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40708 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40709 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40710 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40711 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40714 +4 4 4 4 4 4
40715 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40716 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40717 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40718 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40719 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40720 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40721 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40722 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40723 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40724 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40725 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40728 +4 4 4 4 4 4
40729 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40730 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40731 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40732 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40733 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40734 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40735 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40736 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40737 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40738 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40739 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40742 +4 4 4 4 4 4
40743 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40744 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40745 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40746 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40747 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40748 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40749 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40750 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40751 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40752 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40753 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40756 +4 4 4 4 4 4
40757 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40758 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40759 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40760 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40761 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40762 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40763 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40764 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40765 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40766 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40767 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40770 +4 4 4 4 4 4
40771 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40772 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40773 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40774 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40775 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40776 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40777 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40778 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40779 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40780 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40781 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40784 +4 4 4 4 4 4
40785 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40786 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40787 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40788 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40789 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40790 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40791 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40792 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40793 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40794 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40795 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40797 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40798 +4 4 4 4 4 4
40799 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40800 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40801 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40802 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40803 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40804 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40805 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40806 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40807 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40808 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40809 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40811 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40812 +4 4 4 4 4 4
40813 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40814 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40815 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40816 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40817 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40818 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40819 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40820 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40821 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40822 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40823 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40825 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40826 +4 4 4 4 4 4
40827 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40828 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40829 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40830 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40831 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40832 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40833 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40834 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40835 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40836 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40837 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40838 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40840 +4 4 4 4 4 4
40841 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40842 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40843 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40844 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40845 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40846 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40847 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40848 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40849 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40850 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40851 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40852 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40854 +4 4 4 4 4 4
40855 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40856 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40857 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40858 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40859 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40860 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40861 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40862 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40863 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40864 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40865 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40866 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40868 +4 4 4 4 4 4
40869 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40870 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40871 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40872 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40873 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40874 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40875 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40876 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40877 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40878 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40879 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40880 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40882 +4 4 4 4 4 4
40883 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40884 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40885 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40886 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40887 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40888 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40889 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40890 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40891 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40892 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40893 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40894 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40896 +4 4 4 4 4 4
40897 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40898 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40899 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40900 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40901 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40902 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40903 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40904 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40905 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40906 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40907 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40908 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40910 +4 4 4 4 4 4
40911 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40912 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40913 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40914 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40915 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40916 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40917 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40918 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40919 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40920 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40921 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40922 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40924 +4 4 4 4 4 4
40925 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40926 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40927 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40928 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40929 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40930 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40931 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40932 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40933 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40934 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40935 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40936 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40938 +4 4 4 4 4 4
40939 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40940 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40941 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40942 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40943 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40944 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40945 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40946 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40947 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40948 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40949 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40950 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40952 +4 4 4 4 4 4
40953 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40954 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40955 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40956 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40957 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40958 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40959 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40960 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40961 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40962 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40963 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40964 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40966 +4 4 4 4 4 4
40967 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40968 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40969 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40970 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40971 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40972 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40973 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40974 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40975 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40976 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40977 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40978 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40980 +4 4 4 4 4 4
40981 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40982 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40983 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40984 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40985 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40986 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40987 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40988 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40989 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40990 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40991 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40992 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40994 +4 4 4 4 4 4
40995 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40996 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40997 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40998 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40999 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41000 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41001 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41002 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41003 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41004 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41005 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41006 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41008 +4 4 4 4 4 4
41009 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41010 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41011 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41012 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41013 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41014 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41015 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41016 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41017 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41018 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41019 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41020 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41022 +4 4 4 4 4 4
41023 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41024 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41025 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41026 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41027 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41028 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41029 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41030 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41031 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41032 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41033 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41034 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41036 +4 4 4 4 4 4
41037 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41038 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41039 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41040 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41041 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41042 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41043 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41044 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41045 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41046 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41047 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41050 +4 4 4 4 4 4
41051 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41052 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41053 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41054 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41055 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41056 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41057 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41058 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41059 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41060 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41061 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41062 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41064 +4 4 4 4 4 4
41065 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41066 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41067 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41068 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41069 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41070 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41071 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41072 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41073 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41074 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41075 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41076 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41078 +4 4 4 4 4 4
41079 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41080 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41081 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41082 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41083 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41084 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41085 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41086 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41087 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41090 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41092 +4 4 4 4 4 4
41093 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41094 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41095 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41096 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41097 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41098 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41099 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41100 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41101 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41104 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41106 +4 4 4 4 4 4
41107 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41108 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41109 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41110 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41111 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41112 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41113 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41114 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41115 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41118 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41120 +4 4 4 4 4 4
41121 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41122 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41123 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41124 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41125 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41126 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41127 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41128 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41132 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41134 +4 4 4 4 4 4
41135 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41136 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41137 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41138 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41139 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41140 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41141 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41142 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41146 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41148 +4 4 4 4 4 4
41149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41150 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41151 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41152 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41153 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41154 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41155 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41156 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41160 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41162 +4 4 4 4 4 4
41163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41164 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41165 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41166 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41167 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41168 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41169 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41170 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41174 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41176 +4 4 4 4 4 4
41177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41178 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41179 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41180 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41181 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41182 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41183 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41184 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41187 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41188 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41190 +4 4 4 4 4 4
41191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41193 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41194 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41195 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41196 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41197 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41198 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41200 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41201 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41202 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41204 +4 4 4 4 4 4
41205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41206 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41208 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41209 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41210 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41211 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41212 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41214 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41215 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41216 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41218 +4 4 4 4 4 4
41219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41220 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41223 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41224 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41225 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41226 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41229 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41230 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41232 +4 4 4 4 4 4
41233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41234 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41237 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41238 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41239 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41240 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41244 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41246 +4 4 4 4 4 4
41247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41251 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41252 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41253 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41260 +4 4 4 4 4 4
41261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41265 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41266 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41267 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41274 +4 4 4 4 4 4
41275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41280 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41281 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41288 +4 4 4 4 4 4
41289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41294 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41295 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41302 +4 4 4 4 4 4
41303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41308 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41316 +4 4 4 4 4 4
41317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41322 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41330 +4 4 4 4 4 4
41331 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41332 index 087fc99..f85ed76 100644
41333 --- a/drivers/video/udlfb.c
41334 +++ b/drivers/video/udlfb.c
41335 @@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41336 dlfb_urb_completion(urb);
41337
41338 error:
41339 - atomic_add(bytes_sent, &dev->bytes_sent);
41340 - atomic_add(bytes_identical, &dev->bytes_identical);
41341 - atomic_add(width*height*2, &dev->bytes_rendered);
41342 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41343 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41344 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41345 end_cycles = get_cycles();
41346 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41347 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41348 >> 10)), /* Kcycles */
41349 &dev->cpu_kcycles_used);
41350
41351 @@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41352 dlfb_urb_completion(urb);
41353
41354 error:
41355 - atomic_add(bytes_sent, &dev->bytes_sent);
41356 - atomic_add(bytes_identical, &dev->bytes_identical);
41357 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41358 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41359 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41360 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41361 end_cycles = get_cycles();
41362 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41363 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41364 >> 10)), /* Kcycles */
41365 &dev->cpu_kcycles_used);
41366 }
41367 @@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41368 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41369 struct dlfb_data *dev = fb_info->par;
41370 return snprintf(buf, PAGE_SIZE, "%u\n",
41371 - atomic_read(&dev->bytes_rendered));
41372 + atomic_read_unchecked(&dev->bytes_rendered));
41373 }
41374
41375 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41376 @@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41377 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41378 struct dlfb_data *dev = fb_info->par;
41379 return snprintf(buf, PAGE_SIZE, "%u\n",
41380 - atomic_read(&dev->bytes_identical));
41381 + atomic_read_unchecked(&dev->bytes_identical));
41382 }
41383
41384 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41385 @@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41386 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41387 struct dlfb_data *dev = fb_info->par;
41388 return snprintf(buf, PAGE_SIZE, "%u\n",
41389 - atomic_read(&dev->bytes_sent));
41390 + atomic_read_unchecked(&dev->bytes_sent));
41391 }
41392
41393 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41394 @@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41395 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41396 struct dlfb_data *dev = fb_info->par;
41397 return snprintf(buf, PAGE_SIZE, "%u\n",
41398 - atomic_read(&dev->cpu_kcycles_used));
41399 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41400 }
41401
41402 static ssize_t edid_show(
41403 @@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41404 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41405 struct dlfb_data *dev = fb_info->par;
41406
41407 - atomic_set(&dev->bytes_rendered, 0);
41408 - atomic_set(&dev->bytes_identical, 0);
41409 - atomic_set(&dev->bytes_sent, 0);
41410 - atomic_set(&dev->cpu_kcycles_used, 0);
41411 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41412 + atomic_set_unchecked(&dev->bytes_identical, 0);
41413 + atomic_set_unchecked(&dev->bytes_sent, 0);
41414 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41415
41416 return count;
41417 }
41418 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41419 index 7f8472c..9842e87 100644
41420 --- a/drivers/video/uvesafb.c
41421 +++ b/drivers/video/uvesafb.c
41422 @@ -19,6 +19,7 @@
41423 #include <linux/io.h>
41424 #include <linux/mutex.h>
41425 #include <linux/slab.h>
41426 +#include <linux/moduleloader.h>
41427 #include <video/edid.h>
41428 #include <video/uvesafb.h>
41429 #ifdef CONFIG_X86
41430 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41431 NULL,
41432 };
41433
41434 - return call_usermodehelper(v86d_path, argv, envp, 1);
41435 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41436 }
41437
41438 /*
41439 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41440 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41441 par->pmi_setpal = par->ypan = 0;
41442 } else {
41443 +
41444 +#ifdef CONFIG_PAX_KERNEXEC
41445 +#ifdef CONFIG_MODULES
41446 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41447 +#endif
41448 + if (!par->pmi_code) {
41449 + par->pmi_setpal = par->ypan = 0;
41450 + return 0;
41451 + }
41452 +#endif
41453 +
41454 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41455 + task->t.regs.edi);
41456 +
41457 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41458 + pax_open_kernel();
41459 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41460 + pax_close_kernel();
41461 +
41462 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41463 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41464 +#else
41465 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41466 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41467 +#endif
41468 +
41469 printk(KERN_INFO "uvesafb: protected mode interface info at "
41470 "%04x:%04x\n",
41471 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41472 @@ -1821,6 +1844,11 @@ out:
41473 if (par->vbe_modes)
41474 kfree(par->vbe_modes);
41475
41476 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41477 + if (par->pmi_code)
41478 + module_free_exec(NULL, par->pmi_code);
41479 +#endif
41480 +
41481 framebuffer_release(info);
41482 return err;
41483 }
41484 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
41485 kfree(par->vbe_state_orig);
41486 if (par->vbe_state_saved)
41487 kfree(par->vbe_state_saved);
41488 +
41489 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41490 + if (par->pmi_code)
41491 + module_free_exec(NULL, par->pmi_code);
41492 +#endif
41493 +
41494 }
41495
41496 framebuffer_release(info);
41497 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41498 index 501b340..86bd4cf 100644
41499 --- a/drivers/video/vesafb.c
41500 +++ b/drivers/video/vesafb.c
41501 @@ -9,6 +9,7 @@
41502 */
41503
41504 #include <linux/module.h>
41505 +#include <linux/moduleloader.h>
41506 #include <linux/kernel.h>
41507 #include <linux/errno.h>
41508 #include <linux/string.h>
41509 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41510 static int vram_total __initdata; /* Set total amount of memory */
41511 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41512 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41513 -static void (*pmi_start)(void) __read_mostly;
41514 -static void (*pmi_pal) (void) __read_mostly;
41515 +static void (*pmi_start)(void) __read_only;
41516 +static void (*pmi_pal) (void) __read_only;
41517 static int depth __read_mostly;
41518 static int vga_compat __read_mostly;
41519 /* --------------------------------------------------------------------- */
41520 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41521 unsigned int size_vmode;
41522 unsigned int size_remap;
41523 unsigned int size_total;
41524 + void *pmi_code = NULL;
41525
41526 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41527 return -ENODEV;
41528 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41529 size_remap = size_total;
41530 vesafb_fix.smem_len = size_remap;
41531
41532 -#ifndef __i386__
41533 - screen_info.vesapm_seg = 0;
41534 -#endif
41535 -
41536 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41537 printk(KERN_WARNING
41538 "vesafb: cannot reserve video memory at 0x%lx\n",
41539 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41540 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41541 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41542
41543 +#ifdef __i386__
41544 +
41545 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41546 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41547 + if (!pmi_code)
41548 +#elif !defined(CONFIG_PAX_KERNEXEC)
41549 + if (0)
41550 +#endif
41551 +
41552 +#endif
41553 + screen_info.vesapm_seg = 0;
41554 +
41555 if (screen_info.vesapm_seg) {
41556 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41557 - screen_info.vesapm_seg,screen_info.vesapm_off);
41558 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41559 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41560 }
41561
41562 if (screen_info.vesapm_seg < 0xc000)
41563 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41564
41565 if (ypan || pmi_setpal) {
41566 unsigned short *pmi_base;
41567 +
41568 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41569 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41570 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41571 +
41572 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41573 + pax_open_kernel();
41574 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41575 +#else
41576 + pmi_code = pmi_base;
41577 +#endif
41578 +
41579 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41580 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41581 +
41582 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41583 + pmi_start = ktva_ktla(pmi_start);
41584 + pmi_pal = ktva_ktla(pmi_pal);
41585 + pax_close_kernel();
41586 +#endif
41587 +
41588 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41589 if (pmi_base[3]) {
41590 printk(KERN_INFO "vesafb: pmi: ports = ");
41591 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41592 info->node, info->fix.id);
41593 return 0;
41594 err:
41595 +
41596 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41597 + module_free_exec(NULL, pmi_code);
41598 +#endif
41599 +
41600 if (info->screen_base)
41601 iounmap(info->screen_base);
41602 framebuffer_release(info);
41603 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41604 index 88714ae..16c2e11 100644
41605 --- a/drivers/video/via/via_clock.h
41606 +++ b/drivers/video/via/via_clock.h
41607 @@ -56,7 +56,7 @@ struct via_clock {
41608
41609 void (*set_engine_pll_state)(u8 state);
41610 void (*set_engine_pll)(struct via_pll_config config);
41611 -};
41612 +} __no_const;
41613
41614
41615 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41616 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
41617 index e058ace..2424d93 100644
41618 --- a/drivers/virtio/virtio_balloon.c
41619 +++ b/drivers/virtio/virtio_balloon.c
41620 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
41621 struct sysinfo i;
41622 int idx = 0;
41623
41624 + pax_track_stack();
41625 +
41626 all_vm_events(events);
41627 si_meminfo(&i);
41628
41629 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41630 index e56c934..fc22f4b 100644
41631 --- a/drivers/xen/xen-pciback/conf_space.h
41632 +++ b/drivers/xen/xen-pciback/conf_space.h
41633 @@ -44,15 +44,15 @@ struct config_field {
41634 struct {
41635 conf_dword_write write;
41636 conf_dword_read read;
41637 - } dw;
41638 + } __no_const dw;
41639 struct {
41640 conf_word_write write;
41641 conf_word_read read;
41642 - } w;
41643 + } __no_const w;
41644 struct {
41645 conf_byte_write write;
41646 conf_byte_read read;
41647 - } b;
41648 + } __no_const b;
41649 } u;
41650 struct list_head list;
41651 };
41652 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41653 index e3c03db..93b0172 100644
41654 --- a/fs/9p/vfs_inode.c
41655 +++ b/fs/9p/vfs_inode.c
41656 @@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41657 void
41658 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41659 {
41660 - char *s = nd_get_link(nd);
41661 + const char *s = nd_get_link(nd);
41662
41663 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
41664 IS_ERR(s) ? "<error>" : s);
41665 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41666 index 79e2ca7..5828ad1 100644
41667 --- a/fs/Kconfig.binfmt
41668 +++ b/fs/Kconfig.binfmt
41669 @@ -86,7 +86,7 @@ config HAVE_AOUT
41670
41671 config BINFMT_AOUT
41672 tristate "Kernel support for a.out and ECOFF binaries"
41673 - depends on HAVE_AOUT
41674 + depends on HAVE_AOUT && BROKEN
41675 ---help---
41676 A.out (Assembler.OUTput) is a set of formats for libraries and
41677 executables used in the earliest versions of UNIX. Linux used
41678 diff --git a/fs/aio.c b/fs/aio.c
41679 index e29ec48..f083e5e 100644
41680 --- a/fs/aio.c
41681 +++ b/fs/aio.c
41682 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41683 size += sizeof(struct io_event) * nr_events;
41684 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41685
41686 - if (nr_pages < 0)
41687 + if (nr_pages <= 0)
41688 return -EINVAL;
41689
41690 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41691 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ctx,
41692 struct aio_timeout to;
41693 int retry = 0;
41694
41695 + pax_track_stack();
41696 +
41697 /* needed to zero any padding within an entry (there shouldn't be
41698 * any, but C is fun!
41699 */
41700 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41701 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41702 {
41703 ssize_t ret;
41704 + struct iovec iovstack;
41705
41706 #ifdef CONFIG_COMPAT
41707 if (compat)
41708 ret = compat_rw_copy_check_uvector(type,
41709 (struct compat_iovec __user *)kiocb->ki_buf,
41710 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41711 + kiocb->ki_nbytes, 1, &iovstack,
41712 &kiocb->ki_iovec);
41713 else
41714 #endif
41715 ret = rw_copy_check_uvector(type,
41716 (struct iovec __user *)kiocb->ki_buf,
41717 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41718 + kiocb->ki_nbytes, 1, &iovstack,
41719 &kiocb->ki_iovec);
41720 if (ret < 0)
41721 goto out;
41722
41723 + if (kiocb->ki_iovec == &iovstack) {
41724 + kiocb->ki_inline_vec = iovstack;
41725 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41726 + }
41727 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41728 kiocb->ki_cur_seg = 0;
41729 /* ki_nbytes/left now reflect bytes instead of segs */
41730 diff --git a/fs/attr.c b/fs/attr.c
41731 index 538e279..046cc6d 100644
41732 --- a/fs/attr.c
41733 +++ b/fs/attr.c
41734 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41735 unsigned long limit;
41736
41737 limit = rlimit(RLIMIT_FSIZE);
41738 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41739 if (limit != RLIM_INFINITY && offset > limit)
41740 goto out_sig;
41741 if (offset > inode->i_sb->s_maxbytes)
41742 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41743 index e1fbdee..cd5ea56 100644
41744 --- a/fs/autofs4/waitq.c
41745 +++ b/fs/autofs4/waitq.c
41746 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
41747 {
41748 unsigned long sigpipe, flags;
41749 mm_segment_t fs;
41750 - const char *data = (const char *)addr;
41751 + const char __user *data = (const char __force_user *)addr;
41752 ssize_t wr = 0;
41753
41754 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
41755 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41756 index 720d885..012e7f0 100644
41757 --- a/fs/befs/linuxvfs.c
41758 +++ b/fs/befs/linuxvfs.c
41759 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41760 {
41761 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41762 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41763 - char *link = nd_get_link(nd);
41764 + const char *link = nd_get_link(nd);
41765 if (!IS_ERR(link))
41766 kfree(link);
41767 }
41768 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41769 index a6395bd..a5b24c4 100644
41770 --- a/fs/binfmt_aout.c
41771 +++ b/fs/binfmt_aout.c
41772 @@ -16,6 +16,7 @@
41773 #include <linux/string.h>
41774 #include <linux/fs.h>
41775 #include <linux/file.h>
41776 +#include <linux/security.h>
41777 #include <linux/stat.h>
41778 #include <linux/fcntl.h>
41779 #include <linux/ptrace.h>
41780 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41781 #endif
41782 # define START_STACK(u) ((void __user *)u.start_stack)
41783
41784 + memset(&dump, 0, sizeof(dump));
41785 +
41786 fs = get_fs();
41787 set_fs(KERNEL_DS);
41788 has_dumped = 1;
41789 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41790
41791 /* If the size of the dump file exceeds the rlimit, then see what would happen
41792 if we wrote the stack, but not the data area. */
41793 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41794 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41795 dump.u_dsize = 0;
41796
41797 /* Make sure we have enough room to write the stack and data areas. */
41798 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41799 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41800 dump.u_ssize = 0;
41801
41802 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41803 rlim = rlimit(RLIMIT_DATA);
41804 if (rlim >= RLIM_INFINITY)
41805 rlim = ~0;
41806 +
41807 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41808 if (ex.a_data + ex.a_bss > rlim)
41809 return -ENOMEM;
41810
41811 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41812 install_exec_creds(bprm);
41813 current->flags &= ~PF_FORKNOEXEC;
41814
41815 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41816 + current->mm->pax_flags = 0UL;
41817 +#endif
41818 +
41819 +#ifdef CONFIG_PAX_PAGEEXEC
41820 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41821 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41822 +
41823 +#ifdef CONFIG_PAX_EMUTRAMP
41824 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41825 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41826 +#endif
41827 +
41828 +#ifdef CONFIG_PAX_MPROTECT
41829 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41830 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41831 +#endif
41832 +
41833 + }
41834 +#endif
41835 +
41836 if (N_MAGIC(ex) == OMAGIC) {
41837 unsigned long text_addr, map_size;
41838 loff_t pos;
41839 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41840
41841 down_write(&current->mm->mmap_sem);
41842 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41843 - PROT_READ | PROT_WRITE | PROT_EXEC,
41844 + PROT_READ | PROT_WRITE,
41845 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41846 fd_offset + ex.a_text);
41847 up_write(&current->mm->mmap_sem);
41848 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41849 index 21ac5ee..171b1d0 100644
41850 --- a/fs/binfmt_elf.c
41851 +++ b/fs/binfmt_elf.c
41852 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41853 #define elf_core_dump NULL
41854 #endif
41855
41856 +#ifdef CONFIG_PAX_MPROTECT
41857 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41858 +#endif
41859 +
41860 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41861 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41862 #else
41863 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format = {
41864 .load_binary = load_elf_binary,
41865 .load_shlib = load_elf_library,
41866 .core_dump = elf_core_dump,
41867 +
41868 +#ifdef CONFIG_PAX_MPROTECT
41869 + .handle_mprotect= elf_handle_mprotect,
41870 +#endif
41871 +
41872 .min_coredump = ELF_EXEC_PAGESIZE,
41873 };
41874
41875 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
41876
41877 static int set_brk(unsigned long start, unsigned long end)
41878 {
41879 + unsigned long e = end;
41880 +
41881 start = ELF_PAGEALIGN(start);
41882 end = ELF_PAGEALIGN(end);
41883 if (end > start) {
41884 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41885 if (BAD_ADDR(addr))
41886 return addr;
41887 }
41888 - current->mm->start_brk = current->mm->brk = end;
41889 + current->mm->start_brk = current->mm->brk = e;
41890 return 0;
41891 }
41892
41893 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41894 elf_addr_t __user *u_rand_bytes;
41895 const char *k_platform = ELF_PLATFORM;
41896 const char *k_base_platform = ELF_BASE_PLATFORM;
41897 - unsigned char k_rand_bytes[16];
41898 + u32 k_rand_bytes[4];
41899 int items;
41900 elf_addr_t *elf_info;
41901 int ei_index = 0;
41902 const struct cred *cred = current_cred();
41903 struct vm_area_struct *vma;
41904 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41905 +
41906 + pax_track_stack();
41907
41908 /*
41909 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41910 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41911 * Generate 16 random bytes for userspace PRNG seeding.
41912 */
41913 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41914 - u_rand_bytes = (elf_addr_t __user *)
41915 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41916 + srandom32(k_rand_bytes[0] ^ random32());
41917 + srandom32(k_rand_bytes[1] ^ random32());
41918 + srandom32(k_rand_bytes[2] ^ random32());
41919 + srandom32(k_rand_bytes[3] ^ random32());
41920 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41921 + u_rand_bytes = (elf_addr_t __user *) p;
41922 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41923 return -EFAULT;
41924
41925 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41926 return -EFAULT;
41927 current->mm->env_end = p;
41928
41929 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41930 +
41931 /* Put the elf_info on the stack in the right place. */
41932 sp = (elf_addr_t __user *)envp + 1;
41933 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41934 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41935 return -EFAULT;
41936 return 0;
41937 }
41938 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41939 {
41940 struct elf_phdr *elf_phdata;
41941 struct elf_phdr *eppnt;
41942 - unsigned long load_addr = 0;
41943 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41944 int load_addr_set = 0;
41945 unsigned long last_bss = 0, elf_bss = 0;
41946 - unsigned long error = ~0UL;
41947 + unsigned long error = -EINVAL;
41948 unsigned long total_size;
41949 int retval, i, size;
41950
41951 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41952 goto out_close;
41953 }
41954
41955 +#ifdef CONFIG_PAX_SEGMEXEC
41956 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41957 + pax_task_size = SEGMEXEC_TASK_SIZE;
41958 +#endif
41959 +
41960 eppnt = elf_phdata;
41961 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41962 if (eppnt->p_type == PT_LOAD) {
41963 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41964 k = load_addr + eppnt->p_vaddr;
41965 if (BAD_ADDR(k) ||
41966 eppnt->p_filesz > eppnt->p_memsz ||
41967 - eppnt->p_memsz > TASK_SIZE ||
41968 - TASK_SIZE - eppnt->p_memsz < k) {
41969 + eppnt->p_memsz > pax_task_size ||
41970 + pax_task_size - eppnt->p_memsz < k) {
41971 error = -ENOMEM;
41972 goto out_close;
41973 }
41974 @@ -528,6 +553,193 @@ out:
41975 return error;
41976 }
41977
41978 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
41979 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
41980 +{
41981 + unsigned long pax_flags = 0UL;
41982 +
41983 +#ifdef CONFIG_PAX_PAGEEXEC
41984 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41985 + pax_flags |= MF_PAX_PAGEEXEC;
41986 +#endif
41987 +
41988 +#ifdef CONFIG_PAX_SEGMEXEC
41989 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41990 + pax_flags |= MF_PAX_SEGMEXEC;
41991 +#endif
41992 +
41993 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41994 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41995 + if ((__supported_pte_mask & _PAGE_NX))
41996 + pax_flags &= ~MF_PAX_SEGMEXEC;
41997 + else
41998 + pax_flags &= ~MF_PAX_PAGEEXEC;
41999 + }
42000 +#endif
42001 +
42002 +#ifdef CONFIG_PAX_EMUTRAMP
42003 + if (elf_phdata->p_flags & PF_EMUTRAMP)
42004 + pax_flags |= MF_PAX_EMUTRAMP;
42005 +#endif
42006 +
42007 +#ifdef CONFIG_PAX_MPROTECT
42008 + if (elf_phdata->p_flags & PF_MPROTECT)
42009 + pax_flags |= MF_PAX_MPROTECT;
42010 +#endif
42011 +
42012 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42013 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42014 + pax_flags |= MF_PAX_RANDMMAP;
42015 +#endif
42016 +
42017 + return pax_flags;
42018 +}
42019 +#endif
42020 +
42021 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42022 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
42023 +{
42024 + unsigned long pax_flags = 0UL;
42025 +
42026 +#ifdef CONFIG_PAX_PAGEEXEC
42027 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42028 + pax_flags |= MF_PAX_PAGEEXEC;
42029 +#endif
42030 +
42031 +#ifdef CONFIG_PAX_SEGMEXEC
42032 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42033 + pax_flags |= MF_PAX_SEGMEXEC;
42034 +#endif
42035 +
42036 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42037 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42038 + if ((__supported_pte_mask & _PAGE_NX))
42039 + pax_flags &= ~MF_PAX_SEGMEXEC;
42040 + else
42041 + pax_flags &= ~MF_PAX_PAGEEXEC;
42042 + }
42043 +#endif
42044 +
42045 +#ifdef CONFIG_PAX_EMUTRAMP
42046 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42047 + pax_flags |= MF_PAX_EMUTRAMP;
42048 +#endif
42049 +
42050 +#ifdef CONFIG_PAX_MPROTECT
42051 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42052 + pax_flags |= MF_PAX_MPROTECT;
42053 +#endif
42054 +
42055 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42056 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42057 + pax_flags |= MF_PAX_RANDMMAP;
42058 +#endif
42059 +
42060 + return pax_flags;
42061 +}
42062 +#endif
42063 +
42064 +#ifdef CONFIG_PAX_EI_PAX
42065 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42066 +{
42067 + unsigned long pax_flags = 0UL;
42068 +
42069 +#ifdef CONFIG_PAX_PAGEEXEC
42070 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42071 + pax_flags |= MF_PAX_PAGEEXEC;
42072 +#endif
42073 +
42074 +#ifdef CONFIG_PAX_SEGMEXEC
42075 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42076 + pax_flags |= MF_PAX_SEGMEXEC;
42077 +#endif
42078 +
42079 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42080 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42081 + if ((__supported_pte_mask & _PAGE_NX))
42082 + pax_flags &= ~MF_PAX_SEGMEXEC;
42083 + else
42084 + pax_flags &= ~MF_PAX_PAGEEXEC;
42085 + }
42086 +#endif
42087 +
42088 +#ifdef CONFIG_PAX_EMUTRAMP
42089 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42090 + pax_flags |= MF_PAX_EMUTRAMP;
42091 +#endif
42092 +
42093 +#ifdef CONFIG_PAX_MPROTECT
42094 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42095 + pax_flags |= MF_PAX_MPROTECT;
42096 +#endif
42097 +
42098 +#ifdef CONFIG_PAX_ASLR
42099 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42100 + pax_flags |= MF_PAX_RANDMMAP;
42101 +#endif
42102 +
42103 + return pax_flags;
42104 +}
42105 +#endif
42106 +
42107 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
42108 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42109 +{
42110 + unsigned long pax_flags = 0UL;
42111 +
42112 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42113 + unsigned long i;
42114 + int found_flags = 0;
42115 +#endif
42116 +
42117 +#ifdef CONFIG_PAX_EI_PAX
42118 + pax_flags = pax_parse_ei_pax(elf_ex);
42119 +#endif
42120 +
42121 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42122 + for (i = 0UL; i < elf_ex->e_phnum; i++)
42123 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42124 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42125 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42126 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42127 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42128 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42129 + return -EINVAL;
42130 +
42131 +#ifdef CONFIG_PAX_SOFTMODE
42132 + if (pax_softmode)
42133 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
42134 + else
42135 +#endif
42136 +
42137 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
42138 + found_flags = 1;
42139 + break;
42140 + }
42141 +#endif
42142 +
42143 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
42144 + if (found_flags == 0) {
42145 + struct elf_phdr phdr;
42146 + memset(&phdr, 0, sizeof(phdr));
42147 + phdr.p_flags = PF_NOEMUTRAMP;
42148 +#ifdef CONFIG_PAX_SOFTMODE
42149 + if (pax_softmode)
42150 + pax_flags = pax_parse_softmode(&phdr);
42151 + else
42152 +#endif
42153 + pax_flags = pax_parse_hardmode(&phdr);
42154 + }
42155 +#endif
42156 +
42157 + if (0 > pax_check_flags(&pax_flags))
42158 + return -EINVAL;
42159 +
42160 + current->mm->pax_flags = pax_flags;
42161 + return 0;
42162 +}
42163 +#endif
42164 +
42165 /*
42166 * These are the functions used to load ELF style executables and shared
42167 * libraries. There is no binary dependent code anywhere else.
42168 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42169 {
42170 unsigned int random_variable = 0;
42171
42172 +#ifdef CONFIG_PAX_RANDUSTACK
42173 + if (randomize_va_space)
42174 + return stack_top - current->mm->delta_stack;
42175 +#endif
42176 +
42177 if ((current->flags & PF_RANDOMIZE) &&
42178 !(current->personality & ADDR_NO_RANDOMIZE)) {
42179 random_variable = get_random_int() & STACK_RND_MASK;
42180 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42181 unsigned long load_addr = 0, load_bias = 0;
42182 int load_addr_set = 0;
42183 char * elf_interpreter = NULL;
42184 - unsigned long error;
42185 + unsigned long error = 0;
42186 struct elf_phdr *elf_ppnt, *elf_phdata;
42187 unsigned long elf_bss, elf_brk;
42188 int retval, i;
42189 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42190 unsigned long start_code, end_code, start_data, end_data;
42191 unsigned long reloc_func_desc __maybe_unused = 0;
42192 int executable_stack = EXSTACK_DEFAULT;
42193 - unsigned long def_flags = 0;
42194 struct {
42195 struct elfhdr elf_ex;
42196 struct elfhdr interp_elf_ex;
42197 } *loc;
42198 + unsigned long pax_task_size = TASK_SIZE;
42199
42200 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42201 if (!loc) {
42202 @@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42203
42204 /* OK, This is the point of no return */
42205 current->flags &= ~PF_FORKNOEXEC;
42206 - current->mm->def_flags = def_flags;
42207 +
42208 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42209 + current->mm->pax_flags = 0UL;
42210 +#endif
42211 +
42212 +#ifdef CONFIG_PAX_DLRESOLVE
42213 + current->mm->call_dl_resolve = 0UL;
42214 +#endif
42215 +
42216 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42217 + current->mm->call_syscall = 0UL;
42218 +#endif
42219 +
42220 +#ifdef CONFIG_PAX_ASLR
42221 + current->mm->delta_mmap = 0UL;
42222 + current->mm->delta_stack = 0UL;
42223 +#endif
42224 +
42225 + current->mm->def_flags = 0;
42226 +
42227 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
42228 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
42229 + send_sig(SIGKILL, current, 0);
42230 + goto out_free_dentry;
42231 + }
42232 +#endif
42233 +
42234 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42235 + pax_set_initial_flags(bprm);
42236 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42237 + if (pax_set_initial_flags_func)
42238 + (pax_set_initial_flags_func)(bprm);
42239 +#endif
42240 +
42241 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42242 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42243 + current->mm->context.user_cs_limit = PAGE_SIZE;
42244 + current->mm->def_flags |= VM_PAGEEXEC;
42245 + }
42246 +#endif
42247 +
42248 +#ifdef CONFIG_PAX_SEGMEXEC
42249 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42250 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42251 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42252 + pax_task_size = SEGMEXEC_TASK_SIZE;
42253 + current->mm->def_flags |= VM_NOHUGEPAGE;
42254 + }
42255 +#endif
42256 +
42257 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42258 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42259 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42260 + put_cpu();
42261 + }
42262 +#endif
42263
42264 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42265 may depend on the personality. */
42266 SET_PERSONALITY(loc->elf_ex);
42267 +
42268 +#ifdef CONFIG_PAX_ASLR
42269 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42270 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42271 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42272 + }
42273 +#endif
42274 +
42275 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42276 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42277 + executable_stack = EXSTACK_DISABLE_X;
42278 + current->personality &= ~READ_IMPLIES_EXEC;
42279 + } else
42280 +#endif
42281 +
42282 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42283 current->personality |= READ_IMPLIES_EXEC;
42284
42285 @@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42286 #else
42287 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42288 #endif
42289 +
42290 +#ifdef CONFIG_PAX_RANDMMAP
42291 + /* PaX: randomize base address at the default exe base if requested */
42292 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42293 +#ifdef CONFIG_SPARC64
42294 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42295 +#else
42296 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42297 +#endif
42298 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42299 + elf_flags |= MAP_FIXED;
42300 + }
42301 +#endif
42302 +
42303 }
42304
42305 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42306 @@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42307 * allowed task size. Note that p_filesz must always be
42308 * <= p_memsz so it is only necessary to check p_memsz.
42309 */
42310 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42311 - elf_ppnt->p_memsz > TASK_SIZE ||
42312 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42313 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42314 + elf_ppnt->p_memsz > pax_task_size ||
42315 + pax_task_size - elf_ppnt->p_memsz < k) {
42316 /* set_brk can never work. Avoid overflows. */
42317 send_sig(SIGKILL, current, 0);
42318 retval = -EINVAL;
42319 @@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42320 start_data += load_bias;
42321 end_data += load_bias;
42322
42323 +#ifdef CONFIG_PAX_RANDMMAP
42324 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
42325 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
42326 +#endif
42327 +
42328 /* Calling set_brk effectively mmaps the pages that we need
42329 * for the bss and break sections. We must do this before
42330 * mapping in the interpreter, to make sure it doesn't wind
42331 @@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42332 goto out_free_dentry;
42333 }
42334 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42335 - send_sig(SIGSEGV, current, 0);
42336 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42337 - goto out_free_dentry;
42338 + /*
42339 + * This bss-zeroing can fail if the ELF
42340 + * file specifies odd protections. So
42341 + * we don't check the return value
42342 + */
42343 }
42344
42345 if (elf_interpreter) {
42346 @@ -1098,7 +1406,7 @@ out:
42347 * Decide what to dump of a segment, part, all or none.
42348 */
42349 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42350 - unsigned long mm_flags)
42351 + unsigned long mm_flags, long signr)
42352 {
42353 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42354
42355 @@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42356 if (vma->vm_file == NULL)
42357 return 0;
42358
42359 - if (FILTER(MAPPED_PRIVATE))
42360 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42361 goto whole;
42362
42363 /*
42364 @@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42365 {
42366 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42367 int i = 0;
42368 - do
42369 + do {
42370 i += 2;
42371 - while (auxv[i - 2] != AT_NULL);
42372 + } while (auxv[i - 2] != AT_NULL);
42373 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42374 }
42375
42376 @@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42377 }
42378
42379 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42380 - unsigned long mm_flags)
42381 + struct coredump_params *cprm)
42382 {
42383 struct vm_area_struct *vma;
42384 size_t size = 0;
42385
42386 for (vma = first_vma(current, gate_vma); vma != NULL;
42387 vma = next_vma(vma, gate_vma))
42388 - size += vma_dump_size(vma, mm_flags);
42389 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42390 return size;
42391 }
42392
42393 @@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42394
42395 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42396
42397 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42398 + offset += elf_core_vma_data_size(gate_vma, cprm);
42399 offset += elf_core_extra_data_size();
42400 e_shoff = offset;
42401
42402 @@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42403 offset = dataoff;
42404
42405 size += sizeof(*elf);
42406 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42407 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42408 goto end_coredump;
42409
42410 size += sizeof(*phdr4note);
42411 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42412 if (size > cprm->limit
42413 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42414 goto end_coredump;
42415 @@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42416 phdr.p_offset = offset;
42417 phdr.p_vaddr = vma->vm_start;
42418 phdr.p_paddr = 0;
42419 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42420 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42421 phdr.p_memsz = vma->vm_end - vma->vm_start;
42422 offset += phdr.p_filesz;
42423 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42424 @@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42425 phdr.p_align = ELF_EXEC_PAGESIZE;
42426
42427 size += sizeof(phdr);
42428 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42429 if (size > cprm->limit
42430 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42431 goto end_coredump;
42432 @@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42433 unsigned long addr;
42434 unsigned long end;
42435
42436 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42437 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42438
42439 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42440 struct page *page;
42441 @@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42442 page = get_dump_page(addr);
42443 if (page) {
42444 void *kaddr = kmap(page);
42445 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42446 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42447 !dump_write(cprm->file, kaddr,
42448 PAGE_SIZE);
42449 @@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42450
42451 if (e_phnum == PN_XNUM) {
42452 size += sizeof(*shdr4extnum);
42453 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42454 if (size > cprm->limit
42455 || !dump_write(cprm->file, shdr4extnum,
42456 sizeof(*shdr4extnum)))
42457 @@ -2075,6 +2388,97 @@ out:
42458
42459 #endif /* CONFIG_ELF_CORE */
42460
42461 +#ifdef CONFIG_PAX_MPROTECT
42462 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42463 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42464 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42465 + *
42466 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42467 + * basis because we want to allow the common case and not the special ones.
42468 + */
42469 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42470 +{
42471 + struct elfhdr elf_h;
42472 + struct elf_phdr elf_p;
42473 + unsigned long i;
42474 + unsigned long oldflags;
42475 + bool is_textrel_rw, is_textrel_rx, is_relro;
42476 +
42477 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42478 + return;
42479 +
42480 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42481 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42482 +
42483 +#ifdef CONFIG_PAX_ELFRELOCS
42484 + /* possible TEXTREL */
42485 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42486 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42487 +#else
42488 + is_textrel_rw = false;
42489 + is_textrel_rx = false;
42490 +#endif
42491 +
42492 + /* possible RELRO */
42493 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42494 +
42495 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42496 + return;
42497 +
42498 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42499 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42500 +
42501 +#ifdef CONFIG_PAX_ETEXECRELOCS
42502 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42503 +#else
42504 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42505 +#endif
42506 +
42507 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42508 + !elf_check_arch(&elf_h) ||
42509 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42510 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42511 + return;
42512 +
42513 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42514 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42515 + return;
42516 + switch (elf_p.p_type) {
42517 + case PT_DYNAMIC:
42518 + if (!is_textrel_rw && !is_textrel_rx)
42519 + continue;
42520 + i = 0UL;
42521 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42522 + elf_dyn dyn;
42523 +
42524 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42525 + return;
42526 + if (dyn.d_tag == DT_NULL)
42527 + return;
42528 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42529 + gr_log_textrel(vma);
42530 + if (is_textrel_rw)
42531 + vma->vm_flags |= VM_MAYWRITE;
42532 + else
42533 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42534 + vma->vm_flags &= ~VM_MAYWRITE;
42535 + return;
42536 + }
42537 + i++;
42538 + }
42539 + return;
42540 +
42541 + case PT_GNU_RELRO:
42542 + if (!is_relro)
42543 + continue;
42544 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42545 + vma->vm_flags &= ~VM_MAYWRITE;
42546 + return;
42547 + }
42548 + }
42549 +}
42550 +#endif
42551 +
42552 static int __init init_elf_binfmt(void)
42553 {
42554 return register_binfmt(&elf_format);
42555 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42556 index 1bffbe0..c8c283e 100644
42557 --- a/fs/binfmt_flat.c
42558 +++ b/fs/binfmt_flat.c
42559 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42560 realdatastart = (unsigned long) -ENOMEM;
42561 printk("Unable to allocate RAM for process data, errno %d\n",
42562 (int)-realdatastart);
42563 + down_write(&current->mm->mmap_sem);
42564 do_munmap(current->mm, textpos, text_len);
42565 + up_write(&current->mm->mmap_sem);
42566 ret = realdatastart;
42567 goto err;
42568 }
42569 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42570 }
42571 if (IS_ERR_VALUE(result)) {
42572 printk("Unable to read data+bss, errno %d\n", (int)-result);
42573 + down_write(&current->mm->mmap_sem);
42574 do_munmap(current->mm, textpos, text_len);
42575 do_munmap(current->mm, realdatastart, len);
42576 + up_write(&current->mm->mmap_sem);
42577 ret = result;
42578 goto err;
42579 }
42580 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42581 }
42582 if (IS_ERR_VALUE(result)) {
42583 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42584 + down_write(&current->mm->mmap_sem);
42585 do_munmap(current->mm, textpos, text_len + data_len + extra +
42586 MAX_SHARED_LIBS * sizeof(unsigned long));
42587 + up_write(&current->mm->mmap_sem);
42588 ret = result;
42589 goto err;
42590 }
42591 diff --git a/fs/bio.c b/fs/bio.c
42592 index 9bfade8..782f3b9 100644
42593 --- a/fs/bio.c
42594 +++ b/fs/bio.c
42595 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42596 const int read = bio_data_dir(bio) == READ;
42597 struct bio_map_data *bmd = bio->bi_private;
42598 int i;
42599 - char *p = bmd->sgvecs[0].iov_base;
42600 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42601
42602 __bio_for_each_segment(bvec, bio, i, 0) {
42603 char *addr = page_address(bvec->bv_page);
42604 diff --git a/fs/block_dev.c b/fs/block_dev.c
42605 index 1c44b8d..e2507b4 100644
42606 --- a/fs/block_dev.c
42607 +++ b/fs/block_dev.c
42608 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42609 else if (bdev->bd_contains == bdev)
42610 return true; /* is a whole device which isn't held */
42611
42612 - else if (whole->bd_holder == bd_may_claim)
42613 + else if (whole->bd_holder == (void *)bd_may_claim)
42614 return true; /* is a partition of a device that is being partitioned */
42615 else if (whole->bd_holder != NULL)
42616 return false; /* is a partition of a held device */
42617 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42618 index 011cab3..9ace713 100644
42619 --- a/fs/btrfs/ctree.c
42620 +++ b/fs/btrfs/ctree.c
42621 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42622 free_extent_buffer(buf);
42623 add_root_to_dirty_list(root);
42624 } else {
42625 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42626 - parent_start = parent->start;
42627 - else
42628 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42629 + if (parent)
42630 + parent_start = parent->start;
42631 + else
42632 + parent_start = 0;
42633 + } else
42634 parent_start = 0;
42635
42636 WARN_ON(trans->transid != btrfs_header_generation(parent));
42637 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42638 index b2d004a..6bb543d 100644
42639 --- a/fs/btrfs/inode.c
42640 +++ b/fs/btrfs/inode.c
42641 @@ -6922,7 +6922,7 @@ fail:
42642 return -ENOMEM;
42643 }
42644
42645 -static int btrfs_getattr(struct vfsmount *mnt,
42646 +int btrfs_getattr(struct vfsmount *mnt,
42647 struct dentry *dentry, struct kstat *stat)
42648 {
42649 struct inode *inode = dentry->d_inode;
42650 @@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42651 return 0;
42652 }
42653
42654 +EXPORT_SYMBOL(btrfs_getattr);
42655 +
42656 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42657 +{
42658 + return BTRFS_I(inode)->root->anon_dev;
42659 +}
42660 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42661 +
42662 /*
42663 * If a file is moved, it will inherit the cow and compression flags of the new
42664 * directory.
42665 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42666 index dae5dfe..6aa01b1 100644
42667 --- a/fs/btrfs/ioctl.c
42668 +++ b/fs/btrfs/ioctl.c
42669 @@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42670 for (i = 0; i < num_types; i++) {
42671 struct btrfs_space_info *tmp;
42672
42673 + /* Don't copy in more than we allocated */
42674 if (!slot_count)
42675 break;
42676
42677 + slot_count--;
42678 +
42679 info = NULL;
42680 rcu_read_lock();
42681 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42682 @@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42683 memcpy(dest, &space, sizeof(space));
42684 dest++;
42685 space_args.total_spaces++;
42686 - slot_count--;
42687 }
42688 - if (!slot_count)
42689 - break;
42690 }
42691 up_read(&info->groups_sem);
42692 }
42693
42694 - user_dest = (struct btrfs_ioctl_space_info *)
42695 + user_dest = (struct btrfs_ioctl_space_info __user *)
42696 (arg + sizeof(struct btrfs_ioctl_space_args));
42697
42698 if (copy_to_user(user_dest, dest_orig, alloc_size))
42699 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42700 index 59bb176..be9977d 100644
42701 --- a/fs/btrfs/relocation.c
42702 +++ b/fs/btrfs/relocation.c
42703 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42704 }
42705 spin_unlock(&rc->reloc_root_tree.lock);
42706
42707 - BUG_ON((struct btrfs_root *)node->data != root);
42708 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42709
42710 if (!del) {
42711 spin_lock(&rc->reloc_root_tree.lock);
42712 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42713 index 622f469..e8d2d55 100644
42714 --- a/fs/cachefiles/bind.c
42715 +++ b/fs/cachefiles/bind.c
42716 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42717 args);
42718
42719 /* start by checking things over */
42720 - ASSERT(cache->fstop_percent >= 0 &&
42721 - cache->fstop_percent < cache->fcull_percent &&
42722 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42723 cache->fcull_percent < cache->frun_percent &&
42724 cache->frun_percent < 100);
42725
42726 - ASSERT(cache->bstop_percent >= 0 &&
42727 - cache->bstop_percent < cache->bcull_percent &&
42728 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42729 cache->bcull_percent < cache->brun_percent &&
42730 cache->brun_percent < 100);
42731
42732 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42733 index 0a1467b..6a53245 100644
42734 --- a/fs/cachefiles/daemon.c
42735 +++ b/fs/cachefiles/daemon.c
42736 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42737 if (n > buflen)
42738 return -EMSGSIZE;
42739
42740 - if (copy_to_user(_buffer, buffer, n) != 0)
42741 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42742 return -EFAULT;
42743
42744 return n;
42745 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42746 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42747 return -EIO;
42748
42749 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42750 + if (datalen > PAGE_SIZE - 1)
42751 return -EOPNOTSUPP;
42752
42753 /* drag the command string into the kernel so we can parse it */
42754 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42755 if (args[0] != '%' || args[1] != '\0')
42756 return -EINVAL;
42757
42758 - if (fstop < 0 || fstop >= cache->fcull_percent)
42759 + if (fstop >= cache->fcull_percent)
42760 return cachefiles_daemon_range_error(cache, args);
42761
42762 cache->fstop_percent = fstop;
42763 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42764 if (args[0] != '%' || args[1] != '\0')
42765 return -EINVAL;
42766
42767 - if (bstop < 0 || bstop >= cache->bcull_percent)
42768 + if (bstop >= cache->bcull_percent)
42769 return cachefiles_daemon_range_error(cache, args);
42770
42771 cache->bstop_percent = bstop;
42772 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42773 index bd6bc1b..b627b53 100644
42774 --- a/fs/cachefiles/internal.h
42775 +++ b/fs/cachefiles/internal.h
42776 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42777 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42778 struct rb_root active_nodes; /* active nodes (can't be culled) */
42779 rwlock_t active_lock; /* lock for active_nodes */
42780 - atomic_t gravecounter; /* graveyard uniquifier */
42781 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42782 unsigned frun_percent; /* when to stop culling (% files) */
42783 unsigned fcull_percent; /* when to start culling (% files) */
42784 unsigned fstop_percent; /* when to stop allocating (% files) */
42785 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42786 * proc.c
42787 */
42788 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42789 -extern atomic_t cachefiles_lookup_histogram[HZ];
42790 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42791 -extern atomic_t cachefiles_create_histogram[HZ];
42792 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42793 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42794 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42795
42796 extern int __init cachefiles_proc_init(void);
42797 extern void cachefiles_proc_cleanup(void);
42798 static inline
42799 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42800 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42801 {
42802 unsigned long jif = jiffies - start_jif;
42803 if (jif >= HZ)
42804 jif = HZ - 1;
42805 - atomic_inc(&histogram[jif]);
42806 + atomic_inc_unchecked(&histogram[jif]);
42807 }
42808
42809 #else
42810 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42811 index a0358c2..d6137f2 100644
42812 --- a/fs/cachefiles/namei.c
42813 +++ b/fs/cachefiles/namei.c
42814 @@ -318,7 +318,7 @@ try_again:
42815 /* first step is to make up a grave dentry in the graveyard */
42816 sprintf(nbuffer, "%08x%08x",
42817 (uint32_t) get_seconds(),
42818 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42819 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42820
42821 /* do the multiway lock magic */
42822 trap = lock_rename(cache->graveyard, dir);
42823 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42824 index eccd339..4c1d995 100644
42825 --- a/fs/cachefiles/proc.c
42826 +++ b/fs/cachefiles/proc.c
42827 @@ -14,9 +14,9 @@
42828 #include <linux/seq_file.h>
42829 #include "internal.h"
42830
42831 -atomic_t cachefiles_lookup_histogram[HZ];
42832 -atomic_t cachefiles_mkdir_histogram[HZ];
42833 -atomic_t cachefiles_create_histogram[HZ];
42834 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42835 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42836 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42837
42838 /*
42839 * display the latency histogram
42840 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42841 return 0;
42842 default:
42843 index = (unsigned long) v - 3;
42844 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42845 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42846 - z = atomic_read(&cachefiles_create_histogram[index]);
42847 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42848 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42849 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42850 if (x == 0 && y == 0 && z == 0)
42851 return 0;
42852
42853 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42854 index 0e3c092..818480e 100644
42855 --- a/fs/cachefiles/rdwr.c
42856 +++ b/fs/cachefiles/rdwr.c
42857 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42858 old_fs = get_fs();
42859 set_fs(KERNEL_DS);
42860 ret = file->f_op->write(
42861 - file, (const void __user *) data, len, &pos);
42862 + file, (const void __force_user *) data, len, &pos);
42863 set_fs(old_fs);
42864 kunmap(page);
42865 if (ret != len)
42866 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42867 index 382abc9..bd89646 100644
42868 --- a/fs/ceph/dir.c
42869 +++ b/fs/ceph/dir.c
42870 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42871 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42872 struct ceph_mds_client *mdsc = fsc->mdsc;
42873 unsigned frag = fpos_frag(filp->f_pos);
42874 - int off = fpos_off(filp->f_pos);
42875 + unsigned int off = fpos_off(filp->f_pos);
42876 int err;
42877 u32 ftype;
42878 struct ceph_mds_reply_info_parsed *rinfo;
42879 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42880 index 6d40656..bc1f825 100644
42881 --- a/fs/cifs/cifs_debug.c
42882 +++ b/fs/cifs/cifs_debug.c
42883 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42884
42885 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42886 #ifdef CONFIG_CIFS_STATS2
42887 - atomic_set(&totBufAllocCount, 0);
42888 - atomic_set(&totSmBufAllocCount, 0);
42889 + atomic_set_unchecked(&totBufAllocCount, 0);
42890 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42891 #endif /* CONFIG_CIFS_STATS2 */
42892 spin_lock(&cifs_tcp_ses_lock);
42893 list_for_each(tmp1, &cifs_tcp_ses_list) {
42894 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42895 tcon = list_entry(tmp3,
42896 struct cifs_tcon,
42897 tcon_list);
42898 - atomic_set(&tcon->num_smbs_sent, 0);
42899 - atomic_set(&tcon->num_writes, 0);
42900 - atomic_set(&tcon->num_reads, 0);
42901 - atomic_set(&tcon->num_oplock_brks, 0);
42902 - atomic_set(&tcon->num_opens, 0);
42903 - atomic_set(&tcon->num_posixopens, 0);
42904 - atomic_set(&tcon->num_posixmkdirs, 0);
42905 - atomic_set(&tcon->num_closes, 0);
42906 - atomic_set(&tcon->num_deletes, 0);
42907 - atomic_set(&tcon->num_mkdirs, 0);
42908 - atomic_set(&tcon->num_rmdirs, 0);
42909 - atomic_set(&tcon->num_renames, 0);
42910 - atomic_set(&tcon->num_t2renames, 0);
42911 - atomic_set(&tcon->num_ffirst, 0);
42912 - atomic_set(&tcon->num_fnext, 0);
42913 - atomic_set(&tcon->num_fclose, 0);
42914 - atomic_set(&tcon->num_hardlinks, 0);
42915 - atomic_set(&tcon->num_symlinks, 0);
42916 - atomic_set(&tcon->num_locks, 0);
42917 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42918 + atomic_set_unchecked(&tcon->num_writes, 0);
42919 + atomic_set_unchecked(&tcon->num_reads, 0);
42920 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42921 + atomic_set_unchecked(&tcon->num_opens, 0);
42922 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42923 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42924 + atomic_set_unchecked(&tcon->num_closes, 0);
42925 + atomic_set_unchecked(&tcon->num_deletes, 0);
42926 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42927 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42928 + atomic_set_unchecked(&tcon->num_renames, 0);
42929 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42930 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42931 + atomic_set_unchecked(&tcon->num_fnext, 0);
42932 + atomic_set_unchecked(&tcon->num_fclose, 0);
42933 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42934 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42935 + atomic_set_unchecked(&tcon->num_locks, 0);
42936 }
42937 }
42938 }
42939 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42940 smBufAllocCount.counter, cifs_min_small);
42941 #ifdef CONFIG_CIFS_STATS2
42942 seq_printf(m, "Total Large %d Small %d Allocations\n",
42943 - atomic_read(&totBufAllocCount),
42944 - atomic_read(&totSmBufAllocCount));
42945 + atomic_read_unchecked(&totBufAllocCount),
42946 + atomic_read_unchecked(&totSmBufAllocCount));
42947 #endif /* CONFIG_CIFS_STATS2 */
42948
42949 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42950 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42951 if (tcon->need_reconnect)
42952 seq_puts(m, "\tDISCONNECTED ");
42953 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42954 - atomic_read(&tcon->num_smbs_sent),
42955 - atomic_read(&tcon->num_oplock_brks));
42956 + atomic_read_unchecked(&tcon->num_smbs_sent),
42957 + atomic_read_unchecked(&tcon->num_oplock_brks));
42958 seq_printf(m, "\nReads: %d Bytes: %lld",
42959 - atomic_read(&tcon->num_reads),
42960 + atomic_read_unchecked(&tcon->num_reads),
42961 (long long)(tcon->bytes_read));
42962 seq_printf(m, "\nWrites: %d Bytes: %lld",
42963 - atomic_read(&tcon->num_writes),
42964 + atomic_read_unchecked(&tcon->num_writes),
42965 (long long)(tcon->bytes_written));
42966 seq_printf(m, "\nFlushes: %d",
42967 - atomic_read(&tcon->num_flushes));
42968 + atomic_read_unchecked(&tcon->num_flushes));
42969 seq_printf(m, "\nLocks: %d HardLinks: %d "
42970 "Symlinks: %d",
42971 - atomic_read(&tcon->num_locks),
42972 - atomic_read(&tcon->num_hardlinks),
42973 - atomic_read(&tcon->num_symlinks));
42974 + atomic_read_unchecked(&tcon->num_locks),
42975 + atomic_read_unchecked(&tcon->num_hardlinks),
42976 + atomic_read_unchecked(&tcon->num_symlinks));
42977 seq_printf(m, "\nOpens: %d Closes: %d "
42978 "Deletes: %d",
42979 - atomic_read(&tcon->num_opens),
42980 - atomic_read(&tcon->num_closes),
42981 - atomic_read(&tcon->num_deletes));
42982 + atomic_read_unchecked(&tcon->num_opens),
42983 + atomic_read_unchecked(&tcon->num_closes),
42984 + atomic_read_unchecked(&tcon->num_deletes));
42985 seq_printf(m, "\nPosix Opens: %d "
42986 "Posix Mkdirs: %d",
42987 - atomic_read(&tcon->num_posixopens),
42988 - atomic_read(&tcon->num_posixmkdirs));
42989 + atomic_read_unchecked(&tcon->num_posixopens),
42990 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42991 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42992 - atomic_read(&tcon->num_mkdirs),
42993 - atomic_read(&tcon->num_rmdirs));
42994 + atomic_read_unchecked(&tcon->num_mkdirs),
42995 + atomic_read_unchecked(&tcon->num_rmdirs));
42996 seq_printf(m, "\nRenames: %d T2 Renames %d",
42997 - atomic_read(&tcon->num_renames),
42998 - atomic_read(&tcon->num_t2renames));
42999 + atomic_read_unchecked(&tcon->num_renames),
43000 + atomic_read_unchecked(&tcon->num_t2renames));
43001 seq_printf(m, "\nFindFirst: %d FNext %d "
43002 "FClose %d",
43003 - atomic_read(&tcon->num_ffirst),
43004 - atomic_read(&tcon->num_fnext),
43005 - atomic_read(&tcon->num_fclose));
43006 + atomic_read_unchecked(&tcon->num_ffirst),
43007 + atomic_read_unchecked(&tcon->num_fnext),
43008 + atomic_read_unchecked(&tcon->num_fclose));
43009 }
43010 }
43011 }
43012 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43013 index 54b8f1e..f6a4c00 100644
43014 --- a/fs/cifs/cifsfs.c
43015 +++ b/fs/cifs/cifsfs.c
43016 @@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
43017 cifs_req_cachep = kmem_cache_create("cifs_request",
43018 CIFSMaxBufSize +
43019 MAX_CIFS_HDR_SIZE, 0,
43020 - SLAB_HWCACHE_ALIGN, NULL);
43021 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43022 if (cifs_req_cachep == NULL)
43023 return -ENOMEM;
43024
43025 @@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
43026 efficient to alloc 1 per page off the slab compared to 17K (5page)
43027 alloc of large cifs buffers even when page debugging is on */
43028 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43029 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43030 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43031 NULL);
43032 if (cifs_sm_req_cachep == NULL) {
43033 mempool_destroy(cifs_req_poolp);
43034 @@ -1093,8 +1093,8 @@ init_cifs(void)
43035 atomic_set(&bufAllocCount, 0);
43036 atomic_set(&smBufAllocCount, 0);
43037 #ifdef CONFIG_CIFS_STATS2
43038 - atomic_set(&totBufAllocCount, 0);
43039 - atomic_set(&totSmBufAllocCount, 0);
43040 + atomic_set_unchecked(&totBufAllocCount, 0);
43041 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43042 #endif /* CONFIG_CIFS_STATS2 */
43043
43044 atomic_set(&midCount, 0);
43045 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43046 index 95dad9d..fe7af1a 100644
43047 --- a/fs/cifs/cifsglob.h
43048 +++ b/fs/cifs/cifsglob.h
43049 @@ -381,28 +381,28 @@ struct cifs_tcon {
43050 __u16 Flags; /* optional support bits */
43051 enum statusEnum tidStatus;
43052 #ifdef CONFIG_CIFS_STATS
43053 - atomic_t num_smbs_sent;
43054 - atomic_t num_writes;
43055 - atomic_t num_reads;
43056 - atomic_t num_flushes;
43057 - atomic_t num_oplock_brks;
43058 - atomic_t num_opens;
43059 - atomic_t num_closes;
43060 - atomic_t num_deletes;
43061 - atomic_t num_mkdirs;
43062 - atomic_t num_posixopens;
43063 - atomic_t num_posixmkdirs;
43064 - atomic_t num_rmdirs;
43065 - atomic_t num_renames;
43066 - atomic_t num_t2renames;
43067 - atomic_t num_ffirst;
43068 - atomic_t num_fnext;
43069 - atomic_t num_fclose;
43070 - atomic_t num_hardlinks;
43071 - atomic_t num_symlinks;
43072 - atomic_t num_locks;
43073 - atomic_t num_acl_get;
43074 - atomic_t num_acl_set;
43075 + atomic_unchecked_t num_smbs_sent;
43076 + atomic_unchecked_t num_writes;
43077 + atomic_unchecked_t num_reads;
43078 + atomic_unchecked_t num_flushes;
43079 + atomic_unchecked_t num_oplock_brks;
43080 + atomic_unchecked_t num_opens;
43081 + atomic_unchecked_t num_closes;
43082 + atomic_unchecked_t num_deletes;
43083 + atomic_unchecked_t num_mkdirs;
43084 + atomic_unchecked_t num_posixopens;
43085 + atomic_unchecked_t num_posixmkdirs;
43086 + atomic_unchecked_t num_rmdirs;
43087 + atomic_unchecked_t num_renames;
43088 + atomic_unchecked_t num_t2renames;
43089 + atomic_unchecked_t num_ffirst;
43090 + atomic_unchecked_t num_fnext;
43091 + atomic_unchecked_t num_fclose;
43092 + atomic_unchecked_t num_hardlinks;
43093 + atomic_unchecked_t num_symlinks;
43094 + atomic_unchecked_t num_locks;
43095 + atomic_unchecked_t num_acl_get;
43096 + atomic_unchecked_t num_acl_set;
43097 #ifdef CONFIG_CIFS_STATS2
43098 unsigned long long time_writes;
43099 unsigned long long time_reads;
43100 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim)
43101 }
43102
43103 #ifdef CONFIG_CIFS_STATS
43104 -#define cifs_stats_inc atomic_inc
43105 +#define cifs_stats_inc atomic_inc_unchecked
43106
43107 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43108 unsigned int bytes)
43109 @@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43110 /* Various Debug counters */
43111 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43112 #ifdef CONFIG_CIFS_STATS2
43113 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43114 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43115 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43116 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43117 #endif
43118 GLOBAL_EXTERN atomic_t smBufAllocCount;
43119 GLOBAL_EXTERN atomic_t midCount;
43120 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43121 index db3f18c..1f5955e 100644
43122 --- a/fs/cifs/link.c
43123 +++ b/fs/cifs/link.c
43124 @@ -593,7 +593,7 @@ symlink_exit:
43125
43126 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43127 {
43128 - char *p = nd_get_link(nd);
43129 + const char *p = nd_get_link(nd);
43130 if (!IS_ERR(p))
43131 kfree(p);
43132 }
43133 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43134 index 7c16933..c8212b5 100644
43135 --- a/fs/cifs/misc.c
43136 +++ b/fs/cifs/misc.c
43137 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43138 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43139 atomic_inc(&bufAllocCount);
43140 #ifdef CONFIG_CIFS_STATS2
43141 - atomic_inc(&totBufAllocCount);
43142 + atomic_inc_unchecked(&totBufAllocCount);
43143 #endif /* CONFIG_CIFS_STATS2 */
43144 }
43145
43146 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43147 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43148 atomic_inc(&smBufAllocCount);
43149 #ifdef CONFIG_CIFS_STATS2
43150 - atomic_inc(&totSmBufAllocCount);
43151 + atomic_inc_unchecked(&totSmBufAllocCount);
43152 #endif /* CONFIG_CIFS_STATS2 */
43153
43154 }
43155 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43156 index 6901578..d402eb5 100644
43157 --- a/fs/coda/cache.c
43158 +++ b/fs/coda/cache.c
43159 @@ -24,7 +24,7 @@
43160 #include "coda_linux.h"
43161 #include "coda_cache.h"
43162
43163 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43164 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43165
43166 /* replace or extend an acl cache hit */
43167 void coda_cache_enter(struct inode *inode, int mask)
43168 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43169 struct coda_inode_info *cii = ITOC(inode);
43170
43171 spin_lock(&cii->c_lock);
43172 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43173 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43174 if (cii->c_uid != current_fsuid()) {
43175 cii->c_uid = current_fsuid();
43176 cii->c_cached_perm = mask;
43177 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43178 {
43179 struct coda_inode_info *cii = ITOC(inode);
43180 spin_lock(&cii->c_lock);
43181 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43182 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43183 spin_unlock(&cii->c_lock);
43184 }
43185
43186 /* remove all acl caches */
43187 void coda_cache_clear_all(struct super_block *sb)
43188 {
43189 - atomic_inc(&permission_epoch);
43190 + atomic_inc_unchecked(&permission_epoch);
43191 }
43192
43193
43194 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43195 spin_lock(&cii->c_lock);
43196 hit = (mask & cii->c_cached_perm) == mask &&
43197 cii->c_uid == current_fsuid() &&
43198 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43199 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43200 spin_unlock(&cii->c_lock);
43201
43202 return hit;
43203 diff --git a/fs/compat.c b/fs/compat.c
43204 index 58b1da4..afcd9b8 100644
43205 --- a/fs/compat.c
43206 +++ b/fs/compat.c
43207 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
43208 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
43209 {
43210 compat_ino_t ino = stat->ino;
43211 - typeof(ubuf->st_uid) uid = 0;
43212 - typeof(ubuf->st_gid) gid = 0;
43213 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
43214 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
43215 int err;
43216
43217 SET_UID(uid, stat->uid);
43218 @@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43219
43220 set_fs(KERNEL_DS);
43221 /* The __user pointer cast is valid because of the set_fs() */
43222 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43223 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43224 set_fs(oldfs);
43225 /* truncating is ok because it's a user address */
43226 if (!ret)
43227 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43228 goto out;
43229
43230 ret = -EINVAL;
43231 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43232 + if (nr_segs > UIO_MAXIOV)
43233 goto out;
43234 if (nr_segs > fast_segs) {
43235 ret = -ENOMEM;
43236 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
43237
43238 struct compat_readdir_callback {
43239 struct compat_old_linux_dirent __user *dirent;
43240 + struct file * file;
43241 int result;
43242 };
43243
43244 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43245 buf->result = -EOVERFLOW;
43246 return -EOVERFLOW;
43247 }
43248 +
43249 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43250 + return 0;
43251 +
43252 buf->result++;
43253 dirent = buf->dirent;
43254 if (!access_ok(VERIFY_WRITE, dirent,
43255 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43256
43257 buf.result = 0;
43258 buf.dirent = dirent;
43259 + buf.file = file;
43260
43261 error = vfs_readdir(file, compat_fillonedir, &buf);
43262 if (buf.result)
43263 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
43264 struct compat_getdents_callback {
43265 struct compat_linux_dirent __user *current_dir;
43266 struct compat_linux_dirent __user *previous;
43267 + struct file * file;
43268 int count;
43269 int error;
43270 };
43271 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43272 buf->error = -EOVERFLOW;
43273 return -EOVERFLOW;
43274 }
43275 +
43276 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43277 + return 0;
43278 +
43279 dirent = buf->previous;
43280 if (dirent) {
43281 if (__put_user(offset, &dirent->d_off))
43282 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43283 buf.previous = NULL;
43284 buf.count = count;
43285 buf.error = 0;
43286 + buf.file = file;
43287
43288 error = vfs_readdir(file, compat_filldir, &buf);
43289 if (error >= 0)
43290 @@ -1006,6 +1018,7 @@ out:
43291 struct compat_getdents_callback64 {
43292 struct linux_dirent64 __user *current_dir;
43293 struct linux_dirent64 __user *previous;
43294 + struct file * file;
43295 int count;
43296 int error;
43297 };
43298 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43299 buf->error = -EINVAL; /* only used if we fail.. */
43300 if (reclen > buf->count)
43301 return -EINVAL;
43302 +
43303 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43304 + return 0;
43305 +
43306 dirent = buf->previous;
43307
43308 if (dirent) {
43309 @@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43310 buf.previous = NULL;
43311 buf.count = count;
43312 buf.error = 0;
43313 + buf.file = file;
43314
43315 error = vfs_readdir(file, compat_filldir64, &buf);
43316 if (error >= 0)
43317 error = buf.error;
43318 lastdirent = buf.previous;
43319 if (lastdirent) {
43320 - typeof(lastdirent->d_off) d_off = file->f_pos;
43321 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43322 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43323 error = -EFAULT;
43324 else
43325 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
43326 struct fdtable *fdt;
43327 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43328
43329 + pax_track_stack();
43330 +
43331 if (n < 0)
43332 goto out_nofds;
43333
43334 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43335 index 112e45a..b59845b 100644
43336 --- a/fs/compat_binfmt_elf.c
43337 +++ b/fs/compat_binfmt_elf.c
43338 @@ -30,11 +30,13 @@
43339 #undef elf_phdr
43340 #undef elf_shdr
43341 #undef elf_note
43342 +#undef elf_dyn
43343 #undef elf_addr_t
43344 #define elfhdr elf32_hdr
43345 #define elf_phdr elf32_phdr
43346 #define elf_shdr elf32_shdr
43347 #define elf_note elf32_note
43348 +#define elf_dyn Elf32_Dyn
43349 #define elf_addr_t Elf32_Addr
43350
43351 /*
43352 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43353 index 51352de..93292ff 100644
43354 --- a/fs/compat_ioctl.c
43355 +++ b/fs/compat_ioctl.c
43356 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43357
43358 err = get_user(palp, &up->palette);
43359 err |= get_user(length, &up->length);
43360 + if (err)
43361 + return -EFAULT;
43362
43363 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43364 err = put_user(compat_ptr(palp), &up_native->palette);
43365 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43366 return -EFAULT;
43367 if (__get_user(udata, &ss32->iomem_base))
43368 return -EFAULT;
43369 - ss.iomem_base = compat_ptr(udata);
43370 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43371 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43372 __get_user(ss.port_high, &ss32->port_high))
43373 return -EFAULT;
43374 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43375 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43376 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43377 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43378 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43379 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43380 return -EFAULT;
43381
43382 return ioctl_preallocate(file, p);
43383 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43384 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43385 {
43386 unsigned int a, b;
43387 - a = *(unsigned int *)p;
43388 - b = *(unsigned int *)q;
43389 + a = *(const unsigned int *)p;
43390 + b = *(const unsigned int *)q;
43391 if (a > b)
43392 return 1;
43393 if (a < b)
43394 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43395 index 9a37a9b..35792b6 100644
43396 --- a/fs/configfs/dir.c
43397 +++ b/fs/configfs/dir.c
43398 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43399 }
43400 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43401 struct configfs_dirent *next;
43402 - const char * name;
43403 + const unsigned char * name;
43404 + char d_name[sizeof(next->s_dentry->d_iname)];
43405 int len;
43406 struct inode *inode = NULL;
43407
43408 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43409 continue;
43410
43411 name = configfs_get_name(next);
43412 - len = strlen(name);
43413 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43414 + len = next->s_dentry->d_name.len;
43415 + memcpy(d_name, name, len);
43416 + name = d_name;
43417 + } else
43418 + len = strlen(name);
43419
43420 /*
43421 * We'll have a dentry and an inode for
43422 diff --git a/fs/dcache.c b/fs/dcache.c
43423 index a88948b..1e32160 100644
43424 --- a/fs/dcache.c
43425 +++ b/fs/dcache.c
43426 @@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned long mempages)
43427 mempages -= reserve;
43428
43429 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43430 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43431 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43432
43433 dcache_init();
43434 inode_init();
43435 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43436 index 11f8582..7b633bd 100644
43437 --- a/fs/ecryptfs/inode.c
43438 +++ b/fs/ecryptfs/inode.c
43439 @@ -681,7 +681,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43440 old_fs = get_fs();
43441 set_fs(get_ds());
43442 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43443 - (char __user *)lower_buf,
43444 + (char __force_user *)lower_buf,
43445 lower_bufsiz);
43446 set_fs(old_fs);
43447 if (rc < 0)
43448 @@ -727,7 +727,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43449 }
43450 old_fs = get_fs();
43451 set_fs(get_ds());
43452 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43453 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43454 set_fs(old_fs);
43455 if (rc < 0) {
43456 kfree(buf);
43457 @@ -742,7 +742,7 @@ out:
43458 static void
43459 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43460 {
43461 - char *buf = nd_get_link(nd);
43462 + const char *buf = nd_get_link(nd);
43463 if (!IS_ERR(buf)) {
43464 /* Free the char* */
43465 kfree(buf);
43466 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43467 index 940a82e..63af89e 100644
43468 --- a/fs/ecryptfs/miscdev.c
43469 +++ b/fs/ecryptfs/miscdev.c
43470 @@ -328,7 +328,7 @@ check_list:
43471 goto out_unlock_msg_ctx;
43472 i = 5;
43473 if (msg_ctx->msg) {
43474 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43475 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43476 goto out_unlock_msg_ctx;
43477 i += packet_length_size;
43478 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43479 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43480 index 3745f7c..89cc7a3 100644
43481 --- a/fs/ecryptfs/read_write.c
43482 +++ b/fs/ecryptfs/read_write.c
43483 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43484 return -EIO;
43485 fs_save = get_fs();
43486 set_fs(get_ds());
43487 - rc = vfs_write(lower_file, data, size, &offset);
43488 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43489 set_fs(fs_save);
43490 mark_inode_dirty_sync(ecryptfs_inode);
43491 return rc;
43492 @@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43493 return -EIO;
43494 fs_save = get_fs();
43495 set_fs(get_ds());
43496 - rc = vfs_read(lower_file, data, size, &offset);
43497 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43498 set_fs(fs_save);
43499 return rc;
43500 }
43501 diff --git a/fs/exec.c b/fs/exec.c
43502 index 25dcbe5..4ffaa78 100644
43503 --- a/fs/exec.c
43504 +++ b/fs/exec.c
43505 @@ -55,12 +55,24 @@
43506 #include <linux/pipe_fs_i.h>
43507 #include <linux/oom.h>
43508 #include <linux/compat.h>
43509 +#include <linux/random.h>
43510 +#include <linux/seq_file.h>
43511 +
43512 +#ifdef CONFIG_PAX_REFCOUNT
43513 +#include <linux/kallsyms.h>
43514 +#include <linux/kdebug.h>
43515 +#endif
43516
43517 #include <asm/uaccess.h>
43518 #include <asm/mmu_context.h>
43519 #include <asm/tlb.h>
43520 #include "internal.h"
43521
43522 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43523 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43524 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43525 +#endif
43526 +
43527 int core_uses_pid;
43528 char core_pattern[CORENAME_MAX_SIZE] = "core";
43529 unsigned int core_pipe_limit;
43530 @@ -70,7 +82,7 @@ struct core_name {
43531 char *corename;
43532 int used, size;
43533 };
43534 -static atomic_t call_count = ATOMIC_INIT(1);
43535 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43536
43537 /* The maximal length of core_pattern is also specified in sysctl.c */
43538
43539 @@ -188,18 +200,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43540 int write)
43541 {
43542 struct page *page;
43543 - int ret;
43544
43545 -#ifdef CONFIG_STACK_GROWSUP
43546 - if (write) {
43547 - ret = expand_downwards(bprm->vma, pos);
43548 - if (ret < 0)
43549 - return NULL;
43550 - }
43551 -#endif
43552 - ret = get_user_pages(current, bprm->mm, pos,
43553 - 1, write, 1, &page, NULL);
43554 - if (ret <= 0)
43555 + if (0 > expand_downwards(bprm->vma, pos))
43556 + return NULL;
43557 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43558 return NULL;
43559
43560 if (write) {
43561 @@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43562 vma->vm_end = STACK_TOP_MAX;
43563 vma->vm_start = vma->vm_end - PAGE_SIZE;
43564 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43565 +
43566 +#ifdef CONFIG_PAX_SEGMEXEC
43567 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43568 +#endif
43569 +
43570 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43571 INIT_LIST_HEAD(&vma->anon_vma_chain);
43572
43573 @@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43574 mm->stack_vm = mm->total_vm = 1;
43575 up_write(&mm->mmap_sem);
43576 bprm->p = vma->vm_end - sizeof(void *);
43577 +
43578 +#ifdef CONFIG_PAX_RANDUSTACK
43579 + if (randomize_va_space)
43580 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
43581 +#endif
43582 +
43583 return 0;
43584 err:
43585 up_write(&mm->mmap_sem);
43586 @@ -396,19 +411,7 @@ err:
43587 return err;
43588 }
43589
43590 -struct user_arg_ptr {
43591 -#ifdef CONFIG_COMPAT
43592 - bool is_compat;
43593 -#endif
43594 - union {
43595 - const char __user *const __user *native;
43596 -#ifdef CONFIG_COMPAT
43597 - compat_uptr_t __user *compat;
43598 -#endif
43599 - } ptr;
43600 -};
43601 -
43602 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43603 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43604 {
43605 const char __user *native;
43606
43607 @@ -417,14 +420,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43608 compat_uptr_t compat;
43609
43610 if (get_user(compat, argv.ptr.compat + nr))
43611 - return ERR_PTR(-EFAULT);
43612 + return (const char __force_user *)ERR_PTR(-EFAULT);
43613
43614 return compat_ptr(compat);
43615 }
43616 #endif
43617
43618 if (get_user(native, argv.ptr.native + nr))
43619 - return ERR_PTR(-EFAULT);
43620 + return (const char __force_user *)ERR_PTR(-EFAULT);
43621
43622 return native;
43623 }
43624 @@ -443,7 +446,7 @@ static int count(struct user_arg_ptr argv, int max)
43625 if (!p)
43626 break;
43627
43628 - if (IS_ERR(p))
43629 + if (IS_ERR((const char __force_kernel *)p))
43630 return -EFAULT;
43631
43632 if (i++ >= max)
43633 @@ -477,7 +480,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43634
43635 ret = -EFAULT;
43636 str = get_user_arg_ptr(argv, argc);
43637 - if (IS_ERR(str))
43638 + if (IS_ERR((const char __force_kernel *)str))
43639 goto out;
43640
43641 len = strnlen_user(str, MAX_ARG_STRLEN);
43642 @@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43643 int r;
43644 mm_segment_t oldfs = get_fs();
43645 struct user_arg_ptr argv = {
43646 - .ptr.native = (const char __user *const __user *)__argv,
43647 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43648 };
43649
43650 set_fs(KERNEL_DS);
43651 @@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43652 unsigned long new_end = old_end - shift;
43653 struct mmu_gather tlb;
43654
43655 - BUG_ON(new_start > new_end);
43656 + if (new_start >= new_end || new_start < mmap_min_addr)
43657 + return -ENOMEM;
43658
43659 /*
43660 * ensure there are no vmas between where we want to go
43661 @@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43662 if (vma != find_vma(mm, new_start))
43663 return -EFAULT;
43664
43665 +#ifdef CONFIG_PAX_SEGMEXEC
43666 + BUG_ON(pax_find_mirror_vma(vma));
43667 +#endif
43668 +
43669 /*
43670 * cover the whole range: [new_start, old_end)
43671 */
43672 @@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43673 stack_top = arch_align_stack(stack_top);
43674 stack_top = PAGE_ALIGN(stack_top);
43675
43676 - if (unlikely(stack_top < mmap_min_addr) ||
43677 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43678 - return -ENOMEM;
43679 -
43680 stack_shift = vma->vm_end - stack_top;
43681
43682 bprm->p -= stack_shift;
43683 @@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43684 bprm->exec -= stack_shift;
43685
43686 down_write(&mm->mmap_sem);
43687 +
43688 + /* Move stack pages down in memory. */
43689 + if (stack_shift) {
43690 + ret = shift_arg_pages(vma, stack_shift);
43691 + if (ret)
43692 + goto out_unlock;
43693 + }
43694 +
43695 vm_flags = VM_STACK_FLAGS;
43696
43697 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43698 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43699 + vm_flags &= ~VM_EXEC;
43700 +
43701 +#ifdef CONFIG_PAX_MPROTECT
43702 + if (mm->pax_flags & MF_PAX_MPROTECT)
43703 + vm_flags &= ~VM_MAYEXEC;
43704 +#endif
43705 +
43706 + }
43707 +#endif
43708 +
43709 /*
43710 * Adjust stack execute permissions; explicitly enable for
43711 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43712 @@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43713 goto out_unlock;
43714 BUG_ON(prev != vma);
43715
43716 - /* Move stack pages down in memory. */
43717 - if (stack_shift) {
43718 - ret = shift_arg_pages(vma, stack_shift);
43719 - if (ret)
43720 - goto out_unlock;
43721 - }
43722 -
43723 /* mprotect_fixup is overkill to remove the temporary stack flags */
43724 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43725
43726 @@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_t offset,
43727 old_fs = get_fs();
43728 set_fs(get_ds());
43729 /* The cast to a user pointer is valid due to the set_fs() */
43730 - result = vfs_read(file, (void __user *)addr, count, &pos);
43731 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43732 set_fs(old_fs);
43733 return result;
43734 }
43735 @@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
43736 }
43737 rcu_read_unlock();
43738
43739 - if (p->fs->users > n_fs) {
43740 + if (atomic_read(&p->fs->users) > n_fs) {
43741 bprm->unsafe |= LSM_UNSAFE_SHARE;
43742 } else {
43743 res = -EAGAIN;
43744 @@ -1454,6 +1471,11 @@ static int do_execve_common(const char *filename,
43745 struct user_arg_ptr envp,
43746 struct pt_regs *regs)
43747 {
43748 +#ifdef CONFIG_GRKERNSEC
43749 + struct file *old_exec_file;
43750 + struct acl_subject_label *old_acl;
43751 + struct rlimit old_rlim[RLIM_NLIMITS];
43752 +#endif
43753 struct linux_binprm *bprm;
43754 struct file *file;
43755 struct files_struct *displaced;
43756 @@ -1461,6 +1483,8 @@ static int do_execve_common(const char *filename,
43757 int retval;
43758 const struct cred *cred = current_cred();
43759
43760 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43761 +
43762 /*
43763 * We move the actual failure in case of RLIMIT_NPROC excess from
43764 * set*uid() to execve() because too many poorly written programs
43765 @@ -1507,6 +1531,16 @@ static int do_execve_common(const char *filename,
43766 bprm->filename = filename;
43767 bprm->interp = filename;
43768
43769 + if (gr_process_user_ban()) {
43770 + retval = -EPERM;
43771 + goto out_file;
43772 + }
43773 +
43774 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43775 + retval = -EACCES;
43776 + goto out_file;
43777 + }
43778 +
43779 retval = bprm_mm_init(bprm);
43780 if (retval)
43781 goto out_file;
43782 @@ -1536,9 +1570,40 @@ static int do_execve_common(const char *filename,
43783 if (retval < 0)
43784 goto out;
43785
43786 + if (!gr_tpe_allow(file)) {
43787 + retval = -EACCES;
43788 + goto out;
43789 + }
43790 +
43791 + if (gr_check_crash_exec(file)) {
43792 + retval = -EACCES;
43793 + goto out;
43794 + }
43795 +
43796 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43797 +
43798 + gr_handle_exec_args(bprm, argv);
43799 +
43800 +#ifdef CONFIG_GRKERNSEC
43801 + old_acl = current->acl;
43802 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43803 + old_exec_file = current->exec_file;
43804 + get_file(file);
43805 + current->exec_file = file;
43806 +#endif
43807 +
43808 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43809 + bprm->unsafe & LSM_UNSAFE_SHARE);
43810 + if (retval < 0)
43811 + goto out_fail;
43812 +
43813 retval = search_binary_handler(bprm,regs);
43814 if (retval < 0)
43815 - goto out;
43816 + goto out_fail;
43817 +#ifdef CONFIG_GRKERNSEC
43818 + if (old_exec_file)
43819 + fput(old_exec_file);
43820 +#endif
43821
43822 /* execve succeeded */
43823 current->fs->in_exec = 0;
43824 @@ -1549,6 +1614,14 @@ static int do_execve_common(const char *filename,
43825 put_files_struct(displaced);
43826 return retval;
43827
43828 +out_fail:
43829 +#ifdef CONFIG_GRKERNSEC
43830 + current->acl = old_acl;
43831 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43832 + fput(current->exec_file);
43833 + current->exec_file = old_exec_file;
43834 +#endif
43835 +
43836 out:
43837 if (bprm->mm) {
43838 acct_arg_size(bprm, 0);
43839 @@ -1622,7 +1695,7 @@ static int expand_corename(struct core_name *cn)
43840 {
43841 char *old_corename = cn->corename;
43842
43843 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43844 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43845 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43846
43847 if (!cn->corename) {
43848 @@ -1719,7 +1792,7 @@ static int format_corename(struct core_name *cn, long signr)
43849 int pid_in_pattern = 0;
43850 int err = 0;
43851
43852 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43853 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43854 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43855 cn->used = 0;
43856
43857 @@ -1816,6 +1889,218 @@ out:
43858 return ispipe;
43859 }
43860
43861 +int pax_check_flags(unsigned long *flags)
43862 +{
43863 + int retval = 0;
43864 +
43865 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43866 + if (*flags & MF_PAX_SEGMEXEC)
43867 + {
43868 + *flags &= ~MF_PAX_SEGMEXEC;
43869 + retval = -EINVAL;
43870 + }
43871 +#endif
43872 +
43873 + if ((*flags & MF_PAX_PAGEEXEC)
43874 +
43875 +#ifdef CONFIG_PAX_PAGEEXEC
43876 + && (*flags & MF_PAX_SEGMEXEC)
43877 +#endif
43878 +
43879 + )
43880 + {
43881 + *flags &= ~MF_PAX_PAGEEXEC;
43882 + retval = -EINVAL;
43883 + }
43884 +
43885 + if ((*flags & MF_PAX_MPROTECT)
43886 +
43887 +#ifdef CONFIG_PAX_MPROTECT
43888 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43889 +#endif
43890 +
43891 + )
43892 + {
43893 + *flags &= ~MF_PAX_MPROTECT;
43894 + retval = -EINVAL;
43895 + }
43896 +
43897 + if ((*flags & MF_PAX_EMUTRAMP)
43898 +
43899 +#ifdef CONFIG_PAX_EMUTRAMP
43900 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43901 +#endif
43902 +
43903 + )
43904 + {
43905 + *flags &= ~MF_PAX_EMUTRAMP;
43906 + retval = -EINVAL;
43907 + }
43908 +
43909 + return retval;
43910 +}
43911 +
43912 +EXPORT_SYMBOL(pax_check_flags);
43913 +
43914 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43915 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43916 +{
43917 + struct task_struct *tsk = current;
43918 + struct mm_struct *mm = current->mm;
43919 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43920 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43921 + char *path_exec = NULL;
43922 + char *path_fault = NULL;
43923 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43924 +
43925 + if (buffer_exec && buffer_fault) {
43926 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43927 +
43928 + down_read(&mm->mmap_sem);
43929 + vma = mm->mmap;
43930 + while (vma && (!vma_exec || !vma_fault)) {
43931 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43932 + vma_exec = vma;
43933 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43934 + vma_fault = vma;
43935 + vma = vma->vm_next;
43936 + }
43937 + if (vma_exec) {
43938 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43939 + if (IS_ERR(path_exec))
43940 + path_exec = "<path too long>";
43941 + else {
43942 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43943 + if (path_exec) {
43944 + *path_exec = 0;
43945 + path_exec = buffer_exec;
43946 + } else
43947 + path_exec = "<path too long>";
43948 + }
43949 + }
43950 + if (vma_fault) {
43951 + start = vma_fault->vm_start;
43952 + end = vma_fault->vm_end;
43953 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43954 + if (vma_fault->vm_file) {
43955 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43956 + if (IS_ERR(path_fault))
43957 + path_fault = "<path too long>";
43958 + else {
43959 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43960 + if (path_fault) {
43961 + *path_fault = 0;
43962 + path_fault = buffer_fault;
43963 + } else
43964 + path_fault = "<path too long>";
43965 + }
43966 + } else
43967 + path_fault = "<anonymous mapping>";
43968 + }
43969 + up_read(&mm->mmap_sem);
43970 + }
43971 + if (tsk->signal->curr_ip)
43972 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43973 + else
43974 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43975 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43976 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43977 + task_uid(tsk), task_euid(tsk), pc, sp);
43978 + free_page((unsigned long)buffer_exec);
43979 + free_page((unsigned long)buffer_fault);
43980 + pax_report_insns(regs, pc, sp);
43981 + do_coredump(SIGKILL, SIGKILL, regs);
43982 +}
43983 +#endif
43984 +
43985 +#ifdef CONFIG_PAX_REFCOUNT
43986 +void pax_report_refcount_overflow(struct pt_regs *regs)
43987 +{
43988 + if (current->signal->curr_ip)
43989 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43990 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43991 + else
43992 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43993 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43994 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43995 + show_regs(regs);
43996 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43997 +}
43998 +#endif
43999 +
44000 +#ifdef CONFIG_PAX_USERCOPY
44001 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44002 +int object_is_on_stack(const void *obj, unsigned long len)
44003 +{
44004 + const void * const stack = task_stack_page(current);
44005 + const void * const stackend = stack + THREAD_SIZE;
44006 +
44007 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44008 + const void *frame = NULL;
44009 + const void *oldframe;
44010 +#endif
44011 +
44012 + if (obj + len < obj)
44013 + return -1;
44014 +
44015 + if (obj + len <= stack || stackend <= obj)
44016 + return 0;
44017 +
44018 + if (obj < stack || stackend < obj + len)
44019 + return -1;
44020 +
44021 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44022 + oldframe = __builtin_frame_address(1);
44023 + if (oldframe)
44024 + frame = __builtin_frame_address(2);
44025 + /*
44026 + low ----------------------------------------------> high
44027 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
44028 + ^----------------^
44029 + allow copies only within here
44030 + */
44031 + while (stack <= frame && frame < stackend) {
44032 + /* if obj + len extends past the last frame, this
44033 + check won't pass and the next frame will be 0,
44034 + causing us to bail out and correctly report
44035 + the copy as invalid
44036 + */
44037 + if (obj + len <= frame)
44038 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44039 + oldframe = frame;
44040 + frame = *(const void * const *)frame;
44041 + }
44042 + return -1;
44043 +#else
44044 + return 1;
44045 +#endif
44046 +}
44047 +
44048 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44049 +{
44050 + if (current->signal->curr_ip)
44051 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44052 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44053 + else
44054 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44055 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44056 + dump_stack();
44057 + gr_handle_kernel_exploit();
44058 + do_group_exit(SIGKILL);
44059 +}
44060 +#endif
44061 +
44062 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44063 +void pax_track_stack(void)
44064 +{
44065 + unsigned long sp = (unsigned long)&sp;
44066 + if (sp < current_thread_info()->lowest_stack &&
44067 + sp > (unsigned long)task_stack_page(current))
44068 + current_thread_info()->lowest_stack = sp;
44069 +}
44070 +EXPORT_SYMBOL(pax_track_stack);
44071 +#endif
44072 +
44073 static int zap_process(struct task_struct *start, int exit_code)
44074 {
44075 struct task_struct *t;
44076 @@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct file *file)
44077 pipe = file->f_path.dentry->d_inode->i_pipe;
44078
44079 pipe_lock(pipe);
44080 - pipe->readers++;
44081 - pipe->writers--;
44082 + atomic_inc(&pipe->readers);
44083 + atomic_dec(&pipe->writers);
44084
44085 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44086 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44087 wake_up_interruptible_sync(&pipe->wait);
44088 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44089 pipe_wait(pipe);
44090 }
44091
44092 - pipe->readers--;
44093 - pipe->writers++;
44094 + atomic_dec(&pipe->readers);
44095 + atomic_inc(&pipe->writers);
44096 pipe_unlock(pipe);
44097
44098 }
44099 @@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44100 int retval = 0;
44101 int flag = 0;
44102 int ispipe;
44103 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44104 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44105 struct coredump_params cprm = {
44106 .signr = signr,
44107 .regs = regs,
44108 @@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44109
44110 audit_core_dumps(signr);
44111
44112 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44113 + gr_handle_brute_attach(current, cprm.mm_flags);
44114 +
44115 binfmt = mm->binfmt;
44116 if (!binfmt || !binfmt->core_dump)
44117 goto fail;
44118 @@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44119 }
44120 cprm.limit = RLIM_INFINITY;
44121
44122 - dump_count = atomic_inc_return(&core_dump_count);
44123 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44124 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44125 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44126 task_tgid_vnr(current), current->comm);
44127 @@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44128 } else {
44129 struct inode *inode;
44130
44131 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44132 +
44133 if (cprm.limit < binfmt->min_coredump)
44134 goto fail_unlock;
44135
44136 @@ -2250,7 +2540,7 @@ close_fail:
44137 filp_close(cprm.file, NULL);
44138 fail_dropcount:
44139 if (ispipe)
44140 - atomic_dec(&core_dump_count);
44141 + atomic_dec_unchecked(&core_dump_count);
44142 fail_unlock:
44143 kfree(cn.corename);
44144 fail_corename:
44145 @@ -2269,7 +2559,7 @@ fail:
44146 */
44147 int dump_write(struct file *file, const void *addr, int nr)
44148 {
44149 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44150 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44151 }
44152 EXPORT_SYMBOL(dump_write);
44153
44154 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44155 index 8f44cef..cb07120 100644
44156 --- a/fs/ext2/balloc.c
44157 +++ b/fs/ext2/balloc.c
44158 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44159
44160 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44161 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44162 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44163 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44164 sbi->s_resuid != current_fsuid() &&
44165 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44166 return 0;
44167 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44168 index 6386d76..0a266b1 100644
44169 --- a/fs/ext3/balloc.c
44170 +++ b/fs/ext3/balloc.c
44171 @@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
44172
44173 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44174 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44175 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44176 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44177 sbi->s_resuid != current_fsuid() &&
44178 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44179 return 0;
44180 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44181 index f8224ad..fbef97c 100644
44182 --- a/fs/ext4/balloc.c
44183 +++ b/fs/ext4/balloc.c
44184 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
44185 /* Hm, nope. Are (enough) root reserved blocks available? */
44186 if (sbi->s_resuid == current_fsuid() ||
44187 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44188 - capable(CAP_SYS_RESOURCE) ||
44189 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44190 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44191 + capable_nolog(CAP_SYS_RESOURCE)) {
44192
44193 if (free_blocks >= (nblocks + dirty_blocks))
44194 return 1;
44195 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44196 index 5c38120..2291d18 100644
44197 --- a/fs/ext4/ext4.h
44198 +++ b/fs/ext4/ext4.h
44199 @@ -1180,19 +1180,19 @@ struct ext4_sb_info {
44200 unsigned long s_mb_last_start;
44201
44202 /* stats for buddy allocator */
44203 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44204 - atomic_t s_bal_success; /* we found long enough chunks */
44205 - atomic_t s_bal_allocated; /* in blocks */
44206 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44207 - atomic_t s_bal_goals; /* goal hits */
44208 - atomic_t s_bal_breaks; /* too long searches */
44209 - atomic_t s_bal_2orders; /* 2^order hits */
44210 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44211 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44212 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44213 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44214 + atomic_unchecked_t s_bal_goals; /* goal hits */
44215 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44216 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44217 spinlock_t s_bal_lock;
44218 unsigned long s_mb_buddies_generated;
44219 unsigned long long s_mb_generation_time;
44220 - atomic_t s_mb_lost_chunks;
44221 - atomic_t s_mb_preallocated;
44222 - atomic_t s_mb_discarded;
44223 + atomic_unchecked_t s_mb_lost_chunks;
44224 + atomic_unchecked_t s_mb_preallocated;
44225 + atomic_unchecked_t s_mb_discarded;
44226 atomic_t s_lock_busy;
44227
44228 /* locality groups */
44229 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
44230 index e4095e9..1c006c5 100644
44231 --- a/fs/ext4/file.c
44232 +++ b/fs/ext4/file.c
44233 @@ -181,8 +181,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
44234 path.dentry = mnt->mnt_root;
44235 cp = d_path(&path, buf, sizeof(buf));
44236 if (!IS_ERR(cp)) {
44237 - memcpy(sbi->s_es->s_last_mounted, cp,
44238 - sizeof(sbi->s_es->s_last_mounted));
44239 + strlcpy(sbi->s_es->s_last_mounted, cp,
44240 + sizeof(sbi->s_es->s_last_mounted));
44241 ext4_mark_super_dirty(sb);
44242 }
44243 }
44244 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44245 index f18bfe3..43759b1 100644
44246 --- a/fs/ext4/ioctl.c
44247 +++ b/fs/ext4/ioctl.c
44248 @@ -348,7 +348,7 @@ mext_out:
44249 if (!blk_queue_discard(q))
44250 return -EOPNOTSUPP;
44251
44252 - if (copy_from_user(&range, (struct fstrim_range *)arg,
44253 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
44254 sizeof(range)))
44255 return -EFAULT;
44256
44257 @@ -358,7 +358,7 @@ mext_out:
44258 if (ret < 0)
44259 return ret;
44260
44261 - if (copy_to_user((struct fstrim_range *)arg, &range,
44262 + if (copy_to_user((struct fstrim_range __user *)arg, &range,
44263 sizeof(range)))
44264 return -EFAULT;
44265
44266 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44267 index 17a5a57..b6be3c5 100644
44268 --- a/fs/ext4/mballoc.c
44269 +++ b/fs/ext4/mballoc.c
44270 @@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44271 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44272
44273 if (EXT4_SB(sb)->s_mb_stats)
44274 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44275 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44276
44277 break;
44278 }
44279 @@ -2089,7 +2089,7 @@ repeat:
44280 ac->ac_status = AC_STATUS_CONTINUE;
44281 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44282 cr = 3;
44283 - atomic_inc(&sbi->s_mb_lost_chunks);
44284 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44285 goto repeat;
44286 }
44287 }
44288 @@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
44289 ext4_grpblk_t counters[16];
44290 } sg;
44291
44292 + pax_track_stack();
44293 +
44294 group--;
44295 if (group == 0)
44296 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
44297 @@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *sb)
44298 if (sbi->s_mb_stats) {
44299 ext4_msg(sb, KERN_INFO,
44300 "mballoc: %u blocks %u reqs (%u success)",
44301 - atomic_read(&sbi->s_bal_allocated),
44302 - atomic_read(&sbi->s_bal_reqs),
44303 - atomic_read(&sbi->s_bal_success));
44304 + atomic_read_unchecked(&sbi->s_bal_allocated),
44305 + atomic_read_unchecked(&sbi->s_bal_reqs),
44306 + atomic_read_unchecked(&sbi->s_bal_success));
44307 ext4_msg(sb, KERN_INFO,
44308 "mballoc: %u extents scanned, %u goal hits, "
44309 "%u 2^N hits, %u breaks, %u lost",
44310 - atomic_read(&sbi->s_bal_ex_scanned),
44311 - atomic_read(&sbi->s_bal_goals),
44312 - atomic_read(&sbi->s_bal_2orders),
44313 - atomic_read(&sbi->s_bal_breaks),
44314 - atomic_read(&sbi->s_mb_lost_chunks));
44315 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44316 + atomic_read_unchecked(&sbi->s_bal_goals),
44317 + atomic_read_unchecked(&sbi->s_bal_2orders),
44318 + atomic_read_unchecked(&sbi->s_bal_breaks),
44319 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44320 ext4_msg(sb, KERN_INFO,
44321 "mballoc: %lu generated and it took %Lu",
44322 sbi->s_mb_buddies_generated,
44323 sbi->s_mb_generation_time);
44324 ext4_msg(sb, KERN_INFO,
44325 "mballoc: %u preallocated, %u discarded",
44326 - atomic_read(&sbi->s_mb_preallocated),
44327 - atomic_read(&sbi->s_mb_discarded));
44328 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44329 + atomic_read_unchecked(&sbi->s_mb_discarded));
44330 }
44331
44332 free_percpu(sbi->s_locality_groups);
44333 @@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44334 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44335
44336 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44337 - atomic_inc(&sbi->s_bal_reqs);
44338 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44339 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44340 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44341 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44342 - atomic_inc(&sbi->s_bal_success);
44343 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44344 + atomic_inc_unchecked(&sbi->s_bal_success);
44345 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44346 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44347 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44348 - atomic_inc(&sbi->s_bal_goals);
44349 + atomic_inc_unchecked(&sbi->s_bal_goals);
44350 if (ac->ac_found > sbi->s_mb_max_to_scan)
44351 - atomic_inc(&sbi->s_bal_breaks);
44352 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44353 }
44354
44355 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44356 @@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44357 trace_ext4_mb_new_inode_pa(ac, pa);
44358
44359 ext4_mb_use_inode_pa(ac, pa);
44360 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44361 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44362
44363 ei = EXT4_I(ac->ac_inode);
44364 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44365 @@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44366 trace_ext4_mb_new_group_pa(ac, pa);
44367
44368 ext4_mb_use_group_pa(ac, pa);
44369 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44370 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44371
44372 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44373 lg = ac->ac_lg;
44374 @@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44375 * from the bitmap and continue.
44376 */
44377 }
44378 - atomic_add(free, &sbi->s_mb_discarded);
44379 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44380
44381 return err;
44382 }
44383 @@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44384 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44385 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44386 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44387 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44388 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44389 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44390
44391 return 0;
44392 diff --git a/fs/fcntl.c b/fs/fcntl.c
44393 index 22764c7..86372c9 100644
44394 --- a/fs/fcntl.c
44395 +++ b/fs/fcntl.c
44396 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44397 if (err)
44398 return err;
44399
44400 + if (gr_handle_chroot_fowner(pid, type))
44401 + return -ENOENT;
44402 + if (gr_check_protected_task_fowner(pid, type))
44403 + return -EACCES;
44404 +
44405 f_modown(filp, pid, type, force);
44406 return 0;
44407 }
44408 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44409
44410 static int f_setown_ex(struct file *filp, unsigned long arg)
44411 {
44412 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44413 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44414 struct f_owner_ex owner;
44415 struct pid *pid;
44416 int type;
44417 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44418
44419 static int f_getown_ex(struct file *filp, unsigned long arg)
44420 {
44421 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44422 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44423 struct f_owner_ex owner;
44424 int ret = 0;
44425
44426 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44427 switch (cmd) {
44428 case F_DUPFD:
44429 case F_DUPFD_CLOEXEC:
44430 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44431 if (arg >= rlimit(RLIMIT_NOFILE))
44432 break;
44433 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44434 diff --git a/fs/fifo.c b/fs/fifo.c
44435 index b1a524d..4ee270e 100644
44436 --- a/fs/fifo.c
44437 +++ b/fs/fifo.c
44438 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44439 */
44440 filp->f_op = &read_pipefifo_fops;
44441 pipe->r_counter++;
44442 - if (pipe->readers++ == 0)
44443 + if (atomic_inc_return(&pipe->readers) == 1)
44444 wake_up_partner(inode);
44445
44446 - if (!pipe->writers) {
44447 + if (!atomic_read(&pipe->writers)) {
44448 if ((filp->f_flags & O_NONBLOCK)) {
44449 /* suppress POLLHUP until we have
44450 * seen a writer */
44451 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44452 * errno=ENXIO when there is no process reading the FIFO.
44453 */
44454 ret = -ENXIO;
44455 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44456 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44457 goto err;
44458
44459 filp->f_op = &write_pipefifo_fops;
44460 pipe->w_counter++;
44461 - if (!pipe->writers++)
44462 + if (atomic_inc_return(&pipe->writers) == 1)
44463 wake_up_partner(inode);
44464
44465 - if (!pipe->readers) {
44466 + if (!atomic_read(&pipe->readers)) {
44467 wait_for_partner(inode, &pipe->r_counter);
44468 if (signal_pending(current))
44469 goto err_wr;
44470 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44471 */
44472 filp->f_op = &rdwr_pipefifo_fops;
44473
44474 - pipe->readers++;
44475 - pipe->writers++;
44476 + atomic_inc(&pipe->readers);
44477 + atomic_inc(&pipe->writers);
44478 pipe->r_counter++;
44479 pipe->w_counter++;
44480 - if (pipe->readers == 1 || pipe->writers == 1)
44481 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44482 wake_up_partner(inode);
44483 break;
44484
44485 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44486 return 0;
44487
44488 err_rd:
44489 - if (!--pipe->readers)
44490 + if (atomic_dec_and_test(&pipe->readers))
44491 wake_up_interruptible(&pipe->wait);
44492 ret = -ERESTARTSYS;
44493 goto err;
44494
44495 err_wr:
44496 - if (!--pipe->writers)
44497 + if (atomic_dec_and_test(&pipe->writers))
44498 wake_up_interruptible(&pipe->wait);
44499 ret = -ERESTARTSYS;
44500 goto err;
44501
44502 err:
44503 - if (!pipe->readers && !pipe->writers)
44504 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44505 free_pipe_info(inode);
44506
44507 err_nocleanup:
44508 diff --git a/fs/file.c b/fs/file.c
44509 index 4c6992d..104cdea 100644
44510 --- a/fs/file.c
44511 +++ b/fs/file.c
44512 @@ -15,6 +15,7 @@
44513 #include <linux/slab.h>
44514 #include <linux/vmalloc.h>
44515 #include <linux/file.h>
44516 +#include <linux/security.h>
44517 #include <linux/fdtable.h>
44518 #include <linux/bitops.h>
44519 #include <linux/interrupt.h>
44520 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44521 * N.B. For clone tasks sharing a files structure, this test
44522 * will limit the total number of files that can be opened.
44523 */
44524 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44525 if (nr >= rlimit(RLIMIT_NOFILE))
44526 return -EMFILE;
44527
44528 diff --git a/fs/filesystems.c b/fs/filesystems.c
44529 index 0845f84..7b4ebef 100644
44530 --- a/fs/filesystems.c
44531 +++ b/fs/filesystems.c
44532 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
44533 int len = dot ? dot - name : strlen(name);
44534
44535 fs = __get_fs_type(name, len);
44536 +
44537 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44538 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44539 +#else
44540 if (!fs && (request_module("%.*s", len, name) == 0))
44541 +#endif
44542 fs = __get_fs_type(name, len);
44543
44544 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44545 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44546 index 78b519c..212c0d0 100644
44547 --- a/fs/fs_struct.c
44548 +++ b/fs/fs_struct.c
44549 @@ -4,6 +4,7 @@
44550 #include <linux/path.h>
44551 #include <linux/slab.h>
44552 #include <linux/fs_struct.h>
44553 +#include <linux/grsecurity.h>
44554 #include "internal.h"
44555
44556 static inline void path_get_longterm(struct path *path)
44557 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44558 old_root = fs->root;
44559 fs->root = *path;
44560 path_get_longterm(path);
44561 + gr_set_chroot_entries(current, path);
44562 write_seqcount_end(&fs->seq);
44563 spin_unlock(&fs->lock);
44564 if (old_root.dentry)
44565 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44566 && fs->root.mnt == old_root->mnt) {
44567 path_get_longterm(new_root);
44568 fs->root = *new_root;
44569 + gr_set_chroot_entries(p, new_root);
44570 count++;
44571 }
44572 if (fs->pwd.dentry == old_root->dentry
44573 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44574 spin_lock(&fs->lock);
44575 write_seqcount_begin(&fs->seq);
44576 tsk->fs = NULL;
44577 - kill = !--fs->users;
44578 + gr_clear_chroot_entries(tsk);
44579 + kill = !atomic_dec_return(&fs->users);
44580 write_seqcount_end(&fs->seq);
44581 spin_unlock(&fs->lock);
44582 task_unlock(tsk);
44583 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44584 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44585 /* We don't need to lock fs - think why ;-) */
44586 if (fs) {
44587 - fs->users = 1;
44588 + atomic_set(&fs->users, 1);
44589 fs->in_exec = 0;
44590 spin_lock_init(&fs->lock);
44591 seqcount_init(&fs->seq);
44592 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44593 spin_lock(&old->lock);
44594 fs->root = old->root;
44595 path_get_longterm(&fs->root);
44596 + /* instead of calling gr_set_chroot_entries here,
44597 + we call it from every caller of this function
44598 + */
44599 fs->pwd = old->pwd;
44600 path_get_longterm(&fs->pwd);
44601 spin_unlock(&old->lock);
44602 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44603
44604 task_lock(current);
44605 spin_lock(&fs->lock);
44606 - kill = !--fs->users;
44607 + kill = !atomic_dec_return(&fs->users);
44608 current->fs = new_fs;
44609 + gr_set_chroot_entries(current, &new_fs->root);
44610 spin_unlock(&fs->lock);
44611 task_unlock(current);
44612
44613 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
44614
44615 /* to be mentioned only in INIT_TASK */
44616 struct fs_struct init_fs = {
44617 - .users = 1,
44618 + .users = ATOMIC_INIT(1),
44619 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44620 .seq = SEQCNT_ZERO,
44621 .umask = 0022,
44622 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44623 task_lock(current);
44624
44625 spin_lock(&init_fs.lock);
44626 - init_fs.users++;
44627 + atomic_inc(&init_fs.users);
44628 spin_unlock(&init_fs.lock);
44629
44630 spin_lock(&fs->lock);
44631 current->fs = &init_fs;
44632 - kill = !--fs->users;
44633 + gr_set_chroot_entries(current, &current->fs->root);
44634 + kill = !atomic_dec_return(&fs->users);
44635 spin_unlock(&fs->lock);
44636
44637 task_unlock(current);
44638 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44639 index 9905350..02eaec4 100644
44640 --- a/fs/fscache/cookie.c
44641 +++ b/fs/fscache/cookie.c
44642 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44643 parent ? (char *) parent->def->name : "<no-parent>",
44644 def->name, netfs_data);
44645
44646 - fscache_stat(&fscache_n_acquires);
44647 + fscache_stat_unchecked(&fscache_n_acquires);
44648
44649 /* if there's no parent cookie, then we don't create one here either */
44650 if (!parent) {
44651 - fscache_stat(&fscache_n_acquires_null);
44652 + fscache_stat_unchecked(&fscache_n_acquires_null);
44653 _leave(" [no parent]");
44654 return NULL;
44655 }
44656 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44657 /* allocate and initialise a cookie */
44658 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44659 if (!cookie) {
44660 - fscache_stat(&fscache_n_acquires_oom);
44661 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44662 _leave(" [ENOMEM]");
44663 return NULL;
44664 }
44665 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44666
44667 switch (cookie->def->type) {
44668 case FSCACHE_COOKIE_TYPE_INDEX:
44669 - fscache_stat(&fscache_n_cookie_index);
44670 + fscache_stat_unchecked(&fscache_n_cookie_index);
44671 break;
44672 case FSCACHE_COOKIE_TYPE_DATAFILE:
44673 - fscache_stat(&fscache_n_cookie_data);
44674 + fscache_stat_unchecked(&fscache_n_cookie_data);
44675 break;
44676 default:
44677 - fscache_stat(&fscache_n_cookie_special);
44678 + fscache_stat_unchecked(&fscache_n_cookie_special);
44679 break;
44680 }
44681
44682 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44683 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44684 atomic_dec(&parent->n_children);
44685 __fscache_cookie_put(cookie);
44686 - fscache_stat(&fscache_n_acquires_nobufs);
44687 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44688 _leave(" = NULL");
44689 return NULL;
44690 }
44691 }
44692
44693 - fscache_stat(&fscache_n_acquires_ok);
44694 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44695 _leave(" = %p", cookie);
44696 return cookie;
44697 }
44698 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44699 cache = fscache_select_cache_for_object(cookie->parent);
44700 if (!cache) {
44701 up_read(&fscache_addremove_sem);
44702 - fscache_stat(&fscache_n_acquires_no_cache);
44703 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44704 _leave(" = -ENOMEDIUM [no cache]");
44705 return -ENOMEDIUM;
44706 }
44707 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44708 object = cache->ops->alloc_object(cache, cookie);
44709 fscache_stat_d(&fscache_n_cop_alloc_object);
44710 if (IS_ERR(object)) {
44711 - fscache_stat(&fscache_n_object_no_alloc);
44712 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44713 ret = PTR_ERR(object);
44714 goto error;
44715 }
44716
44717 - fscache_stat(&fscache_n_object_alloc);
44718 + fscache_stat_unchecked(&fscache_n_object_alloc);
44719
44720 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44721
44722 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44723 struct fscache_object *object;
44724 struct hlist_node *_p;
44725
44726 - fscache_stat(&fscache_n_updates);
44727 + fscache_stat_unchecked(&fscache_n_updates);
44728
44729 if (!cookie) {
44730 - fscache_stat(&fscache_n_updates_null);
44731 + fscache_stat_unchecked(&fscache_n_updates_null);
44732 _leave(" [no cookie]");
44733 return;
44734 }
44735 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44736 struct fscache_object *object;
44737 unsigned long event;
44738
44739 - fscache_stat(&fscache_n_relinquishes);
44740 + fscache_stat_unchecked(&fscache_n_relinquishes);
44741 if (retire)
44742 - fscache_stat(&fscache_n_relinquishes_retire);
44743 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44744
44745 if (!cookie) {
44746 - fscache_stat(&fscache_n_relinquishes_null);
44747 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44748 _leave(" [no cookie]");
44749 return;
44750 }
44751 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44752
44753 /* wait for the cookie to finish being instantiated (or to fail) */
44754 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44755 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44756 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44757 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44758 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44759 }
44760 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44761 index f6aad48..88dcf26 100644
44762 --- a/fs/fscache/internal.h
44763 +++ b/fs/fscache/internal.h
44764 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44765 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44766 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44767
44768 -extern atomic_t fscache_n_op_pend;
44769 -extern atomic_t fscache_n_op_run;
44770 -extern atomic_t fscache_n_op_enqueue;
44771 -extern atomic_t fscache_n_op_deferred_release;
44772 -extern atomic_t fscache_n_op_release;
44773 -extern atomic_t fscache_n_op_gc;
44774 -extern atomic_t fscache_n_op_cancelled;
44775 -extern atomic_t fscache_n_op_rejected;
44776 -
44777 -extern atomic_t fscache_n_attr_changed;
44778 -extern atomic_t fscache_n_attr_changed_ok;
44779 -extern atomic_t fscache_n_attr_changed_nobufs;
44780 -extern atomic_t fscache_n_attr_changed_nomem;
44781 -extern atomic_t fscache_n_attr_changed_calls;
44782 -
44783 -extern atomic_t fscache_n_allocs;
44784 -extern atomic_t fscache_n_allocs_ok;
44785 -extern atomic_t fscache_n_allocs_wait;
44786 -extern atomic_t fscache_n_allocs_nobufs;
44787 -extern atomic_t fscache_n_allocs_intr;
44788 -extern atomic_t fscache_n_allocs_object_dead;
44789 -extern atomic_t fscache_n_alloc_ops;
44790 -extern atomic_t fscache_n_alloc_op_waits;
44791 -
44792 -extern atomic_t fscache_n_retrievals;
44793 -extern atomic_t fscache_n_retrievals_ok;
44794 -extern atomic_t fscache_n_retrievals_wait;
44795 -extern atomic_t fscache_n_retrievals_nodata;
44796 -extern atomic_t fscache_n_retrievals_nobufs;
44797 -extern atomic_t fscache_n_retrievals_intr;
44798 -extern atomic_t fscache_n_retrievals_nomem;
44799 -extern atomic_t fscache_n_retrievals_object_dead;
44800 -extern atomic_t fscache_n_retrieval_ops;
44801 -extern atomic_t fscache_n_retrieval_op_waits;
44802 -
44803 -extern atomic_t fscache_n_stores;
44804 -extern atomic_t fscache_n_stores_ok;
44805 -extern atomic_t fscache_n_stores_again;
44806 -extern atomic_t fscache_n_stores_nobufs;
44807 -extern atomic_t fscache_n_stores_oom;
44808 -extern atomic_t fscache_n_store_ops;
44809 -extern atomic_t fscache_n_store_calls;
44810 -extern atomic_t fscache_n_store_pages;
44811 -extern atomic_t fscache_n_store_radix_deletes;
44812 -extern atomic_t fscache_n_store_pages_over_limit;
44813 -
44814 -extern atomic_t fscache_n_store_vmscan_not_storing;
44815 -extern atomic_t fscache_n_store_vmscan_gone;
44816 -extern atomic_t fscache_n_store_vmscan_busy;
44817 -extern atomic_t fscache_n_store_vmscan_cancelled;
44818 -
44819 -extern atomic_t fscache_n_marks;
44820 -extern atomic_t fscache_n_uncaches;
44821 -
44822 -extern atomic_t fscache_n_acquires;
44823 -extern atomic_t fscache_n_acquires_null;
44824 -extern atomic_t fscache_n_acquires_no_cache;
44825 -extern atomic_t fscache_n_acquires_ok;
44826 -extern atomic_t fscache_n_acquires_nobufs;
44827 -extern atomic_t fscache_n_acquires_oom;
44828 -
44829 -extern atomic_t fscache_n_updates;
44830 -extern atomic_t fscache_n_updates_null;
44831 -extern atomic_t fscache_n_updates_run;
44832 -
44833 -extern atomic_t fscache_n_relinquishes;
44834 -extern atomic_t fscache_n_relinquishes_null;
44835 -extern atomic_t fscache_n_relinquishes_waitcrt;
44836 -extern atomic_t fscache_n_relinquishes_retire;
44837 -
44838 -extern atomic_t fscache_n_cookie_index;
44839 -extern atomic_t fscache_n_cookie_data;
44840 -extern atomic_t fscache_n_cookie_special;
44841 -
44842 -extern atomic_t fscache_n_object_alloc;
44843 -extern atomic_t fscache_n_object_no_alloc;
44844 -extern atomic_t fscache_n_object_lookups;
44845 -extern atomic_t fscache_n_object_lookups_negative;
44846 -extern atomic_t fscache_n_object_lookups_positive;
44847 -extern atomic_t fscache_n_object_lookups_timed_out;
44848 -extern atomic_t fscache_n_object_created;
44849 -extern atomic_t fscache_n_object_avail;
44850 -extern atomic_t fscache_n_object_dead;
44851 -
44852 -extern atomic_t fscache_n_checkaux_none;
44853 -extern atomic_t fscache_n_checkaux_okay;
44854 -extern atomic_t fscache_n_checkaux_update;
44855 -extern atomic_t fscache_n_checkaux_obsolete;
44856 +extern atomic_unchecked_t fscache_n_op_pend;
44857 +extern atomic_unchecked_t fscache_n_op_run;
44858 +extern atomic_unchecked_t fscache_n_op_enqueue;
44859 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44860 +extern atomic_unchecked_t fscache_n_op_release;
44861 +extern atomic_unchecked_t fscache_n_op_gc;
44862 +extern atomic_unchecked_t fscache_n_op_cancelled;
44863 +extern atomic_unchecked_t fscache_n_op_rejected;
44864 +
44865 +extern atomic_unchecked_t fscache_n_attr_changed;
44866 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44867 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44868 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44869 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44870 +
44871 +extern atomic_unchecked_t fscache_n_allocs;
44872 +extern atomic_unchecked_t fscache_n_allocs_ok;
44873 +extern atomic_unchecked_t fscache_n_allocs_wait;
44874 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44875 +extern atomic_unchecked_t fscache_n_allocs_intr;
44876 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44877 +extern atomic_unchecked_t fscache_n_alloc_ops;
44878 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44879 +
44880 +extern atomic_unchecked_t fscache_n_retrievals;
44881 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44882 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44883 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44884 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44885 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44886 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44887 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44888 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44889 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44890 +
44891 +extern atomic_unchecked_t fscache_n_stores;
44892 +extern atomic_unchecked_t fscache_n_stores_ok;
44893 +extern atomic_unchecked_t fscache_n_stores_again;
44894 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44895 +extern atomic_unchecked_t fscache_n_stores_oom;
44896 +extern atomic_unchecked_t fscache_n_store_ops;
44897 +extern atomic_unchecked_t fscache_n_store_calls;
44898 +extern atomic_unchecked_t fscache_n_store_pages;
44899 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44900 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44901 +
44902 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44903 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44904 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44905 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44906 +
44907 +extern atomic_unchecked_t fscache_n_marks;
44908 +extern atomic_unchecked_t fscache_n_uncaches;
44909 +
44910 +extern atomic_unchecked_t fscache_n_acquires;
44911 +extern atomic_unchecked_t fscache_n_acquires_null;
44912 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44913 +extern atomic_unchecked_t fscache_n_acquires_ok;
44914 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44915 +extern atomic_unchecked_t fscache_n_acquires_oom;
44916 +
44917 +extern atomic_unchecked_t fscache_n_updates;
44918 +extern atomic_unchecked_t fscache_n_updates_null;
44919 +extern atomic_unchecked_t fscache_n_updates_run;
44920 +
44921 +extern atomic_unchecked_t fscache_n_relinquishes;
44922 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44923 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44924 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44925 +
44926 +extern atomic_unchecked_t fscache_n_cookie_index;
44927 +extern atomic_unchecked_t fscache_n_cookie_data;
44928 +extern atomic_unchecked_t fscache_n_cookie_special;
44929 +
44930 +extern atomic_unchecked_t fscache_n_object_alloc;
44931 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44932 +extern atomic_unchecked_t fscache_n_object_lookups;
44933 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44934 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44935 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44936 +extern atomic_unchecked_t fscache_n_object_created;
44937 +extern atomic_unchecked_t fscache_n_object_avail;
44938 +extern atomic_unchecked_t fscache_n_object_dead;
44939 +
44940 +extern atomic_unchecked_t fscache_n_checkaux_none;
44941 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44942 +extern atomic_unchecked_t fscache_n_checkaux_update;
44943 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44944
44945 extern atomic_t fscache_n_cop_alloc_object;
44946 extern atomic_t fscache_n_cop_lookup_object;
44947 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44948 atomic_inc(stat);
44949 }
44950
44951 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44952 +{
44953 + atomic_inc_unchecked(stat);
44954 +}
44955 +
44956 static inline void fscache_stat_d(atomic_t *stat)
44957 {
44958 atomic_dec(stat);
44959 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44960
44961 #define __fscache_stat(stat) (NULL)
44962 #define fscache_stat(stat) do {} while (0)
44963 +#define fscache_stat_unchecked(stat) do {} while (0)
44964 #define fscache_stat_d(stat) do {} while (0)
44965 #endif
44966
44967 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44968 index b6b897c..0ffff9c 100644
44969 --- a/fs/fscache/object.c
44970 +++ b/fs/fscache/object.c
44971 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44972 /* update the object metadata on disk */
44973 case FSCACHE_OBJECT_UPDATING:
44974 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44975 - fscache_stat(&fscache_n_updates_run);
44976 + fscache_stat_unchecked(&fscache_n_updates_run);
44977 fscache_stat(&fscache_n_cop_update_object);
44978 object->cache->ops->update_object(object);
44979 fscache_stat_d(&fscache_n_cop_update_object);
44980 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44981 spin_lock(&object->lock);
44982 object->state = FSCACHE_OBJECT_DEAD;
44983 spin_unlock(&object->lock);
44984 - fscache_stat(&fscache_n_object_dead);
44985 + fscache_stat_unchecked(&fscache_n_object_dead);
44986 goto terminal_transit;
44987
44988 /* handle the parent cache of this object being withdrawn from
44989 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44990 spin_lock(&object->lock);
44991 object->state = FSCACHE_OBJECT_DEAD;
44992 spin_unlock(&object->lock);
44993 - fscache_stat(&fscache_n_object_dead);
44994 + fscache_stat_unchecked(&fscache_n_object_dead);
44995 goto terminal_transit;
44996
44997 /* complain about the object being woken up once it is
44998 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44999 parent->cookie->def->name, cookie->def->name,
45000 object->cache->tag->name);
45001
45002 - fscache_stat(&fscache_n_object_lookups);
45003 + fscache_stat_unchecked(&fscache_n_object_lookups);
45004 fscache_stat(&fscache_n_cop_lookup_object);
45005 ret = object->cache->ops->lookup_object(object);
45006 fscache_stat_d(&fscache_n_cop_lookup_object);
45007 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45008 if (ret == -ETIMEDOUT) {
45009 /* probably stuck behind another object, so move this one to
45010 * the back of the queue */
45011 - fscache_stat(&fscache_n_object_lookups_timed_out);
45012 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45013 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45014 }
45015
45016 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45017
45018 spin_lock(&object->lock);
45019 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45020 - fscache_stat(&fscache_n_object_lookups_negative);
45021 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45022
45023 /* transit here to allow write requests to begin stacking up
45024 * and read requests to begin returning ENODATA */
45025 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45026 * result, in which case there may be data available */
45027 spin_lock(&object->lock);
45028 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45029 - fscache_stat(&fscache_n_object_lookups_positive);
45030 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45031
45032 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45033
45034 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45035 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45036 } else {
45037 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45038 - fscache_stat(&fscache_n_object_created);
45039 + fscache_stat_unchecked(&fscache_n_object_created);
45040
45041 object->state = FSCACHE_OBJECT_AVAILABLE;
45042 spin_unlock(&object->lock);
45043 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45044 fscache_enqueue_dependents(object);
45045
45046 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45047 - fscache_stat(&fscache_n_object_avail);
45048 + fscache_stat_unchecked(&fscache_n_object_avail);
45049
45050 _leave("");
45051 }
45052 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45053 enum fscache_checkaux result;
45054
45055 if (!object->cookie->def->check_aux) {
45056 - fscache_stat(&fscache_n_checkaux_none);
45057 + fscache_stat_unchecked(&fscache_n_checkaux_none);
45058 return FSCACHE_CHECKAUX_OKAY;
45059 }
45060
45061 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45062 switch (result) {
45063 /* entry okay as is */
45064 case FSCACHE_CHECKAUX_OKAY:
45065 - fscache_stat(&fscache_n_checkaux_okay);
45066 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45067 break;
45068
45069 /* entry requires update */
45070 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45071 - fscache_stat(&fscache_n_checkaux_update);
45072 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45073 break;
45074
45075 /* entry requires deletion */
45076 case FSCACHE_CHECKAUX_OBSOLETE:
45077 - fscache_stat(&fscache_n_checkaux_obsolete);
45078 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45079 break;
45080
45081 default:
45082 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45083 index 30afdfa..2256596 100644
45084 --- a/fs/fscache/operation.c
45085 +++ b/fs/fscache/operation.c
45086 @@ -17,7 +17,7 @@
45087 #include <linux/slab.h>
45088 #include "internal.h"
45089
45090 -atomic_t fscache_op_debug_id;
45091 +atomic_unchecked_t fscache_op_debug_id;
45092 EXPORT_SYMBOL(fscache_op_debug_id);
45093
45094 /**
45095 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45096 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45097 ASSERTCMP(atomic_read(&op->usage), >, 0);
45098
45099 - fscache_stat(&fscache_n_op_enqueue);
45100 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45101 switch (op->flags & FSCACHE_OP_TYPE) {
45102 case FSCACHE_OP_ASYNC:
45103 _debug("queue async");
45104 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45105 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45106 if (op->processor)
45107 fscache_enqueue_operation(op);
45108 - fscache_stat(&fscache_n_op_run);
45109 + fscache_stat_unchecked(&fscache_n_op_run);
45110 }
45111
45112 /*
45113 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45114 if (object->n_ops > 1) {
45115 atomic_inc(&op->usage);
45116 list_add_tail(&op->pend_link, &object->pending_ops);
45117 - fscache_stat(&fscache_n_op_pend);
45118 + fscache_stat_unchecked(&fscache_n_op_pend);
45119 } else if (!list_empty(&object->pending_ops)) {
45120 atomic_inc(&op->usage);
45121 list_add_tail(&op->pend_link, &object->pending_ops);
45122 - fscache_stat(&fscache_n_op_pend);
45123 + fscache_stat_unchecked(&fscache_n_op_pend);
45124 fscache_start_operations(object);
45125 } else {
45126 ASSERTCMP(object->n_in_progress, ==, 0);
45127 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45128 object->n_exclusive++; /* reads and writes must wait */
45129 atomic_inc(&op->usage);
45130 list_add_tail(&op->pend_link, &object->pending_ops);
45131 - fscache_stat(&fscache_n_op_pend);
45132 + fscache_stat_unchecked(&fscache_n_op_pend);
45133 ret = 0;
45134 } else {
45135 /* not allowed to submit ops in any other state */
45136 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45137 if (object->n_exclusive > 0) {
45138 atomic_inc(&op->usage);
45139 list_add_tail(&op->pend_link, &object->pending_ops);
45140 - fscache_stat(&fscache_n_op_pend);
45141 + fscache_stat_unchecked(&fscache_n_op_pend);
45142 } else if (!list_empty(&object->pending_ops)) {
45143 atomic_inc(&op->usage);
45144 list_add_tail(&op->pend_link, &object->pending_ops);
45145 - fscache_stat(&fscache_n_op_pend);
45146 + fscache_stat_unchecked(&fscache_n_op_pend);
45147 fscache_start_operations(object);
45148 } else {
45149 ASSERTCMP(object->n_exclusive, ==, 0);
45150 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45151 object->n_ops++;
45152 atomic_inc(&op->usage);
45153 list_add_tail(&op->pend_link, &object->pending_ops);
45154 - fscache_stat(&fscache_n_op_pend);
45155 + fscache_stat_unchecked(&fscache_n_op_pend);
45156 ret = 0;
45157 } else if (object->state == FSCACHE_OBJECT_DYING ||
45158 object->state == FSCACHE_OBJECT_LC_DYING ||
45159 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45160 - fscache_stat(&fscache_n_op_rejected);
45161 + fscache_stat_unchecked(&fscache_n_op_rejected);
45162 ret = -ENOBUFS;
45163 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45164 fscache_report_unexpected_submission(object, op, ostate);
45165 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45166
45167 ret = -EBUSY;
45168 if (!list_empty(&op->pend_link)) {
45169 - fscache_stat(&fscache_n_op_cancelled);
45170 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45171 list_del_init(&op->pend_link);
45172 object->n_ops--;
45173 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45174 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45175 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45176 BUG();
45177
45178 - fscache_stat(&fscache_n_op_release);
45179 + fscache_stat_unchecked(&fscache_n_op_release);
45180
45181 if (op->release) {
45182 op->release(op);
45183 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45184 * lock, and defer it otherwise */
45185 if (!spin_trylock(&object->lock)) {
45186 _debug("defer put");
45187 - fscache_stat(&fscache_n_op_deferred_release);
45188 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45189
45190 cache = object->cache;
45191 spin_lock(&cache->op_gc_list_lock);
45192 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45193
45194 _debug("GC DEFERRED REL OBJ%x OP%x",
45195 object->debug_id, op->debug_id);
45196 - fscache_stat(&fscache_n_op_gc);
45197 + fscache_stat_unchecked(&fscache_n_op_gc);
45198
45199 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45200
45201 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45202 index 3f7a59b..cf196cc 100644
45203 --- a/fs/fscache/page.c
45204 +++ b/fs/fscache/page.c
45205 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45206 val = radix_tree_lookup(&cookie->stores, page->index);
45207 if (!val) {
45208 rcu_read_unlock();
45209 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45210 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45211 __fscache_uncache_page(cookie, page);
45212 return true;
45213 }
45214 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45215 spin_unlock(&cookie->stores_lock);
45216
45217 if (xpage) {
45218 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45219 - fscache_stat(&fscache_n_store_radix_deletes);
45220 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45221 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45222 ASSERTCMP(xpage, ==, page);
45223 } else {
45224 - fscache_stat(&fscache_n_store_vmscan_gone);
45225 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45226 }
45227
45228 wake_up_bit(&cookie->flags, 0);
45229 @@ -107,7 +107,7 @@ page_busy:
45230 /* we might want to wait here, but that could deadlock the allocator as
45231 * the work threads writing to the cache may all end up sleeping
45232 * on memory allocation */
45233 - fscache_stat(&fscache_n_store_vmscan_busy);
45234 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45235 return false;
45236 }
45237 EXPORT_SYMBOL(__fscache_maybe_release_page);
45238 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45239 FSCACHE_COOKIE_STORING_TAG);
45240 if (!radix_tree_tag_get(&cookie->stores, page->index,
45241 FSCACHE_COOKIE_PENDING_TAG)) {
45242 - fscache_stat(&fscache_n_store_radix_deletes);
45243 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45244 xpage = radix_tree_delete(&cookie->stores, page->index);
45245 }
45246 spin_unlock(&cookie->stores_lock);
45247 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45248
45249 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45250
45251 - fscache_stat(&fscache_n_attr_changed_calls);
45252 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45253
45254 if (fscache_object_is_active(object)) {
45255 fscache_stat(&fscache_n_cop_attr_changed);
45256 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45257
45258 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45259
45260 - fscache_stat(&fscache_n_attr_changed);
45261 + fscache_stat_unchecked(&fscache_n_attr_changed);
45262
45263 op = kzalloc(sizeof(*op), GFP_KERNEL);
45264 if (!op) {
45265 - fscache_stat(&fscache_n_attr_changed_nomem);
45266 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45267 _leave(" = -ENOMEM");
45268 return -ENOMEM;
45269 }
45270 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45271 if (fscache_submit_exclusive_op(object, op) < 0)
45272 goto nobufs;
45273 spin_unlock(&cookie->lock);
45274 - fscache_stat(&fscache_n_attr_changed_ok);
45275 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45276 fscache_put_operation(op);
45277 _leave(" = 0");
45278 return 0;
45279 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45280 nobufs:
45281 spin_unlock(&cookie->lock);
45282 kfree(op);
45283 - fscache_stat(&fscache_n_attr_changed_nobufs);
45284 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45285 _leave(" = %d", -ENOBUFS);
45286 return -ENOBUFS;
45287 }
45288 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45289 /* allocate a retrieval operation and attempt to submit it */
45290 op = kzalloc(sizeof(*op), GFP_NOIO);
45291 if (!op) {
45292 - fscache_stat(&fscache_n_retrievals_nomem);
45293 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45294 return NULL;
45295 }
45296
45297 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45298 return 0;
45299 }
45300
45301 - fscache_stat(&fscache_n_retrievals_wait);
45302 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45303
45304 jif = jiffies;
45305 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45306 fscache_wait_bit_interruptible,
45307 TASK_INTERRUPTIBLE) != 0) {
45308 - fscache_stat(&fscache_n_retrievals_intr);
45309 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45310 _leave(" = -ERESTARTSYS");
45311 return -ERESTARTSYS;
45312 }
45313 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45314 */
45315 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45316 struct fscache_retrieval *op,
45317 - atomic_t *stat_op_waits,
45318 - atomic_t *stat_object_dead)
45319 + atomic_unchecked_t *stat_op_waits,
45320 + atomic_unchecked_t *stat_object_dead)
45321 {
45322 int ret;
45323
45324 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45325 goto check_if_dead;
45326
45327 _debug(">>> WT");
45328 - fscache_stat(stat_op_waits);
45329 + fscache_stat_unchecked(stat_op_waits);
45330 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45331 fscache_wait_bit_interruptible,
45332 TASK_INTERRUPTIBLE) < 0) {
45333 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45334
45335 check_if_dead:
45336 if (unlikely(fscache_object_is_dead(object))) {
45337 - fscache_stat(stat_object_dead);
45338 + fscache_stat_unchecked(stat_object_dead);
45339 return -ENOBUFS;
45340 }
45341 return 0;
45342 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45343
45344 _enter("%p,%p,,,", cookie, page);
45345
45346 - fscache_stat(&fscache_n_retrievals);
45347 + fscache_stat_unchecked(&fscache_n_retrievals);
45348
45349 if (hlist_empty(&cookie->backing_objects))
45350 goto nobufs;
45351 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45352 goto nobufs_unlock;
45353 spin_unlock(&cookie->lock);
45354
45355 - fscache_stat(&fscache_n_retrieval_ops);
45356 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45357
45358 /* pin the netfs read context in case we need to do the actual netfs
45359 * read because we've encountered a cache read failure */
45360 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45361
45362 error:
45363 if (ret == -ENOMEM)
45364 - fscache_stat(&fscache_n_retrievals_nomem);
45365 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45366 else if (ret == -ERESTARTSYS)
45367 - fscache_stat(&fscache_n_retrievals_intr);
45368 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45369 else if (ret == -ENODATA)
45370 - fscache_stat(&fscache_n_retrievals_nodata);
45371 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45372 else if (ret < 0)
45373 - fscache_stat(&fscache_n_retrievals_nobufs);
45374 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45375 else
45376 - fscache_stat(&fscache_n_retrievals_ok);
45377 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45378
45379 fscache_put_retrieval(op);
45380 _leave(" = %d", ret);
45381 @@ -429,7 +429,7 @@ nobufs_unlock:
45382 spin_unlock(&cookie->lock);
45383 kfree(op);
45384 nobufs:
45385 - fscache_stat(&fscache_n_retrievals_nobufs);
45386 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45387 _leave(" = -ENOBUFS");
45388 return -ENOBUFS;
45389 }
45390 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45391
45392 _enter("%p,,%d,,,", cookie, *nr_pages);
45393
45394 - fscache_stat(&fscache_n_retrievals);
45395 + fscache_stat_unchecked(&fscache_n_retrievals);
45396
45397 if (hlist_empty(&cookie->backing_objects))
45398 goto nobufs;
45399 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45400 goto nobufs_unlock;
45401 spin_unlock(&cookie->lock);
45402
45403 - fscache_stat(&fscache_n_retrieval_ops);
45404 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45405
45406 /* pin the netfs read context in case we need to do the actual netfs
45407 * read because we've encountered a cache read failure */
45408 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45409
45410 error:
45411 if (ret == -ENOMEM)
45412 - fscache_stat(&fscache_n_retrievals_nomem);
45413 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45414 else if (ret == -ERESTARTSYS)
45415 - fscache_stat(&fscache_n_retrievals_intr);
45416 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45417 else if (ret == -ENODATA)
45418 - fscache_stat(&fscache_n_retrievals_nodata);
45419 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45420 else if (ret < 0)
45421 - fscache_stat(&fscache_n_retrievals_nobufs);
45422 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45423 else
45424 - fscache_stat(&fscache_n_retrievals_ok);
45425 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45426
45427 fscache_put_retrieval(op);
45428 _leave(" = %d", ret);
45429 @@ -545,7 +545,7 @@ nobufs_unlock:
45430 spin_unlock(&cookie->lock);
45431 kfree(op);
45432 nobufs:
45433 - fscache_stat(&fscache_n_retrievals_nobufs);
45434 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45435 _leave(" = -ENOBUFS");
45436 return -ENOBUFS;
45437 }
45438 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45439
45440 _enter("%p,%p,,,", cookie, page);
45441
45442 - fscache_stat(&fscache_n_allocs);
45443 + fscache_stat_unchecked(&fscache_n_allocs);
45444
45445 if (hlist_empty(&cookie->backing_objects))
45446 goto nobufs;
45447 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45448 goto nobufs_unlock;
45449 spin_unlock(&cookie->lock);
45450
45451 - fscache_stat(&fscache_n_alloc_ops);
45452 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45453
45454 ret = fscache_wait_for_retrieval_activation(
45455 object, op,
45456 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45457
45458 error:
45459 if (ret == -ERESTARTSYS)
45460 - fscache_stat(&fscache_n_allocs_intr);
45461 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45462 else if (ret < 0)
45463 - fscache_stat(&fscache_n_allocs_nobufs);
45464 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45465 else
45466 - fscache_stat(&fscache_n_allocs_ok);
45467 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45468
45469 fscache_put_retrieval(op);
45470 _leave(" = %d", ret);
45471 @@ -625,7 +625,7 @@ nobufs_unlock:
45472 spin_unlock(&cookie->lock);
45473 kfree(op);
45474 nobufs:
45475 - fscache_stat(&fscache_n_allocs_nobufs);
45476 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45477 _leave(" = -ENOBUFS");
45478 return -ENOBUFS;
45479 }
45480 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45481
45482 spin_lock(&cookie->stores_lock);
45483
45484 - fscache_stat(&fscache_n_store_calls);
45485 + fscache_stat_unchecked(&fscache_n_store_calls);
45486
45487 /* find a page to store */
45488 page = NULL;
45489 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45490 page = results[0];
45491 _debug("gang %d [%lx]", n, page->index);
45492 if (page->index > op->store_limit) {
45493 - fscache_stat(&fscache_n_store_pages_over_limit);
45494 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45495 goto superseded;
45496 }
45497
45498 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45499 spin_unlock(&cookie->stores_lock);
45500 spin_unlock(&object->lock);
45501
45502 - fscache_stat(&fscache_n_store_pages);
45503 + fscache_stat_unchecked(&fscache_n_store_pages);
45504 fscache_stat(&fscache_n_cop_write_page);
45505 ret = object->cache->ops->write_page(op, page);
45506 fscache_stat_d(&fscache_n_cop_write_page);
45507 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45508 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45509 ASSERT(PageFsCache(page));
45510
45511 - fscache_stat(&fscache_n_stores);
45512 + fscache_stat_unchecked(&fscache_n_stores);
45513
45514 op = kzalloc(sizeof(*op), GFP_NOIO);
45515 if (!op)
45516 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45517 spin_unlock(&cookie->stores_lock);
45518 spin_unlock(&object->lock);
45519
45520 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45521 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45522 op->store_limit = object->store_limit;
45523
45524 if (fscache_submit_op(object, &op->op) < 0)
45525 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45526
45527 spin_unlock(&cookie->lock);
45528 radix_tree_preload_end();
45529 - fscache_stat(&fscache_n_store_ops);
45530 - fscache_stat(&fscache_n_stores_ok);
45531 + fscache_stat_unchecked(&fscache_n_store_ops);
45532 + fscache_stat_unchecked(&fscache_n_stores_ok);
45533
45534 /* the work queue now carries its own ref on the object */
45535 fscache_put_operation(&op->op);
45536 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45537 return 0;
45538
45539 already_queued:
45540 - fscache_stat(&fscache_n_stores_again);
45541 + fscache_stat_unchecked(&fscache_n_stores_again);
45542 already_pending:
45543 spin_unlock(&cookie->stores_lock);
45544 spin_unlock(&object->lock);
45545 spin_unlock(&cookie->lock);
45546 radix_tree_preload_end();
45547 kfree(op);
45548 - fscache_stat(&fscache_n_stores_ok);
45549 + fscache_stat_unchecked(&fscache_n_stores_ok);
45550 _leave(" = 0");
45551 return 0;
45552
45553 @@ -851,14 +851,14 @@ nobufs:
45554 spin_unlock(&cookie->lock);
45555 radix_tree_preload_end();
45556 kfree(op);
45557 - fscache_stat(&fscache_n_stores_nobufs);
45558 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45559 _leave(" = -ENOBUFS");
45560 return -ENOBUFS;
45561
45562 nomem_free:
45563 kfree(op);
45564 nomem:
45565 - fscache_stat(&fscache_n_stores_oom);
45566 + fscache_stat_unchecked(&fscache_n_stores_oom);
45567 _leave(" = -ENOMEM");
45568 return -ENOMEM;
45569 }
45570 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45571 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45572 ASSERTCMP(page, !=, NULL);
45573
45574 - fscache_stat(&fscache_n_uncaches);
45575 + fscache_stat_unchecked(&fscache_n_uncaches);
45576
45577 /* cache withdrawal may beat us to it */
45578 if (!PageFsCache(page))
45579 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45580 unsigned long loop;
45581
45582 #ifdef CONFIG_FSCACHE_STATS
45583 - atomic_add(pagevec->nr, &fscache_n_marks);
45584 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45585 #endif
45586
45587 for (loop = 0; loop < pagevec->nr; loop++) {
45588 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45589 index 4765190..2a067f2 100644
45590 --- a/fs/fscache/stats.c
45591 +++ b/fs/fscache/stats.c
45592 @@ -18,95 +18,95 @@
45593 /*
45594 * operation counters
45595 */
45596 -atomic_t fscache_n_op_pend;
45597 -atomic_t fscache_n_op_run;
45598 -atomic_t fscache_n_op_enqueue;
45599 -atomic_t fscache_n_op_requeue;
45600 -atomic_t fscache_n_op_deferred_release;
45601 -atomic_t fscache_n_op_release;
45602 -atomic_t fscache_n_op_gc;
45603 -atomic_t fscache_n_op_cancelled;
45604 -atomic_t fscache_n_op_rejected;
45605 -
45606 -atomic_t fscache_n_attr_changed;
45607 -atomic_t fscache_n_attr_changed_ok;
45608 -atomic_t fscache_n_attr_changed_nobufs;
45609 -atomic_t fscache_n_attr_changed_nomem;
45610 -atomic_t fscache_n_attr_changed_calls;
45611 -
45612 -atomic_t fscache_n_allocs;
45613 -atomic_t fscache_n_allocs_ok;
45614 -atomic_t fscache_n_allocs_wait;
45615 -atomic_t fscache_n_allocs_nobufs;
45616 -atomic_t fscache_n_allocs_intr;
45617 -atomic_t fscache_n_allocs_object_dead;
45618 -atomic_t fscache_n_alloc_ops;
45619 -atomic_t fscache_n_alloc_op_waits;
45620 -
45621 -atomic_t fscache_n_retrievals;
45622 -atomic_t fscache_n_retrievals_ok;
45623 -atomic_t fscache_n_retrievals_wait;
45624 -atomic_t fscache_n_retrievals_nodata;
45625 -atomic_t fscache_n_retrievals_nobufs;
45626 -atomic_t fscache_n_retrievals_intr;
45627 -atomic_t fscache_n_retrievals_nomem;
45628 -atomic_t fscache_n_retrievals_object_dead;
45629 -atomic_t fscache_n_retrieval_ops;
45630 -atomic_t fscache_n_retrieval_op_waits;
45631 -
45632 -atomic_t fscache_n_stores;
45633 -atomic_t fscache_n_stores_ok;
45634 -atomic_t fscache_n_stores_again;
45635 -atomic_t fscache_n_stores_nobufs;
45636 -atomic_t fscache_n_stores_oom;
45637 -atomic_t fscache_n_store_ops;
45638 -atomic_t fscache_n_store_calls;
45639 -atomic_t fscache_n_store_pages;
45640 -atomic_t fscache_n_store_radix_deletes;
45641 -atomic_t fscache_n_store_pages_over_limit;
45642 -
45643 -atomic_t fscache_n_store_vmscan_not_storing;
45644 -atomic_t fscache_n_store_vmscan_gone;
45645 -atomic_t fscache_n_store_vmscan_busy;
45646 -atomic_t fscache_n_store_vmscan_cancelled;
45647 -
45648 -atomic_t fscache_n_marks;
45649 -atomic_t fscache_n_uncaches;
45650 -
45651 -atomic_t fscache_n_acquires;
45652 -atomic_t fscache_n_acquires_null;
45653 -atomic_t fscache_n_acquires_no_cache;
45654 -atomic_t fscache_n_acquires_ok;
45655 -atomic_t fscache_n_acquires_nobufs;
45656 -atomic_t fscache_n_acquires_oom;
45657 -
45658 -atomic_t fscache_n_updates;
45659 -atomic_t fscache_n_updates_null;
45660 -atomic_t fscache_n_updates_run;
45661 -
45662 -atomic_t fscache_n_relinquishes;
45663 -atomic_t fscache_n_relinquishes_null;
45664 -atomic_t fscache_n_relinquishes_waitcrt;
45665 -atomic_t fscache_n_relinquishes_retire;
45666 -
45667 -atomic_t fscache_n_cookie_index;
45668 -atomic_t fscache_n_cookie_data;
45669 -atomic_t fscache_n_cookie_special;
45670 -
45671 -atomic_t fscache_n_object_alloc;
45672 -atomic_t fscache_n_object_no_alloc;
45673 -atomic_t fscache_n_object_lookups;
45674 -atomic_t fscache_n_object_lookups_negative;
45675 -atomic_t fscache_n_object_lookups_positive;
45676 -atomic_t fscache_n_object_lookups_timed_out;
45677 -atomic_t fscache_n_object_created;
45678 -atomic_t fscache_n_object_avail;
45679 -atomic_t fscache_n_object_dead;
45680 -
45681 -atomic_t fscache_n_checkaux_none;
45682 -atomic_t fscache_n_checkaux_okay;
45683 -atomic_t fscache_n_checkaux_update;
45684 -atomic_t fscache_n_checkaux_obsolete;
45685 +atomic_unchecked_t fscache_n_op_pend;
45686 +atomic_unchecked_t fscache_n_op_run;
45687 +atomic_unchecked_t fscache_n_op_enqueue;
45688 +atomic_unchecked_t fscache_n_op_requeue;
45689 +atomic_unchecked_t fscache_n_op_deferred_release;
45690 +atomic_unchecked_t fscache_n_op_release;
45691 +atomic_unchecked_t fscache_n_op_gc;
45692 +atomic_unchecked_t fscache_n_op_cancelled;
45693 +atomic_unchecked_t fscache_n_op_rejected;
45694 +
45695 +atomic_unchecked_t fscache_n_attr_changed;
45696 +atomic_unchecked_t fscache_n_attr_changed_ok;
45697 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45698 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45699 +atomic_unchecked_t fscache_n_attr_changed_calls;
45700 +
45701 +atomic_unchecked_t fscache_n_allocs;
45702 +atomic_unchecked_t fscache_n_allocs_ok;
45703 +atomic_unchecked_t fscache_n_allocs_wait;
45704 +atomic_unchecked_t fscache_n_allocs_nobufs;
45705 +atomic_unchecked_t fscache_n_allocs_intr;
45706 +atomic_unchecked_t fscache_n_allocs_object_dead;
45707 +atomic_unchecked_t fscache_n_alloc_ops;
45708 +atomic_unchecked_t fscache_n_alloc_op_waits;
45709 +
45710 +atomic_unchecked_t fscache_n_retrievals;
45711 +atomic_unchecked_t fscache_n_retrievals_ok;
45712 +atomic_unchecked_t fscache_n_retrievals_wait;
45713 +atomic_unchecked_t fscache_n_retrievals_nodata;
45714 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45715 +atomic_unchecked_t fscache_n_retrievals_intr;
45716 +atomic_unchecked_t fscache_n_retrievals_nomem;
45717 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45718 +atomic_unchecked_t fscache_n_retrieval_ops;
45719 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45720 +
45721 +atomic_unchecked_t fscache_n_stores;
45722 +atomic_unchecked_t fscache_n_stores_ok;
45723 +atomic_unchecked_t fscache_n_stores_again;
45724 +atomic_unchecked_t fscache_n_stores_nobufs;
45725 +atomic_unchecked_t fscache_n_stores_oom;
45726 +atomic_unchecked_t fscache_n_store_ops;
45727 +atomic_unchecked_t fscache_n_store_calls;
45728 +atomic_unchecked_t fscache_n_store_pages;
45729 +atomic_unchecked_t fscache_n_store_radix_deletes;
45730 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45731 +
45732 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45733 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45734 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45735 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45736 +
45737 +atomic_unchecked_t fscache_n_marks;
45738 +atomic_unchecked_t fscache_n_uncaches;
45739 +
45740 +atomic_unchecked_t fscache_n_acquires;
45741 +atomic_unchecked_t fscache_n_acquires_null;
45742 +atomic_unchecked_t fscache_n_acquires_no_cache;
45743 +atomic_unchecked_t fscache_n_acquires_ok;
45744 +atomic_unchecked_t fscache_n_acquires_nobufs;
45745 +atomic_unchecked_t fscache_n_acquires_oom;
45746 +
45747 +atomic_unchecked_t fscache_n_updates;
45748 +atomic_unchecked_t fscache_n_updates_null;
45749 +atomic_unchecked_t fscache_n_updates_run;
45750 +
45751 +atomic_unchecked_t fscache_n_relinquishes;
45752 +atomic_unchecked_t fscache_n_relinquishes_null;
45753 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45754 +atomic_unchecked_t fscache_n_relinquishes_retire;
45755 +
45756 +atomic_unchecked_t fscache_n_cookie_index;
45757 +atomic_unchecked_t fscache_n_cookie_data;
45758 +atomic_unchecked_t fscache_n_cookie_special;
45759 +
45760 +atomic_unchecked_t fscache_n_object_alloc;
45761 +atomic_unchecked_t fscache_n_object_no_alloc;
45762 +atomic_unchecked_t fscache_n_object_lookups;
45763 +atomic_unchecked_t fscache_n_object_lookups_negative;
45764 +atomic_unchecked_t fscache_n_object_lookups_positive;
45765 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45766 +atomic_unchecked_t fscache_n_object_created;
45767 +atomic_unchecked_t fscache_n_object_avail;
45768 +atomic_unchecked_t fscache_n_object_dead;
45769 +
45770 +atomic_unchecked_t fscache_n_checkaux_none;
45771 +atomic_unchecked_t fscache_n_checkaux_okay;
45772 +atomic_unchecked_t fscache_n_checkaux_update;
45773 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45774
45775 atomic_t fscache_n_cop_alloc_object;
45776 atomic_t fscache_n_cop_lookup_object;
45777 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45778 seq_puts(m, "FS-Cache statistics\n");
45779
45780 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45781 - atomic_read(&fscache_n_cookie_index),
45782 - atomic_read(&fscache_n_cookie_data),
45783 - atomic_read(&fscache_n_cookie_special));
45784 + atomic_read_unchecked(&fscache_n_cookie_index),
45785 + atomic_read_unchecked(&fscache_n_cookie_data),
45786 + atomic_read_unchecked(&fscache_n_cookie_special));
45787
45788 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45789 - atomic_read(&fscache_n_object_alloc),
45790 - atomic_read(&fscache_n_object_no_alloc),
45791 - atomic_read(&fscache_n_object_avail),
45792 - atomic_read(&fscache_n_object_dead));
45793 + atomic_read_unchecked(&fscache_n_object_alloc),
45794 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45795 + atomic_read_unchecked(&fscache_n_object_avail),
45796 + atomic_read_unchecked(&fscache_n_object_dead));
45797 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45798 - atomic_read(&fscache_n_checkaux_none),
45799 - atomic_read(&fscache_n_checkaux_okay),
45800 - atomic_read(&fscache_n_checkaux_update),
45801 - atomic_read(&fscache_n_checkaux_obsolete));
45802 + atomic_read_unchecked(&fscache_n_checkaux_none),
45803 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45804 + atomic_read_unchecked(&fscache_n_checkaux_update),
45805 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45806
45807 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45808 - atomic_read(&fscache_n_marks),
45809 - atomic_read(&fscache_n_uncaches));
45810 + atomic_read_unchecked(&fscache_n_marks),
45811 + atomic_read_unchecked(&fscache_n_uncaches));
45812
45813 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45814 " oom=%u\n",
45815 - atomic_read(&fscache_n_acquires),
45816 - atomic_read(&fscache_n_acquires_null),
45817 - atomic_read(&fscache_n_acquires_no_cache),
45818 - atomic_read(&fscache_n_acquires_ok),
45819 - atomic_read(&fscache_n_acquires_nobufs),
45820 - atomic_read(&fscache_n_acquires_oom));
45821 + atomic_read_unchecked(&fscache_n_acquires),
45822 + atomic_read_unchecked(&fscache_n_acquires_null),
45823 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45824 + atomic_read_unchecked(&fscache_n_acquires_ok),
45825 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45826 + atomic_read_unchecked(&fscache_n_acquires_oom));
45827
45828 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45829 - atomic_read(&fscache_n_object_lookups),
45830 - atomic_read(&fscache_n_object_lookups_negative),
45831 - atomic_read(&fscache_n_object_lookups_positive),
45832 - atomic_read(&fscache_n_object_created),
45833 - atomic_read(&fscache_n_object_lookups_timed_out));
45834 + atomic_read_unchecked(&fscache_n_object_lookups),
45835 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45836 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45837 + atomic_read_unchecked(&fscache_n_object_created),
45838 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45839
45840 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45841 - atomic_read(&fscache_n_updates),
45842 - atomic_read(&fscache_n_updates_null),
45843 - atomic_read(&fscache_n_updates_run));
45844 + atomic_read_unchecked(&fscache_n_updates),
45845 + atomic_read_unchecked(&fscache_n_updates_null),
45846 + atomic_read_unchecked(&fscache_n_updates_run));
45847
45848 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45849 - atomic_read(&fscache_n_relinquishes),
45850 - atomic_read(&fscache_n_relinquishes_null),
45851 - atomic_read(&fscache_n_relinquishes_waitcrt),
45852 - atomic_read(&fscache_n_relinquishes_retire));
45853 + atomic_read_unchecked(&fscache_n_relinquishes),
45854 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45855 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45856 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45857
45858 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45859 - atomic_read(&fscache_n_attr_changed),
45860 - atomic_read(&fscache_n_attr_changed_ok),
45861 - atomic_read(&fscache_n_attr_changed_nobufs),
45862 - atomic_read(&fscache_n_attr_changed_nomem),
45863 - atomic_read(&fscache_n_attr_changed_calls));
45864 + atomic_read_unchecked(&fscache_n_attr_changed),
45865 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45866 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45867 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45868 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45869
45870 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45871 - atomic_read(&fscache_n_allocs),
45872 - atomic_read(&fscache_n_allocs_ok),
45873 - atomic_read(&fscache_n_allocs_wait),
45874 - atomic_read(&fscache_n_allocs_nobufs),
45875 - atomic_read(&fscache_n_allocs_intr));
45876 + atomic_read_unchecked(&fscache_n_allocs),
45877 + atomic_read_unchecked(&fscache_n_allocs_ok),
45878 + atomic_read_unchecked(&fscache_n_allocs_wait),
45879 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45880 + atomic_read_unchecked(&fscache_n_allocs_intr));
45881 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45882 - atomic_read(&fscache_n_alloc_ops),
45883 - atomic_read(&fscache_n_alloc_op_waits),
45884 - atomic_read(&fscache_n_allocs_object_dead));
45885 + atomic_read_unchecked(&fscache_n_alloc_ops),
45886 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45887 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45888
45889 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45890 " int=%u oom=%u\n",
45891 - atomic_read(&fscache_n_retrievals),
45892 - atomic_read(&fscache_n_retrievals_ok),
45893 - atomic_read(&fscache_n_retrievals_wait),
45894 - atomic_read(&fscache_n_retrievals_nodata),
45895 - atomic_read(&fscache_n_retrievals_nobufs),
45896 - atomic_read(&fscache_n_retrievals_intr),
45897 - atomic_read(&fscache_n_retrievals_nomem));
45898 + atomic_read_unchecked(&fscache_n_retrievals),
45899 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45900 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45901 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45902 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45903 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45904 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45905 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45906 - atomic_read(&fscache_n_retrieval_ops),
45907 - atomic_read(&fscache_n_retrieval_op_waits),
45908 - atomic_read(&fscache_n_retrievals_object_dead));
45909 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45910 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45911 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45912
45913 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45914 - atomic_read(&fscache_n_stores),
45915 - atomic_read(&fscache_n_stores_ok),
45916 - atomic_read(&fscache_n_stores_again),
45917 - atomic_read(&fscache_n_stores_nobufs),
45918 - atomic_read(&fscache_n_stores_oom));
45919 + atomic_read_unchecked(&fscache_n_stores),
45920 + atomic_read_unchecked(&fscache_n_stores_ok),
45921 + atomic_read_unchecked(&fscache_n_stores_again),
45922 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45923 + atomic_read_unchecked(&fscache_n_stores_oom));
45924 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45925 - atomic_read(&fscache_n_store_ops),
45926 - atomic_read(&fscache_n_store_calls),
45927 - atomic_read(&fscache_n_store_pages),
45928 - atomic_read(&fscache_n_store_radix_deletes),
45929 - atomic_read(&fscache_n_store_pages_over_limit));
45930 + atomic_read_unchecked(&fscache_n_store_ops),
45931 + atomic_read_unchecked(&fscache_n_store_calls),
45932 + atomic_read_unchecked(&fscache_n_store_pages),
45933 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45934 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45935
45936 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45937 - atomic_read(&fscache_n_store_vmscan_not_storing),
45938 - atomic_read(&fscache_n_store_vmscan_gone),
45939 - atomic_read(&fscache_n_store_vmscan_busy),
45940 - atomic_read(&fscache_n_store_vmscan_cancelled));
45941 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45942 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45943 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45944 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45945
45946 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45947 - atomic_read(&fscache_n_op_pend),
45948 - atomic_read(&fscache_n_op_run),
45949 - atomic_read(&fscache_n_op_enqueue),
45950 - atomic_read(&fscache_n_op_cancelled),
45951 - atomic_read(&fscache_n_op_rejected));
45952 + atomic_read_unchecked(&fscache_n_op_pend),
45953 + atomic_read_unchecked(&fscache_n_op_run),
45954 + atomic_read_unchecked(&fscache_n_op_enqueue),
45955 + atomic_read_unchecked(&fscache_n_op_cancelled),
45956 + atomic_read_unchecked(&fscache_n_op_rejected));
45957 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45958 - atomic_read(&fscache_n_op_deferred_release),
45959 - atomic_read(&fscache_n_op_release),
45960 - atomic_read(&fscache_n_op_gc));
45961 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45962 + atomic_read_unchecked(&fscache_n_op_release),
45963 + atomic_read_unchecked(&fscache_n_op_gc));
45964
45965 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45966 atomic_read(&fscache_n_cop_alloc_object),
45967 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45968 index b6cca47..ec782c3 100644
45969 --- a/fs/fuse/cuse.c
45970 +++ b/fs/fuse/cuse.c
45971 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
45972 INIT_LIST_HEAD(&cuse_conntbl[i]);
45973
45974 /* inherit and extend fuse_dev_operations */
45975 - cuse_channel_fops = fuse_dev_operations;
45976 - cuse_channel_fops.owner = THIS_MODULE;
45977 - cuse_channel_fops.open = cuse_channel_open;
45978 - cuse_channel_fops.release = cuse_channel_release;
45979 + pax_open_kernel();
45980 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45981 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45982 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45983 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45984 + pax_close_kernel();
45985
45986 cuse_class = class_create(THIS_MODULE, "cuse");
45987 if (IS_ERR(cuse_class))
45988 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45989 index 5cb8614..6865b11 100644
45990 --- a/fs/fuse/dev.c
45991 +++ b/fs/fuse/dev.c
45992 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45993 ret = 0;
45994 pipe_lock(pipe);
45995
45996 - if (!pipe->readers) {
45997 + if (!atomic_read(&pipe->readers)) {
45998 send_sig(SIGPIPE, current, 0);
45999 if (!ret)
46000 ret = -EPIPE;
46001 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46002 index 9f63e49..d8a64c0 100644
46003 --- a/fs/fuse/dir.c
46004 +++ b/fs/fuse/dir.c
46005 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
46006 return link;
46007 }
46008
46009 -static void free_link(char *link)
46010 +static void free_link(const char *link)
46011 {
46012 if (!IS_ERR(link))
46013 free_page((unsigned long) link);
46014 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46015 index 900cf98..3896726 100644
46016 --- a/fs/gfs2/inode.c
46017 +++ b/fs/gfs2/inode.c
46018 @@ -1517,7 +1517,7 @@ out:
46019
46020 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46021 {
46022 - char *s = nd_get_link(nd);
46023 + const char *s = nd_get_link(nd);
46024 if (!IS_ERR(s))
46025 kfree(s);
46026 }
46027 diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
46028 index 3ebc437..eb23952 100644
46029 --- a/fs/hfs/btree.c
46030 +++ b/fs/hfs/btree.c
46031 @@ -46,11 +46,27 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
46032 case HFS_EXT_CNID:
46033 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
46034 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
46035 +
46036 + if (HFS_I(tree->inode)->alloc_blocks >
46037 + HFS_I(tree->inode)->first_blocks) {
46038 + printk(KERN_ERR "hfs: invalid btree extent records\n");
46039 + unlock_new_inode(tree->inode);
46040 + goto free_inode;
46041 + }
46042 +
46043 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
46044 break;
46045 case HFS_CAT_CNID:
46046 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
46047 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
46048 +
46049 + if (!HFS_I(tree->inode)->first_blocks) {
46050 + printk(KERN_ERR "hfs: invalid btree extent records "
46051 + "(0 size).\n");
46052 + unlock_new_inode(tree->inode);
46053 + goto free_inode;
46054 + }
46055 +
46056 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
46057 break;
46058 default:
46059 @@ -59,11 +75,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
46060 }
46061 unlock_new_inode(tree->inode);
46062
46063 - if (!HFS_I(tree->inode)->first_blocks) {
46064 - printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
46065 - goto free_inode;
46066 - }
46067 -
46068 mapping = tree->inode->i_mapping;
46069 page = read_mapping_page(mapping, 0, NULL);
46070 if (IS_ERR(page))
46071 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
46072 index 4dfbfec..947c9c2 100644
46073 --- a/fs/hfsplus/catalog.c
46074 +++ b/fs/hfsplus/catalog.c
46075 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
46076 int err;
46077 u16 type;
46078
46079 + pax_track_stack();
46080 +
46081 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
46082 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
46083 if (err)
46084 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
46085 int entry_size;
46086 int err;
46087
46088 + pax_track_stack();
46089 +
46090 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
46091 str->name, cnid, inode->i_nlink);
46092 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
46093 @@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
46094 int entry_size, type;
46095 int err;
46096
46097 + pax_track_stack();
46098 +
46099 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
46100 cnid, src_dir->i_ino, src_name->name,
46101 dst_dir->i_ino, dst_name->name);
46102 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
46103 index 25b2443..09a3341 100644
46104 --- a/fs/hfsplus/dir.c
46105 +++ b/fs/hfsplus/dir.c
46106 @@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
46107 struct hfsplus_readdir_data *rd;
46108 u16 type;
46109
46110 + pax_track_stack();
46111 +
46112 if (filp->f_pos >= inode->i_size)
46113 return 0;
46114
46115 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
46116 index 4cc1e3a..ad0f70b 100644
46117 --- a/fs/hfsplus/inode.c
46118 +++ b/fs/hfsplus/inode.c
46119 @@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
46120 int res = 0;
46121 u16 type;
46122
46123 + pax_track_stack();
46124 +
46125 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
46126
46127 HFSPLUS_I(inode)->linkid = 0;
46128 @@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
46129 struct hfs_find_data fd;
46130 hfsplus_cat_entry entry;
46131
46132 + pax_track_stack();
46133 +
46134 if (HFSPLUS_IS_RSRC(inode))
46135 main_inode = HFSPLUS_I(inode)->rsrc_inode;
46136
46137 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
46138 index fbaa669..c548cd0 100644
46139 --- a/fs/hfsplus/ioctl.c
46140 +++ b/fs/hfsplus/ioctl.c
46141 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
46142 struct hfsplus_cat_file *file;
46143 int res;
46144
46145 + pax_track_stack();
46146 +
46147 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46148 return -EOPNOTSUPP;
46149
46150 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
46151 struct hfsplus_cat_file *file;
46152 ssize_t res = 0;
46153
46154 + pax_track_stack();
46155 +
46156 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46157 return -EOPNOTSUPP;
46158
46159 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
46160 index d24a9b6..dd9b3dd 100644
46161 --- a/fs/hfsplus/super.c
46162 +++ b/fs/hfsplus/super.c
46163 @@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
46164 u64 last_fs_block, last_fs_page;
46165 int err;
46166
46167 + pax_track_stack();
46168 +
46169 err = -EINVAL;
46170 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
46171 if (!sbi)
46172 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46173 index ec88953..cb5e98e 100644
46174 --- a/fs/hugetlbfs/inode.c
46175 +++ b/fs/hugetlbfs/inode.c
46176 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46177 .kill_sb = kill_litter_super,
46178 };
46179
46180 -static struct vfsmount *hugetlbfs_vfsmount;
46181 +struct vfsmount *hugetlbfs_vfsmount;
46182
46183 static int can_do_hugetlb_shm(void)
46184 {
46185 diff --git a/fs/inode.c b/fs/inode.c
46186 index ec79246..054c36a 100644
46187 --- a/fs/inode.c
46188 +++ b/fs/inode.c
46189 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
46190
46191 #ifdef CONFIG_SMP
46192 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46193 - static atomic_t shared_last_ino;
46194 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46195 + static atomic_unchecked_t shared_last_ino;
46196 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46197
46198 res = next - LAST_INO_BATCH;
46199 }
46200 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
46201 index f94fc48..3bb8d30 100644
46202 --- a/fs/jbd/checkpoint.c
46203 +++ b/fs/jbd/checkpoint.c
46204 @@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal)
46205 tid_t this_tid;
46206 int result;
46207
46208 + pax_track_stack();
46209 +
46210 jbd_debug(1, "Start checkpoint\n");
46211
46212 /*
46213 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
46214 index 16a5047..88ff6ca 100644
46215 --- a/fs/jffs2/compr_rtime.c
46216 +++ b/fs/jffs2/compr_rtime.c
46217 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
46218 int outpos = 0;
46219 int pos=0;
46220
46221 + pax_track_stack();
46222 +
46223 memset(positions,0,sizeof(positions));
46224
46225 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
46226 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
46227 int outpos = 0;
46228 int pos=0;
46229
46230 + pax_track_stack();
46231 +
46232 memset(positions,0,sizeof(positions));
46233
46234 while (outpos<destlen) {
46235 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
46236 index 9e7cec8..4713089 100644
46237 --- a/fs/jffs2/compr_rubin.c
46238 +++ b/fs/jffs2/compr_rubin.c
46239 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
46240 int ret;
46241 uint32_t mysrclen, mydstlen;
46242
46243 + pax_track_stack();
46244 +
46245 mysrclen = *sourcelen;
46246 mydstlen = *dstlen - 8;
46247
46248 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46249 index e513f19..2ab1351 100644
46250 --- a/fs/jffs2/erase.c
46251 +++ b/fs/jffs2/erase.c
46252 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46253 struct jffs2_unknown_node marker = {
46254 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46255 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46256 - .totlen = cpu_to_je32(c->cleanmarker_size)
46257 + .totlen = cpu_to_je32(c->cleanmarker_size),
46258 + .hdr_crc = cpu_to_je32(0)
46259 };
46260
46261 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46262 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46263 index 4515bea..178f2d6 100644
46264 --- a/fs/jffs2/wbuf.c
46265 +++ b/fs/jffs2/wbuf.c
46266 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46267 {
46268 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46269 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46270 - .totlen = constant_cpu_to_je32(8)
46271 + .totlen = constant_cpu_to_je32(8),
46272 + .hdr_crc = constant_cpu_to_je32(0)
46273 };
46274
46275 /*
46276 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
46277 index 3e93cdd..c8a80e1 100644
46278 --- a/fs/jffs2/xattr.c
46279 +++ b/fs/jffs2/xattr.c
46280 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
46281
46282 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
46283
46284 + pax_track_stack();
46285 +
46286 /* Phase.1 : Merge same xref */
46287 for (i=0; i < XREF_TMPHASH_SIZE; i++)
46288 xref_tmphash[i] = NULL;
46289 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46290 index 06c8a67..589dbbd 100644
46291 --- a/fs/jfs/super.c
46292 +++ b/fs/jfs/super.c
46293 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
46294
46295 jfs_inode_cachep =
46296 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46297 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46298 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46299 init_once);
46300 if (jfs_inode_cachep == NULL)
46301 return -ENOMEM;
46302 diff --git a/fs/libfs.c b/fs/libfs.c
46303 index c18e9a1..0b04e2c 100644
46304 --- a/fs/libfs.c
46305 +++ b/fs/libfs.c
46306 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46307
46308 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46309 struct dentry *next;
46310 + char d_name[sizeof(next->d_iname)];
46311 + const unsigned char *name;
46312 +
46313 next = list_entry(p, struct dentry, d_u.d_child);
46314 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46315 if (!simple_positive(next)) {
46316 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46317
46318 spin_unlock(&next->d_lock);
46319 spin_unlock(&dentry->d_lock);
46320 - if (filldir(dirent, next->d_name.name,
46321 + name = next->d_name.name;
46322 + if (name == next->d_iname) {
46323 + memcpy(d_name, name, next->d_name.len);
46324 + name = d_name;
46325 + }
46326 + if (filldir(dirent, name,
46327 next->d_name.len, filp->f_pos,
46328 next->d_inode->i_ino,
46329 dt_type(next->d_inode)) < 0)
46330 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46331 index 8392cb8..ae8ed40 100644
46332 --- a/fs/lockd/clntproc.c
46333 +++ b/fs/lockd/clntproc.c
46334 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46335 /*
46336 * Cookie counter for NLM requests
46337 */
46338 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46339 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46340
46341 void nlmclnt_next_cookie(struct nlm_cookie *c)
46342 {
46343 - u32 cookie = atomic_inc_return(&nlm_cookie);
46344 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46345
46346 memcpy(c->data, &cookie, 4);
46347 c->len=4;
46348 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
46349 struct nlm_rqst reqst, *req;
46350 int status;
46351
46352 + pax_track_stack();
46353 +
46354 req = &reqst;
46355 memset(req, 0, sizeof(*req));
46356 locks_init_lock(&req->a_args.lock.fl);
46357 diff --git a/fs/locks.c b/fs/locks.c
46358 index 703f545..150a552 100644
46359 --- a/fs/locks.c
46360 +++ b/fs/locks.c
46361 @@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *filp)
46362 return;
46363
46364 if (filp->f_op && filp->f_op->flock) {
46365 - struct file_lock fl = {
46366 + struct file_lock flock = {
46367 .fl_pid = current->tgid,
46368 .fl_file = filp,
46369 .fl_flags = FL_FLOCK,
46370 .fl_type = F_UNLCK,
46371 .fl_end = OFFSET_MAX,
46372 };
46373 - filp->f_op->flock(filp, F_SETLKW, &fl);
46374 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46375 - fl.fl_ops->fl_release_private(&fl);
46376 + filp->f_op->flock(filp, F_SETLKW, &flock);
46377 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46378 + flock.fl_ops->fl_release_private(&flock);
46379 }
46380
46381 lock_flocks();
46382 diff --git a/fs/logfs/super.c b/fs/logfs/super.c
46383 index ce03a18..ac8c14f 100644
46384 --- a/fs/logfs/super.c
46385 +++ b/fs/logfs/super.c
46386 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super_block *sb)
46387 struct logfs_disk_super _ds1, *ds1 = &_ds1;
46388 int err, valid0, valid1;
46389
46390 + pax_track_stack();
46391 +
46392 /* read first superblock */
46393 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
46394 if (err)
46395 diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
46396 index 3f32bcb..7c82c29 100644
46397 --- a/fs/minix/bitmap.c
46398 +++ b/fs/minix/bitmap.c
46399 @@ -20,10 +20,11 @@ static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
46400
46401 static DEFINE_SPINLOCK(bitmap_lock);
46402
46403 -static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
46404 +static unsigned long count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
46405 {
46406 unsigned i, j, sum = 0;
46407 struct buffer_head *bh;
46408 + unsigned numblocks = minix_blocks_needed(numbits, blocksize);
46409
46410 for (i=0; i<numblocks-1; i++) {
46411 if (!(bh=map[i]))
46412 @@ -105,10 +106,12 @@ int minix_new_block(struct inode * inode)
46413 return 0;
46414 }
46415
46416 -unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
46417 +unsigned long minix_count_free_blocks(struct super_block *sb)
46418 {
46419 - return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
46420 - sbi->s_nzones - sbi->s_firstdatazone + 1)
46421 + struct minix_sb_info *sbi = minix_sb(sb);
46422 + u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
46423 +
46424 + return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
46425 << sbi->s_log_zone_size);
46426 }
46427
46428 @@ -273,7 +276,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
46429 return inode;
46430 }
46431
46432 -unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
46433 +unsigned long minix_count_free_inodes(struct super_block *sb)
46434 {
46435 - return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
46436 + struct minix_sb_info *sbi = minix_sb(sb);
46437 + u32 bits = sbi->s_ninodes + 1;
46438 +
46439 + return count_free(sbi->s_imap, sb->s_blocksize, bits);
46440 }
46441 diff --git a/fs/minix/inode.c b/fs/minix/inode.c
46442 index e7d23e2..1ed1351 100644
46443 --- a/fs/minix/inode.c
46444 +++ b/fs/minix/inode.c
46445 @@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
46446 else if (sbi->s_mount_state & MINIX_ERROR_FS)
46447 printk("MINIX-fs: mounting file system with errors, "
46448 "running fsck is recommended\n");
46449 +
46450 + /* Apparently minix can create filesystems that allocate more blocks for
46451 + * the bitmaps than needed. We simply ignore that, but verify it didn't
46452 + * create one with not enough blocks and bail out if so.
46453 + */
46454 + block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
46455 + if (sbi->s_imap_blocks < block) {
46456 + printk("MINIX-fs: file system does not have enough "
46457 + "imap blocks allocated. Refusing to mount\n");
46458 + goto out_iput;
46459 + }
46460 +
46461 + block = minix_blocks_needed(
46462 + (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
46463 + s->s_blocksize);
46464 + if (sbi->s_zmap_blocks < block) {
46465 + printk("MINIX-fs: file system does not have enough "
46466 + "zmap blocks allocated. Refusing to mount.\n");
46467 + goto out_iput;
46468 + }
46469 +
46470 return 0;
46471
46472 out_iput:
46473 @@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
46474 buf->f_type = sb->s_magic;
46475 buf->f_bsize = sb->s_blocksize;
46476 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
46477 - buf->f_bfree = minix_count_free_blocks(sbi);
46478 + buf->f_bfree = minix_count_free_blocks(sb);
46479 buf->f_bavail = buf->f_bfree;
46480 buf->f_files = sbi->s_ninodes;
46481 - buf->f_ffree = minix_count_free_inodes(sbi);
46482 + buf->f_ffree = minix_count_free_inodes(sb);
46483 buf->f_namelen = sbi->s_namelen;
46484 buf->f_fsid.val[0] = (u32)id;
46485 buf->f_fsid.val[1] = (u32)(id >> 32);
46486 diff --git a/fs/minix/minix.h b/fs/minix/minix.h
46487 index 341e212..6415fe0 100644
46488 --- a/fs/minix/minix.h
46489 +++ b/fs/minix/minix.h
46490 @@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
46491 extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
46492 extern struct inode * minix_new_inode(const struct inode *, int, int *);
46493 extern void minix_free_inode(struct inode * inode);
46494 -extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
46495 +extern unsigned long minix_count_free_inodes(struct super_block *sb);
46496 extern int minix_new_block(struct inode * inode);
46497 extern void minix_free_block(struct inode *inode, unsigned long block);
46498 -extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
46499 +extern unsigned long minix_count_free_blocks(struct super_block *sb);
46500 extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
46501 extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
46502
46503 @@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
46504 return list_entry(inode, struct minix_inode_info, vfs_inode);
46505 }
46506
46507 +static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
46508 +{
46509 + return DIV_ROUND_UP(bits, blocksize * 8);
46510 +}
46511 +
46512 #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
46513 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
46514
46515 diff --git a/fs/namei.c b/fs/namei.c
46516 index 3d15072..c1ddf9c 100644
46517 --- a/fs/namei.c
46518 +++ b/fs/namei.c
46519 @@ -281,16 +281,32 @@ int generic_permission(struct inode *inode, int mask)
46520 if (ret != -EACCES)
46521 return ret;
46522
46523 +#ifdef CONFIG_GRKERNSEC
46524 + /* we'll block if we have to log due to a denied capability use */
46525 + if (mask & MAY_NOT_BLOCK)
46526 + return -ECHILD;
46527 +#endif
46528 +
46529 if (S_ISDIR(inode->i_mode)) {
46530 /* DACs are overridable for directories */
46531 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46532 - return 0;
46533 if (!(mask & MAY_WRITE))
46534 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46535 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46536 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46537 return 0;
46538 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46539 + return 0;
46540 return -EACCES;
46541 }
46542 /*
46543 + * Searching includes executable on directories, else just read.
46544 + */
46545 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46546 + if (mask == MAY_READ)
46547 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46548 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46549 + return 0;
46550 +
46551 + /*
46552 * Read/write DACs are always overridable.
46553 * Executable DACs are overridable when there is
46554 * at least one exec bit set.
46555 @@ -299,14 +315,6 @@ int generic_permission(struct inode *inode, int mask)
46556 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46557 return 0;
46558
46559 - /*
46560 - * Searching includes executable on directories, else just read.
46561 - */
46562 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46563 - if (mask == MAY_READ)
46564 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46565 - return 0;
46566 -
46567 return -EACCES;
46568 }
46569
46570 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46571 return error;
46572 }
46573
46574 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46575 + dentry->d_inode, dentry, nd->path.mnt)) {
46576 + error = -EACCES;
46577 + *p = ERR_PTR(error); /* no ->put_link(), please */
46578 + path_put(&nd->path);
46579 + return error;
46580 + }
46581 +
46582 nd->last_type = LAST_BIND;
46583 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46584 error = PTR_ERR(*p);
46585 if (!IS_ERR(*p)) {
46586 - char *s = nd_get_link(nd);
46587 + const char *s = nd_get_link(nd);
46588 error = 0;
46589 if (s)
46590 error = __vfs_follow_link(nd, s);
46591 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
46592 if (!err)
46593 err = complete_walk(nd);
46594
46595 + if (!(nd->flags & LOOKUP_PARENT)) {
46596 +#ifdef CONFIG_GRKERNSEC
46597 + if (flags & LOOKUP_RCU) {
46598 + if (!err)
46599 + path_put(&nd->path);
46600 + err = -ECHILD;
46601 + } else
46602 +#endif
46603 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46604 + if (!err)
46605 + path_put(&nd->path);
46606 + err = -ENOENT;
46607 + }
46608 + }
46609 +
46610 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46611 if (!nd->inode->i_op->lookup) {
46612 path_put(&nd->path);
46613 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
46614 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46615
46616 if (likely(!retval)) {
46617 + if (*name != '/' && nd->path.dentry && nd->inode) {
46618 +#ifdef CONFIG_GRKERNSEC
46619 + if (flags & LOOKUP_RCU)
46620 + return -ECHILD;
46621 +#endif
46622 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46623 + return -ENOENT;
46624 + }
46625 +
46626 if (unlikely(!audit_dummy_context())) {
46627 if (nd->path.dentry && nd->inode)
46628 audit_inode(name, nd->path.dentry);
46629 @@ -2049,7 +2089,27 @@ static int may_open(struct path *path, int acc_mode, int flag)
46630 /*
46631 * Ensure there are no outstanding leases on the file.
46632 */
46633 - return break_lease(inode, flag);
46634 + error = break_lease(inode, flag);
46635 +
46636 + if (error)
46637 + return error;
46638 +
46639 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
46640 + error = -EPERM;
46641 + goto exit;
46642 + }
46643 +
46644 + if (gr_handle_rawio(inode)) {
46645 + error = -EPERM;
46646 + goto exit;
46647 + }
46648 +
46649 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
46650 + error = -EACCES;
46651 + goto exit;
46652 + }
46653 +exit:
46654 + return error;
46655 }
46656
46657 static int handle_truncate(struct file *filp)
46658 @@ -2110,6 +2170,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46659 error = complete_walk(nd);
46660 if (error)
46661 return ERR_PTR(error);
46662 +#ifdef CONFIG_GRKERNSEC
46663 + if (nd->flags & LOOKUP_RCU) {
46664 + error = -ECHILD;
46665 + goto exit;
46666 + }
46667 +#endif
46668 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46669 + error = -ENOENT;
46670 + goto exit;
46671 + }
46672 audit_inode(pathname, nd->path.dentry);
46673 if (open_flag & O_CREAT) {
46674 error = -EISDIR;
46675 @@ -2120,6 +2190,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46676 error = complete_walk(nd);
46677 if (error)
46678 return ERR_PTR(error);
46679 +#ifdef CONFIG_GRKERNSEC
46680 + if (nd->flags & LOOKUP_RCU) {
46681 + error = -ECHILD;
46682 + goto exit;
46683 + }
46684 +#endif
46685 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46686 + error = -ENOENT;
46687 + goto exit;
46688 + }
46689 audit_inode(pathname, dir);
46690 goto ok;
46691 }
46692 @@ -2141,6 +2221,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46693 error = complete_walk(nd);
46694 if (error)
46695 return ERR_PTR(-ECHILD);
46696 +#ifdef CONFIG_GRKERNSEC
46697 + if (nd->flags & LOOKUP_RCU) {
46698 + error = -ECHILD;
46699 + goto exit;
46700 + }
46701 +#endif
46702 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46703 + error = -ENOENT;
46704 + goto exit;
46705 + }
46706
46707 error = -ENOTDIR;
46708 if (nd->flags & LOOKUP_DIRECTORY) {
46709 @@ -2181,6 +2271,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46710 /* Negative dentry, just create the file */
46711 if (!dentry->d_inode) {
46712 int mode = op->mode;
46713 +
46714 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46715 + error = -EACCES;
46716 + goto exit_mutex_unlock;
46717 + }
46718 +
46719 if (!IS_POSIXACL(dir->d_inode))
46720 mode &= ~current_umask();
46721 /*
46722 @@ -2204,6 +2300,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46723 error = vfs_create(dir->d_inode, dentry, mode, nd);
46724 if (error)
46725 goto exit_mutex_unlock;
46726 + else
46727 + gr_handle_create(path->dentry, path->mnt);
46728 mutex_unlock(&dir->d_inode->i_mutex);
46729 dput(nd->path.dentry);
46730 nd->path.dentry = dentry;
46731 @@ -2213,6 +2311,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46732 /*
46733 * It already exists.
46734 */
46735 +
46736 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46737 + error = -ENOENT;
46738 + goto exit_mutex_unlock;
46739 + }
46740 +
46741 + /* only check if O_CREAT is specified, all other checks need to go
46742 + into may_open */
46743 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46744 + error = -EACCES;
46745 + goto exit_mutex_unlock;
46746 + }
46747 +
46748 mutex_unlock(&dir->d_inode->i_mutex);
46749 audit_inode(pathname, path->dentry);
46750
46751 @@ -2425,6 +2536,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46752 *path = nd.path;
46753 return dentry;
46754 eexist:
46755 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46756 + dput(dentry);
46757 + dentry = ERR_PTR(-ENOENT);
46758 + goto fail;
46759 + }
46760 dput(dentry);
46761 dentry = ERR_PTR(-EEXIST);
46762 fail:
46763 @@ -2447,6 +2563,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46764 }
46765 EXPORT_SYMBOL(user_path_create);
46766
46767 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46768 +{
46769 + char *tmp = getname(pathname);
46770 + struct dentry *res;
46771 + if (IS_ERR(tmp))
46772 + return ERR_CAST(tmp);
46773 + res = kern_path_create(dfd, tmp, path, is_dir);
46774 + if (IS_ERR(res))
46775 + putname(tmp);
46776 + else
46777 + *to = tmp;
46778 + return res;
46779 +}
46780 +
46781 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
46782 {
46783 int error = may_create(dir, dentry);
46784 @@ -2514,6 +2644,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46785 error = mnt_want_write(path.mnt);
46786 if (error)
46787 goto out_dput;
46788 +
46789 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46790 + error = -EPERM;
46791 + goto out_drop_write;
46792 + }
46793 +
46794 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46795 + error = -EACCES;
46796 + goto out_drop_write;
46797 + }
46798 +
46799 error = security_path_mknod(&path, dentry, mode, dev);
46800 if (error)
46801 goto out_drop_write;
46802 @@ -2531,6 +2672,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46803 }
46804 out_drop_write:
46805 mnt_drop_write(path.mnt);
46806 +
46807 + if (!error)
46808 + gr_handle_create(dentry, path.mnt);
46809 out_dput:
46810 dput(dentry);
46811 mutex_unlock(&path.dentry->d_inode->i_mutex);
46812 @@ -2580,12 +2724,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
46813 error = mnt_want_write(path.mnt);
46814 if (error)
46815 goto out_dput;
46816 +
46817 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46818 + error = -EACCES;
46819 + goto out_drop_write;
46820 + }
46821 +
46822 error = security_path_mkdir(&path, dentry, mode);
46823 if (error)
46824 goto out_drop_write;
46825 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46826 out_drop_write:
46827 mnt_drop_write(path.mnt);
46828 +
46829 + if (!error)
46830 + gr_handle_create(dentry, path.mnt);
46831 out_dput:
46832 dput(dentry);
46833 mutex_unlock(&path.dentry->d_inode->i_mutex);
46834 @@ -2665,6 +2818,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46835 char * name;
46836 struct dentry *dentry;
46837 struct nameidata nd;
46838 + ino_t saved_ino = 0;
46839 + dev_t saved_dev = 0;
46840
46841 error = user_path_parent(dfd, pathname, &nd, &name);
46842 if (error)
46843 @@ -2693,6 +2848,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46844 error = -ENOENT;
46845 goto exit3;
46846 }
46847 +
46848 + saved_ino = dentry->d_inode->i_ino;
46849 + saved_dev = gr_get_dev_from_dentry(dentry);
46850 +
46851 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46852 + error = -EACCES;
46853 + goto exit3;
46854 + }
46855 +
46856 error = mnt_want_write(nd.path.mnt);
46857 if (error)
46858 goto exit3;
46859 @@ -2700,6 +2864,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46860 if (error)
46861 goto exit4;
46862 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46863 + if (!error && (saved_dev || saved_ino))
46864 + gr_handle_delete(saved_ino, saved_dev);
46865 exit4:
46866 mnt_drop_write(nd.path.mnt);
46867 exit3:
46868 @@ -2762,6 +2928,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46869 struct dentry *dentry;
46870 struct nameidata nd;
46871 struct inode *inode = NULL;
46872 + ino_t saved_ino = 0;
46873 + dev_t saved_dev = 0;
46874
46875 error = user_path_parent(dfd, pathname, &nd, &name);
46876 if (error)
46877 @@ -2784,6 +2952,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46878 if (!inode)
46879 goto slashes;
46880 ihold(inode);
46881 +
46882 + if (inode->i_nlink <= 1) {
46883 + saved_ino = inode->i_ino;
46884 + saved_dev = gr_get_dev_from_dentry(dentry);
46885 + }
46886 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46887 + error = -EACCES;
46888 + goto exit2;
46889 + }
46890 +
46891 error = mnt_want_write(nd.path.mnt);
46892 if (error)
46893 goto exit2;
46894 @@ -2791,6 +2969,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46895 if (error)
46896 goto exit3;
46897 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46898 + if (!error && (saved_ino || saved_dev))
46899 + gr_handle_delete(saved_ino, saved_dev);
46900 exit3:
46901 mnt_drop_write(nd.path.mnt);
46902 exit2:
46903 @@ -2866,10 +3046,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46904 error = mnt_want_write(path.mnt);
46905 if (error)
46906 goto out_dput;
46907 +
46908 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46909 + error = -EACCES;
46910 + goto out_drop_write;
46911 + }
46912 +
46913 error = security_path_symlink(&path, dentry, from);
46914 if (error)
46915 goto out_drop_write;
46916 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46917 + if (!error)
46918 + gr_handle_create(dentry, path.mnt);
46919 out_drop_write:
46920 mnt_drop_write(path.mnt);
46921 out_dput:
46922 @@ -2941,6 +3129,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46923 {
46924 struct dentry *new_dentry;
46925 struct path old_path, new_path;
46926 + char *to = NULL;
46927 int how = 0;
46928 int error;
46929
46930 @@ -2964,7 +3153,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46931 if (error)
46932 return error;
46933
46934 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46935 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46936 error = PTR_ERR(new_dentry);
46937 if (IS_ERR(new_dentry))
46938 goto out;
46939 @@ -2975,13 +3164,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46940 error = mnt_want_write(new_path.mnt);
46941 if (error)
46942 goto out_dput;
46943 +
46944 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46945 + old_path.dentry->d_inode,
46946 + old_path.dentry->d_inode->i_mode, to)) {
46947 + error = -EACCES;
46948 + goto out_drop_write;
46949 + }
46950 +
46951 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46952 + old_path.dentry, old_path.mnt, to)) {
46953 + error = -EACCES;
46954 + goto out_drop_write;
46955 + }
46956 +
46957 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46958 if (error)
46959 goto out_drop_write;
46960 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46961 + if (!error)
46962 + gr_handle_create(new_dentry, new_path.mnt);
46963 out_drop_write:
46964 mnt_drop_write(new_path.mnt);
46965 out_dput:
46966 + putname(to);
46967 dput(new_dentry);
46968 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46969 path_put(&new_path);
46970 @@ -3153,6 +3359,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46971 char *to;
46972 int error;
46973
46974 + pax_track_stack();
46975 +
46976 error = user_path_parent(olddfd, oldname, &oldnd, &from);
46977 if (error)
46978 goto exit;
46979 @@ -3209,6 +3417,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46980 if (new_dentry == trap)
46981 goto exit5;
46982
46983 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46984 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46985 + to);
46986 + if (error)
46987 + goto exit5;
46988 +
46989 error = mnt_want_write(oldnd.path.mnt);
46990 if (error)
46991 goto exit5;
46992 @@ -3218,6 +3432,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46993 goto exit6;
46994 error = vfs_rename(old_dir->d_inode, old_dentry,
46995 new_dir->d_inode, new_dentry);
46996 + if (!error)
46997 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46998 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46999 exit6:
47000 mnt_drop_write(oldnd.path.mnt);
47001 exit5:
47002 @@ -3243,6 +3460,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47003
47004 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47005 {
47006 + char tmpbuf[64];
47007 + const char *newlink;
47008 int len;
47009
47010 len = PTR_ERR(link);
47011 @@ -3252,7 +3471,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47012 len = strlen(link);
47013 if (len > (unsigned) buflen)
47014 len = buflen;
47015 - if (copy_to_user(buffer, link, len))
47016 +
47017 + if (len < sizeof(tmpbuf)) {
47018 + memcpy(tmpbuf, link, len);
47019 + newlink = tmpbuf;
47020 + } else
47021 + newlink = link;
47022 +
47023 + if (copy_to_user(buffer, newlink, len))
47024 len = -EFAULT;
47025 out:
47026 return len;
47027 diff --git a/fs/namespace.c b/fs/namespace.c
47028 index e5e1c7d..019609e 100644
47029 --- a/fs/namespace.c
47030 +++ b/fs/namespace.c
47031 @@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
47032 if (!(sb->s_flags & MS_RDONLY))
47033 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47034 up_write(&sb->s_umount);
47035 +
47036 + gr_log_remount(mnt->mnt_devname, retval);
47037 +
47038 return retval;
47039 }
47040
47041 @@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
47042 br_write_unlock(vfsmount_lock);
47043 up_write(&namespace_sem);
47044 release_mounts(&umount_list);
47045 +
47046 + gr_log_unmount(mnt->mnt_devname, retval);
47047 +
47048 return retval;
47049 }
47050
47051 @@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47052 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47053 MS_STRICTATIME);
47054
47055 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47056 + retval = -EPERM;
47057 + goto dput_out;
47058 + }
47059 +
47060 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47061 + retval = -EPERM;
47062 + goto dput_out;
47063 + }
47064 +
47065 if (flags & MS_REMOUNT)
47066 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47067 data_page);
47068 @@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47069 dev_name, data_page);
47070 dput_out:
47071 path_put(&path);
47072 +
47073 + gr_log_mount(dev_name, dir_name, retval);
47074 +
47075 return retval;
47076 }
47077
47078 @@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47079 if (error)
47080 goto out2;
47081
47082 + if (gr_handle_chroot_pivot()) {
47083 + error = -EPERM;
47084 + goto out2;
47085 + }
47086 +
47087 get_fs_root(current->fs, &root);
47088 error = lock_mount(&old);
47089 if (error)
47090 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
47091 index 9c51f62..503b252 100644
47092 --- a/fs/ncpfs/dir.c
47093 +++ b/fs/ncpfs/dir.c
47094 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
47095 int res, val = 0, len;
47096 __u8 __name[NCP_MAXPATHLEN + 1];
47097
47098 + pax_track_stack();
47099 +
47100 if (dentry == dentry->d_sb->s_root)
47101 return 1;
47102
47103 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
47104 int error, res, len;
47105 __u8 __name[NCP_MAXPATHLEN + 1];
47106
47107 + pax_track_stack();
47108 +
47109 error = -EIO;
47110 if (!ncp_conn_valid(server))
47111 goto finished;
47112 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
47113 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
47114 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
47115
47116 + pax_track_stack();
47117 +
47118 ncp_age_dentry(server, dentry);
47119 len = sizeof(__name);
47120 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
47121 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
47122 int error, len;
47123 __u8 __name[NCP_MAXPATHLEN + 1];
47124
47125 + pax_track_stack();
47126 +
47127 DPRINTK("ncp_mkdir: making %s/%s\n",
47128 dentry->d_parent->d_name.name, dentry->d_name.name);
47129
47130 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
47131 int old_len, new_len;
47132 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
47133
47134 + pax_track_stack();
47135 +
47136 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
47137 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
47138 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
47139 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
47140 index 202f370..9d4565e 100644
47141 --- a/fs/ncpfs/inode.c
47142 +++ b/fs/ncpfs/inode.c
47143 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
47144 #endif
47145 struct ncp_entry_info finfo;
47146
47147 + pax_track_stack();
47148 +
47149 memset(&data, 0, sizeof(data));
47150 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
47151 if (!server)
47152 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
47153 index 281ae95..dd895b9 100644
47154 --- a/fs/nfs/blocklayout/blocklayout.c
47155 +++ b/fs/nfs/blocklayout/blocklayout.c
47156 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
47157 */
47158 struct parallel_io {
47159 struct kref refcnt;
47160 - struct rpc_call_ops call_ops;
47161 + rpc_call_ops_no_const call_ops;
47162 void (*pnfs_callback) (void *data);
47163 void *data;
47164 };
47165 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47166 index 679d2f5..ef1ffec 100644
47167 --- a/fs/nfs/inode.c
47168 +++ b/fs/nfs/inode.c
47169 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47170 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47171 nfsi->attrtimeo_timestamp = jiffies;
47172
47173 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47174 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47175 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47176 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47177 else
47178 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47179 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47180 }
47181
47182 -static atomic_long_t nfs_attr_generation_counter;
47183 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47184
47185 static unsigned long nfs_read_attr_generation_counter(void)
47186 {
47187 - return atomic_long_read(&nfs_attr_generation_counter);
47188 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47189 }
47190
47191 unsigned long nfs_inc_attr_generation_counter(void)
47192 {
47193 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47194 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47195 }
47196
47197 void nfs_fattr_init(struct nfs_fattr *fattr)
47198 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
47199 index 6f8bcc7..8f823c5 100644
47200 --- a/fs/nfsd/nfs4state.c
47201 +++ b/fs/nfsd/nfs4state.c
47202 @@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
47203 unsigned int strhashval;
47204 int err;
47205
47206 + pax_track_stack();
47207 +
47208 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
47209 (long long) lock->lk_offset,
47210 (long long) lock->lk_length);
47211 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
47212 index f810996..cec8977 100644
47213 --- a/fs/nfsd/nfs4xdr.c
47214 +++ b/fs/nfsd/nfs4xdr.c
47215 @@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
47216 .dentry = dentry,
47217 };
47218
47219 + pax_track_stack();
47220 +
47221 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
47222 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
47223 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
47224 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47225 index acf88ae..4fd6245 100644
47226 --- a/fs/nfsd/vfs.c
47227 +++ b/fs/nfsd/vfs.c
47228 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47229 } else {
47230 oldfs = get_fs();
47231 set_fs(KERNEL_DS);
47232 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47233 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47234 set_fs(oldfs);
47235 }
47236
47237 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47238
47239 /* Write the data. */
47240 oldfs = get_fs(); set_fs(KERNEL_DS);
47241 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47242 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47243 set_fs(oldfs);
47244 if (host_err < 0)
47245 goto out_nfserr;
47246 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47247 */
47248
47249 oldfs = get_fs(); set_fs(KERNEL_DS);
47250 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
47251 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47252 set_fs(oldfs);
47253
47254 if (host_err < 0)
47255 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47256 index 9fde1c0..14e8827 100644
47257 --- a/fs/notify/fanotify/fanotify_user.c
47258 +++ b/fs/notify/fanotify/fanotify_user.c
47259 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47260 goto out_close_fd;
47261
47262 ret = -EFAULT;
47263 - if (copy_to_user(buf, &fanotify_event_metadata,
47264 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47265 + copy_to_user(buf, &fanotify_event_metadata,
47266 fanotify_event_metadata.event_len))
47267 goto out_kill_access_response;
47268
47269 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47270 index ee18815..7aa5d01 100644
47271 --- a/fs/notify/notification.c
47272 +++ b/fs/notify/notification.c
47273 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47274 * get set to 0 so it will never get 'freed'
47275 */
47276 static struct fsnotify_event *q_overflow_event;
47277 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47278 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47279
47280 /**
47281 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47282 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47283 */
47284 u32 fsnotify_get_cookie(void)
47285 {
47286 - return atomic_inc_return(&fsnotify_sync_cookie);
47287 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47288 }
47289 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47290
47291 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47292 index 99e3610..02c1068 100644
47293 --- a/fs/ntfs/dir.c
47294 +++ b/fs/ntfs/dir.c
47295 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47296 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47297 ~(s64)(ndir->itype.index.block_size - 1)));
47298 /* Bounds checks. */
47299 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47300 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47301 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47302 "inode 0x%lx or driver bug.", vdir->i_ino);
47303 goto err_out;
47304 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47305 index c587e2d..3641eaa 100644
47306 --- a/fs/ntfs/file.c
47307 +++ b/fs/ntfs/file.c
47308 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47309 #endif /* NTFS_RW */
47310 };
47311
47312 -const struct file_operations ntfs_empty_file_ops = {};
47313 +const struct file_operations ntfs_empty_file_ops __read_only;
47314
47315 -const struct inode_operations ntfs_empty_inode_ops = {};
47316 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47317 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47318 index 210c352..a174f83 100644
47319 --- a/fs/ocfs2/localalloc.c
47320 +++ b/fs/ocfs2/localalloc.c
47321 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47322 goto bail;
47323 }
47324
47325 - atomic_inc(&osb->alloc_stats.moves);
47326 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47327
47328 bail:
47329 if (handle)
47330 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
47331 index 53aa41e..d7df9f1 100644
47332 --- a/fs/ocfs2/namei.c
47333 +++ b/fs/ocfs2/namei.c
47334 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *old_dir,
47335 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
47336 struct ocfs2_dir_lookup_result target_insert = { NULL, };
47337
47338 + pax_track_stack();
47339 +
47340 /* At some point it might be nice to break this function up a
47341 * bit. */
47342
47343 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47344 index 4092858..51c70ff 100644
47345 --- a/fs/ocfs2/ocfs2.h
47346 +++ b/fs/ocfs2/ocfs2.h
47347 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47348
47349 struct ocfs2_alloc_stats
47350 {
47351 - atomic_t moves;
47352 - atomic_t local_data;
47353 - atomic_t bitmap_data;
47354 - atomic_t bg_allocs;
47355 - atomic_t bg_extends;
47356 + atomic_unchecked_t moves;
47357 + atomic_unchecked_t local_data;
47358 + atomic_unchecked_t bitmap_data;
47359 + atomic_unchecked_t bg_allocs;
47360 + atomic_unchecked_t bg_extends;
47361 };
47362
47363 enum ocfs2_local_alloc_state
47364 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47365 index ba5d97e..c77db25 100644
47366 --- a/fs/ocfs2/suballoc.c
47367 +++ b/fs/ocfs2/suballoc.c
47368 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47369 mlog_errno(status);
47370 goto bail;
47371 }
47372 - atomic_inc(&osb->alloc_stats.bg_extends);
47373 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47374
47375 /* You should never ask for this much metadata */
47376 BUG_ON(bits_wanted >
47377 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47378 mlog_errno(status);
47379 goto bail;
47380 }
47381 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47382 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47383
47384 *suballoc_loc = res.sr_bg_blkno;
47385 *suballoc_bit_start = res.sr_bit_offset;
47386 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47387 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47388 res->sr_bits);
47389
47390 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47391 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47392
47393 BUG_ON(res->sr_bits != 1);
47394
47395 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47396 mlog_errno(status);
47397 goto bail;
47398 }
47399 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47400 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47401
47402 BUG_ON(res.sr_bits != 1);
47403
47404 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47405 cluster_start,
47406 num_clusters);
47407 if (!status)
47408 - atomic_inc(&osb->alloc_stats.local_data);
47409 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47410 } else {
47411 if (min_clusters > (osb->bitmap_cpg - 1)) {
47412 /* The only paths asking for contiguousness
47413 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47414 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47415 res.sr_bg_blkno,
47416 res.sr_bit_offset);
47417 - atomic_inc(&osb->alloc_stats.bitmap_data);
47418 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47419 *num_clusters = res.sr_bits;
47420 }
47421 }
47422 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47423 index 56f6102..1433c29 100644
47424 --- a/fs/ocfs2/super.c
47425 +++ b/fs/ocfs2/super.c
47426 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47427 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47428 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47429 "Stats",
47430 - atomic_read(&osb->alloc_stats.bitmap_data),
47431 - atomic_read(&osb->alloc_stats.local_data),
47432 - atomic_read(&osb->alloc_stats.bg_allocs),
47433 - atomic_read(&osb->alloc_stats.moves),
47434 - atomic_read(&osb->alloc_stats.bg_extends));
47435 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47436 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47437 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47438 + atomic_read_unchecked(&osb->alloc_stats.moves),
47439 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47440
47441 out += snprintf(buf + out, len - out,
47442 "%10s => State: %u Descriptor: %llu Size: %u bits "
47443 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47444 spin_lock_init(&osb->osb_xattr_lock);
47445 ocfs2_init_steal_slots(osb);
47446
47447 - atomic_set(&osb->alloc_stats.moves, 0);
47448 - atomic_set(&osb->alloc_stats.local_data, 0);
47449 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47450 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47451 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47452 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47453 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47454 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47455 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47456 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47457
47458 /* Copy the blockcheck stats from the superblock probe */
47459 osb->osb_ecc_stats = *stats;
47460 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47461 index 5d22872..523db20 100644
47462 --- a/fs/ocfs2/symlink.c
47463 +++ b/fs/ocfs2/symlink.c
47464 @@ -142,7 +142,7 @@ bail:
47465
47466 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47467 {
47468 - char *link = nd_get_link(nd);
47469 + const char *link = nd_get_link(nd);
47470 if (!IS_ERR(link))
47471 kfree(link);
47472 }
47473 diff --git a/fs/open.c b/fs/open.c
47474 index f711921..28d5958 100644
47475 --- a/fs/open.c
47476 +++ b/fs/open.c
47477 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47478 error = locks_verify_truncate(inode, NULL, length);
47479 if (!error)
47480 error = security_path_truncate(&path);
47481 +
47482 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47483 + error = -EACCES;
47484 +
47485 if (!error)
47486 error = do_truncate(path.dentry, length, 0, NULL);
47487
47488 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47489 if (__mnt_is_readonly(path.mnt))
47490 res = -EROFS;
47491
47492 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47493 + res = -EACCES;
47494 +
47495 out_path_release:
47496 path_put(&path);
47497 out:
47498 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47499 if (error)
47500 goto dput_and_out;
47501
47502 + gr_log_chdir(path.dentry, path.mnt);
47503 +
47504 set_fs_pwd(current->fs, &path);
47505
47506 dput_and_out:
47507 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47508 goto out_putf;
47509
47510 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47511 +
47512 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47513 + error = -EPERM;
47514 +
47515 + if (!error)
47516 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47517 +
47518 if (!error)
47519 set_fs_pwd(current->fs, &file->f_path);
47520 out_putf:
47521 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47522 if (error)
47523 goto dput_and_out;
47524
47525 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47526 + goto dput_and_out;
47527 +
47528 set_fs_root(current->fs, &path);
47529 +
47530 + gr_handle_chroot_chdir(&path);
47531 +
47532 error = 0;
47533 dput_and_out:
47534 path_put(&path);
47535 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
47536 if (error)
47537 return error;
47538 mutex_lock(&inode->i_mutex);
47539 +
47540 + if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
47541 + error = -EACCES;
47542 + goto out_unlock;
47543 + }
47544 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47545 + error = -EACCES;
47546 + goto out_unlock;
47547 + }
47548 +
47549 error = security_path_chmod(path->dentry, path->mnt, mode);
47550 if (error)
47551 goto out_unlock;
47552 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47553 int error;
47554 struct iattr newattrs;
47555
47556 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47557 + return -EACCES;
47558 +
47559 newattrs.ia_valid = ATTR_CTIME;
47560 if (user != (uid_t) -1) {
47561 newattrs.ia_valid |= ATTR_UID;
47562 diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
47563 index 6296b40..417c00f 100644
47564 --- a/fs/partitions/efi.c
47565 +++ b/fs/partitions/efi.c
47566 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
47567 if (!gpt)
47568 return NULL;
47569
47570 - count = le32_to_cpu(gpt->num_partition_entries) *
47571 - le32_to_cpu(gpt->sizeof_partition_entry);
47572 - if (!count)
47573 + if (!le32_to_cpu(gpt->num_partition_entries))
47574 return NULL;
47575 - pte = kzalloc(count, GFP_KERNEL);
47576 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
47577 if (!pte)
47578 return NULL;
47579
47580 + count = le32_to_cpu(gpt->num_partition_entries) *
47581 + le32_to_cpu(gpt->sizeof_partition_entry);
47582 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
47583 (u8 *) pte,
47584 count) < count) {
47585 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
47586 index af9fdf0..75b15c3 100644
47587 --- a/fs/partitions/ldm.c
47588 +++ b/fs/partitions/ldm.c
47589 @@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
47590 goto found;
47591 }
47592
47593 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
47594 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
47595 if (!f) {
47596 ldm_crit ("Out of memory.");
47597 return false;
47598 diff --git a/fs/pipe.c b/fs/pipe.c
47599 index 0e0be1d..f62a72d 100644
47600 --- a/fs/pipe.c
47601 +++ b/fs/pipe.c
47602 @@ -420,9 +420,9 @@ redo:
47603 }
47604 if (bufs) /* More to do? */
47605 continue;
47606 - if (!pipe->writers)
47607 + if (!atomic_read(&pipe->writers))
47608 break;
47609 - if (!pipe->waiting_writers) {
47610 + if (!atomic_read(&pipe->waiting_writers)) {
47611 /* syscall merging: Usually we must not sleep
47612 * if O_NONBLOCK is set, or if we got some data.
47613 * But if a writer sleeps in kernel space, then
47614 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47615 mutex_lock(&inode->i_mutex);
47616 pipe = inode->i_pipe;
47617
47618 - if (!pipe->readers) {
47619 + if (!atomic_read(&pipe->readers)) {
47620 send_sig(SIGPIPE, current, 0);
47621 ret = -EPIPE;
47622 goto out;
47623 @@ -530,7 +530,7 @@ redo1:
47624 for (;;) {
47625 int bufs;
47626
47627 - if (!pipe->readers) {
47628 + if (!atomic_read(&pipe->readers)) {
47629 send_sig(SIGPIPE, current, 0);
47630 if (!ret)
47631 ret = -EPIPE;
47632 @@ -616,9 +616,9 @@ redo2:
47633 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47634 do_wakeup = 0;
47635 }
47636 - pipe->waiting_writers++;
47637 + atomic_inc(&pipe->waiting_writers);
47638 pipe_wait(pipe);
47639 - pipe->waiting_writers--;
47640 + atomic_dec(&pipe->waiting_writers);
47641 }
47642 out:
47643 mutex_unlock(&inode->i_mutex);
47644 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47645 mask = 0;
47646 if (filp->f_mode & FMODE_READ) {
47647 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47648 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47649 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47650 mask |= POLLHUP;
47651 }
47652
47653 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47654 * Most Unices do not set POLLERR for FIFOs but on Linux they
47655 * behave exactly like pipes for poll().
47656 */
47657 - if (!pipe->readers)
47658 + if (!atomic_read(&pipe->readers))
47659 mask |= POLLERR;
47660 }
47661
47662 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47663
47664 mutex_lock(&inode->i_mutex);
47665 pipe = inode->i_pipe;
47666 - pipe->readers -= decr;
47667 - pipe->writers -= decw;
47668 + atomic_sub(decr, &pipe->readers);
47669 + atomic_sub(decw, &pipe->writers);
47670
47671 - if (!pipe->readers && !pipe->writers) {
47672 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47673 free_pipe_info(inode);
47674 } else {
47675 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47676 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47677
47678 if (inode->i_pipe) {
47679 ret = 0;
47680 - inode->i_pipe->readers++;
47681 + atomic_inc(&inode->i_pipe->readers);
47682 }
47683
47684 mutex_unlock(&inode->i_mutex);
47685 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47686
47687 if (inode->i_pipe) {
47688 ret = 0;
47689 - inode->i_pipe->writers++;
47690 + atomic_inc(&inode->i_pipe->writers);
47691 }
47692
47693 mutex_unlock(&inode->i_mutex);
47694 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47695 if (inode->i_pipe) {
47696 ret = 0;
47697 if (filp->f_mode & FMODE_READ)
47698 - inode->i_pipe->readers++;
47699 + atomic_inc(&inode->i_pipe->readers);
47700 if (filp->f_mode & FMODE_WRITE)
47701 - inode->i_pipe->writers++;
47702 + atomic_inc(&inode->i_pipe->writers);
47703 }
47704
47705 mutex_unlock(&inode->i_mutex);
47706 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
47707 inode->i_pipe = NULL;
47708 }
47709
47710 -static struct vfsmount *pipe_mnt __read_mostly;
47711 +struct vfsmount *pipe_mnt __read_mostly;
47712
47713 /*
47714 * pipefs_dname() is called from d_path().
47715 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
47716 goto fail_iput;
47717 inode->i_pipe = pipe;
47718
47719 - pipe->readers = pipe->writers = 1;
47720 + atomic_set(&pipe->readers, 1);
47721 + atomic_set(&pipe->writers, 1);
47722 inode->i_fop = &rdwr_pipefifo_fops;
47723
47724 /*
47725 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47726 index 15af622..0e9f4467 100644
47727 --- a/fs/proc/Kconfig
47728 +++ b/fs/proc/Kconfig
47729 @@ -30,12 +30,12 @@ config PROC_FS
47730
47731 config PROC_KCORE
47732 bool "/proc/kcore support" if !ARM
47733 - depends on PROC_FS && MMU
47734 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47735
47736 config PROC_VMCORE
47737 bool "/proc/vmcore support"
47738 - depends on PROC_FS && CRASH_DUMP
47739 - default y
47740 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47741 + default n
47742 help
47743 Exports the dump image of crashed kernel in ELF format.
47744
47745 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47746 limited in memory.
47747
47748 config PROC_PAGE_MONITOR
47749 - default y
47750 - depends on PROC_FS && MMU
47751 + default n
47752 + depends on PROC_FS && MMU && !GRKERNSEC
47753 bool "Enable /proc page monitoring" if EXPERT
47754 help
47755 Various /proc files exist to monitor process memory utilization:
47756 diff --git a/fs/proc/array.c b/fs/proc/array.c
47757 index 3a1dafd..c7fed72 100644
47758 --- a/fs/proc/array.c
47759 +++ b/fs/proc/array.c
47760 @@ -60,6 +60,7 @@
47761 #include <linux/tty.h>
47762 #include <linux/string.h>
47763 #include <linux/mman.h>
47764 +#include <linux/grsecurity.h>
47765 #include <linux/proc_fs.h>
47766 #include <linux/ioport.h>
47767 #include <linux/uaccess.h>
47768 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47769 seq_putc(m, '\n');
47770 }
47771
47772 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47773 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47774 +{
47775 + if (p->mm)
47776 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47777 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47778 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47779 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47780 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47781 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47782 + else
47783 + seq_printf(m, "PaX:\t-----\n");
47784 +}
47785 +#endif
47786 +
47787 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47788 struct pid *pid, struct task_struct *task)
47789 {
47790 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47791 task_cpus_allowed(m, task);
47792 cpuset_task_status_allowed(m, task);
47793 task_context_switch_counts(m, task);
47794 +
47795 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47796 + task_pax(m, task);
47797 +#endif
47798 +
47799 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47800 + task_grsec_rbac(m, task);
47801 +#endif
47802 +
47803 return 0;
47804 }
47805
47806 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47807 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47808 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47809 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47810 +#endif
47811 +
47812 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47813 struct pid *pid, struct task_struct *task, int whole)
47814 {
47815 @@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47816 char tcomm[sizeof(task->comm)];
47817 unsigned long flags;
47818
47819 + pax_track_stack();
47820 +
47821 state = *get_task_state(task);
47822 vsize = eip = esp = 0;
47823 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
47824 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47825 gtime = task->gtime;
47826 }
47827
47828 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47829 + if (PAX_RAND_FLAGS(mm)) {
47830 + eip = 0;
47831 + esp = 0;
47832 + wchan = 0;
47833 + }
47834 +#endif
47835 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47836 + wchan = 0;
47837 + eip =0;
47838 + esp =0;
47839 +#endif
47840 +
47841 /* scale priority and nice values from timeslices to -20..20 */
47842 /* to make it look like a "normal" Unix priority/nice value */
47843 priority = task_prio(task);
47844 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47845 vsize,
47846 mm ? get_mm_rss(mm) : 0,
47847 rsslim,
47848 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47849 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
47850 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
47851 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
47852 +#else
47853 mm ? (permitted ? mm->start_code : 1) : 0,
47854 mm ? (permitted ? mm->end_code : 1) : 0,
47855 (permitted && mm) ? mm->start_stack : 0,
47856 +#endif
47857 esp,
47858 eip,
47859 /* The signal information here is obsolete.
47860 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47861
47862 return 0;
47863 }
47864 +
47865 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47866 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47867 +{
47868 + u32 curr_ip = 0;
47869 + unsigned long flags;
47870 +
47871 + if (lock_task_sighand(task, &flags)) {
47872 + curr_ip = task->signal->curr_ip;
47873 + unlock_task_sighand(task, &flags);
47874 + }
47875 +
47876 + return sprintf(buffer, "%pI4\n", &curr_ip);
47877 +}
47878 +#endif
47879 diff --git a/fs/proc/base.c b/fs/proc/base.c
47880 index 5eb0206..fe01db4 100644
47881 --- a/fs/proc/base.c
47882 +++ b/fs/proc/base.c
47883 @@ -107,6 +107,22 @@ struct pid_entry {
47884 union proc_op op;
47885 };
47886
47887 +struct getdents_callback {
47888 + struct linux_dirent __user * current_dir;
47889 + struct linux_dirent __user * previous;
47890 + struct file * file;
47891 + int count;
47892 + int error;
47893 +};
47894 +
47895 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
47896 + loff_t offset, u64 ino, unsigned int d_type)
47897 +{
47898 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
47899 + buf->error = -EINVAL;
47900 + return 0;
47901 +}
47902 +
47903 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47904 .name = (NAME), \
47905 .len = sizeof(NAME) - 1, \
47906 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
47907 if (task == current)
47908 return mm;
47909
47910 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
47911 + return ERR_PTR(-EPERM);
47912 +
47913 /*
47914 * If current is actively ptrace'ing, and would also be
47915 * permitted to freshly attach with ptrace now, permit it.
47916 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47917 if (!mm->arg_end)
47918 goto out_mm; /* Shh! No looking before we're done */
47919
47920 + if (gr_acl_handle_procpidmem(task))
47921 + goto out_mm;
47922 +
47923 len = mm->arg_end - mm->arg_start;
47924
47925 if (len > PAGE_SIZE)
47926 @@ -309,12 +331,28 @@ out:
47927 return res;
47928 }
47929
47930 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47931 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47932 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47933 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47934 +#endif
47935 +
47936 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47937 {
47938 struct mm_struct *mm = mm_for_maps(task);
47939 int res = PTR_ERR(mm);
47940 if (mm && !IS_ERR(mm)) {
47941 unsigned int nwords = 0;
47942 +
47943 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47944 + /* allow if we're currently ptracing this task */
47945 + if (PAX_RAND_FLAGS(mm) &&
47946 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47947 + mmput(mm);
47948 + return 0;
47949 + }
47950 +#endif
47951 +
47952 do {
47953 nwords += 2;
47954 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47955 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47956 }
47957
47958
47959 -#ifdef CONFIG_KALLSYMS
47960 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47961 /*
47962 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47963 * Returns the resolved symbol. If that fails, simply return the address.
47964 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_struct *task)
47965 mutex_unlock(&task->signal->cred_guard_mutex);
47966 }
47967
47968 -#ifdef CONFIG_STACKTRACE
47969 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47970
47971 #define MAX_STACK_TRACE_DEPTH 64
47972
47973 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47974 return count;
47975 }
47976
47977 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47978 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47979 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47980 {
47981 long nr;
47982 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47983 /************************************************************************/
47984
47985 /* permission checks */
47986 -static int proc_fd_access_allowed(struct inode *inode)
47987 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47988 {
47989 struct task_struct *task;
47990 int allowed = 0;
47991 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47992 */
47993 task = get_proc_task(inode);
47994 if (task) {
47995 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47996 + if (log)
47997 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
47998 + else
47999 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48000 put_task_struct(task);
48001 }
48002 return allowed;
48003 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48004 if (!task)
48005 goto out_no_task;
48006
48007 + if (gr_acl_handle_procpidmem(task))
48008 + goto out;
48009 +
48010 ret = -ENOMEM;
48011 page = (char *)__get_free_page(GFP_TEMPORARY);
48012 if (!page)
48013 @@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48014 path_put(&nd->path);
48015
48016 /* Are we allowed to snoop on the tasks file descriptors? */
48017 - if (!proc_fd_access_allowed(inode))
48018 + if (!proc_fd_access_allowed(inode,0))
48019 goto out;
48020
48021 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
48022 @@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48023 struct path path;
48024
48025 /* Are we allowed to snoop on the tasks file descriptors? */
48026 - if (!proc_fd_access_allowed(inode))
48027 - goto out;
48028 + /* logging this is needed for learning on chromium to work properly,
48029 + but we don't want to flood the logs from 'ps' which does a readlink
48030 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48031 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
48032 + */
48033 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48034 + if (!proc_fd_access_allowed(inode,0))
48035 + goto out;
48036 + } else {
48037 + if (!proc_fd_access_allowed(inode,1))
48038 + goto out;
48039 + }
48040
48041 error = PROC_I(inode)->op.proc_get_link(inode, &path);
48042 if (error)
48043 @@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48044 rcu_read_lock();
48045 cred = __task_cred(task);
48046 inode->i_uid = cred->euid;
48047 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48048 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48049 +#else
48050 inode->i_gid = cred->egid;
48051 +#endif
48052 rcu_read_unlock();
48053 }
48054 security_task_to_inode(task, inode);
48055 @@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48056 struct inode *inode = dentry->d_inode;
48057 struct task_struct *task;
48058 const struct cred *cred;
48059 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48060 + const struct cred *tmpcred = current_cred();
48061 +#endif
48062
48063 generic_fillattr(inode, stat);
48064
48065 @@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48066 stat->uid = 0;
48067 stat->gid = 0;
48068 task = pid_task(proc_pid(inode), PIDTYPE_PID);
48069 +
48070 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
48071 + rcu_read_unlock();
48072 + return -ENOENT;
48073 + }
48074 +
48075 if (task) {
48076 + cred = __task_cred(task);
48077 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48078 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48079 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48080 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48081 +#endif
48082 + ) {
48083 +#endif
48084 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48085 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48086 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48087 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48088 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48089 +#endif
48090 task_dumpable(task)) {
48091 - cred = __task_cred(task);
48092 stat->uid = cred->euid;
48093 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48094 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48095 +#else
48096 stat->gid = cred->egid;
48097 +#endif
48098 }
48099 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48100 + } else {
48101 + rcu_read_unlock();
48102 + return -ENOENT;
48103 + }
48104 +#endif
48105 }
48106 rcu_read_unlock();
48107 return 0;
48108 @@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48109
48110 if (task) {
48111 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48112 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48113 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48114 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48115 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48116 +#endif
48117 task_dumpable(task)) {
48118 rcu_read_lock();
48119 cred = __task_cred(task);
48120 inode->i_uid = cred->euid;
48121 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48122 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48123 +#else
48124 inode->i_gid = cred->egid;
48125 +#endif
48126 rcu_read_unlock();
48127 } else {
48128 inode->i_uid = 0;
48129 @@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48130 int fd = proc_fd(inode);
48131
48132 if (task) {
48133 - files = get_files_struct(task);
48134 + if (!gr_acl_handle_procpidmem(task))
48135 + files = get_files_struct(task);
48136 put_task_struct(task);
48137 }
48138 if (files) {
48139 @@ -2176,11 +2275,21 @@ static const struct file_operations proc_fd_operations = {
48140 */
48141 static int proc_fd_permission(struct inode *inode, int mask)
48142 {
48143 + struct task_struct *task;
48144 int rv = generic_permission(inode, mask);
48145 - if (rv == 0)
48146 - return 0;
48147 +
48148 if (task_pid(current) == proc_pid(inode))
48149 rv = 0;
48150 +
48151 + task = get_proc_task(inode);
48152 + if (task == NULL)
48153 + return rv;
48154 +
48155 + if (gr_acl_handle_procpidmem(task))
48156 + rv = -EACCES;
48157 +
48158 + put_task_struct(task);
48159 +
48160 return rv;
48161 }
48162
48163 @@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48164 if (!task)
48165 goto out_no_task;
48166
48167 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48168 + goto out;
48169 +
48170 /*
48171 * Yes, it does not scale. And it should not. Don't add
48172 * new entries into /proc/<tgid>/ without very good reasons.
48173 @@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct file *filp,
48174 if (!task)
48175 goto out_no_task;
48176
48177 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48178 + goto out;
48179 +
48180 ret = 0;
48181 i = filp->f_pos;
48182 switch (i) {
48183 @@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48184 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48185 void *cookie)
48186 {
48187 - char *s = nd_get_link(nd);
48188 + const char *s = nd_get_link(nd);
48189 if (!IS_ERR(s))
48190 __putname(s);
48191 }
48192 @@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48193 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48194 #endif
48195 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48196 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48197 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48198 INF("syscall", S_IRUGO, proc_pid_syscall),
48199 #endif
48200 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48201 @@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48202 #ifdef CONFIG_SECURITY
48203 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48204 #endif
48205 -#ifdef CONFIG_KALLSYMS
48206 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48207 INF("wchan", S_IRUGO, proc_pid_wchan),
48208 #endif
48209 -#ifdef CONFIG_STACKTRACE
48210 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48211 ONE("stack", S_IRUGO, proc_pid_stack),
48212 #endif
48213 #ifdef CONFIG_SCHEDSTATS
48214 @@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48215 #ifdef CONFIG_HARDWALL
48216 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48217 #endif
48218 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48219 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48220 +#endif
48221 };
48222
48223 static int proc_tgid_base_readdir(struct file * filp,
48224 @@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48225 if (!inode)
48226 goto out;
48227
48228 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48229 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48230 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48231 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48232 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48233 +#else
48234 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48235 +#endif
48236 inode->i_op = &proc_tgid_base_inode_operations;
48237 inode->i_fop = &proc_tgid_base_operations;
48238 inode->i_flags|=S_IMMUTABLE;
48239 @@ -3031,7 +3156,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48240 if (!task)
48241 goto out;
48242
48243 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48244 + goto out_put_task;
48245 +
48246 result = proc_pid_instantiate(dir, dentry, task, NULL);
48247 +out_put_task:
48248 put_task_struct(task);
48249 out:
48250 return result;
48251 @@ -3096,6 +3225,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48252 {
48253 unsigned int nr;
48254 struct task_struct *reaper;
48255 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48256 + const struct cred *tmpcred = current_cred();
48257 + const struct cred *itercred;
48258 +#endif
48259 + filldir_t __filldir = filldir;
48260 struct tgid_iter iter;
48261 struct pid_namespace *ns;
48262
48263 @@ -3119,8 +3253,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48264 for (iter = next_tgid(ns, iter);
48265 iter.task;
48266 iter.tgid += 1, iter = next_tgid(ns, iter)) {
48267 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48268 + rcu_read_lock();
48269 + itercred = __task_cred(iter.task);
48270 +#endif
48271 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
48272 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48273 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
48274 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48275 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48276 +#endif
48277 + )
48278 +#endif
48279 + )
48280 + __filldir = &gr_fake_filldir;
48281 + else
48282 + __filldir = filldir;
48283 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48284 + rcu_read_unlock();
48285 +#endif
48286 filp->f_pos = iter.tgid + TGID_OFFSET;
48287 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
48288 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
48289 put_task_struct(iter.task);
48290 goto out;
48291 }
48292 @@ -3148,7 +3301,7 @@ static const struct pid_entry tid_base_stuff[] = {
48293 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48294 #endif
48295 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48296 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48297 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48298 INF("syscall", S_IRUGO, proc_pid_syscall),
48299 #endif
48300 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48301 @@ -3172,10 +3325,10 @@ static const struct pid_entry tid_base_stuff[] = {
48302 #ifdef CONFIG_SECURITY
48303 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48304 #endif
48305 -#ifdef CONFIG_KALLSYMS
48306 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48307 INF("wchan", S_IRUGO, proc_pid_wchan),
48308 #endif
48309 -#ifdef CONFIG_STACKTRACE
48310 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48311 ONE("stack", S_IRUGO, proc_pid_stack),
48312 #endif
48313 #ifdef CONFIG_SCHEDSTATS
48314 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48315 index 82676e3..5f8518a 100644
48316 --- a/fs/proc/cmdline.c
48317 +++ b/fs/proc/cmdline.c
48318 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48319
48320 static int __init proc_cmdline_init(void)
48321 {
48322 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48323 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48324 +#else
48325 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48326 +#endif
48327 return 0;
48328 }
48329 module_init(proc_cmdline_init);
48330 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48331 index b143471..bb105e5 100644
48332 --- a/fs/proc/devices.c
48333 +++ b/fs/proc/devices.c
48334 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48335
48336 static int __init proc_devices_init(void)
48337 {
48338 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48339 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48340 +#else
48341 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48342 +#endif
48343 return 0;
48344 }
48345 module_init(proc_devices_init);
48346 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48347 index 7ed72d6..d5f061a 100644
48348 --- a/fs/proc/inode.c
48349 +++ b/fs/proc/inode.c
48350 @@ -18,12 +18,18 @@
48351 #include <linux/module.h>
48352 #include <linux/sysctl.h>
48353 #include <linux/slab.h>
48354 +#include <linux/grsecurity.h>
48355
48356 #include <asm/system.h>
48357 #include <asm/uaccess.h>
48358
48359 #include "internal.h"
48360
48361 +#ifdef CONFIG_PROC_SYSCTL
48362 +extern const struct inode_operations proc_sys_inode_operations;
48363 +extern const struct inode_operations proc_sys_dir_operations;
48364 +#endif
48365 +
48366 static void proc_evict_inode(struct inode *inode)
48367 {
48368 struct proc_dir_entry *de;
48369 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
48370 ns_ops = PROC_I(inode)->ns_ops;
48371 if (ns_ops && ns_ops->put)
48372 ns_ops->put(PROC_I(inode)->ns);
48373 +
48374 +#ifdef CONFIG_PROC_SYSCTL
48375 + if (inode->i_op == &proc_sys_inode_operations ||
48376 + inode->i_op == &proc_sys_dir_operations)
48377 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48378 +#endif
48379 +
48380 }
48381
48382 static struct kmem_cache * proc_inode_cachep;
48383 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48384 if (de->mode) {
48385 inode->i_mode = de->mode;
48386 inode->i_uid = de->uid;
48387 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48388 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48389 +#else
48390 inode->i_gid = de->gid;
48391 +#endif
48392 }
48393 if (de->size)
48394 inode->i_size = de->size;
48395 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48396 index 7838e5c..ff92cbc 100644
48397 --- a/fs/proc/internal.h
48398 +++ b/fs/proc/internal.h
48399 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48400 struct pid *pid, struct task_struct *task);
48401 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48402 struct pid *pid, struct task_struct *task);
48403 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48404 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48405 +#endif
48406 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48407
48408 extern const struct file_operations proc_maps_operations;
48409 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48410 index d245cb2..7e645bd 100644
48411 --- a/fs/proc/kcore.c
48412 +++ b/fs/proc/kcore.c
48413 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
48414 off_t offset = 0;
48415 struct kcore_list *m;
48416
48417 + pax_track_stack();
48418 +
48419 /* setup ELF header */
48420 elf = (struct elfhdr *) bufp;
48421 bufp += sizeof(struct elfhdr);
48422 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48423 * the addresses in the elf_phdr on our list.
48424 */
48425 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48426 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48427 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48428 + if (tsz > buflen)
48429 tsz = buflen;
48430 -
48431 +
48432 while (buflen) {
48433 struct kcore_list *m;
48434
48435 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48436 kfree(elf_buf);
48437 } else {
48438 if (kern_addr_valid(start)) {
48439 - unsigned long n;
48440 -
48441 - n = copy_to_user(buffer, (char *)start, tsz);
48442 - /*
48443 - * We cannot distingush between fault on source
48444 - * and fault on destination. When this happens
48445 - * we clear too and hope it will trigger the
48446 - * EFAULT again.
48447 - */
48448 - if (n) {
48449 - if (clear_user(buffer + tsz - n,
48450 - n))
48451 + char *elf_buf;
48452 + mm_segment_t oldfs;
48453 +
48454 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48455 + if (!elf_buf)
48456 + return -ENOMEM;
48457 + oldfs = get_fs();
48458 + set_fs(KERNEL_DS);
48459 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48460 + set_fs(oldfs);
48461 + if (copy_to_user(buffer, elf_buf, tsz)) {
48462 + kfree(elf_buf);
48463 return -EFAULT;
48464 + }
48465 }
48466 + set_fs(oldfs);
48467 + kfree(elf_buf);
48468 } else {
48469 if (clear_user(buffer, tsz))
48470 return -EFAULT;
48471 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48472
48473 static int open_kcore(struct inode *inode, struct file *filp)
48474 {
48475 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48476 + return -EPERM;
48477 +#endif
48478 if (!capable(CAP_SYS_RAWIO))
48479 return -EPERM;
48480 if (kcore_need_update)
48481 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48482 index 5861741..32c53bc 100644
48483 --- a/fs/proc/meminfo.c
48484 +++ b/fs/proc/meminfo.c
48485 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48486 unsigned long pages[NR_LRU_LISTS];
48487 int lru;
48488
48489 + pax_track_stack();
48490 +
48491 /*
48492 * display in kilobytes.
48493 */
48494 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48495 vmi.used >> 10,
48496 vmi.largest_chunk >> 10
48497 #ifdef CONFIG_MEMORY_FAILURE
48498 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48499 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48500 #endif
48501 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48502 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48503 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48504 index b1822dd..df622cb 100644
48505 --- a/fs/proc/nommu.c
48506 +++ b/fs/proc/nommu.c
48507 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48508 if (len < 1)
48509 len = 1;
48510 seq_printf(m, "%*c", len, ' ');
48511 - seq_path(m, &file->f_path, "");
48512 + seq_path(m, &file->f_path, "\n\\");
48513 }
48514
48515 seq_putc(m, '\n');
48516 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48517 index f738024..876984a 100644
48518 --- a/fs/proc/proc_net.c
48519 +++ b/fs/proc/proc_net.c
48520 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48521 struct task_struct *task;
48522 struct nsproxy *ns;
48523 struct net *net = NULL;
48524 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48525 + const struct cred *cred = current_cred();
48526 +#endif
48527 +
48528 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48529 + if (cred->fsuid)
48530 + return net;
48531 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48532 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48533 + return net;
48534 +#endif
48535
48536 rcu_read_lock();
48537 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48538 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48539 index 1a77dbe..56ec911 100644
48540 --- a/fs/proc/proc_sysctl.c
48541 +++ b/fs/proc/proc_sysctl.c
48542 @@ -8,11 +8,13 @@
48543 #include <linux/namei.h>
48544 #include "internal.h"
48545
48546 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48547 +
48548 static const struct dentry_operations proc_sys_dentry_operations;
48549 static const struct file_operations proc_sys_file_operations;
48550 -static const struct inode_operations proc_sys_inode_operations;
48551 +const struct inode_operations proc_sys_inode_operations;
48552 static const struct file_operations proc_sys_dir_file_operations;
48553 -static const struct inode_operations proc_sys_dir_operations;
48554 +const struct inode_operations proc_sys_dir_operations;
48555
48556 static struct inode *proc_sys_make_inode(struct super_block *sb,
48557 struct ctl_table_header *head, struct ctl_table *table)
48558 @@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48559
48560 err = NULL;
48561 d_set_d_op(dentry, &proc_sys_dentry_operations);
48562 +
48563 + gr_handle_proc_create(dentry, inode);
48564 +
48565 d_add(dentry, inode);
48566
48567 + if (gr_handle_sysctl(p, MAY_EXEC))
48568 + err = ERR_PTR(-ENOENT);
48569 +
48570 out:
48571 sysctl_head_finish(head);
48572 return err;
48573 @@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48574 return -ENOMEM;
48575 } else {
48576 d_set_d_op(child, &proc_sys_dentry_operations);
48577 +
48578 + gr_handle_proc_create(child, inode);
48579 +
48580 d_add(child, inode);
48581 }
48582 } else {
48583 @@ -230,6 +241,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48584 if (*pos < file->f_pos)
48585 continue;
48586
48587 + if (gr_handle_sysctl(table, 0))
48588 + continue;
48589 +
48590 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
48591 if (res)
48592 return res;
48593 @@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48594 if (IS_ERR(head))
48595 return PTR_ERR(head);
48596
48597 + if (table && gr_handle_sysctl(table, MAY_EXEC))
48598 + return -ENOENT;
48599 +
48600 generic_fillattr(inode, stat);
48601 if (table)
48602 stat->mode = (stat->mode & S_IFMT) | table->mode;
48603 @@ -370,17 +387,18 @@ static const struct file_operations proc_sys_file_operations = {
48604 };
48605
48606 static const struct file_operations proc_sys_dir_file_operations = {
48607 + .read = generic_read_dir,
48608 .readdir = proc_sys_readdir,
48609 .llseek = generic_file_llseek,
48610 };
48611
48612 -static const struct inode_operations proc_sys_inode_operations = {
48613 +const struct inode_operations proc_sys_inode_operations = {
48614 .permission = proc_sys_permission,
48615 .setattr = proc_sys_setattr,
48616 .getattr = proc_sys_getattr,
48617 };
48618
48619 -static const struct inode_operations proc_sys_dir_operations = {
48620 +const struct inode_operations proc_sys_dir_operations = {
48621 .lookup = proc_sys_lookup,
48622 .permission = proc_sys_permission,
48623 .setattr = proc_sys_setattr,
48624 diff --git a/fs/proc/root.c b/fs/proc/root.c
48625 index 9a8a2b7..3018df6 100644
48626 --- a/fs/proc/root.c
48627 +++ b/fs/proc/root.c
48628 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
48629 #ifdef CONFIG_PROC_DEVICETREE
48630 proc_device_tree_init();
48631 #endif
48632 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48633 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48634 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48635 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48636 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48637 +#endif
48638 +#else
48639 proc_mkdir("bus", NULL);
48640 +#endif
48641 proc_sys_init();
48642 }
48643
48644 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48645 index c7d4ee6..41c5564 100644
48646 --- a/fs/proc/task_mmu.c
48647 +++ b/fs/proc/task_mmu.c
48648 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48649 "VmExe:\t%8lu kB\n"
48650 "VmLib:\t%8lu kB\n"
48651 "VmPTE:\t%8lu kB\n"
48652 - "VmSwap:\t%8lu kB\n",
48653 - hiwater_vm << (PAGE_SHIFT-10),
48654 + "VmSwap:\t%8lu kB\n"
48655 +
48656 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48657 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48658 +#endif
48659 +
48660 + ,hiwater_vm << (PAGE_SHIFT-10),
48661 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48662 mm->locked_vm << (PAGE_SHIFT-10),
48663 hiwater_rss << (PAGE_SHIFT-10),
48664 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48665 data << (PAGE_SHIFT-10),
48666 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48667 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48668 - swap << (PAGE_SHIFT-10));
48669 + swap << (PAGE_SHIFT-10)
48670 +
48671 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48672 + , mm->context.user_cs_base, mm->context.user_cs_limit
48673 +#endif
48674 +
48675 + );
48676 }
48677
48678 unsigned long task_vsize(struct mm_struct *mm)
48679 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
48680 return ret;
48681 }
48682
48683 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48684 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48685 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48686 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48687 +#endif
48688 +
48689 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48690 {
48691 struct mm_struct *mm = vma->vm_mm;
48692 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48693 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48694 }
48695
48696 - /* We don't show the stack guard page in /proc/maps */
48697 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48698 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48699 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48700 +#else
48701 start = vma->vm_start;
48702 - if (stack_guard_page_start(vma, start))
48703 - start += PAGE_SIZE;
48704 end = vma->vm_end;
48705 - if (stack_guard_page_end(vma, end))
48706 - end -= PAGE_SIZE;
48707 +#endif
48708
48709 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48710 start,
48711 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48712 flags & VM_WRITE ? 'w' : '-',
48713 flags & VM_EXEC ? 'x' : '-',
48714 flags & VM_MAYSHARE ? 's' : 'p',
48715 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48716 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48717 +#else
48718 pgoff,
48719 +#endif
48720 MAJOR(dev), MINOR(dev), ino, &len);
48721
48722 /*
48723 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48724 */
48725 if (file) {
48726 pad_len_spaces(m, len);
48727 - seq_path(m, &file->f_path, "\n");
48728 + seq_path(m, &file->f_path, "\n\\");
48729 } else {
48730 const char *name = arch_vma_name(vma);
48731 if (!name) {
48732 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48733 if (vma->vm_start <= mm->brk &&
48734 vma->vm_end >= mm->start_brk) {
48735 name = "[heap]";
48736 - } else if (vma->vm_start <= mm->start_stack &&
48737 - vma->vm_end >= mm->start_stack) {
48738 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48739 + (vma->vm_start <= mm->start_stack &&
48740 + vma->vm_end >= mm->start_stack)) {
48741 name = "[stack]";
48742 }
48743 } else {
48744 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m, void *v)
48745 };
48746
48747 memset(&mss, 0, sizeof mss);
48748 - mss.vma = vma;
48749 - /* mmap_sem is held in m_start */
48750 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48751 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48752 -
48753 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48754 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48755 +#endif
48756 + mss.vma = vma;
48757 + /* mmap_sem is held in m_start */
48758 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48759 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48760 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48761 + }
48762 +#endif
48763 show_map_vma(m, vma);
48764
48765 seq_printf(m,
48766 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m, void *v)
48767 "KernelPageSize: %8lu kB\n"
48768 "MMUPageSize: %8lu kB\n"
48769 "Locked: %8lu kB\n",
48770 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48771 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48772 +#else
48773 (vma->vm_end - vma->vm_start) >> 10,
48774 +#endif
48775 mss.resident >> 10,
48776 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48777 mss.shared_clean >> 10,
48778 @@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file *m, void *v)
48779
48780 if (file) {
48781 seq_printf(m, " file=");
48782 - seq_path(m, &file->f_path, "\n\t= ");
48783 + seq_path(m, &file->f_path, "\n\t\\= ");
48784 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48785 seq_printf(m, " heap");
48786 } else if (vma->vm_start <= mm->start_stack &&
48787 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48788 index 980de54..2a4db5f 100644
48789 --- a/fs/proc/task_nommu.c
48790 +++ b/fs/proc/task_nommu.c
48791 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48792 else
48793 bytes += kobjsize(mm);
48794
48795 - if (current->fs && current->fs->users > 1)
48796 + if (current->fs && atomic_read(&current->fs->users) > 1)
48797 sbytes += kobjsize(current->fs);
48798 else
48799 bytes += kobjsize(current->fs);
48800 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48801
48802 if (file) {
48803 pad_len_spaces(m, len);
48804 - seq_path(m, &file->f_path, "");
48805 + seq_path(m, &file->f_path, "\n\\");
48806 } else if (mm) {
48807 if (vma->vm_start <= mm->start_stack &&
48808 vma->vm_end >= mm->start_stack) {
48809 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48810 index d67908b..d13f6a6 100644
48811 --- a/fs/quota/netlink.c
48812 +++ b/fs/quota/netlink.c
48813 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48814 void quota_send_warning(short type, unsigned int id, dev_t dev,
48815 const char warntype)
48816 {
48817 - static atomic_t seq;
48818 + static atomic_unchecked_t seq;
48819 struct sk_buff *skb;
48820 void *msg_head;
48821 int ret;
48822 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48823 "VFS: Not enough memory to send quota warning.\n");
48824 return;
48825 }
48826 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48827 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48828 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48829 if (!msg_head) {
48830 printk(KERN_ERR
48831 diff --git a/fs/readdir.c b/fs/readdir.c
48832 index 356f715..c918d38 100644
48833 --- a/fs/readdir.c
48834 +++ b/fs/readdir.c
48835 @@ -17,6 +17,7 @@
48836 #include <linux/security.h>
48837 #include <linux/syscalls.h>
48838 #include <linux/unistd.h>
48839 +#include <linux/namei.h>
48840
48841 #include <asm/uaccess.h>
48842
48843 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48844
48845 struct readdir_callback {
48846 struct old_linux_dirent __user * dirent;
48847 + struct file * file;
48848 int result;
48849 };
48850
48851 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48852 buf->result = -EOVERFLOW;
48853 return -EOVERFLOW;
48854 }
48855 +
48856 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48857 + return 0;
48858 +
48859 buf->result++;
48860 dirent = buf->dirent;
48861 if (!access_ok(VERIFY_WRITE, dirent,
48862 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48863
48864 buf.result = 0;
48865 buf.dirent = dirent;
48866 + buf.file = file;
48867
48868 error = vfs_readdir(file, fillonedir, &buf);
48869 if (buf.result)
48870 @@ -142,6 +149,7 @@ struct linux_dirent {
48871 struct getdents_callback {
48872 struct linux_dirent __user * current_dir;
48873 struct linux_dirent __user * previous;
48874 + struct file * file;
48875 int count;
48876 int error;
48877 };
48878 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48879 buf->error = -EOVERFLOW;
48880 return -EOVERFLOW;
48881 }
48882 +
48883 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48884 + return 0;
48885 +
48886 dirent = buf->previous;
48887 if (dirent) {
48888 if (__put_user(offset, &dirent->d_off))
48889 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48890 buf.previous = NULL;
48891 buf.count = count;
48892 buf.error = 0;
48893 + buf.file = file;
48894
48895 error = vfs_readdir(file, filldir, &buf);
48896 if (error >= 0)
48897 @@ -229,6 +242,7 @@ out:
48898 struct getdents_callback64 {
48899 struct linux_dirent64 __user * current_dir;
48900 struct linux_dirent64 __user * previous;
48901 + struct file *file;
48902 int count;
48903 int error;
48904 };
48905 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48906 buf->error = -EINVAL; /* only used if we fail.. */
48907 if (reclen > buf->count)
48908 return -EINVAL;
48909 +
48910 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48911 + return 0;
48912 +
48913 dirent = buf->previous;
48914 if (dirent) {
48915 if (__put_user(offset, &dirent->d_off))
48916 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48917
48918 buf.current_dir = dirent;
48919 buf.previous = NULL;
48920 + buf.file = file;
48921 buf.count = count;
48922 buf.error = 0;
48923
48924 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48925 error = buf.error;
48926 lastdirent = buf.previous;
48927 if (lastdirent) {
48928 - typeof(lastdirent->d_off) d_off = file->f_pos;
48929 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48930 if (__put_user(d_off, &lastdirent->d_off))
48931 error = -EFAULT;
48932 else
48933 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
48934 index 133e935..349ef18 100644
48935 --- a/fs/reiserfs/dir.c
48936 +++ b/fs/reiserfs/dir.c
48937 @@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
48938 struct reiserfs_dir_entry de;
48939 int ret = 0;
48940
48941 + pax_track_stack();
48942 +
48943 reiserfs_write_lock(inode->i_sb);
48944
48945 reiserfs_check_lock_depth(inode->i_sb, "readdir");
48946 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48947 index 60c0804..d814f98 100644
48948 --- a/fs/reiserfs/do_balan.c
48949 +++ b/fs/reiserfs/do_balan.c
48950 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48951 return;
48952 }
48953
48954 - atomic_inc(&(fs_generation(tb->tb_sb)));
48955 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48956 do_balance_starts(tb);
48957
48958 /* balance leaf returns 0 except if combining L R and S into
48959 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
48960 index a159ba5..0396a76 100644
48961 --- a/fs/reiserfs/journal.c
48962 +++ b/fs/reiserfs/journal.c
48963 @@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
48964 struct buffer_head *bh;
48965 int i, j;
48966
48967 + pax_track_stack();
48968 +
48969 bh = __getblk(dev, block, bufsize);
48970 if (buffer_uptodate(bh))
48971 return (bh);
48972 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
48973 index ef39232..0fa91ba 100644
48974 --- a/fs/reiserfs/namei.c
48975 +++ b/fs/reiserfs/namei.c
48976 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
48977 unsigned long savelink = 1;
48978 struct timespec ctime;
48979
48980 + pax_track_stack();
48981 +
48982 /* three balancings: (1) old name removal, (2) new name insertion
48983 and (3) maybe "save" link insertion
48984 stat data updates: (1) old directory,
48985 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48986 index 7a99811..2c9286f 100644
48987 --- a/fs/reiserfs/procfs.c
48988 +++ b/fs/reiserfs/procfs.c
48989 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48990 "SMALL_TAILS " : "NO_TAILS ",
48991 replay_only(sb) ? "REPLAY_ONLY " : "",
48992 convert_reiserfs(sb) ? "CONV " : "",
48993 - atomic_read(&r->s_generation_counter),
48994 + atomic_read_unchecked(&r->s_generation_counter),
48995 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48996 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48997 SF(s_good_search_by_key_reada), SF(s_bmaps),
48998 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
48999 struct journal_params *jp = &rs->s_v1.s_journal;
49000 char b[BDEVNAME_SIZE];
49001
49002 + pax_track_stack();
49003 +
49004 seq_printf(m, /* on-disk fields */
49005 "jp_journal_1st_block: \t%i\n"
49006 "jp_journal_dev: \t%s[%x]\n"
49007 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
49008 index 313d39d..3a5811b 100644
49009 --- a/fs/reiserfs/stree.c
49010 +++ b/fs/reiserfs/stree.c
49011 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
49012 int iter = 0;
49013 #endif
49014
49015 + pax_track_stack();
49016 +
49017 BUG_ON(!th->t_trans_id);
49018
49019 init_tb_struct(th, &s_del_balance, sb, path,
49020 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
49021 int retval;
49022 int quota_cut_bytes = 0;
49023
49024 + pax_track_stack();
49025 +
49026 BUG_ON(!th->t_trans_id);
49027
49028 le_key2cpu_key(&cpu_key, key);
49029 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
49030 int quota_cut_bytes;
49031 loff_t tail_pos = 0;
49032
49033 + pax_track_stack();
49034 +
49035 BUG_ON(!th->t_trans_id);
49036
49037 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
49038 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
49039 int retval;
49040 int fs_gen;
49041
49042 + pax_track_stack();
49043 +
49044 BUG_ON(!th->t_trans_id);
49045
49046 fs_gen = get_generation(inode->i_sb);
49047 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
49048 int fs_gen = 0;
49049 int quota_bytes = 0;
49050
49051 + pax_track_stack();
49052 +
49053 BUG_ON(!th->t_trans_id);
49054
49055 if (inode) { /* Do we count quotas for item? */
49056 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
49057 index 14363b9..dd95a04 100644
49058 --- a/fs/reiserfs/super.c
49059 +++ b/fs/reiserfs/super.c
49060 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
49061 {.option_name = NULL}
49062 };
49063
49064 + pax_track_stack();
49065 +
49066 *blocks = 0;
49067 if (!options || !*options)
49068 /* use default configuration: create tails, journaling on, no
49069 diff --git a/fs/select.c b/fs/select.c
49070 index d33418f..f8e06bc 100644
49071 --- a/fs/select.c
49072 +++ b/fs/select.c
49073 @@ -20,6 +20,7 @@
49074 #include <linux/module.h>
49075 #include <linux/slab.h>
49076 #include <linux/poll.h>
49077 +#include <linux/security.h>
49078 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49079 #include <linux/file.h>
49080 #include <linux/fdtable.h>
49081 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
49082 int retval, i, timed_out = 0;
49083 unsigned long slack = 0;
49084
49085 + pax_track_stack();
49086 +
49087 rcu_read_lock();
49088 retval = max_select_fd(n, fds);
49089 rcu_read_unlock();
49090 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
49091 /* Allocate small arguments on the stack to save memory and be faster */
49092 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
49093
49094 + pax_track_stack();
49095 +
49096 ret = -EINVAL;
49097 if (n < 0)
49098 goto out_nofds;
49099 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49100 struct poll_list *walk = head;
49101 unsigned long todo = nfds;
49102
49103 + pax_track_stack();
49104 +
49105 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49106 if (nfds > rlimit(RLIMIT_NOFILE))
49107 return -EINVAL;
49108
49109 diff --git a/fs/seq_file.c b/fs/seq_file.c
49110 index 05d6b0e..ee96362 100644
49111 --- a/fs/seq_file.c
49112 +++ b/fs/seq_file.c
49113 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
49114 return 0;
49115 }
49116 if (!m->buf) {
49117 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49118 + m->size = PAGE_SIZE;
49119 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49120 if (!m->buf)
49121 return -ENOMEM;
49122 }
49123 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
49124 Eoverflow:
49125 m->op->stop(m, p);
49126 kfree(m->buf);
49127 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49128 + m->size <<= 1;
49129 + m->buf = kmalloc(m->size, GFP_KERNEL);
49130 return !m->buf ? -ENOMEM : -EAGAIN;
49131 }
49132
49133 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49134 m->version = file->f_version;
49135 /* grab buffer if we didn't have one */
49136 if (!m->buf) {
49137 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49138 + m->size = PAGE_SIZE;
49139 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
49140 if (!m->buf)
49141 goto Enomem;
49142 }
49143 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49144 goto Fill;
49145 m->op->stop(m, p);
49146 kfree(m->buf);
49147 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49148 + m->size <<= 1;
49149 + m->buf = kmalloc(m->size, GFP_KERNEL);
49150 if (!m->buf)
49151 goto Enomem;
49152 m->count = 0;
49153 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
49154 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49155 void *data)
49156 {
49157 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49158 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49159 int res = -ENOMEM;
49160
49161 if (op) {
49162 diff --git a/fs/splice.c b/fs/splice.c
49163 index fa2defa..9a697a5 100644
49164 --- a/fs/splice.c
49165 +++ b/fs/splice.c
49166 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49167 pipe_lock(pipe);
49168
49169 for (;;) {
49170 - if (!pipe->readers) {
49171 + if (!atomic_read(&pipe->readers)) {
49172 send_sig(SIGPIPE, current, 0);
49173 if (!ret)
49174 ret = -EPIPE;
49175 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49176 do_wakeup = 0;
49177 }
49178
49179 - pipe->waiting_writers++;
49180 + atomic_inc(&pipe->waiting_writers);
49181 pipe_wait(pipe);
49182 - pipe->waiting_writers--;
49183 + atomic_dec(&pipe->waiting_writers);
49184 }
49185
49186 pipe_unlock(pipe);
49187 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
49188 .spd_release = spd_release_page,
49189 };
49190
49191 + pax_track_stack();
49192 +
49193 if (splice_grow_spd(pipe, &spd))
49194 return -ENOMEM;
49195
49196 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49197 old_fs = get_fs();
49198 set_fs(get_ds());
49199 /* The cast to a user pointer is valid due to the set_fs() */
49200 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49201 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49202 set_fs(old_fs);
49203
49204 return res;
49205 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49206 old_fs = get_fs();
49207 set_fs(get_ds());
49208 /* The cast to a user pointer is valid due to the set_fs() */
49209 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49210 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49211 set_fs(old_fs);
49212
49213 return res;
49214 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49215 .spd_release = spd_release_page,
49216 };
49217
49218 + pax_track_stack();
49219 +
49220 if (splice_grow_spd(pipe, &spd))
49221 return -ENOMEM;
49222
49223 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49224 goto err;
49225
49226 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49227 - vec[i].iov_base = (void __user *) page_address(page);
49228 + vec[i].iov_base = (void __force_user *) page_address(page);
49229 vec[i].iov_len = this_len;
49230 spd.pages[i] = page;
49231 spd.nr_pages++;
49232 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49233 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49234 {
49235 while (!pipe->nrbufs) {
49236 - if (!pipe->writers)
49237 + if (!atomic_read(&pipe->writers))
49238 return 0;
49239
49240 - if (!pipe->waiting_writers && sd->num_spliced)
49241 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49242 return 0;
49243
49244 if (sd->flags & SPLICE_F_NONBLOCK)
49245 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49246 * out of the pipe right after the splice_to_pipe(). So set
49247 * PIPE_READERS appropriately.
49248 */
49249 - pipe->readers = 1;
49250 + atomic_set(&pipe->readers, 1);
49251
49252 current->splice_pipe = pipe;
49253 }
49254 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
49255 };
49256 long ret;
49257
49258 + pax_track_stack();
49259 +
49260 pipe = get_pipe_info(file);
49261 if (!pipe)
49262 return -EBADF;
49263 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49264 ret = -ERESTARTSYS;
49265 break;
49266 }
49267 - if (!pipe->writers)
49268 + if (!atomic_read(&pipe->writers))
49269 break;
49270 - if (!pipe->waiting_writers) {
49271 + if (!atomic_read(&pipe->waiting_writers)) {
49272 if (flags & SPLICE_F_NONBLOCK) {
49273 ret = -EAGAIN;
49274 break;
49275 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49276 pipe_lock(pipe);
49277
49278 while (pipe->nrbufs >= pipe->buffers) {
49279 - if (!pipe->readers) {
49280 + if (!atomic_read(&pipe->readers)) {
49281 send_sig(SIGPIPE, current, 0);
49282 ret = -EPIPE;
49283 break;
49284 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49285 ret = -ERESTARTSYS;
49286 break;
49287 }
49288 - pipe->waiting_writers++;
49289 + atomic_inc(&pipe->waiting_writers);
49290 pipe_wait(pipe);
49291 - pipe->waiting_writers--;
49292 + atomic_dec(&pipe->waiting_writers);
49293 }
49294
49295 pipe_unlock(pipe);
49296 @@ -1819,14 +1825,14 @@ retry:
49297 pipe_double_lock(ipipe, opipe);
49298
49299 do {
49300 - if (!opipe->readers) {
49301 + if (!atomic_read(&opipe->readers)) {
49302 send_sig(SIGPIPE, current, 0);
49303 if (!ret)
49304 ret = -EPIPE;
49305 break;
49306 }
49307
49308 - if (!ipipe->nrbufs && !ipipe->writers)
49309 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49310 break;
49311
49312 /*
49313 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49314 pipe_double_lock(ipipe, opipe);
49315
49316 do {
49317 - if (!opipe->readers) {
49318 + if (!atomic_read(&opipe->readers)) {
49319 send_sig(SIGPIPE, current, 0);
49320 if (!ret)
49321 ret = -EPIPE;
49322 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49323 * return EAGAIN if we have the potential of some data in the
49324 * future, otherwise just return 0
49325 */
49326 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49327 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49328 ret = -EAGAIN;
49329
49330 pipe_unlock(ipipe);
49331 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49332 index 1ad8c93..6633545 100644
49333 --- a/fs/sysfs/file.c
49334 +++ b/fs/sysfs/file.c
49335 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49336
49337 struct sysfs_open_dirent {
49338 atomic_t refcnt;
49339 - atomic_t event;
49340 + atomic_unchecked_t event;
49341 wait_queue_head_t poll;
49342 struct list_head buffers; /* goes through sysfs_buffer.list */
49343 };
49344 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49345 if (!sysfs_get_active(attr_sd))
49346 return -ENODEV;
49347
49348 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49349 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49350 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49351
49352 sysfs_put_active(attr_sd);
49353 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49354 return -ENOMEM;
49355
49356 atomic_set(&new_od->refcnt, 0);
49357 - atomic_set(&new_od->event, 1);
49358 + atomic_set_unchecked(&new_od->event, 1);
49359 init_waitqueue_head(&new_od->poll);
49360 INIT_LIST_HEAD(&new_od->buffers);
49361 goto retry;
49362 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49363
49364 sysfs_put_active(attr_sd);
49365
49366 - if (buffer->event != atomic_read(&od->event))
49367 + if (buffer->event != atomic_read_unchecked(&od->event))
49368 goto trigger;
49369
49370 return DEFAULT_POLLMASK;
49371 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49372
49373 od = sd->s_attr.open;
49374 if (od) {
49375 - atomic_inc(&od->event);
49376 + atomic_inc_unchecked(&od->event);
49377 wake_up_interruptible(&od->poll);
49378 }
49379
49380 diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
49381 index e34f0d9..740ea7b 100644
49382 --- a/fs/sysfs/mount.c
49383 +++ b/fs/sysfs/mount.c
49384 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
49385 .s_name = "",
49386 .s_count = ATOMIC_INIT(1),
49387 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
49388 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49389 + .s_mode = S_IFDIR | S_IRWXU,
49390 +#else
49391 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49392 +#endif
49393 .s_ino = 1,
49394 };
49395
49396 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49397 index a7ac78f..02158e1 100644
49398 --- a/fs/sysfs/symlink.c
49399 +++ b/fs/sysfs/symlink.c
49400 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49401
49402 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49403 {
49404 - char *page = nd_get_link(nd);
49405 + const char *page = nd_get_link(nd);
49406 if (!IS_ERR(page))
49407 free_page((unsigned long)page);
49408 }
49409 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
49410 index 1d1358e..408bedb 100644
49411 --- a/fs/udf/inode.c
49412 +++ b/fs/udf/inode.c
49413 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
49414 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
49415 int lastblock = 0;
49416
49417 + pax_track_stack();
49418 +
49419 prev_epos.offset = udf_file_entry_alloc_offset(inode);
49420 prev_epos.block = iinfo->i_location;
49421 prev_epos.bh = NULL;
49422 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49423 index 9215700..bf1f68e 100644
49424 --- a/fs/udf/misc.c
49425 +++ b/fs/udf/misc.c
49426 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49427
49428 u8 udf_tag_checksum(const struct tag *t)
49429 {
49430 - u8 *data = (u8 *)t;
49431 + const u8 *data = (const u8 *)t;
49432 u8 checksum = 0;
49433 int i;
49434 for (i = 0; i < sizeof(struct tag); ++i)
49435 diff --git a/fs/utimes.c b/fs/utimes.c
49436 index ba653f3..06ea4b1 100644
49437 --- a/fs/utimes.c
49438 +++ b/fs/utimes.c
49439 @@ -1,6 +1,7 @@
49440 #include <linux/compiler.h>
49441 #include <linux/file.h>
49442 #include <linux/fs.h>
49443 +#include <linux/security.h>
49444 #include <linux/linkage.h>
49445 #include <linux/mount.h>
49446 #include <linux/namei.h>
49447 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49448 goto mnt_drop_write_and_out;
49449 }
49450 }
49451 +
49452 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49453 + error = -EACCES;
49454 + goto mnt_drop_write_and_out;
49455 + }
49456 +
49457 mutex_lock(&inode->i_mutex);
49458 error = notify_change(path->dentry, &newattrs);
49459 mutex_unlock(&inode->i_mutex);
49460 diff --git a/fs/xattr.c b/fs/xattr.c
49461 index f060663..def7007 100644
49462 --- a/fs/xattr.c
49463 +++ b/fs/xattr.c
49464 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49465 * Extended attribute SET operations
49466 */
49467 static long
49468 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49469 +setxattr(struct path *path, const char __user *name, const void __user *value,
49470 size_t size, int flags)
49471 {
49472 int error;
49473 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49474 return PTR_ERR(kvalue);
49475 }
49476
49477 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49478 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49479 + error = -EACCES;
49480 + goto out;
49481 + }
49482 +
49483 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49484 +out:
49485 kfree(kvalue);
49486 return error;
49487 }
49488 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49489 return error;
49490 error = mnt_want_write(path.mnt);
49491 if (!error) {
49492 - error = setxattr(path.dentry, name, value, size, flags);
49493 + error = setxattr(&path, name, value, size, flags);
49494 mnt_drop_write(path.mnt);
49495 }
49496 path_put(&path);
49497 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49498 return error;
49499 error = mnt_want_write(path.mnt);
49500 if (!error) {
49501 - error = setxattr(path.dentry, name, value, size, flags);
49502 + error = setxattr(&path, name, value, size, flags);
49503 mnt_drop_write(path.mnt);
49504 }
49505 path_put(&path);
49506 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49507 const void __user *,value, size_t, size, int, flags)
49508 {
49509 struct file *f;
49510 - struct dentry *dentry;
49511 int error = -EBADF;
49512
49513 f = fget(fd);
49514 if (!f)
49515 return error;
49516 - dentry = f->f_path.dentry;
49517 - audit_inode(NULL, dentry);
49518 + audit_inode(NULL, f->f_path.dentry);
49519 error = mnt_want_write_file(f);
49520 if (!error) {
49521 - error = setxattr(dentry, name, value, size, flags);
49522 + error = setxattr(&f->f_path, name, value, size, flags);
49523 mnt_drop_write(f->f_path.mnt);
49524 }
49525 fput(f);
49526 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49527 index 8d5a506..7f62712 100644
49528 --- a/fs/xattr_acl.c
49529 +++ b/fs/xattr_acl.c
49530 @@ -17,8 +17,8 @@
49531 struct posix_acl *
49532 posix_acl_from_xattr(const void *value, size_t size)
49533 {
49534 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49535 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49536 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49537 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49538 int count;
49539 struct posix_acl *acl;
49540 struct posix_acl_entry *acl_e;
49541 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49542 index 452a291..91a95f3b 100644
49543 --- a/fs/xfs/xfs_bmap.c
49544 +++ b/fs/xfs/xfs_bmap.c
49545 @@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
49546 int nmap,
49547 int ret_nmap);
49548 #else
49549 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49550 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49551 #endif /* DEBUG */
49552
49553 STATIC int
49554 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49555 index 79d05e8..e3e5861 100644
49556 --- a/fs/xfs/xfs_dir2_sf.c
49557 +++ b/fs/xfs/xfs_dir2_sf.c
49558 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49559 }
49560
49561 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49562 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49563 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49564 + char name[sfep->namelen];
49565 + memcpy(name, sfep->name, sfep->namelen);
49566 + if (filldir(dirent, name, sfep->namelen,
49567 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49568 + *offset = off & 0x7fffffff;
49569 + return 0;
49570 + }
49571 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49572 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49573 *offset = off & 0x7fffffff;
49574 return 0;
49575 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49576 index f7ce7de..e1a5db0 100644
49577 --- a/fs/xfs/xfs_ioctl.c
49578 +++ b/fs/xfs/xfs_ioctl.c
49579 @@ -128,7 +128,7 @@ xfs_find_handle(
49580 }
49581
49582 error = -EFAULT;
49583 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49584 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49585 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49586 goto out_put;
49587
49588 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49589 index 673704f..74315c5 100644
49590 --- a/fs/xfs/xfs_iops.c
49591 +++ b/fs/xfs/xfs_iops.c
49592 @@ -446,7 +446,7 @@ xfs_vn_put_link(
49593 struct nameidata *nd,
49594 void *p)
49595 {
49596 - char *s = nd_get_link(nd);
49597 + const char *s = nd_get_link(nd);
49598
49599 if (!IS_ERR(s))
49600 kfree(s);
49601 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
49602 index 51fc429..a728e71 100644
49603 --- a/fs/xfs/xfs_vnodeops.c
49604 +++ b/fs/xfs/xfs_vnodeops.c
49605 @@ -123,13 +123,17 @@ xfs_readlink(
49606
49607 xfs_ilock(ip, XFS_ILOCK_SHARED);
49608
49609 - ASSERT(S_ISLNK(ip->i_d.di_mode));
49610 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
49611 -
49612 pathlen = ip->i_d.di_size;
49613 if (!pathlen)
49614 goto out;
49615
49616 + if (pathlen > MAXPATHLEN) {
49617 + xfs_alert(mp, "%s: inode (%llu) symlink length (%d) too long",
49618 + __func__, (unsigned long long)ip->i_ino, pathlen);
49619 + ASSERT(0);
49620 + return XFS_ERROR(EFSCORRUPTED);
49621 + }
49622 +
49623 if (ip->i_df.if_flags & XFS_IFINLINE) {
49624 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
49625 link[pathlen] = '\0';
49626 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49627 new file mode 100644
49628 index 0000000..9629731
49629 --- /dev/null
49630 +++ b/grsecurity/Kconfig
49631 @@ -0,0 +1,1037 @@
49632 +#
49633 +# grecurity configuration
49634 +#
49635 +
49636 +menu "Grsecurity"
49637 +
49638 +config GRKERNSEC
49639 + bool "Grsecurity"
49640 + select CRYPTO
49641 + select CRYPTO_SHA256
49642 + help
49643 + If you say Y here, you will be able to configure many features
49644 + that will enhance the security of your system. It is highly
49645 + recommended that you say Y here and read through the help
49646 + for each option so that you fully understand the features and
49647 + can evaluate their usefulness for your machine.
49648 +
49649 +choice
49650 + prompt "Security Level"
49651 + depends on GRKERNSEC
49652 + default GRKERNSEC_CUSTOM
49653 +
49654 +config GRKERNSEC_LOW
49655 + bool "Low"
49656 + select GRKERNSEC_LINK
49657 + select GRKERNSEC_FIFO
49658 + select GRKERNSEC_RANDNET
49659 + select GRKERNSEC_DMESG
49660 + select GRKERNSEC_CHROOT
49661 + select GRKERNSEC_CHROOT_CHDIR
49662 +
49663 + help
49664 + If you choose this option, several of the grsecurity options will
49665 + be enabled that will give you greater protection against a number
49666 + of attacks, while assuring that none of your software will have any
49667 + conflicts with the additional security measures. If you run a lot
49668 + of unusual software, or you are having problems with the higher
49669 + security levels, you should say Y here. With this option, the
49670 + following features are enabled:
49671 +
49672 + - Linking restrictions
49673 + - FIFO restrictions
49674 + - Restricted dmesg
49675 + - Enforced chdir("/") on chroot
49676 + - Runtime module disabling
49677 +
49678 +config GRKERNSEC_MEDIUM
49679 + bool "Medium"
49680 + select PAX
49681 + select PAX_EI_PAX
49682 + select PAX_PT_PAX_FLAGS
49683 + select PAX_HAVE_ACL_FLAGS
49684 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49685 + select GRKERNSEC_CHROOT
49686 + select GRKERNSEC_CHROOT_SYSCTL
49687 + select GRKERNSEC_LINK
49688 + select GRKERNSEC_FIFO
49689 + select GRKERNSEC_DMESG
49690 + select GRKERNSEC_RANDNET
49691 + select GRKERNSEC_FORKFAIL
49692 + select GRKERNSEC_TIME
49693 + select GRKERNSEC_SIGNAL
49694 + select GRKERNSEC_CHROOT
49695 + select GRKERNSEC_CHROOT_UNIX
49696 + select GRKERNSEC_CHROOT_MOUNT
49697 + select GRKERNSEC_CHROOT_PIVOT
49698 + select GRKERNSEC_CHROOT_DOUBLE
49699 + select GRKERNSEC_CHROOT_CHDIR
49700 + select GRKERNSEC_CHROOT_MKNOD
49701 + select GRKERNSEC_PROC
49702 + select GRKERNSEC_PROC_USERGROUP
49703 + select PAX_RANDUSTACK
49704 + select PAX_ASLR
49705 + select PAX_RANDMMAP
49706 + select PAX_REFCOUNT if (X86 || SPARC64)
49707 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49708 +
49709 + help
49710 + If you say Y here, several features in addition to those included
49711 + in the low additional security level will be enabled. These
49712 + features provide even more security to your system, though in rare
49713 + cases they may be incompatible with very old or poorly written
49714 + software. If you enable this option, make sure that your auth
49715 + service (identd) is running as gid 1001. With this option,
49716 + the following features (in addition to those provided in the
49717 + low additional security level) will be enabled:
49718 +
49719 + - Failed fork logging
49720 + - Time change logging
49721 + - Signal logging
49722 + - Deny mounts in chroot
49723 + - Deny double chrooting
49724 + - Deny sysctl writes in chroot
49725 + - Deny mknod in chroot
49726 + - Deny access to abstract AF_UNIX sockets out of chroot
49727 + - Deny pivot_root in chroot
49728 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49729 + - /proc restrictions with special GID set to 10 (usually wheel)
49730 + - Address Space Layout Randomization (ASLR)
49731 + - Prevent exploitation of most refcount overflows
49732 + - Bounds checking of copying between the kernel and userland
49733 +
49734 +config GRKERNSEC_HIGH
49735 + bool "High"
49736 + select GRKERNSEC_LINK
49737 + select GRKERNSEC_FIFO
49738 + select GRKERNSEC_DMESG
49739 + select GRKERNSEC_FORKFAIL
49740 + select GRKERNSEC_TIME
49741 + select GRKERNSEC_SIGNAL
49742 + select GRKERNSEC_CHROOT
49743 + select GRKERNSEC_CHROOT_SHMAT
49744 + select GRKERNSEC_CHROOT_UNIX
49745 + select GRKERNSEC_CHROOT_MOUNT
49746 + select GRKERNSEC_CHROOT_FCHDIR
49747 + select GRKERNSEC_CHROOT_PIVOT
49748 + select GRKERNSEC_CHROOT_DOUBLE
49749 + select GRKERNSEC_CHROOT_CHDIR
49750 + select GRKERNSEC_CHROOT_MKNOD
49751 + select GRKERNSEC_CHROOT_CAPS
49752 + select GRKERNSEC_CHROOT_SYSCTL
49753 + select GRKERNSEC_CHROOT_FINDTASK
49754 + select GRKERNSEC_SYSFS_RESTRICT
49755 + select GRKERNSEC_PROC
49756 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49757 + select GRKERNSEC_HIDESYM
49758 + select GRKERNSEC_BRUTE
49759 + select GRKERNSEC_PROC_USERGROUP
49760 + select GRKERNSEC_KMEM
49761 + select GRKERNSEC_RESLOG
49762 + select GRKERNSEC_RANDNET
49763 + select GRKERNSEC_PROC_ADD
49764 + select GRKERNSEC_CHROOT_CHMOD
49765 + select GRKERNSEC_CHROOT_NICE
49766 + select GRKERNSEC_AUDIT_MOUNT
49767 + select GRKERNSEC_MODHARDEN if (MODULES)
49768 + select GRKERNSEC_HARDEN_PTRACE
49769 + select GRKERNSEC_VM86 if (X86_32)
49770 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49771 + select PAX
49772 + select PAX_RANDUSTACK
49773 + select PAX_ASLR
49774 + select PAX_RANDMMAP
49775 + select PAX_NOEXEC
49776 + select PAX_MPROTECT
49777 + select PAX_EI_PAX
49778 + select PAX_PT_PAX_FLAGS
49779 + select PAX_HAVE_ACL_FLAGS
49780 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49781 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49782 + select PAX_RANDKSTACK if (X86_TSC && X86)
49783 + select PAX_SEGMEXEC if (X86_32)
49784 + select PAX_PAGEEXEC
49785 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49786 + select PAX_EMUTRAMP if (PARISC)
49787 + select PAX_EMUSIGRT if (PARISC)
49788 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49789 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49790 + select PAX_REFCOUNT if (X86 || SPARC64)
49791 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49792 + help
49793 + If you say Y here, many of the features of grsecurity will be
49794 + enabled, which will protect you against many kinds of attacks
49795 + against your system. The heightened security comes at a cost
49796 + of an increased chance of incompatibilities with rare software
49797 + on your machine. Since this security level enables PaX, you should
49798 + view <http://pax.grsecurity.net> and read about the PaX
49799 + project. While you are there, download chpax and run it on
49800 + binaries that cause problems with PaX. Also remember that
49801 + since the /proc restrictions are enabled, you must run your
49802 + identd as gid 1001. This security level enables the following
49803 + features in addition to those listed in the low and medium
49804 + security levels:
49805 +
49806 + - Additional /proc restrictions
49807 + - Chmod restrictions in chroot
49808 + - No signals, ptrace, or viewing of processes outside of chroot
49809 + - Capability restrictions in chroot
49810 + - Deny fchdir out of chroot
49811 + - Priority restrictions in chroot
49812 + - Segmentation-based implementation of PaX
49813 + - Mprotect restrictions
49814 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49815 + - Kernel stack randomization
49816 + - Mount/unmount/remount logging
49817 + - Kernel symbol hiding
49818 + - Hardening of module auto-loading
49819 + - Ptrace restrictions
49820 + - Restricted vm86 mode
49821 + - Restricted sysfs/debugfs
49822 + - Active kernel exploit response
49823 +
49824 +config GRKERNSEC_CUSTOM
49825 + bool "Custom"
49826 + help
49827 + If you say Y here, you will be able to configure every grsecurity
49828 + option, which allows you to enable many more features that aren't
49829 + covered in the basic security levels. These additional features
49830 + include TPE, socket restrictions, and the sysctl system for
49831 + grsecurity. It is advised that you read through the help for
49832 + each option to determine its usefulness in your situation.
49833 +
49834 +endchoice
49835 +
49836 +menu "Address Space Protection"
49837 +depends on GRKERNSEC
49838 +
49839 +config GRKERNSEC_KMEM
49840 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49841 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49842 + help
49843 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49844 + be written to or read from to modify or leak the contents of the running
49845 + kernel. /dev/port will also not be allowed to be opened. If you have module
49846 + support disabled, enabling this will close up four ways that are
49847 + currently used to insert malicious code into the running kernel.
49848 + Even with all these features enabled, we still highly recommend that
49849 + you use the RBAC system, as it is still possible for an attacker to
49850 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49851 + If you are not using XFree86, you may be able to stop this additional
49852 + case by enabling the 'Disable privileged I/O' option. Though nothing
49853 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49854 + but only to video memory, which is the only writing we allow in this
49855 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49856 + not be allowed to mprotect it with PROT_WRITE later.
49857 + It is highly recommended that you say Y here if you meet all the
49858 + conditions above.
49859 +
49860 +config GRKERNSEC_VM86
49861 + bool "Restrict VM86 mode"
49862 + depends on X86_32
49863 +
49864 + help
49865 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49866 + make use of a special execution mode on 32bit x86 processors called
49867 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49868 + video cards and will still work with this option enabled. The purpose
49869 + of the option is to prevent exploitation of emulation errors in
49870 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49871 + Nearly all users should be able to enable this option.
49872 +
49873 +config GRKERNSEC_IO
49874 + bool "Disable privileged I/O"
49875 + depends on X86
49876 + select RTC_CLASS
49877 + select RTC_INTF_DEV
49878 + select RTC_DRV_CMOS
49879 +
49880 + help
49881 + If you say Y here, all ioperm and iopl calls will return an error.
49882 + Ioperm and iopl can be used to modify the running kernel.
49883 + Unfortunately, some programs need this access to operate properly,
49884 + the most notable of which are XFree86 and hwclock. hwclock can be
49885 + remedied by having RTC support in the kernel, so real-time
49886 + clock support is enabled if this option is enabled, to ensure
49887 + that hwclock operates correctly. XFree86 still will not
49888 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49889 + IF YOU USE XFree86. If you use XFree86 and you still want to
49890 + protect your kernel against modification, use the RBAC system.
49891 +
49892 +config GRKERNSEC_PROC_MEMMAP
49893 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
49894 + default y if (PAX_NOEXEC || PAX_ASLR)
49895 + depends on PAX_NOEXEC || PAX_ASLR
49896 + help
49897 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49898 + give no information about the addresses of its mappings if
49899 + PaX features that rely on random addresses are enabled on the task.
49900 + If you use PaX it is greatly recommended that you say Y here as it
49901 + closes up a hole that makes the full ASLR useless for suid
49902 + binaries.
49903 +
49904 +config GRKERNSEC_BRUTE
49905 + bool "Deter exploit bruteforcing"
49906 + help
49907 + If you say Y here, attempts to bruteforce exploits against forking
49908 + daemons such as apache or sshd, as well as against suid/sgid binaries
49909 + will be deterred. When a child of a forking daemon is killed by PaX
49910 + or crashes due to an illegal instruction or other suspicious signal,
49911 + the parent process will be delayed 30 seconds upon every subsequent
49912 + fork until the administrator is able to assess the situation and
49913 + restart the daemon.
49914 + In the suid/sgid case, the attempt is logged, the user has all their
49915 + processes terminated, and they are prevented from executing any further
49916 + processes for 15 minutes.
49917 + It is recommended that you also enable signal logging in the auditing
49918 + section so that logs are generated when a process triggers a suspicious
49919 + signal.
49920 + If the sysctl option is enabled, a sysctl option with name
49921 + "deter_bruteforce" is created.
49922 +
49923 +
49924 +config GRKERNSEC_MODHARDEN
49925 + bool "Harden module auto-loading"
49926 + depends on MODULES
49927 + help
49928 + If you say Y here, module auto-loading in response to use of some
49929 + feature implemented by an unloaded module will be restricted to
49930 + root users. Enabling this option helps defend against attacks
49931 + by unprivileged users who abuse the auto-loading behavior to
49932 + cause a vulnerable module to load that is then exploited.
49933 +
49934 + If this option prevents a legitimate use of auto-loading for a
49935 + non-root user, the administrator can execute modprobe manually
49936 + with the exact name of the module mentioned in the alert log.
49937 + Alternatively, the administrator can add the module to the list
49938 + of modules loaded at boot by modifying init scripts.
49939 +
49940 + Modification of init scripts will most likely be needed on
49941 + Ubuntu servers with encrypted home directory support enabled,
49942 + as the first non-root user logging in will cause the ecb(aes),
49943 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49944 +
49945 +config GRKERNSEC_HIDESYM
49946 + bool "Hide kernel symbols"
49947 + help
49948 + If you say Y here, getting information on loaded modules, and
49949 + displaying all kernel symbols through a syscall will be restricted
49950 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49951 + /proc/kallsyms will be restricted to the root user. The RBAC
49952 + system can hide that entry even from root.
49953 +
49954 + This option also prevents leaking of kernel addresses through
49955 + several /proc entries.
49956 +
49957 + Note that this option is only effective provided the following
49958 + conditions are met:
49959 + 1) The kernel using grsecurity is not precompiled by some distribution
49960 + 2) You have also enabled GRKERNSEC_DMESG
49961 + 3) You are using the RBAC system and hiding other files such as your
49962 + kernel image and System.map. Alternatively, enabling this option
49963 + causes the permissions on /boot, /lib/modules, and the kernel
49964 + source directory to change at compile time to prevent
49965 + reading by non-root users.
49966 + If the above conditions are met, this option will aid in providing a
49967 + useful protection against local kernel exploitation of overflows
49968 + and arbitrary read/write vulnerabilities.
49969 +
49970 +config GRKERNSEC_KERN_LOCKOUT
49971 + bool "Active kernel exploit response"
49972 + depends on X86 || ARM || PPC || SPARC
49973 + help
49974 + If you say Y here, when a PaX alert is triggered due to suspicious
49975 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49976 + or an OOPs occurs due to bad memory accesses, instead of just
49977 + terminating the offending process (and potentially allowing
49978 + a subsequent exploit from the same user), we will take one of two
49979 + actions:
49980 + If the user was root, we will panic the system
49981 + If the user was non-root, we will log the attempt, terminate
49982 + all processes owned by the user, then prevent them from creating
49983 + any new processes until the system is restarted
49984 + This deters repeated kernel exploitation/bruteforcing attempts
49985 + and is useful for later forensics.
49986 +
49987 +endmenu
49988 +menu "Role Based Access Control Options"
49989 +depends on GRKERNSEC
49990 +
49991 +config GRKERNSEC_RBAC_DEBUG
49992 + bool
49993 +
49994 +config GRKERNSEC_NO_RBAC
49995 + bool "Disable RBAC system"
49996 + help
49997 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49998 + preventing the RBAC system from being enabled. You should only say Y
49999 + here if you have no intention of using the RBAC system, so as to prevent
50000 + an attacker with root access from misusing the RBAC system to hide files
50001 + and processes when loadable module support and /dev/[k]mem have been
50002 + locked down.
50003 +
50004 +config GRKERNSEC_ACL_HIDEKERN
50005 + bool "Hide kernel processes"
50006 + help
50007 + If you say Y here, all kernel threads will be hidden to all
50008 + processes but those whose subject has the "view hidden processes"
50009 + flag.
50010 +
50011 +config GRKERNSEC_ACL_MAXTRIES
50012 + int "Maximum tries before password lockout"
50013 + default 3
50014 + help
50015 + This option enforces the maximum number of times a user can attempt
50016 + to authorize themselves with the grsecurity RBAC system before being
50017 + denied the ability to attempt authorization again for a specified time.
50018 + The lower the number, the harder it will be to brute-force a password.
50019 +
50020 +config GRKERNSEC_ACL_TIMEOUT
50021 + int "Time to wait after max password tries, in seconds"
50022 + default 30
50023 + help
50024 + This option specifies the time the user must wait after attempting to
50025 + authorize to the RBAC system with the maximum number of invalid
50026 + passwords. The higher the number, the harder it will be to brute-force
50027 + a password.
50028 +
50029 +endmenu
50030 +menu "Filesystem Protections"
50031 +depends on GRKERNSEC
50032 +
50033 +config GRKERNSEC_PROC
50034 + bool "Proc restrictions"
50035 + help
50036 + If you say Y here, the permissions of the /proc filesystem
50037 + will be altered to enhance system security and privacy. You MUST
50038 + choose either a user only restriction or a user and group restriction.
50039 + Depending upon the option you choose, you can either restrict users to
50040 + see only the processes they themselves run, or choose a group that can
50041 + view all processes and files normally restricted to root if you choose
50042 + the "restrict to user only" option. NOTE: If you're running identd as
50043 + a non-root user, you will have to run it as the group you specify here.
50044 +
50045 +config GRKERNSEC_PROC_USER
50046 + bool "Restrict /proc to user only"
50047 + depends on GRKERNSEC_PROC
50048 + help
50049 + If you say Y here, non-root users will only be able to view their own
50050 + processes, and restricts them from viewing network-related information,
50051 + and viewing kernel symbol and module information.
50052 +
50053 +config GRKERNSEC_PROC_USERGROUP
50054 + bool "Allow special group"
50055 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50056 + help
50057 + If you say Y here, you will be able to select a group that will be
50058 + able to view all processes and network-related information. If you've
50059 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50060 + remain hidden. This option is useful if you want to run identd as
50061 + a non-root user.
50062 +
50063 +config GRKERNSEC_PROC_GID
50064 + int "GID for special group"
50065 + depends on GRKERNSEC_PROC_USERGROUP
50066 + default 1001
50067 +
50068 +config GRKERNSEC_PROC_ADD
50069 + bool "Additional restrictions"
50070 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50071 + help
50072 + If you say Y here, additional restrictions will be placed on
50073 + /proc that keep normal users from viewing device information and
50074 + slabinfo information that could be useful for exploits.
50075 +
50076 +config GRKERNSEC_LINK
50077 + bool "Linking restrictions"
50078 + help
50079 + If you say Y here, /tmp race exploits will be prevented, since users
50080 + will no longer be able to follow symlinks owned by other users in
50081 + world-writable +t directories (e.g. /tmp), unless the owner of the
50082 + symlink is the owner of the directory. Users will also not be
50083 + able to hardlink to files they do not own. If the sysctl option is
50084 + enabled, a sysctl option with name "linking_restrictions" is created.
50085 +
50086 +config GRKERNSEC_FIFO
50087 + bool "FIFO restrictions"
50088 + help
50089 + If you say Y here, users will not be able to write to FIFOs they don't
50090 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50091 + the FIFO is the same owner of the directory it's held in. If the sysctl
50092 + option is enabled, a sysctl option with name "fifo_restrictions" is
50093 + created.
50094 +
50095 +config GRKERNSEC_SYSFS_RESTRICT
50096 + bool "Sysfs/debugfs restriction"
50097 + depends on SYSFS
50098 + help
50099 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50100 + any filesystem normally mounted under it (e.g. debugfs) will only
50101 + be accessible by root. These filesystems generally provide access
50102 + to hardware and debug information that isn't appropriate for unprivileged
50103 + users of the system. Sysfs and debugfs have also become a large source
50104 + of new vulnerabilities, ranging from infoleaks to local compromise.
50105 + There has been very little oversight with an eye toward security involved
50106 + in adding new exporters of information to these filesystems, so their
50107 + use is discouraged.
50108 + This option is equivalent to a chmod 0700 of the mount paths.
50109 +
50110 +config GRKERNSEC_ROFS
50111 + bool "Runtime read-only mount protection"
50112 + help
50113 + If you say Y here, a sysctl option with name "romount_protect" will
50114 + be created. By setting this option to 1 at runtime, filesystems
50115 + will be protected in the following ways:
50116 + * No new writable mounts will be allowed
50117 + * Existing read-only mounts won't be able to be remounted read/write
50118 + * Write operations will be denied on all block devices
50119 + This option acts independently of grsec_lock: once it is set to 1,
50120 + it cannot be turned off. Therefore, please be mindful of the resulting
50121 + behavior if this option is enabled in an init script on a read-only
50122 + filesystem. This feature is mainly intended for secure embedded systems.
50123 +
50124 +config GRKERNSEC_CHROOT
50125 + bool "Chroot jail restrictions"
50126 + help
50127 + If you say Y here, you will be able to choose several options that will
50128 + make breaking out of a chrooted jail much more difficult. If you
50129 + encounter no software incompatibilities with the following options, it
50130 + is recommended that you enable each one.
50131 +
50132 +config GRKERNSEC_CHROOT_MOUNT
50133 + bool "Deny mounts"
50134 + depends on GRKERNSEC_CHROOT
50135 + help
50136 + If you say Y here, processes inside a chroot will not be able to
50137 + mount or remount filesystems. If the sysctl option is enabled, a
50138 + sysctl option with name "chroot_deny_mount" is created.
50139 +
50140 +config GRKERNSEC_CHROOT_DOUBLE
50141 + bool "Deny double-chroots"
50142 + depends on GRKERNSEC_CHROOT
50143 + help
50144 + If you say Y here, processes inside a chroot will not be able to chroot
50145 + again outside the chroot. This is a widely used method of breaking
50146 + out of a chroot jail and should not be allowed. If the sysctl
50147 + option is enabled, a sysctl option with name
50148 + "chroot_deny_chroot" is created.
50149 +
50150 +config GRKERNSEC_CHROOT_PIVOT
50151 + bool "Deny pivot_root in chroot"
50152 + depends on GRKERNSEC_CHROOT
50153 + help
50154 + If you say Y here, processes inside a chroot will not be able to use
50155 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50156 + works similar to chroot in that it changes the root filesystem. This
50157 + function could be misused in a chrooted process to attempt to break out
50158 + of the chroot, and therefore should not be allowed. If the sysctl
50159 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50160 + created.
50161 +
50162 +config GRKERNSEC_CHROOT_CHDIR
50163 + bool "Enforce chdir(\"/\") on all chroots"
50164 + depends on GRKERNSEC_CHROOT
50165 + help
50166 + If you say Y here, the current working directory of all newly-chrooted
50167 + applications will be set to the root directory of the chroot.
50168 + The man page on chroot(2) states:
50169 + Note that this call does not change the current working
50170 + directory, so that `.' can be outside the tree rooted at
50171 + `/'. In particular, the super-user can escape from a
50172 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50173 +
50174 + It is recommended that you say Y here, since it's not known to break
50175 + any software. If the sysctl option is enabled, a sysctl option with
50176 + name "chroot_enforce_chdir" is created.
50177 +
50178 +config GRKERNSEC_CHROOT_CHMOD
50179 + bool "Deny (f)chmod +s"
50180 + depends on GRKERNSEC_CHROOT
50181 + help
50182 + If you say Y here, processes inside a chroot will not be able to chmod
50183 + or fchmod files to make them have suid or sgid bits. This protects
50184 + against another published method of breaking a chroot. If the sysctl
50185 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50186 + created.
50187 +
50188 +config GRKERNSEC_CHROOT_FCHDIR
50189 + bool "Deny fchdir out of chroot"
50190 + depends on GRKERNSEC_CHROOT
50191 + help
50192 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50193 + to a file descriptor of the chrooting process that points to a directory
50194 + outside the filesystem will be stopped. If the sysctl option
50195 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50196 +
50197 +config GRKERNSEC_CHROOT_MKNOD
50198 + bool "Deny mknod"
50199 + depends on GRKERNSEC_CHROOT
50200 + help
50201 + If you say Y here, processes inside a chroot will not be allowed to
50202 + mknod. The problem with using mknod inside a chroot is that it
50203 + would allow an attacker to create a device entry that is the same
50204 + as one on the physical root of your system, which could range from
50205 + anything from the console device to a device for your harddrive (which
50206 + they could then use to wipe the drive or steal data). It is recommended
50207 + that you say Y here, unless you run into software incompatibilities.
50208 + If the sysctl option is enabled, a sysctl option with name
50209 + "chroot_deny_mknod" is created.
50210 +
50211 +config GRKERNSEC_CHROOT_SHMAT
50212 + bool "Deny shmat() out of chroot"
50213 + depends on GRKERNSEC_CHROOT
50214 + help
50215 + If you say Y here, processes inside a chroot will not be able to attach
50216 + to shared memory segments that were created outside of the chroot jail.
50217 + It is recommended that you say Y here. If the sysctl option is enabled,
50218 + a sysctl option with name "chroot_deny_shmat" is created.
50219 +
50220 +config GRKERNSEC_CHROOT_UNIX
50221 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50222 + depends on GRKERNSEC_CHROOT
50223 + help
50224 + If you say Y here, processes inside a chroot will not be able to
50225 + connect to abstract (meaning not belonging to a filesystem) Unix
50226 + domain sockets that were bound outside of a chroot. It is recommended
50227 + that you say Y here. If the sysctl option is enabled, a sysctl option
50228 + with name "chroot_deny_unix" is created.
50229 +
50230 +config GRKERNSEC_CHROOT_FINDTASK
50231 + bool "Protect outside processes"
50232 + depends on GRKERNSEC_CHROOT
50233 + help
50234 + If you say Y here, processes inside a chroot will not be able to
50235 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50236 + getsid, or view any process outside of the chroot. If the sysctl
50237 + option is enabled, a sysctl option with name "chroot_findtask" is
50238 + created.
50239 +
50240 +config GRKERNSEC_CHROOT_NICE
50241 + bool "Restrict priority changes"
50242 + depends on GRKERNSEC_CHROOT
50243 + help
50244 + If you say Y here, processes inside a chroot will not be able to raise
50245 + the priority of processes in the chroot, or alter the priority of
50246 + processes outside the chroot. This provides more security than simply
50247 + removing CAP_SYS_NICE from the process' capability set. If the
50248 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50249 + is created.
50250 +
50251 +config GRKERNSEC_CHROOT_SYSCTL
50252 + bool "Deny sysctl writes"
50253 + depends on GRKERNSEC_CHROOT
50254 + help
50255 + If you say Y here, an attacker in a chroot will not be able to
50256 + write to sysctl entries, either by sysctl(2) or through a /proc
50257 + interface. It is strongly recommended that you say Y here. If the
50258 + sysctl option is enabled, a sysctl option with name
50259 + "chroot_deny_sysctl" is created.
50260 +
50261 +config GRKERNSEC_CHROOT_CAPS
50262 + bool "Capability restrictions"
50263 + depends on GRKERNSEC_CHROOT
50264 + help
50265 + If you say Y here, the capabilities on all processes within a
50266 + chroot jail will be lowered to stop module insertion, raw i/o,
50267 + system and net admin tasks, rebooting the system, modifying immutable
50268 + files, modifying IPC owned by another, and changing the system time.
50269 + This is left an option because it can break some apps. Disable this
50270 + if your chrooted apps are having problems performing those kinds of
50271 + tasks. If the sysctl option is enabled, a sysctl option with
50272 + name "chroot_caps" is created.
50273 +
50274 +endmenu
50275 +menu "Kernel Auditing"
50276 +depends on GRKERNSEC
50277 +
50278 +config GRKERNSEC_AUDIT_GROUP
50279 + bool "Single group for auditing"
50280 + help
50281 + If you say Y here, the exec, chdir, and (un)mount logging features
50282 + will only operate on a group you specify. This option is recommended
50283 + if you only want to watch certain users instead of having a large
50284 + amount of logs from the entire system. If the sysctl option is enabled,
50285 + a sysctl option with name "audit_group" is created.
50286 +
50287 +config GRKERNSEC_AUDIT_GID
50288 + int "GID for auditing"
50289 + depends on GRKERNSEC_AUDIT_GROUP
50290 + default 1007
50291 +
50292 +config GRKERNSEC_EXECLOG
50293 + bool "Exec logging"
50294 + help
50295 + If you say Y here, all execve() calls will be logged (since the
50296 + other exec*() calls are frontends to execve(), all execution
50297 + will be logged). Useful for shell-servers that like to keep track
50298 + of their users. If the sysctl option is enabled, a sysctl option with
50299 + name "exec_logging" is created.
50300 + WARNING: This option when enabled will produce a LOT of logs, especially
50301 + on an active system.
50302 +
50303 +config GRKERNSEC_RESLOG
50304 + bool "Resource logging"
50305 + help
50306 + If you say Y here, all attempts to overstep resource limits will
50307 + be logged with the resource name, the requested size, and the current
50308 + limit. It is highly recommended that you say Y here. If the sysctl
50309 + option is enabled, a sysctl option with name "resource_logging" is
50310 + created. If the RBAC system is enabled, the sysctl value is ignored.
50311 +
50312 +config GRKERNSEC_CHROOT_EXECLOG
50313 + bool "Log execs within chroot"
50314 + help
50315 + If you say Y here, all executions inside a chroot jail will be logged
50316 + to syslog. This can cause a large amount of logs if certain
50317 + applications (eg. djb's daemontools) are installed on the system, and
50318 + is therefore left as an option. If the sysctl option is enabled, a
50319 + sysctl option with name "chroot_execlog" is created.
50320 +
50321 +config GRKERNSEC_AUDIT_PTRACE
50322 + bool "Ptrace logging"
50323 + help
50324 + If you say Y here, all attempts to attach to a process via ptrace
50325 + will be logged. If the sysctl option is enabled, a sysctl option
50326 + with name "audit_ptrace" is created.
50327 +
50328 +config GRKERNSEC_AUDIT_CHDIR
50329 + bool "Chdir logging"
50330 + help
50331 + If you say Y here, all chdir() calls will be logged. If the sysctl
50332 + option is enabled, a sysctl option with name "audit_chdir" is created.
50333 +
50334 +config GRKERNSEC_AUDIT_MOUNT
50335 + bool "(Un)Mount logging"
50336 + help
50337 + If you say Y here, all mounts and unmounts will be logged. If the
50338 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50339 + created.
50340 +
50341 +config GRKERNSEC_SIGNAL
50342 + bool "Signal logging"
50343 + help
50344 + If you say Y here, certain important signals will be logged, such as
50345 + SIGSEGV, which will as a result inform you of when an error in a program
50346 + occurred, which in some cases could mean a possible exploit attempt.
50347 + If the sysctl option is enabled, a sysctl option with name
50348 + "signal_logging" is created.
50349 +
50350 +config GRKERNSEC_FORKFAIL
50351 + bool "Fork failure logging"
50352 + help
50353 + If you say Y here, all failed fork() attempts will be logged.
50354 + This could suggest a fork bomb, or someone attempting to overstep
50355 + their process limit. If the sysctl option is enabled, a sysctl option
50356 + with name "forkfail_logging" is created.
50357 +
50358 +config GRKERNSEC_TIME
50359 + bool "Time change logging"
50360 + help
50361 + If you say Y here, any changes of the system clock will be logged.
50362 + If the sysctl option is enabled, a sysctl option with name
50363 + "timechange_logging" is created.
50364 +
50365 +config GRKERNSEC_PROC_IPADDR
50366 + bool "/proc/<pid>/ipaddr support"
50367 + help
50368 + If you say Y here, a new entry will be added to each /proc/<pid>
50369 + directory that contains the IP address of the person using the task.
50370 + The IP is carried across local TCP and AF_UNIX stream sockets.
50371 + This information can be useful for IDS/IPSes to perform remote response
50372 + to a local attack. The entry is readable by only the owner of the
50373 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50374 + the RBAC system), and thus does not create privacy concerns.
50375 +
50376 +config GRKERNSEC_RWXMAP_LOG
50377 + bool 'Denied RWX mmap/mprotect logging'
50378 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50379 + help
50380 + If you say Y here, calls to mmap() and mprotect() with explicit
50381 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50382 + denied by the PAX_MPROTECT feature. If the sysctl option is
50383 + enabled, a sysctl option with name "rwxmap_logging" is created.
50384 +
50385 +config GRKERNSEC_AUDIT_TEXTREL
50386 + bool 'ELF text relocations logging (READ HELP)'
50387 + depends on PAX_MPROTECT
50388 + help
50389 + If you say Y here, text relocations will be logged with the filename
50390 + of the offending library or binary. The purpose of the feature is
50391 + to help Linux distribution developers get rid of libraries and
50392 + binaries that need text relocations which hinder the future progress
50393 + of PaX. Only Linux distribution developers should say Y here, and
50394 + never on a production machine, as this option creates an information
50395 + leak that could aid an attacker in defeating the randomization of
50396 + a single memory region. If the sysctl option is enabled, a sysctl
50397 + option with name "audit_textrel" is created.
50398 +
50399 +endmenu
50400 +
50401 +menu "Executable Protections"
50402 +depends on GRKERNSEC
50403 +
50404 +config GRKERNSEC_DMESG
50405 + bool "Dmesg(8) restriction"
50406 + help
50407 + If you say Y here, non-root users will not be able to use dmesg(8)
50408 + to view up to the last 4kb of messages in the kernel's log buffer.
50409 + The kernel's log buffer often contains kernel addresses and other
50410 + identifying information useful to an attacker in fingerprinting a
50411 + system for a targeted exploit.
50412 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50413 + created.
50414 +
50415 +config GRKERNSEC_HARDEN_PTRACE
50416 + bool "Deter ptrace-based process snooping"
50417 + help
50418 + If you say Y here, TTY sniffers and other malicious monitoring
50419 + programs implemented through ptrace will be defeated. If you
50420 + have been using the RBAC system, this option has already been
50421 + enabled for several years for all users, with the ability to make
50422 + fine-grained exceptions.
50423 +
50424 + This option only affects the ability of non-root users to ptrace
50425 + processes that are not a descendent of the ptracing process.
50426 + This means that strace ./binary and gdb ./binary will still work,
50427 + but attaching to arbitrary processes will not. If the sysctl
50428 + option is enabled, a sysctl option with name "harden_ptrace" is
50429 + created.
50430 +
50431 +config GRKERNSEC_TPE
50432 + bool "Trusted Path Execution (TPE)"
50433 + help
50434 + If you say Y here, you will be able to choose a gid to add to the
50435 + supplementary groups of users you want to mark as "untrusted."
50436 + These users will not be able to execute any files that are not in
50437 + root-owned directories writable only by root. If the sysctl option
50438 + is enabled, a sysctl option with name "tpe" is created.
50439 +
50440 +config GRKERNSEC_TPE_ALL
50441 + bool "Partially restrict all non-root users"
50442 + depends on GRKERNSEC_TPE
50443 + help
50444 + If you say Y here, all non-root users will be covered under
50445 + a weaker TPE restriction. This is separate from, and in addition to,
50446 + the main TPE options that you have selected elsewhere. Thus, if a
50447 + "trusted" GID is chosen, this restriction applies to even that GID.
50448 + Under this restriction, all non-root users will only be allowed to
50449 + execute files in directories they own that are not group or
50450 + world-writable, or in directories owned by root and writable only by
50451 + root. If the sysctl option is enabled, a sysctl option with name
50452 + "tpe_restrict_all" is created.
50453 +
50454 +config GRKERNSEC_TPE_INVERT
50455 + bool "Invert GID option"
50456 + depends on GRKERNSEC_TPE
50457 + help
50458 + If you say Y here, the group you specify in the TPE configuration will
50459 + decide what group TPE restrictions will be *disabled* for. This
50460 + option is useful if you want TPE restrictions to be applied to most
50461 + users on the system. If the sysctl option is enabled, a sysctl option
50462 + with name "tpe_invert" is created. Unlike other sysctl options, this
50463 + entry will default to on for backward-compatibility.
50464 +
50465 +config GRKERNSEC_TPE_GID
50466 + int "GID for untrusted users"
50467 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50468 + default 1005
50469 + help
50470 + Setting this GID determines what group TPE restrictions will be
50471 + *enabled* for. If the sysctl option is enabled, a sysctl option
50472 + with name "tpe_gid" is created.
50473 +
50474 +config GRKERNSEC_TPE_GID
50475 + int "GID for trusted users"
50476 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50477 + default 1005
50478 + help
50479 + Setting this GID determines what group TPE restrictions will be
50480 + *disabled* for. If the sysctl option is enabled, a sysctl option
50481 + with name "tpe_gid" is created.
50482 +
50483 +endmenu
50484 +menu "Network Protections"
50485 +depends on GRKERNSEC
50486 +
50487 +config GRKERNSEC_RANDNET
50488 + bool "Larger entropy pools"
50489 + help
50490 + If you say Y here, the entropy pools used for many features of Linux
50491 + and grsecurity will be doubled in size. Since several grsecurity
50492 + features use additional randomness, it is recommended that you say Y
50493 + here. Saying Y here has a similar effect as modifying
50494 + /proc/sys/kernel/random/poolsize.
50495 +
50496 +config GRKERNSEC_BLACKHOLE
50497 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50498 + depends on NET
50499 + help
50500 + If you say Y here, neither TCP resets nor ICMP
50501 + destination-unreachable packets will be sent in response to packets
50502 + sent to ports for which no associated listening process exists.
50503 + This feature supports both IPV4 and IPV6 and exempts the
50504 + loopback interface from blackholing. Enabling this feature
50505 + makes a host more resilient to DoS attacks and reduces network
50506 + visibility against scanners.
50507 +
50508 + The blackhole feature as-implemented is equivalent to the FreeBSD
50509 + blackhole feature, as it prevents RST responses to all packets, not
50510 + just SYNs. Under most application behavior this causes no
50511 + problems, but applications (like haproxy) may not close certain
50512 + connections in a way that cleanly terminates them on the remote
50513 + end, leaving the remote host in LAST_ACK state. Because of this
50514 + side-effect and to prevent intentional LAST_ACK DoSes, this
50515 + feature also adds automatic mitigation against such attacks.
50516 + The mitigation drastically reduces the amount of time a socket
50517 + can spend in LAST_ACK state. If you're using haproxy and not
50518 + all servers it connects to have this option enabled, consider
50519 + disabling this feature on the haproxy host.
50520 +
50521 + If the sysctl option is enabled, two sysctl options with names
50522 + "ip_blackhole" and "lastack_retries" will be created.
50523 + While "ip_blackhole" takes the standard zero/non-zero on/off
50524 + toggle, "lastack_retries" uses the same kinds of values as
50525 + "tcp_retries1" and "tcp_retries2". The default value of 4
50526 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50527 + state.
50528 +
50529 +config GRKERNSEC_SOCKET
50530 + bool "Socket restrictions"
50531 + depends on NET
50532 + help
50533 + If you say Y here, you will be able to choose from several options.
50534 + If you assign a GID on your system and add it to the supplementary
50535 + groups of users you want to restrict socket access to, this patch
50536 + will perform up to three things, based on the option(s) you choose.
50537 +
50538 +config GRKERNSEC_SOCKET_ALL
50539 + bool "Deny any sockets to group"
50540 + depends on GRKERNSEC_SOCKET
50541 + help
50542 + If you say Y here, you will be able to choose a GID of whose users will
50543 + be unable to connect to other hosts from your machine or run server
50544 + applications from your machine. If the sysctl option is enabled, a
50545 + sysctl option with name "socket_all" is created.
50546 +
50547 +config GRKERNSEC_SOCKET_ALL_GID
50548 + int "GID to deny all sockets for"
50549 + depends on GRKERNSEC_SOCKET_ALL
50550 + default 1004
50551 + help
50552 + Here you can choose the GID to disable socket access for. Remember to
50553 + add the users you want socket access disabled for to the GID
50554 + specified here. If the sysctl option is enabled, a sysctl option
50555 + with name "socket_all_gid" is created.
50556 +
50557 +config GRKERNSEC_SOCKET_CLIENT
50558 + bool "Deny client sockets to group"
50559 + depends on GRKERNSEC_SOCKET
50560 + help
50561 + If you say Y here, you will be able to choose a GID of whose users will
50562 + be unable to connect to other hosts from your machine, but will be
50563 + able to run servers. If this option is enabled, all users in the group
50564 + you specify will have to use passive mode when initiating ftp transfers
50565 + from the shell on your machine. If the sysctl option is enabled, a
50566 + sysctl option with name "socket_client" is created.
50567 +
50568 +config GRKERNSEC_SOCKET_CLIENT_GID
50569 + int "GID to deny client sockets for"
50570 + depends on GRKERNSEC_SOCKET_CLIENT
50571 + default 1003
50572 + help
50573 + Here you can choose the GID to disable client socket access for.
50574 + Remember to add the users you want client socket access disabled for to
50575 + the GID specified here. If the sysctl option is enabled, a sysctl
50576 + option with name "socket_client_gid" is created.
50577 +
50578 +config GRKERNSEC_SOCKET_SERVER
50579 + bool "Deny server sockets to group"
50580 + depends on GRKERNSEC_SOCKET
50581 + help
50582 + If you say Y here, you will be able to choose a GID of whose users will
50583 + be unable to run server applications from your machine. If the sysctl
50584 + option is enabled, a sysctl option with name "socket_server" is created.
50585 +
50586 +config GRKERNSEC_SOCKET_SERVER_GID
50587 + int "GID to deny server sockets for"
50588 + depends on GRKERNSEC_SOCKET_SERVER
50589 + default 1002
50590 + help
50591 + Here you can choose the GID to disable server socket access for.
50592 + Remember to add the users you want server socket access disabled for to
50593 + the GID specified here. If the sysctl option is enabled, a sysctl
50594 + option with name "socket_server_gid" is created.
50595 +
50596 +endmenu
50597 +menu "Sysctl support"
50598 +depends on GRKERNSEC && SYSCTL
50599 +
50600 +config GRKERNSEC_SYSCTL
50601 + bool "Sysctl support"
50602 + help
50603 + If you say Y here, you will be able to change the options that
50604 + grsecurity runs with at bootup, without having to recompile your
50605 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50606 + to enable (1) or disable (0) various features. All the sysctl entries
50607 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50608 + All features enabled in the kernel configuration are disabled at boot
50609 + if you do not say Y to the "Turn on features by default" option.
50610 + All options should be set at startup, and the grsec_lock entry should
50611 + be set to a non-zero value after all the options are set.
50612 + *THIS IS EXTREMELY IMPORTANT*
50613 +
50614 +config GRKERNSEC_SYSCTL_DISTRO
50615 + bool "Extra sysctl support for distro makers (READ HELP)"
50616 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50617 + help
50618 + If you say Y here, additional sysctl options will be created
50619 + for features that affect processes running as root. Therefore,
50620 + it is critical when using this option that the grsec_lock entry be
50621 + enabled after boot. Only distros with prebuilt kernel packages
50622 + with this option enabled that can ensure grsec_lock is enabled
50623 + after boot should use this option.
50624 + *Failure to set grsec_lock after boot makes all grsec features
50625 + this option covers useless*
50626 +
50627 + Currently this option creates the following sysctl entries:
50628 + "Disable Privileged I/O": "disable_priv_io"
50629 +
50630 +config GRKERNSEC_SYSCTL_ON
50631 + bool "Turn on features by default"
50632 + depends on GRKERNSEC_SYSCTL
50633 + help
50634 + If you say Y here, instead of having all features enabled in the
50635 + kernel configuration disabled at boot time, the features will be
50636 + enabled at boot time. It is recommended you say Y here unless
50637 + there is some reason you would want all sysctl-tunable features to
50638 + be disabled by default. As mentioned elsewhere, it is important
50639 + to enable the grsec_lock entry once you have finished modifying
50640 + the sysctl entries.
50641 +
50642 +endmenu
50643 +menu "Logging Options"
50644 +depends on GRKERNSEC
50645 +
50646 +config GRKERNSEC_FLOODTIME
50647 + int "Seconds in between log messages (minimum)"
50648 + default 10
50649 + help
50650 + This option allows you to enforce the number of seconds between
50651 + grsecurity log messages. The default should be suitable for most
50652 + people, however, if you choose to change it, choose a value small enough
50653 + to allow informative logs to be produced, but large enough to
50654 + prevent flooding.
50655 +
50656 +config GRKERNSEC_FLOODBURST
50657 + int "Number of messages in a burst (maximum)"
50658 + default 6
50659 + help
50660 + This option allows you to choose the maximum number of messages allowed
50661 + within the flood time interval you chose in a separate option. The
50662 + default should be suitable for most people, however if you find that
50663 + many of your logs are being interpreted as flooding, you may want to
50664 + raise this value.
50665 +
50666 +endmenu
50667 +
50668 +endmenu
50669 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50670 new file mode 100644
50671 index 0000000..be9ae3a
50672 --- /dev/null
50673 +++ b/grsecurity/Makefile
50674 @@ -0,0 +1,36 @@
50675 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50676 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50677 +# into an RBAC system
50678 +#
50679 +# All code in this directory and various hooks inserted throughout the kernel
50680 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50681 +# under the GPL v2 or higher
50682 +
50683 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50684 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50685 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50686 +
50687 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50688 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50689 + gracl_learn.o grsec_log.o
50690 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50691 +
50692 +ifdef CONFIG_NET
50693 +obj-y += grsec_sock.o
50694 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50695 +endif
50696 +
50697 +ifndef CONFIG_GRKERNSEC
50698 +obj-y += grsec_disabled.o
50699 +endif
50700 +
50701 +ifdef CONFIG_GRKERNSEC_HIDESYM
50702 +extra-y := grsec_hidesym.o
50703 +$(obj)/grsec_hidesym.o:
50704 + @-chmod -f 500 /boot
50705 + @-chmod -f 500 /lib/modules
50706 + @-chmod -f 500 /lib64/modules
50707 + @-chmod -f 500 /lib32/modules
50708 + @-chmod -f 700 .
50709 + @echo ' grsec: protected kernel image paths'
50710 +endif
50711 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50712 new file mode 100644
50713 index 0000000..09258e0
50714 --- /dev/null
50715 +++ b/grsecurity/gracl.c
50716 @@ -0,0 +1,4156 @@
50717 +#include <linux/kernel.h>
50718 +#include <linux/module.h>
50719 +#include <linux/sched.h>
50720 +#include <linux/mm.h>
50721 +#include <linux/file.h>
50722 +#include <linux/fs.h>
50723 +#include <linux/namei.h>
50724 +#include <linux/mount.h>
50725 +#include <linux/tty.h>
50726 +#include <linux/proc_fs.h>
50727 +#include <linux/lglock.h>
50728 +#include <linux/slab.h>
50729 +#include <linux/vmalloc.h>
50730 +#include <linux/types.h>
50731 +#include <linux/sysctl.h>
50732 +#include <linux/netdevice.h>
50733 +#include <linux/ptrace.h>
50734 +#include <linux/gracl.h>
50735 +#include <linux/gralloc.h>
50736 +#include <linux/grsecurity.h>
50737 +#include <linux/grinternal.h>
50738 +#include <linux/pid_namespace.h>
50739 +#include <linux/fdtable.h>
50740 +#include <linux/percpu.h>
50741 +
50742 +#include <asm/uaccess.h>
50743 +#include <asm/errno.h>
50744 +#include <asm/mman.h>
50745 +
50746 +static struct acl_role_db acl_role_set;
50747 +static struct name_db name_set;
50748 +static struct inodev_db inodev_set;
50749 +
50750 +/* for keeping track of userspace pointers used for subjects, so we
50751 + can share references in the kernel as well
50752 +*/
50753 +
50754 +static struct path real_root;
50755 +
50756 +static struct acl_subj_map_db subj_map_set;
50757 +
50758 +static struct acl_role_label *default_role;
50759 +
50760 +static struct acl_role_label *role_list;
50761 +
50762 +static u16 acl_sp_role_value;
50763 +
50764 +extern char *gr_shared_page[4];
50765 +static DEFINE_MUTEX(gr_dev_mutex);
50766 +DEFINE_RWLOCK(gr_inode_lock);
50767 +
50768 +struct gr_arg *gr_usermode;
50769 +
50770 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50771 +
50772 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50773 +extern void gr_clear_learn_entries(void);
50774 +
50775 +#ifdef CONFIG_GRKERNSEC_RESLOG
50776 +extern void gr_log_resource(const struct task_struct *task,
50777 + const int res, const unsigned long wanted, const int gt);
50778 +#endif
50779 +
50780 +unsigned char *gr_system_salt;
50781 +unsigned char *gr_system_sum;
50782 +
50783 +static struct sprole_pw **acl_special_roles = NULL;
50784 +static __u16 num_sprole_pws = 0;
50785 +
50786 +static struct acl_role_label *kernel_role = NULL;
50787 +
50788 +static unsigned int gr_auth_attempts = 0;
50789 +static unsigned long gr_auth_expires = 0UL;
50790 +
50791 +#ifdef CONFIG_NET
50792 +extern struct vfsmount *sock_mnt;
50793 +#endif
50794 +
50795 +extern struct vfsmount *pipe_mnt;
50796 +extern struct vfsmount *shm_mnt;
50797 +#ifdef CONFIG_HUGETLBFS
50798 +extern struct vfsmount *hugetlbfs_vfsmount;
50799 +#endif
50800 +
50801 +static struct acl_object_label *fakefs_obj_rw;
50802 +static struct acl_object_label *fakefs_obj_rwx;
50803 +
50804 +extern int gr_init_uidset(void);
50805 +extern void gr_free_uidset(void);
50806 +extern void gr_remove_uid(uid_t uid);
50807 +extern int gr_find_uid(uid_t uid);
50808 +
50809 +DECLARE_BRLOCK(vfsmount_lock);
50810 +
50811 +__inline__ int
50812 +gr_acl_is_enabled(void)
50813 +{
50814 + return (gr_status & GR_READY);
50815 +}
50816 +
50817 +#ifdef CONFIG_BTRFS_FS
50818 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50819 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50820 +#endif
50821 +
50822 +static inline dev_t __get_dev(const struct dentry *dentry)
50823 +{
50824 +#ifdef CONFIG_BTRFS_FS
50825 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50826 + return get_btrfs_dev_from_inode(dentry->d_inode);
50827 + else
50828 +#endif
50829 + return dentry->d_inode->i_sb->s_dev;
50830 +}
50831 +
50832 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50833 +{
50834 + return __get_dev(dentry);
50835 +}
50836 +
50837 +static char gr_task_roletype_to_char(struct task_struct *task)
50838 +{
50839 + switch (task->role->roletype &
50840 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50841 + GR_ROLE_SPECIAL)) {
50842 + case GR_ROLE_DEFAULT:
50843 + return 'D';
50844 + case GR_ROLE_USER:
50845 + return 'U';
50846 + case GR_ROLE_GROUP:
50847 + return 'G';
50848 + case GR_ROLE_SPECIAL:
50849 + return 'S';
50850 + }
50851 +
50852 + return 'X';
50853 +}
50854 +
50855 +char gr_roletype_to_char(void)
50856 +{
50857 + return gr_task_roletype_to_char(current);
50858 +}
50859 +
50860 +__inline__ int
50861 +gr_acl_tpe_check(void)
50862 +{
50863 + if (unlikely(!(gr_status & GR_READY)))
50864 + return 0;
50865 + if (current->role->roletype & GR_ROLE_TPE)
50866 + return 1;
50867 + else
50868 + return 0;
50869 +}
50870 +
50871 +int
50872 +gr_handle_rawio(const struct inode *inode)
50873 +{
50874 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50875 + if (inode && S_ISBLK(inode->i_mode) &&
50876 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50877 + !capable(CAP_SYS_RAWIO))
50878 + return 1;
50879 +#endif
50880 + return 0;
50881 +}
50882 +
50883 +static int
50884 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50885 +{
50886 + if (likely(lena != lenb))
50887 + return 0;
50888 +
50889 + return !memcmp(a, b, lena);
50890 +}
50891 +
50892 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50893 +{
50894 + *buflen -= namelen;
50895 + if (*buflen < 0)
50896 + return -ENAMETOOLONG;
50897 + *buffer -= namelen;
50898 + memcpy(*buffer, str, namelen);
50899 + return 0;
50900 +}
50901 +
50902 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50903 +{
50904 + return prepend(buffer, buflen, name->name, name->len);
50905 +}
50906 +
50907 +static int prepend_path(const struct path *path, struct path *root,
50908 + char **buffer, int *buflen)
50909 +{
50910 + struct dentry *dentry = path->dentry;
50911 + struct vfsmount *vfsmnt = path->mnt;
50912 + bool slash = false;
50913 + int error = 0;
50914 +
50915 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50916 + struct dentry * parent;
50917 +
50918 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50919 + /* Global root? */
50920 + if (vfsmnt->mnt_parent == vfsmnt) {
50921 + goto out;
50922 + }
50923 + dentry = vfsmnt->mnt_mountpoint;
50924 + vfsmnt = vfsmnt->mnt_parent;
50925 + continue;
50926 + }
50927 + parent = dentry->d_parent;
50928 + prefetch(parent);
50929 + spin_lock(&dentry->d_lock);
50930 + error = prepend_name(buffer, buflen, &dentry->d_name);
50931 + spin_unlock(&dentry->d_lock);
50932 + if (!error)
50933 + error = prepend(buffer, buflen, "/", 1);
50934 + if (error)
50935 + break;
50936 +
50937 + slash = true;
50938 + dentry = parent;
50939 + }
50940 +
50941 +out:
50942 + if (!error && !slash)
50943 + error = prepend(buffer, buflen, "/", 1);
50944 +
50945 + return error;
50946 +}
50947 +
50948 +/* this must be called with vfsmount_lock and rename_lock held */
50949 +
50950 +static char *__our_d_path(const struct path *path, struct path *root,
50951 + char *buf, int buflen)
50952 +{
50953 + char *res = buf + buflen;
50954 + int error;
50955 +
50956 + prepend(&res, &buflen, "\0", 1);
50957 + error = prepend_path(path, root, &res, &buflen);
50958 + if (error)
50959 + return ERR_PTR(error);
50960 +
50961 + return res;
50962 +}
50963 +
50964 +static char *
50965 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50966 +{
50967 + char *retval;
50968 +
50969 + retval = __our_d_path(path, root, buf, buflen);
50970 + if (unlikely(IS_ERR(retval)))
50971 + retval = strcpy(buf, "<path too long>");
50972 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50973 + retval[1] = '\0';
50974 +
50975 + return retval;
50976 +}
50977 +
50978 +static char *
50979 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50980 + char *buf, int buflen)
50981 +{
50982 + struct path path;
50983 + char *res;
50984 +
50985 + path.dentry = (struct dentry *)dentry;
50986 + path.mnt = (struct vfsmount *)vfsmnt;
50987 +
50988 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50989 + by the RBAC system */
50990 + res = gen_full_path(&path, &real_root, buf, buflen);
50991 +
50992 + return res;
50993 +}
50994 +
50995 +static char *
50996 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50997 + char *buf, int buflen)
50998 +{
50999 + char *res;
51000 + struct path path;
51001 + struct path root;
51002 + struct task_struct *reaper = &init_task;
51003 +
51004 + path.dentry = (struct dentry *)dentry;
51005 + path.mnt = (struct vfsmount *)vfsmnt;
51006 +
51007 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51008 + get_fs_root(reaper->fs, &root);
51009 +
51010 + write_seqlock(&rename_lock);
51011 + br_read_lock(vfsmount_lock);
51012 + res = gen_full_path(&path, &root, buf, buflen);
51013 + br_read_unlock(vfsmount_lock);
51014 + write_sequnlock(&rename_lock);
51015 +
51016 + path_put(&root);
51017 + return res;
51018 +}
51019 +
51020 +static char *
51021 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51022 +{
51023 + char *ret;
51024 + write_seqlock(&rename_lock);
51025 + br_read_lock(vfsmount_lock);
51026 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51027 + PAGE_SIZE);
51028 + br_read_unlock(vfsmount_lock);
51029 + write_sequnlock(&rename_lock);
51030 + return ret;
51031 +}
51032 +
51033 +static char *
51034 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51035 +{
51036 + char *ret;
51037 + char *buf;
51038 + int buflen;
51039 +
51040 + write_seqlock(&rename_lock);
51041 + br_read_lock(vfsmount_lock);
51042 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51043 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51044 + buflen = (int)(ret - buf);
51045 + if (buflen >= 5)
51046 + prepend(&ret, &buflen, "/proc", 5);
51047 + else
51048 + ret = strcpy(buf, "<path too long>");
51049 + br_read_unlock(vfsmount_lock);
51050 + write_sequnlock(&rename_lock);
51051 + return ret;
51052 +}
51053 +
51054 +char *
51055 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51056 +{
51057 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51058 + PAGE_SIZE);
51059 +}
51060 +
51061 +char *
51062 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51063 +{
51064 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51065 + PAGE_SIZE);
51066 +}
51067 +
51068 +char *
51069 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51070 +{
51071 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51072 + PAGE_SIZE);
51073 +}
51074 +
51075 +char *
51076 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51077 +{
51078 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51079 + PAGE_SIZE);
51080 +}
51081 +
51082 +char *
51083 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51084 +{
51085 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51086 + PAGE_SIZE);
51087 +}
51088 +
51089 +__inline__ __u32
51090 +to_gr_audit(const __u32 reqmode)
51091 +{
51092 + /* masks off auditable permission flags, then shifts them to create
51093 + auditing flags, and adds the special case of append auditing if
51094 + we're requesting write */
51095 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51096 +}
51097 +
51098 +struct acl_subject_label *
51099 +lookup_subject_map(const struct acl_subject_label *userp)
51100 +{
51101 + unsigned int index = shash(userp, subj_map_set.s_size);
51102 + struct subject_map *match;
51103 +
51104 + match = subj_map_set.s_hash[index];
51105 +
51106 + while (match && match->user != userp)
51107 + match = match->next;
51108 +
51109 + if (match != NULL)
51110 + return match->kernel;
51111 + else
51112 + return NULL;
51113 +}
51114 +
51115 +static void
51116 +insert_subj_map_entry(struct subject_map *subjmap)
51117 +{
51118 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51119 + struct subject_map **curr;
51120 +
51121 + subjmap->prev = NULL;
51122 +
51123 + curr = &subj_map_set.s_hash[index];
51124 + if (*curr != NULL)
51125 + (*curr)->prev = subjmap;
51126 +
51127 + subjmap->next = *curr;
51128 + *curr = subjmap;
51129 +
51130 + return;
51131 +}
51132 +
51133 +static struct acl_role_label *
51134 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51135 + const gid_t gid)
51136 +{
51137 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51138 + struct acl_role_label *match;
51139 + struct role_allowed_ip *ipp;
51140 + unsigned int x;
51141 + u32 curr_ip = task->signal->curr_ip;
51142 +
51143 + task->signal->saved_ip = curr_ip;
51144 +
51145 + match = acl_role_set.r_hash[index];
51146 +
51147 + while (match) {
51148 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51149 + for (x = 0; x < match->domain_child_num; x++) {
51150 + if (match->domain_children[x] == uid)
51151 + goto found;
51152 + }
51153 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51154 + break;
51155 + match = match->next;
51156 + }
51157 +found:
51158 + if (match == NULL) {
51159 + try_group:
51160 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51161 + match = acl_role_set.r_hash[index];
51162 +
51163 + while (match) {
51164 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51165 + for (x = 0; x < match->domain_child_num; x++) {
51166 + if (match->domain_children[x] == gid)
51167 + goto found2;
51168 + }
51169 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51170 + break;
51171 + match = match->next;
51172 + }
51173 +found2:
51174 + if (match == NULL)
51175 + match = default_role;
51176 + if (match->allowed_ips == NULL)
51177 + return match;
51178 + else {
51179 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51180 + if (likely
51181 + ((ntohl(curr_ip) & ipp->netmask) ==
51182 + (ntohl(ipp->addr) & ipp->netmask)))
51183 + return match;
51184 + }
51185 + match = default_role;
51186 + }
51187 + } else if (match->allowed_ips == NULL) {
51188 + return match;
51189 + } else {
51190 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51191 + if (likely
51192 + ((ntohl(curr_ip) & ipp->netmask) ==
51193 + (ntohl(ipp->addr) & ipp->netmask)))
51194 + return match;
51195 + }
51196 + goto try_group;
51197 + }
51198 +
51199 + return match;
51200 +}
51201 +
51202 +struct acl_subject_label *
51203 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51204 + const struct acl_role_label *role)
51205 +{
51206 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51207 + struct acl_subject_label *match;
51208 +
51209 + match = role->subj_hash[index];
51210 +
51211 + while (match && (match->inode != ino || match->device != dev ||
51212 + (match->mode & GR_DELETED))) {
51213 + match = match->next;
51214 + }
51215 +
51216 + if (match && !(match->mode & GR_DELETED))
51217 + return match;
51218 + else
51219 + return NULL;
51220 +}
51221 +
51222 +struct acl_subject_label *
51223 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51224 + const struct acl_role_label *role)
51225 +{
51226 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51227 + struct acl_subject_label *match;
51228 +
51229 + match = role->subj_hash[index];
51230 +
51231 + while (match && (match->inode != ino || match->device != dev ||
51232 + !(match->mode & GR_DELETED))) {
51233 + match = match->next;
51234 + }
51235 +
51236 + if (match && (match->mode & GR_DELETED))
51237 + return match;
51238 + else
51239 + return NULL;
51240 +}
51241 +
51242 +static struct acl_object_label *
51243 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51244 + const struct acl_subject_label *subj)
51245 +{
51246 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51247 + struct acl_object_label *match;
51248 +
51249 + match = subj->obj_hash[index];
51250 +
51251 + while (match && (match->inode != ino || match->device != dev ||
51252 + (match->mode & GR_DELETED))) {
51253 + match = match->next;
51254 + }
51255 +
51256 + if (match && !(match->mode & GR_DELETED))
51257 + return match;
51258 + else
51259 + return NULL;
51260 +}
51261 +
51262 +static struct acl_object_label *
51263 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51264 + const struct acl_subject_label *subj)
51265 +{
51266 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51267 + struct acl_object_label *match;
51268 +
51269 + match = subj->obj_hash[index];
51270 +
51271 + while (match && (match->inode != ino || match->device != dev ||
51272 + !(match->mode & GR_DELETED))) {
51273 + match = match->next;
51274 + }
51275 +
51276 + if (match && (match->mode & GR_DELETED))
51277 + return match;
51278 +
51279 + match = subj->obj_hash[index];
51280 +
51281 + while (match && (match->inode != ino || match->device != dev ||
51282 + (match->mode & GR_DELETED))) {
51283 + match = match->next;
51284 + }
51285 +
51286 + if (match && !(match->mode & GR_DELETED))
51287 + return match;
51288 + else
51289 + return NULL;
51290 +}
51291 +
51292 +static struct name_entry *
51293 +lookup_name_entry(const char *name)
51294 +{
51295 + unsigned int len = strlen(name);
51296 + unsigned int key = full_name_hash(name, len);
51297 + unsigned int index = key % name_set.n_size;
51298 + struct name_entry *match;
51299 +
51300 + match = name_set.n_hash[index];
51301 +
51302 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51303 + match = match->next;
51304 +
51305 + return match;
51306 +}
51307 +
51308 +static struct name_entry *
51309 +lookup_name_entry_create(const char *name)
51310 +{
51311 + unsigned int len = strlen(name);
51312 + unsigned int key = full_name_hash(name, len);
51313 + unsigned int index = key % name_set.n_size;
51314 + struct name_entry *match;
51315 +
51316 + match = name_set.n_hash[index];
51317 +
51318 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51319 + !match->deleted))
51320 + match = match->next;
51321 +
51322 + if (match && match->deleted)
51323 + return match;
51324 +
51325 + match = name_set.n_hash[index];
51326 +
51327 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51328 + match->deleted))
51329 + match = match->next;
51330 +
51331 + if (match && !match->deleted)
51332 + return match;
51333 + else
51334 + return NULL;
51335 +}
51336 +
51337 +static struct inodev_entry *
51338 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51339 +{
51340 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51341 + struct inodev_entry *match;
51342 +
51343 + match = inodev_set.i_hash[index];
51344 +
51345 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51346 + match = match->next;
51347 +
51348 + return match;
51349 +}
51350 +
51351 +static void
51352 +insert_inodev_entry(struct inodev_entry *entry)
51353 +{
51354 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51355 + inodev_set.i_size);
51356 + struct inodev_entry **curr;
51357 +
51358 + entry->prev = NULL;
51359 +
51360 + curr = &inodev_set.i_hash[index];
51361 + if (*curr != NULL)
51362 + (*curr)->prev = entry;
51363 +
51364 + entry->next = *curr;
51365 + *curr = entry;
51366 +
51367 + return;
51368 +}
51369 +
51370 +static void
51371 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51372 +{
51373 + unsigned int index =
51374 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51375 + struct acl_role_label **curr;
51376 + struct acl_role_label *tmp;
51377 +
51378 + curr = &acl_role_set.r_hash[index];
51379 +
51380 + /* if role was already inserted due to domains and already has
51381 + a role in the same bucket as it attached, then we need to
51382 + combine these two buckets
51383 + */
51384 + if (role->next) {
51385 + tmp = role->next;
51386 + while (tmp->next)
51387 + tmp = tmp->next;
51388 + tmp->next = *curr;
51389 + } else
51390 + role->next = *curr;
51391 + *curr = role;
51392 +
51393 + return;
51394 +}
51395 +
51396 +static void
51397 +insert_acl_role_label(struct acl_role_label *role)
51398 +{
51399 + int i;
51400 +
51401 + if (role_list == NULL) {
51402 + role_list = role;
51403 + role->prev = NULL;
51404 + } else {
51405 + role->prev = role_list;
51406 + role_list = role;
51407 + }
51408 +
51409 + /* used for hash chains */
51410 + role->next = NULL;
51411 +
51412 + if (role->roletype & GR_ROLE_DOMAIN) {
51413 + for (i = 0; i < role->domain_child_num; i++)
51414 + __insert_acl_role_label(role, role->domain_children[i]);
51415 + } else
51416 + __insert_acl_role_label(role, role->uidgid);
51417 +}
51418 +
51419 +static int
51420 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51421 +{
51422 + struct name_entry **curr, *nentry;
51423 + struct inodev_entry *ientry;
51424 + unsigned int len = strlen(name);
51425 + unsigned int key = full_name_hash(name, len);
51426 + unsigned int index = key % name_set.n_size;
51427 +
51428 + curr = &name_set.n_hash[index];
51429 +
51430 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51431 + curr = &((*curr)->next);
51432 +
51433 + if (*curr != NULL)
51434 + return 1;
51435 +
51436 + nentry = acl_alloc(sizeof (struct name_entry));
51437 + if (nentry == NULL)
51438 + return 0;
51439 + ientry = acl_alloc(sizeof (struct inodev_entry));
51440 + if (ientry == NULL)
51441 + return 0;
51442 + ientry->nentry = nentry;
51443 +
51444 + nentry->key = key;
51445 + nentry->name = name;
51446 + nentry->inode = inode;
51447 + nentry->device = device;
51448 + nentry->len = len;
51449 + nentry->deleted = deleted;
51450 +
51451 + nentry->prev = NULL;
51452 + curr = &name_set.n_hash[index];
51453 + if (*curr != NULL)
51454 + (*curr)->prev = nentry;
51455 + nentry->next = *curr;
51456 + *curr = nentry;
51457 +
51458 + /* insert us into the table searchable by inode/dev */
51459 + insert_inodev_entry(ientry);
51460 +
51461 + return 1;
51462 +}
51463 +
51464 +static void
51465 +insert_acl_obj_label(struct acl_object_label *obj,
51466 + struct acl_subject_label *subj)
51467 +{
51468 + unsigned int index =
51469 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51470 + struct acl_object_label **curr;
51471 +
51472 +
51473 + obj->prev = NULL;
51474 +
51475 + curr = &subj->obj_hash[index];
51476 + if (*curr != NULL)
51477 + (*curr)->prev = obj;
51478 +
51479 + obj->next = *curr;
51480 + *curr = obj;
51481 +
51482 + return;
51483 +}
51484 +
51485 +static void
51486 +insert_acl_subj_label(struct acl_subject_label *obj,
51487 + struct acl_role_label *role)
51488 +{
51489 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51490 + struct acl_subject_label **curr;
51491 +
51492 + obj->prev = NULL;
51493 +
51494 + curr = &role->subj_hash[index];
51495 + if (*curr != NULL)
51496 + (*curr)->prev = obj;
51497 +
51498 + obj->next = *curr;
51499 + *curr = obj;
51500 +
51501 + return;
51502 +}
51503 +
51504 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51505 +
51506 +static void *
51507 +create_table(__u32 * len, int elementsize)
51508 +{
51509 + unsigned int table_sizes[] = {
51510 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51511 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51512 + 4194301, 8388593, 16777213, 33554393, 67108859
51513 + };
51514 + void *newtable = NULL;
51515 + unsigned int pwr = 0;
51516 +
51517 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51518 + table_sizes[pwr] <= *len)
51519 + pwr++;
51520 +
51521 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51522 + return newtable;
51523 +
51524 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51525 + newtable =
51526 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51527 + else
51528 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51529 +
51530 + *len = table_sizes[pwr];
51531 +
51532 + return newtable;
51533 +}
51534 +
51535 +static int
51536 +init_variables(const struct gr_arg *arg)
51537 +{
51538 + struct task_struct *reaper = &init_task;
51539 + unsigned int stacksize;
51540 +
51541 + subj_map_set.s_size = arg->role_db.num_subjects;
51542 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51543 + name_set.n_size = arg->role_db.num_objects;
51544 + inodev_set.i_size = arg->role_db.num_objects;
51545 +
51546 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51547 + !name_set.n_size || !inodev_set.i_size)
51548 + return 1;
51549 +
51550 + if (!gr_init_uidset())
51551 + return 1;
51552 +
51553 + /* set up the stack that holds allocation info */
51554 +
51555 + stacksize = arg->role_db.num_pointers + 5;
51556 +
51557 + if (!acl_alloc_stack_init(stacksize))
51558 + return 1;
51559 +
51560 + /* grab reference for the real root dentry and vfsmount */
51561 + get_fs_root(reaper->fs, &real_root);
51562 +
51563 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51564 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51565 +#endif
51566 +
51567 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51568 + if (fakefs_obj_rw == NULL)
51569 + return 1;
51570 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51571 +
51572 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51573 + if (fakefs_obj_rwx == NULL)
51574 + return 1;
51575 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51576 +
51577 + subj_map_set.s_hash =
51578 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51579 + acl_role_set.r_hash =
51580 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51581 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51582 + inodev_set.i_hash =
51583 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51584 +
51585 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51586 + !name_set.n_hash || !inodev_set.i_hash)
51587 + return 1;
51588 +
51589 + memset(subj_map_set.s_hash, 0,
51590 + sizeof(struct subject_map *) * subj_map_set.s_size);
51591 + memset(acl_role_set.r_hash, 0,
51592 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51593 + memset(name_set.n_hash, 0,
51594 + sizeof (struct name_entry *) * name_set.n_size);
51595 + memset(inodev_set.i_hash, 0,
51596 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51597 +
51598 + return 0;
51599 +}
51600 +
51601 +/* free information not needed after startup
51602 + currently contains user->kernel pointer mappings for subjects
51603 +*/
51604 +
51605 +static void
51606 +free_init_variables(void)
51607 +{
51608 + __u32 i;
51609 +
51610 + if (subj_map_set.s_hash) {
51611 + for (i = 0; i < subj_map_set.s_size; i++) {
51612 + if (subj_map_set.s_hash[i]) {
51613 + kfree(subj_map_set.s_hash[i]);
51614 + subj_map_set.s_hash[i] = NULL;
51615 + }
51616 + }
51617 +
51618 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51619 + PAGE_SIZE)
51620 + kfree(subj_map_set.s_hash);
51621 + else
51622 + vfree(subj_map_set.s_hash);
51623 + }
51624 +
51625 + return;
51626 +}
51627 +
51628 +static void
51629 +free_variables(void)
51630 +{
51631 + struct acl_subject_label *s;
51632 + struct acl_role_label *r;
51633 + struct task_struct *task, *task2;
51634 + unsigned int x;
51635 +
51636 + gr_clear_learn_entries();
51637 +
51638 + read_lock(&tasklist_lock);
51639 + do_each_thread(task2, task) {
51640 + task->acl_sp_role = 0;
51641 + task->acl_role_id = 0;
51642 + task->acl = NULL;
51643 + task->role = NULL;
51644 + } while_each_thread(task2, task);
51645 + read_unlock(&tasklist_lock);
51646 +
51647 + /* release the reference to the real root dentry and vfsmount */
51648 + path_put(&real_root);
51649 +
51650 + /* free all object hash tables */
51651 +
51652 + FOR_EACH_ROLE_START(r)
51653 + if (r->subj_hash == NULL)
51654 + goto next_role;
51655 + FOR_EACH_SUBJECT_START(r, s, x)
51656 + if (s->obj_hash == NULL)
51657 + break;
51658 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51659 + kfree(s->obj_hash);
51660 + else
51661 + vfree(s->obj_hash);
51662 + FOR_EACH_SUBJECT_END(s, x)
51663 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51664 + if (s->obj_hash == NULL)
51665 + break;
51666 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51667 + kfree(s->obj_hash);
51668 + else
51669 + vfree(s->obj_hash);
51670 + FOR_EACH_NESTED_SUBJECT_END(s)
51671 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51672 + kfree(r->subj_hash);
51673 + else
51674 + vfree(r->subj_hash);
51675 + r->subj_hash = NULL;
51676 +next_role:
51677 + FOR_EACH_ROLE_END(r)
51678 +
51679 + acl_free_all();
51680 +
51681 + if (acl_role_set.r_hash) {
51682 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51683 + PAGE_SIZE)
51684 + kfree(acl_role_set.r_hash);
51685 + else
51686 + vfree(acl_role_set.r_hash);
51687 + }
51688 + if (name_set.n_hash) {
51689 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51690 + PAGE_SIZE)
51691 + kfree(name_set.n_hash);
51692 + else
51693 + vfree(name_set.n_hash);
51694 + }
51695 +
51696 + if (inodev_set.i_hash) {
51697 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51698 + PAGE_SIZE)
51699 + kfree(inodev_set.i_hash);
51700 + else
51701 + vfree(inodev_set.i_hash);
51702 + }
51703 +
51704 + gr_free_uidset();
51705 +
51706 + memset(&name_set, 0, sizeof (struct name_db));
51707 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51708 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51709 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51710 +
51711 + default_role = NULL;
51712 + role_list = NULL;
51713 +
51714 + return;
51715 +}
51716 +
51717 +static __u32
51718 +count_user_objs(struct acl_object_label *userp)
51719 +{
51720 + struct acl_object_label o_tmp;
51721 + __u32 num = 0;
51722 +
51723 + while (userp) {
51724 + if (copy_from_user(&o_tmp, userp,
51725 + sizeof (struct acl_object_label)))
51726 + break;
51727 +
51728 + userp = o_tmp.prev;
51729 + num++;
51730 + }
51731 +
51732 + return num;
51733 +}
51734 +
51735 +static struct acl_subject_label *
51736 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51737 +
51738 +static int
51739 +copy_user_glob(struct acl_object_label *obj)
51740 +{
51741 + struct acl_object_label *g_tmp, **guser;
51742 + unsigned int len;
51743 + char *tmp;
51744 +
51745 + if (obj->globbed == NULL)
51746 + return 0;
51747 +
51748 + guser = &obj->globbed;
51749 + while (*guser) {
51750 + g_tmp = (struct acl_object_label *)
51751 + acl_alloc(sizeof (struct acl_object_label));
51752 + if (g_tmp == NULL)
51753 + return -ENOMEM;
51754 +
51755 + if (copy_from_user(g_tmp, *guser,
51756 + sizeof (struct acl_object_label)))
51757 + return -EFAULT;
51758 +
51759 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51760 +
51761 + if (!len || len >= PATH_MAX)
51762 + return -EINVAL;
51763 +
51764 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51765 + return -ENOMEM;
51766 +
51767 + if (copy_from_user(tmp, g_tmp->filename, len))
51768 + return -EFAULT;
51769 + tmp[len-1] = '\0';
51770 + g_tmp->filename = tmp;
51771 +
51772 + *guser = g_tmp;
51773 + guser = &(g_tmp->next);
51774 + }
51775 +
51776 + return 0;
51777 +}
51778 +
51779 +static int
51780 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51781 + struct acl_role_label *role)
51782 +{
51783 + struct acl_object_label *o_tmp;
51784 + unsigned int len;
51785 + int ret;
51786 + char *tmp;
51787 +
51788 + while (userp) {
51789 + if ((o_tmp = (struct acl_object_label *)
51790 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51791 + return -ENOMEM;
51792 +
51793 + if (copy_from_user(o_tmp, userp,
51794 + sizeof (struct acl_object_label)))
51795 + return -EFAULT;
51796 +
51797 + userp = o_tmp->prev;
51798 +
51799 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51800 +
51801 + if (!len || len >= PATH_MAX)
51802 + return -EINVAL;
51803 +
51804 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51805 + return -ENOMEM;
51806 +
51807 + if (copy_from_user(tmp, o_tmp->filename, len))
51808 + return -EFAULT;
51809 + tmp[len-1] = '\0';
51810 + o_tmp->filename = tmp;
51811 +
51812 + insert_acl_obj_label(o_tmp, subj);
51813 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51814 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51815 + return -ENOMEM;
51816 +
51817 + ret = copy_user_glob(o_tmp);
51818 + if (ret)
51819 + return ret;
51820 +
51821 + if (o_tmp->nested) {
51822 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51823 + if (IS_ERR(o_tmp->nested))
51824 + return PTR_ERR(o_tmp->nested);
51825 +
51826 + /* insert into nested subject list */
51827 + o_tmp->nested->next = role->hash->first;
51828 + role->hash->first = o_tmp->nested;
51829 + }
51830 + }
51831 +
51832 + return 0;
51833 +}
51834 +
51835 +static __u32
51836 +count_user_subjs(struct acl_subject_label *userp)
51837 +{
51838 + struct acl_subject_label s_tmp;
51839 + __u32 num = 0;
51840 +
51841 + while (userp) {
51842 + if (copy_from_user(&s_tmp, userp,
51843 + sizeof (struct acl_subject_label)))
51844 + break;
51845 +
51846 + userp = s_tmp.prev;
51847 + /* do not count nested subjects against this count, since
51848 + they are not included in the hash table, but are
51849 + attached to objects. We have already counted
51850 + the subjects in userspace for the allocation
51851 + stack
51852 + */
51853 + if (!(s_tmp.mode & GR_NESTED))
51854 + num++;
51855 + }
51856 +
51857 + return num;
51858 +}
51859 +
51860 +static int
51861 +copy_user_allowedips(struct acl_role_label *rolep)
51862 +{
51863 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51864 +
51865 + ruserip = rolep->allowed_ips;
51866 +
51867 + while (ruserip) {
51868 + rlast = rtmp;
51869 +
51870 + if ((rtmp = (struct role_allowed_ip *)
51871 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51872 + return -ENOMEM;
51873 +
51874 + if (copy_from_user(rtmp, ruserip,
51875 + sizeof (struct role_allowed_ip)))
51876 + return -EFAULT;
51877 +
51878 + ruserip = rtmp->prev;
51879 +
51880 + if (!rlast) {
51881 + rtmp->prev = NULL;
51882 + rolep->allowed_ips = rtmp;
51883 + } else {
51884 + rlast->next = rtmp;
51885 + rtmp->prev = rlast;
51886 + }
51887 +
51888 + if (!ruserip)
51889 + rtmp->next = NULL;
51890 + }
51891 +
51892 + return 0;
51893 +}
51894 +
51895 +static int
51896 +copy_user_transitions(struct acl_role_label *rolep)
51897 +{
51898 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51899 +
51900 + unsigned int len;
51901 + char *tmp;
51902 +
51903 + rusertp = rolep->transitions;
51904 +
51905 + while (rusertp) {
51906 + rlast = rtmp;
51907 +
51908 + if ((rtmp = (struct role_transition *)
51909 + acl_alloc(sizeof (struct role_transition))) == NULL)
51910 + return -ENOMEM;
51911 +
51912 + if (copy_from_user(rtmp, rusertp,
51913 + sizeof (struct role_transition)))
51914 + return -EFAULT;
51915 +
51916 + rusertp = rtmp->prev;
51917 +
51918 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51919 +
51920 + if (!len || len >= GR_SPROLE_LEN)
51921 + return -EINVAL;
51922 +
51923 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51924 + return -ENOMEM;
51925 +
51926 + if (copy_from_user(tmp, rtmp->rolename, len))
51927 + return -EFAULT;
51928 + tmp[len-1] = '\0';
51929 + rtmp->rolename = tmp;
51930 +
51931 + if (!rlast) {
51932 + rtmp->prev = NULL;
51933 + rolep->transitions = rtmp;
51934 + } else {
51935 + rlast->next = rtmp;
51936 + rtmp->prev = rlast;
51937 + }
51938 +
51939 + if (!rusertp)
51940 + rtmp->next = NULL;
51941 + }
51942 +
51943 + return 0;
51944 +}
51945 +
51946 +static struct acl_subject_label *
51947 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51948 +{
51949 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51950 + unsigned int len;
51951 + char *tmp;
51952 + __u32 num_objs;
51953 + struct acl_ip_label **i_tmp, *i_utmp2;
51954 + struct gr_hash_struct ghash;
51955 + struct subject_map *subjmap;
51956 + unsigned int i_num;
51957 + int err;
51958 +
51959 + s_tmp = lookup_subject_map(userp);
51960 +
51961 + /* we've already copied this subject into the kernel, just return
51962 + the reference to it, and don't copy it over again
51963 + */
51964 + if (s_tmp)
51965 + return(s_tmp);
51966 +
51967 + if ((s_tmp = (struct acl_subject_label *)
51968 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51969 + return ERR_PTR(-ENOMEM);
51970 +
51971 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51972 + if (subjmap == NULL)
51973 + return ERR_PTR(-ENOMEM);
51974 +
51975 + subjmap->user = userp;
51976 + subjmap->kernel = s_tmp;
51977 + insert_subj_map_entry(subjmap);
51978 +
51979 + if (copy_from_user(s_tmp, userp,
51980 + sizeof (struct acl_subject_label)))
51981 + return ERR_PTR(-EFAULT);
51982 +
51983 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51984 +
51985 + if (!len || len >= PATH_MAX)
51986 + return ERR_PTR(-EINVAL);
51987 +
51988 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51989 + return ERR_PTR(-ENOMEM);
51990 +
51991 + if (copy_from_user(tmp, s_tmp->filename, len))
51992 + return ERR_PTR(-EFAULT);
51993 + tmp[len-1] = '\0';
51994 + s_tmp->filename = tmp;
51995 +
51996 + if (!strcmp(s_tmp->filename, "/"))
51997 + role->root_label = s_tmp;
51998 +
51999 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52000 + return ERR_PTR(-EFAULT);
52001 +
52002 + /* copy user and group transition tables */
52003 +
52004 + if (s_tmp->user_trans_num) {
52005 + uid_t *uidlist;
52006 +
52007 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52008 + if (uidlist == NULL)
52009 + return ERR_PTR(-ENOMEM);
52010 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52011 + return ERR_PTR(-EFAULT);
52012 +
52013 + s_tmp->user_transitions = uidlist;
52014 + }
52015 +
52016 + if (s_tmp->group_trans_num) {
52017 + gid_t *gidlist;
52018 +
52019 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52020 + if (gidlist == NULL)
52021 + return ERR_PTR(-ENOMEM);
52022 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52023 + return ERR_PTR(-EFAULT);
52024 +
52025 + s_tmp->group_transitions = gidlist;
52026 + }
52027 +
52028 + /* set up object hash table */
52029 + num_objs = count_user_objs(ghash.first);
52030 +
52031 + s_tmp->obj_hash_size = num_objs;
52032 + s_tmp->obj_hash =
52033 + (struct acl_object_label **)
52034 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52035 +
52036 + if (!s_tmp->obj_hash)
52037 + return ERR_PTR(-ENOMEM);
52038 +
52039 + memset(s_tmp->obj_hash, 0,
52040 + s_tmp->obj_hash_size *
52041 + sizeof (struct acl_object_label *));
52042 +
52043 + /* add in objects */
52044 + err = copy_user_objs(ghash.first, s_tmp, role);
52045 +
52046 + if (err)
52047 + return ERR_PTR(err);
52048 +
52049 + /* set pointer for parent subject */
52050 + if (s_tmp->parent_subject) {
52051 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52052 +
52053 + if (IS_ERR(s_tmp2))
52054 + return s_tmp2;
52055 +
52056 + s_tmp->parent_subject = s_tmp2;
52057 + }
52058 +
52059 + /* add in ip acls */
52060 +
52061 + if (!s_tmp->ip_num) {
52062 + s_tmp->ips = NULL;
52063 + goto insert;
52064 + }
52065 +
52066 + i_tmp =
52067 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52068 + sizeof (struct acl_ip_label *));
52069 +
52070 + if (!i_tmp)
52071 + return ERR_PTR(-ENOMEM);
52072 +
52073 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52074 + *(i_tmp + i_num) =
52075 + (struct acl_ip_label *)
52076 + acl_alloc(sizeof (struct acl_ip_label));
52077 + if (!*(i_tmp + i_num))
52078 + return ERR_PTR(-ENOMEM);
52079 +
52080 + if (copy_from_user
52081 + (&i_utmp2, s_tmp->ips + i_num,
52082 + sizeof (struct acl_ip_label *)))
52083 + return ERR_PTR(-EFAULT);
52084 +
52085 + if (copy_from_user
52086 + (*(i_tmp + i_num), i_utmp2,
52087 + sizeof (struct acl_ip_label)))
52088 + return ERR_PTR(-EFAULT);
52089 +
52090 + if ((*(i_tmp + i_num))->iface == NULL)
52091 + continue;
52092 +
52093 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52094 + if (!len || len >= IFNAMSIZ)
52095 + return ERR_PTR(-EINVAL);
52096 + tmp = acl_alloc(len);
52097 + if (tmp == NULL)
52098 + return ERR_PTR(-ENOMEM);
52099 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52100 + return ERR_PTR(-EFAULT);
52101 + (*(i_tmp + i_num))->iface = tmp;
52102 + }
52103 +
52104 + s_tmp->ips = i_tmp;
52105 +
52106 +insert:
52107 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52108 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52109 + return ERR_PTR(-ENOMEM);
52110 +
52111 + return s_tmp;
52112 +}
52113 +
52114 +static int
52115 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52116 +{
52117 + struct acl_subject_label s_pre;
52118 + struct acl_subject_label * ret;
52119 + int err;
52120 +
52121 + while (userp) {
52122 + if (copy_from_user(&s_pre, userp,
52123 + sizeof (struct acl_subject_label)))
52124 + return -EFAULT;
52125 +
52126 + /* do not add nested subjects here, add
52127 + while parsing objects
52128 + */
52129 +
52130 + if (s_pre.mode & GR_NESTED) {
52131 + userp = s_pre.prev;
52132 + continue;
52133 + }
52134 +
52135 + ret = do_copy_user_subj(userp, role);
52136 +
52137 + err = PTR_ERR(ret);
52138 + if (IS_ERR(ret))
52139 + return err;
52140 +
52141 + insert_acl_subj_label(ret, role);
52142 +
52143 + userp = s_pre.prev;
52144 + }
52145 +
52146 + return 0;
52147 +}
52148 +
52149 +static int
52150 +copy_user_acl(struct gr_arg *arg)
52151 +{
52152 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52153 + struct sprole_pw *sptmp;
52154 + struct gr_hash_struct *ghash;
52155 + uid_t *domainlist;
52156 + unsigned int r_num;
52157 + unsigned int len;
52158 + char *tmp;
52159 + int err = 0;
52160 + __u16 i;
52161 + __u32 num_subjs;
52162 +
52163 + /* we need a default and kernel role */
52164 + if (arg->role_db.num_roles < 2)
52165 + return -EINVAL;
52166 +
52167 + /* copy special role authentication info from userspace */
52168 +
52169 + num_sprole_pws = arg->num_sprole_pws;
52170 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52171 +
52172 + if (!acl_special_roles) {
52173 + err = -ENOMEM;
52174 + goto cleanup;
52175 + }
52176 +
52177 + for (i = 0; i < num_sprole_pws; i++) {
52178 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52179 + if (!sptmp) {
52180 + err = -ENOMEM;
52181 + goto cleanup;
52182 + }
52183 + if (copy_from_user(sptmp, arg->sprole_pws + i,
52184 + sizeof (struct sprole_pw))) {
52185 + err = -EFAULT;
52186 + goto cleanup;
52187 + }
52188 +
52189 + len =
52190 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52191 +
52192 + if (!len || len >= GR_SPROLE_LEN) {
52193 + err = -EINVAL;
52194 + goto cleanup;
52195 + }
52196 +
52197 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
52198 + err = -ENOMEM;
52199 + goto cleanup;
52200 + }
52201 +
52202 + if (copy_from_user(tmp, sptmp->rolename, len)) {
52203 + err = -EFAULT;
52204 + goto cleanup;
52205 + }
52206 + tmp[len-1] = '\0';
52207 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52208 + printk(KERN_ALERT "Copying special role %s\n", tmp);
52209 +#endif
52210 + sptmp->rolename = tmp;
52211 + acl_special_roles[i] = sptmp;
52212 + }
52213 +
52214 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52215 +
52216 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52217 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
52218 +
52219 + if (!r_tmp) {
52220 + err = -ENOMEM;
52221 + goto cleanup;
52222 + }
52223 +
52224 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
52225 + sizeof (struct acl_role_label *))) {
52226 + err = -EFAULT;
52227 + goto cleanup;
52228 + }
52229 +
52230 + if (copy_from_user(r_tmp, r_utmp2,
52231 + sizeof (struct acl_role_label))) {
52232 + err = -EFAULT;
52233 + goto cleanup;
52234 + }
52235 +
52236 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52237 +
52238 + if (!len || len >= PATH_MAX) {
52239 + err = -EINVAL;
52240 + goto cleanup;
52241 + }
52242 +
52243 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
52244 + err = -ENOMEM;
52245 + goto cleanup;
52246 + }
52247 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
52248 + err = -EFAULT;
52249 + goto cleanup;
52250 + }
52251 + tmp[len-1] = '\0';
52252 + r_tmp->rolename = tmp;
52253 +
52254 + if (!strcmp(r_tmp->rolename, "default")
52255 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52256 + default_role = r_tmp;
52257 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52258 + kernel_role = r_tmp;
52259 + }
52260 +
52261 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
52262 + err = -ENOMEM;
52263 + goto cleanup;
52264 + }
52265 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
52266 + err = -EFAULT;
52267 + goto cleanup;
52268 + }
52269 +
52270 + r_tmp->hash = ghash;
52271 +
52272 + num_subjs = count_user_subjs(r_tmp->hash->first);
52273 +
52274 + r_tmp->subj_hash_size = num_subjs;
52275 + r_tmp->subj_hash =
52276 + (struct acl_subject_label **)
52277 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52278 +
52279 + if (!r_tmp->subj_hash) {
52280 + err = -ENOMEM;
52281 + goto cleanup;
52282 + }
52283 +
52284 + err = copy_user_allowedips(r_tmp);
52285 + if (err)
52286 + goto cleanup;
52287 +
52288 + /* copy domain info */
52289 + if (r_tmp->domain_children != NULL) {
52290 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52291 + if (domainlist == NULL) {
52292 + err = -ENOMEM;
52293 + goto cleanup;
52294 + }
52295 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
52296 + err = -EFAULT;
52297 + goto cleanup;
52298 + }
52299 + r_tmp->domain_children = domainlist;
52300 + }
52301 +
52302 + err = copy_user_transitions(r_tmp);
52303 + if (err)
52304 + goto cleanup;
52305 +
52306 + memset(r_tmp->subj_hash, 0,
52307 + r_tmp->subj_hash_size *
52308 + sizeof (struct acl_subject_label *));
52309 +
52310 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52311 +
52312 + if (err)
52313 + goto cleanup;
52314 +
52315 + /* set nested subject list to null */
52316 + r_tmp->hash->first = NULL;
52317 +
52318 + insert_acl_role_label(r_tmp);
52319 + }
52320 +
52321 + goto return_err;
52322 + cleanup:
52323 + free_variables();
52324 + return_err:
52325 + return err;
52326 +
52327 +}
52328 +
52329 +static int
52330 +gracl_init(struct gr_arg *args)
52331 +{
52332 + int error = 0;
52333 +
52334 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52335 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52336 +
52337 + if (init_variables(args)) {
52338 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52339 + error = -ENOMEM;
52340 + free_variables();
52341 + goto out;
52342 + }
52343 +
52344 + error = copy_user_acl(args);
52345 + free_init_variables();
52346 + if (error) {
52347 + free_variables();
52348 + goto out;
52349 + }
52350 +
52351 + if ((error = gr_set_acls(0))) {
52352 + free_variables();
52353 + goto out;
52354 + }
52355 +
52356 + pax_open_kernel();
52357 + gr_status |= GR_READY;
52358 + pax_close_kernel();
52359 +
52360 + out:
52361 + return error;
52362 +}
52363 +
52364 +/* derived from glibc fnmatch() 0: match, 1: no match*/
52365 +
52366 +static int
52367 +glob_match(const char *p, const char *n)
52368 +{
52369 + char c;
52370 +
52371 + while ((c = *p++) != '\0') {
52372 + switch (c) {
52373 + case '?':
52374 + if (*n == '\0')
52375 + return 1;
52376 + else if (*n == '/')
52377 + return 1;
52378 + break;
52379 + case '\\':
52380 + if (*n != c)
52381 + return 1;
52382 + break;
52383 + case '*':
52384 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52385 + if (*n == '/')
52386 + return 1;
52387 + else if (c == '?') {
52388 + if (*n == '\0')
52389 + return 1;
52390 + else
52391 + ++n;
52392 + }
52393 + }
52394 + if (c == '\0') {
52395 + return 0;
52396 + } else {
52397 + const char *endp;
52398 +
52399 + if ((endp = strchr(n, '/')) == NULL)
52400 + endp = n + strlen(n);
52401 +
52402 + if (c == '[') {
52403 + for (--p; n < endp; ++n)
52404 + if (!glob_match(p, n))
52405 + return 0;
52406 + } else if (c == '/') {
52407 + while (*n != '\0' && *n != '/')
52408 + ++n;
52409 + if (*n == '/' && !glob_match(p, n + 1))
52410 + return 0;
52411 + } else {
52412 + for (--p; n < endp; ++n)
52413 + if (*n == c && !glob_match(p, n))
52414 + return 0;
52415 + }
52416 +
52417 + return 1;
52418 + }
52419 + case '[':
52420 + {
52421 + int not;
52422 + char cold;
52423 +
52424 + if (*n == '\0' || *n == '/')
52425 + return 1;
52426 +
52427 + not = (*p == '!' || *p == '^');
52428 + if (not)
52429 + ++p;
52430 +
52431 + c = *p++;
52432 + for (;;) {
52433 + unsigned char fn = (unsigned char)*n;
52434 +
52435 + if (c == '\0')
52436 + return 1;
52437 + else {
52438 + if (c == fn)
52439 + goto matched;
52440 + cold = c;
52441 + c = *p++;
52442 +
52443 + if (c == '-' && *p != ']') {
52444 + unsigned char cend = *p++;
52445 +
52446 + if (cend == '\0')
52447 + return 1;
52448 +
52449 + if (cold <= fn && fn <= cend)
52450 + goto matched;
52451 +
52452 + c = *p++;
52453 + }
52454 + }
52455 +
52456 + if (c == ']')
52457 + break;
52458 + }
52459 + if (!not)
52460 + return 1;
52461 + break;
52462 + matched:
52463 + while (c != ']') {
52464 + if (c == '\0')
52465 + return 1;
52466 +
52467 + c = *p++;
52468 + }
52469 + if (not)
52470 + return 1;
52471 + }
52472 + break;
52473 + default:
52474 + if (c != *n)
52475 + return 1;
52476 + }
52477 +
52478 + ++n;
52479 + }
52480 +
52481 + if (*n == '\0')
52482 + return 0;
52483 +
52484 + if (*n == '/')
52485 + return 0;
52486 +
52487 + return 1;
52488 +}
52489 +
52490 +static struct acl_object_label *
52491 +chk_glob_label(struct acl_object_label *globbed,
52492 + struct dentry *dentry, struct vfsmount *mnt, char **path)
52493 +{
52494 + struct acl_object_label *tmp;
52495 +
52496 + if (*path == NULL)
52497 + *path = gr_to_filename_nolock(dentry, mnt);
52498 +
52499 + tmp = globbed;
52500 +
52501 + while (tmp) {
52502 + if (!glob_match(tmp->filename, *path))
52503 + return tmp;
52504 + tmp = tmp->next;
52505 + }
52506 +
52507 + return NULL;
52508 +}
52509 +
52510 +static struct acl_object_label *
52511 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52512 + const ino_t curr_ino, const dev_t curr_dev,
52513 + const struct acl_subject_label *subj, char **path, const int checkglob)
52514 +{
52515 + struct acl_subject_label *tmpsubj;
52516 + struct acl_object_label *retval;
52517 + struct acl_object_label *retval2;
52518 +
52519 + tmpsubj = (struct acl_subject_label *) subj;
52520 + read_lock(&gr_inode_lock);
52521 + do {
52522 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52523 + if (retval) {
52524 + if (checkglob && retval->globbed) {
52525 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
52526 + (struct vfsmount *)orig_mnt, path);
52527 + if (retval2)
52528 + retval = retval2;
52529 + }
52530 + break;
52531 + }
52532 + } while ((tmpsubj = tmpsubj->parent_subject));
52533 + read_unlock(&gr_inode_lock);
52534 +
52535 + return retval;
52536 +}
52537 +
52538 +static __inline__ struct acl_object_label *
52539 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52540 + struct dentry *curr_dentry,
52541 + const struct acl_subject_label *subj, char **path, const int checkglob)
52542 +{
52543 + int newglob = checkglob;
52544 + ino_t inode;
52545 + dev_t device;
52546 +
52547 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52548 + as we don't want a / * rule to match instead of the / object
52549 + don't do this for create lookups that call this function though, since they're looking up
52550 + on the parent and thus need globbing checks on all paths
52551 + */
52552 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52553 + newglob = GR_NO_GLOB;
52554 +
52555 + spin_lock(&curr_dentry->d_lock);
52556 + inode = curr_dentry->d_inode->i_ino;
52557 + device = __get_dev(curr_dentry);
52558 + spin_unlock(&curr_dentry->d_lock);
52559 +
52560 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52561 +}
52562 +
52563 +static struct acl_object_label *
52564 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52565 + const struct acl_subject_label *subj, char *path, const int checkglob)
52566 +{
52567 + struct dentry *dentry = (struct dentry *) l_dentry;
52568 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52569 + struct acl_object_label *retval;
52570 + struct dentry *parent;
52571 +
52572 + write_seqlock(&rename_lock);
52573 + br_read_lock(vfsmount_lock);
52574 +
52575 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52576 +#ifdef CONFIG_NET
52577 + mnt == sock_mnt ||
52578 +#endif
52579 +#ifdef CONFIG_HUGETLBFS
52580 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52581 +#endif
52582 + /* ignore Eric Biederman */
52583 + IS_PRIVATE(l_dentry->d_inode))) {
52584 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52585 + goto out;
52586 + }
52587 +
52588 + for (;;) {
52589 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52590 + break;
52591 +
52592 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52593 + if (mnt->mnt_parent == mnt)
52594 + break;
52595 +
52596 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52597 + if (retval != NULL)
52598 + goto out;
52599 +
52600 + dentry = mnt->mnt_mountpoint;
52601 + mnt = mnt->mnt_parent;
52602 + continue;
52603 + }
52604 +
52605 + parent = dentry->d_parent;
52606 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52607 + if (retval != NULL)
52608 + goto out;
52609 +
52610 + dentry = parent;
52611 + }
52612 +
52613 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52614 +
52615 + /* real_root is pinned so we don't have to hold a reference */
52616 + if (retval == NULL)
52617 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52618 +out:
52619 + br_read_unlock(vfsmount_lock);
52620 + write_sequnlock(&rename_lock);
52621 +
52622 + BUG_ON(retval == NULL);
52623 +
52624 + return retval;
52625 +}
52626 +
52627 +static __inline__ struct acl_object_label *
52628 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52629 + const struct acl_subject_label *subj)
52630 +{
52631 + char *path = NULL;
52632 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52633 +}
52634 +
52635 +static __inline__ struct acl_object_label *
52636 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52637 + const struct acl_subject_label *subj)
52638 +{
52639 + char *path = NULL;
52640 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52641 +}
52642 +
52643 +static __inline__ struct acl_object_label *
52644 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52645 + const struct acl_subject_label *subj, char *path)
52646 +{
52647 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52648 +}
52649 +
52650 +static struct acl_subject_label *
52651 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52652 + const struct acl_role_label *role)
52653 +{
52654 + struct dentry *dentry = (struct dentry *) l_dentry;
52655 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52656 + struct acl_subject_label *retval;
52657 + struct dentry *parent;
52658 +
52659 + write_seqlock(&rename_lock);
52660 + br_read_lock(vfsmount_lock);
52661 +
52662 + for (;;) {
52663 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52664 + break;
52665 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52666 + if (mnt->mnt_parent == mnt)
52667 + break;
52668 +
52669 + spin_lock(&dentry->d_lock);
52670 + read_lock(&gr_inode_lock);
52671 + retval =
52672 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52673 + __get_dev(dentry), role);
52674 + read_unlock(&gr_inode_lock);
52675 + spin_unlock(&dentry->d_lock);
52676 + if (retval != NULL)
52677 + goto out;
52678 +
52679 + dentry = mnt->mnt_mountpoint;
52680 + mnt = mnt->mnt_parent;
52681 + continue;
52682 + }
52683 +
52684 + spin_lock(&dentry->d_lock);
52685 + read_lock(&gr_inode_lock);
52686 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52687 + __get_dev(dentry), role);
52688 + read_unlock(&gr_inode_lock);
52689 + parent = dentry->d_parent;
52690 + spin_unlock(&dentry->d_lock);
52691 +
52692 + if (retval != NULL)
52693 + goto out;
52694 +
52695 + dentry = parent;
52696 + }
52697 +
52698 + spin_lock(&dentry->d_lock);
52699 + read_lock(&gr_inode_lock);
52700 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52701 + __get_dev(dentry), role);
52702 + read_unlock(&gr_inode_lock);
52703 + spin_unlock(&dentry->d_lock);
52704 +
52705 + if (unlikely(retval == NULL)) {
52706 + /* real_root is pinned, we don't need to hold a reference */
52707 + read_lock(&gr_inode_lock);
52708 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52709 + __get_dev(real_root.dentry), role);
52710 + read_unlock(&gr_inode_lock);
52711 + }
52712 +out:
52713 + br_read_unlock(vfsmount_lock);
52714 + write_sequnlock(&rename_lock);
52715 +
52716 + BUG_ON(retval == NULL);
52717 +
52718 + return retval;
52719 +}
52720 +
52721 +static void
52722 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52723 +{
52724 + struct task_struct *task = current;
52725 + const struct cred *cred = current_cred();
52726 +
52727 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52728 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52729 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52730 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52731 +
52732 + return;
52733 +}
52734 +
52735 +static void
52736 +gr_log_learn_sysctl(const char *path, const __u32 mode)
52737 +{
52738 + struct task_struct *task = current;
52739 + const struct cred *cred = current_cred();
52740 +
52741 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52742 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52743 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52744 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52745 +
52746 + return;
52747 +}
52748 +
52749 +static void
52750 +gr_log_learn_id_change(const char type, const unsigned int real,
52751 + const unsigned int effective, const unsigned int fs)
52752 +{
52753 + struct task_struct *task = current;
52754 + const struct cred *cred = current_cred();
52755 +
52756 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52757 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52758 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52759 + type, real, effective, fs, &task->signal->saved_ip);
52760 +
52761 + return;
52762 +}
52763 +
52764 +__u32
52765 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52766 + const struct vfsmount * mnt)
52767 +{
52768 + __u32 retval = mode;
52769 + struct acl_subject_label *curracl;
52770 + struct acl_object_label *currobj;
52771 +
52772 + if (unlikely(!(gr_status & GR_READY)))
52773 + return (mode & ~GR_AUDITS);
52774 +
52775 + curracl = current->acl;
52776 +
52777 + currobj = chk_obj_label(dentry, mnt, curracl);
52778 + retval = currobj->mode & mode;
52779 +
52780 + /* if we're opening a specified transfer file for writing
52781 + (e.g. /dev/initctl), then transfer our role to init
52782 + */
52783 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52784 + current->role->roletype & GR_ROLE_PERSIST)) {
52785 + struct task_struct *task = init_pid_ns.child_reaper;
52786 +
52787 + if (task->role != current->role) {
52788 + task->acl_sp_role = 0;
52789 + task->acl_role_id = current->acl_role_id;
52790 + task->role = current->role;
52791 + rcu_read_lock();
52792 + read_lock(&grsec_exec_file_lock);
52793 + gr_apply_subject_to_task(task);
52794 + read_unlock(&grsec_exec_file_lock);
52795 + rcu_read_unlock();
52796 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52797 + }
52798 + }
52799 +
52800 + if (unlikely
52801 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52802 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52803 + __u32 new_mode = mode;
52804 +
52805 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52806 +
52807 + retval = new_mode;
52808 +
52809 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52810 + new_mode |= GR_INHERIT;
52811 +
52812 + if (!(mode & GR_NOLEARN))
52813 + gr_log_learn(dentry, mnt, new_mode);
52814 + }
52815 +
52816 + return retval;
52817 +}
52818 +
52819 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52820 + const struct dentry *parent,
52821 + const struct vfsmount *mnt)
52822 +{
52823 + struct name_entry *match;
52824 + struct acl_object_label *matchpo;
52825 + struct acl_subject_label *curracl;
52826 + char *path;
52827 +
52828 + if (unlikely(!(gr_status & GR_READY)))
52829 + return NULL;
52830 +
52831 + preempt_disable();
52832 + path = gr_to_filename_rbac(new_dentry, mnt);
52833 + match = lookup_name_entry_create(path);
52834 +
52835 + curracl = current->acl;
52836 +
52837 + if (match) {
52838 + read_lock(&gr_inode_lock);
52839 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52840 + read_unlock(&gr_inode_lock);
52841 +
52842 + if (matchpo) {
52843 + preempt_enable();
52844 + return matchpo;
52845 + }
52846 + }
52847 +
52848 + // lookup parent
52849 +
52850 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52851 +
52852 + preempt_enable();
52853 + return matchpo;
52854 +}
52855 +
52856 +__u32
52857 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52858 + const struct vfsmount * mnt, const __u32 mode)
52859 +{
52860 + struct acl_object_label *matchpo;
52861 + __u32 retval;
52862 +
52863 + if (unlikely(!(gr_status & GR_READY)))
52864 + return (mode & ~GR_AUDITS);
52865 +
52866 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52867 +
52868 + retval = matchpo->mode & mode;
52869 +
52870 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52871 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52872 + __u32 new_mode = mode;
52873 +
52874 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52875 +
52876 + gr_log_learn(new_dentry, mnt, new_mode);
52877 + return new_mode;
52878 + }
52879 +
52880 + return retval;
52881 +}
52882 +
52883 +__u32
52884 +gr_check_link(const struct dentry * new_dentry,
52885 + const struct dentry * parent_dentry,
52886 + const struct vfsmount * parent_mnt,
52887 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52888 +{
52889 + struct acl_object_label *obj;
52890 + __u32 oldmode, newmode;
52891 + __u32 needmode;
52892 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52893 + GR_DELETE | GR_INHERIT;
52894 +
52895 + if (unlikely(!(gr_status & GR_READY)))
52896 + return (GR_CREATE | GR_LINK);
52897 +
52898 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52899 + oldmode = obj->mode;
52900 +
52901 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52902 + newmode = obj->mode;
52903 +
52904 + needmode = newmode & checkmodes;
52905 +
52906 + // old name for hardlink must have at least the permissions of the new name
52907 + if ((oldmode & needmode) != needmode)
52908 + goto bad;
52909 +
52910 + // if old name had restrictions/auditing, make sure the new name does as well
52911 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52912 +
52913 + // don't allow hardlinking of suid/sgid files without permission
52914 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52915 + needmode |= GR_SETID;
52916 +
52917 + if ((newmode & needmode) != needmode)
52918 + goto bad;
52919 +
52920 + // enforce minimum permissions
52921 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52922 + return newmode;
52923 +bad:
52924 + needmode = oldmode;
52925 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52926 + needmode |= GR_SETID;
52927 +
52928 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52929 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52930 + return (GR_CREATE | GR_LINK);
52931 + } else if (newmode & GR_SUPPRESS)
52932 + return GR_SUPPRESS;
52933 + else
52934 + return 0;
52935 +}
52936 +
52937 +int
52938 +gr_check_hidden_task(const struct task_struct *task)
52939 +{
52940 + if (unlikely(!(gr_status & GR_READY)))
52941 + return 0;
52942 +
52943 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52944 + return 1;
52945 +
52946 + return 0;
52947 +}
52948 +
52949 +int
52950 +gr_check_protected_task(const struct task_struct *task)
52951 +{
52952 + if (unlikely(!(gr_status & GR_READY) || !task))
52953 + return 0;
52954 +
52955 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52956 + task->acl != current->acl)
52957 + return 1;
52958 +
52959 + return 0;
52960 +}
52961 +
52962 +int
52963 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52964 +{
52965 + struct task_struct *p;
52966 + int ret = 0;
52967 +
52968 + if (unlikely(!(gr_status & GR_READY) || !pid))
52969 + return ret;
52970 +
52971 + read_lock(&tasklist_lock);
52972 + do_each_pid_task(pid, type, p) {
52973 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52974 + p->acl != current->acl) {
52975 + ret = 1;
52976 + goto out;
52977 + }
52978 + } while_each_pid_task(pid, type, p);
52979 +out:
52980 + read_unlock(&tasklist_lock);
52981 +
52982 + return ret;
52983 +}
52984 +
52985 +void
52986 +gr_copy_label(struct task_struct *tsk)
52987 +{
52988 + tsk->signal->used_accept = 0;
52989 + tsk->acl_sp_role = 0;
52990 + tsk->acl_role_id = current->acl_role_id;
52991 + tsk->acl = current->acl;
52992 + tsk->role = current->role;
52993 + tsk->signal->curr_ip = current->signal->curr_ip;
52994 + tsk->signal->saved_ip = current->signal->saved_ip;
52995 + if (current->exec_file)
52996 + get_file(current->exec_file);
52997 + tsk->exec_file = current->exec_file;
52998 + tsk->is_writable = current->is_writable;
52999 + if (unlikely(current->signal->used_accept)) {
53000 + current->signal->curr_ip = 0;
53001 + current->signal->saved_ip = 0;
53002 + }
53003 +
53004 + return;
53005 +}
53006 +
53007 +static void
53008 +gr_set_proc_res(struct task_struct *task)
53009 +{
53010 + struct acl_subject_label *proc;
53011 + unsigned short i;
53012 +
53013 + proc = task->acl;
53014 +
53015 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53016 + return;
53017 +
53018 + for (i = 0; i < RLIM_NLIMITS; i++) {
53019 + if (!(proc->resmask & (1 << i)))
53020 + continue;
53021 +
53022 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53023 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53024 + }
53025 +
53026 + return;
53027 +}
53028 +
53029 +extern int __gr_process_user_ban(struct user_struct *user);
53030 +
53031 +int
53032 +gr_check_user_change(int real, int effective, int fs)
53033 +{
53034 + unsigned int i;
53035 + __u16 num;
53036 + uid_t *uidlist;
53037 + int curuid;
53038 + int realok = 0;
53039 + int effectiveok = 0;
53040 + int fsok = 0;
53041 +
53042 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53043 + struct user_struct *user;
53044 +
53045 + if (real == -1)
53046 + goto skipit;
53047 +
53048 + user = find_user(real);
53049 + if (user == NULL)
53050 + goto skipit;
53051 +
53052 + if (__gr_process_user_ban(user)) {
53053 + /* for find_user */
53054 + free_uid(user);
53055 + return 1;
53056 + }
53057 +
53058 + /* for find_user */
53059 + free_uid(user);
53060 +
53061 +skipit:
53062 +#endif
53063 +
53064 + if (unlikely(!(gr_status & GR_READY)))
53065 + return 0;
53066 +
53067 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53068 + gr_log_learn_id_change('u', real, effective, fs);
53069 +
53070 + num = current->acl->user_trans_num;
53071 + uidlist = current->acl->user_transitions;
53072 +
53073 + if (uidlist == NULL)
53074 + return 0;
53075 +
53076 + if (real == -1)
53077 + realok = 1;
53078 + if (effective == -1)
53079 + effectiveok = 1;
53080 + if (fs == -1)
53081 + fsok = 1;
53082 +
53083 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
53084 + for (i = 0; i < num; i++) {
53085 + curuid = (int)uidlist[i];
53086 + if (real == curuid)
53087 + realok = 1;
53088 + if (effective == curuid)
53089 + effectiveok = 1;
53090 + if (fs == curuid)
53091 + fsok = 1;
53092 + }
53093 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
53094 + for (i = 0; i < num; i++) {
53095 + curuid = (int)uidlist[i];
53096 + if (real == curuid)
53097 + break;
53098 + if (effective == curuid)
53099 + break;
53100 + if (fs == curuid)
53101 + break;
53102 + }
53103 + /* not in deny list */
53104 + if (i == num) {
53105 + realok = 1;
53106 + effectiveok = 1;
53107 + fsok = 1;
53108 + }
53109 + }
53110 +
53111 + if (realok && effectiveok && fsok)
53112 + return 0;
53113 + else {
53114 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53115 + return 1;
53116 + }
53117 +}
53118 +
53119 +int
53120 +gr_check_group_change(int real, int effective, int fs)
53121 +{
53122 + unsigned int i;
53123 + __u16 num;
53124 + gid_t *gidlist;
53125 + int curgid;
53126 + int realok = 0;
53127 + int effectiveok = 0;
53128 + int fsok = 0;
53129 +
53130 + if (unlikely(!(gr_status & GR_READY)))
53131 + return 0;
53132 +
53133 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53134 + gr_log_learn_id_change('g', real, effective, fs);
53135 +
53136 + num = current->acl->group_trans_num;
53137 + gidlist = current->acl->group_transitions;
53138 +
53139 + if (gidlist == NULL)
53140 + return 0;
53141 +
53142 + if (real == -1)
53143 + realok = 1;
53144 + if (effective == -1)
53145 + effectiveok = 1;
53146 + if (fs == -1)
53147 + fsok = 1;
53148 +
53149 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
53150 + for (i = 0; i < num; i++) {
53151 + curgid = (int)gidlist[i];
53152 + if (real == curgid)
53153 + realok = 1;
53154 + if (effective == curgid)
53155 + effectiveok = 1;
53156 + if (fs == curgid)
53157 + fsok = 1;
53158 + }
53159 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
53160 + for (i = 0; i < num; i++) {
53161 + curgid = (int)gidlist[i];
53162 + if (real == curgid)
53163 + break;
53164 + if (effective == curgid)
53165 + break;
53166 + if (fs == curgid)
53167 + break;
53168 + }
53169 + /* not in deny list */
53170 + if (i == num) {
53171 + realok = 1;
53172 + effectiveok = 1;
53173 + fsok = 1;
53174 + }
53175 + }
53176 +
53177 + if (realok && effectiveok && fsok)
53178 + return 0;
53179 + else {
53180 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53181 + return 1;
53182 + }
53183 +}
53184 +
53185 +void
53186 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53187 +{
53188 + struct acl_role_label *role = task->role;
53189 + struct acl_subject_label *subj = NULL;
53190 + struct acl_object_label *obj;
53191 + struct file *filp;
53192 +
53193 + if (unlikely(!(gr_status & GR_READY)))
53194 + return;
53195 +
53196 + filp = task->exec_file;
53197 +
53198 + /* kernel process, we'll give them the kernel role */
53199 + if (unlikely(!filp)) {
53200 + task->role = kernel_role;
53201 + task->acl = kernel_role->root_label;
53202 + return;
53203 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53204 + role = lookup_acl_role_label(task, uid, gid);
53205 +
53206 + /* perform subject lookup in possibly new role
53207 + we can use this result below in the case where role == task->role
53208 + */
53209 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53210 +
53211 + /* if we changed uid/gid, but result in the same role
53212 + and are using inheritance, don't lose the inherited subject
53213 + if current subject is other than what normal lookup
53214 + would result in, we arrived via inheritance, don't
53215 + lose subject
53216 + */
53217 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53218 + (subj == task->acl)))
53219 + task->acl = subj;
53220 +
53221 + task->role = role;
53222 +
53223 + task->is_writable = 0;
53224 +
53225 + /* ignore additional mmap checks for processes that are writable
53226 + by the default ACL */
53227 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53228 + if (unlikely(obj->mode & GR_WRITE))
53229 + task->is_writable = 1;
53230 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53231 + if (unlikely(obj->mode & GR_WRITE))
53232 + task->is_writable = 1;
53233 +
53234 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53235 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53236 +#endif
53237 +
53238 + gr_set_proc_res(task);
53239 +
53240 + return;
53241 +}
53242 +
53243 +int
53244 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53245 + const int unsafe_share)
53246 +{
53247 + struct task_struct *task = current;
53248 + struct acl_subject_label *newacl;
53249 + struct acl_object_label *obj;
53250 + __u32 retmode;
53251 +
53252 + if (unlikely(!(gr_status & GR_READY)))
53253 + return 0;
53254 +
53255 + newacl = chk_subj_label(dentry, mnt, task->role);
53256 +
53257 + task_lock(task);
53258 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
53259 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53260 + !(task->role->roletype & GR_ROLE_GOD) &&
53261 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53262 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
53263 + task_unlock(task);
53264 + if (unsafe_share)
53265 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53266 + else
53267 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53268 + return -EACCES;
53269 + }
53270 + task_unlock(task);
53271 +
53272 + obj = chk_obj_label(dentry, mnt, task->acl);
53273 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53274 +
53275 + if (!(task->acl->mode & GR_INHERITLEARN) &&
53276 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53277 + if (obj->nested)
53278 + task->acl = obj->nested;
53279 + else
53280 + task->acl = newacl;
53281 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53282 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53283 +
53284 + task->is_writable = 0;
53285 +
53286 + /* ignore additional mmap checks for processes that are writable
53287 + by the default ACL */
53288 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53289 + if (unlikely(obj->mode & GR_WRITE))
53290 + task->is_writable = 1;
53291 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53292 + if (unlikely(obj->mode & GR_WRITE))
53293 + task->is_writable = 1;
53294 +
53295 + gr_set_proc_res(task);
53296 +
53297 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53298 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53299 +#endif
53300 + return 0;
53301 +}
53302 +
53303 +/* always called with valid inodev ptr */
53304 +static void
53305 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53306 +{
53307 + struct acl_object_label *matchpo;
53308 + struct acl_subject_label *matchps;
53309 + struct acl_subject_label *subj;
53310 + struct acl_role_label *role;
53311 + unsigned int x;
53312 +
53313 + FOR_EACH_ROLE_START(role)
53314 + FOR_EACH_SUBJECT_START(role, subj, x)
53315 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53316 + matchpo->mode |= GR_DELETED;
53317 + FOR_EACH_SUBJECT_END(subj,x)
53318 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53319 + if (subj->inode == ino && subj->device == dev)
53320 + subj->mode |= GR_DELETED;
53321 + FOR_EACH_NESTED_SUBJECT_END(subj)
53322 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53323 + matchps->mode |= GR_DELETED;
53324 + FOR_EACH_ROLE_END(role)
53325 +
53326 + inodev->nentry->deleted = 1;
53327 +
53328 + return;
53329 +}
53330 +
53331 +void
53332 +gr_handle_delete(const ino_t ino, const dev_t dev)
53333 +{
53334 + struct inodev_entry *inodev;
53335 +
53336 + if (unlikely(!(gr_status & GR_READY)))
53337 + return;
53338 +
53339 + write_lock(&gr_inode_lock);
53340 + inodev = lookup_inodev_entry(ino, dev);
53341 + if (inodev != NULL)
53342 + do_handle_delete(inodev, ino, dev);
53343 + write_unlock(&gr_inode_lock);
53344 +
53345 + return;
53346 +}
53347 +
53348 +static void
53349 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53350 + const ino_t newinode, const dev_t newdevice,
53351 + struct acl_subject_label *subj)
53352 +{
53353 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53354 + struct acl_object_label *match;
53355 +
53356 + match = subj->obj_hash[index];
53357 +
53358 + while (match && (match->inode != oldinode ||
53359 + match->device != olddevice ||
53360 + !(match->mode & GR_DELETED)))
53361 + match = match->next;
53362 +
53363 + if (match && (match->inode == oldinode)
53364 + && (match->device == olddevice)
53365 + && (match->mode & GR_DELETED)) {
53366 + if (match->prev == NULL) {
53367 + subj->obj_hash[index] = match->next;
53368 + if (match->next != NULL)
53369 + match->next->prev = NULL;
53370 + } else {
53371 + match->prev->next = match->next;
53372 + if (match->next != NULL)
53373 + match->next->prev = match->prev;
53374 + }
53375 + match->prev = NULL;
53376 + match->next = NULL;
53377 + match->inode = newinode;
53378 + match->device = newdevice;
53379 + match->mode &= ~GR_DELETED;
53380 +
53381 + insert_acl_obj_label(match, subj);
53382 + }
53383 +
53384 + return;
53385 +}
53386 +
53387 +static void
53388 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53389 + const ino_t newinode, const dev_t newdevice,
53390 + struct acl_role_label *role)
53391 +{
53392 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53393 + struct acl_subject_label *match;
53394 +
53395 + match = role->subj_hash[index];
53396 +
53397 + while (match && (match->inode != oldinode ||
53398 + match->device != olddevice ||
53399 + !(match->mode & GR_DELETED)))
53400 + match = match->next;
53401 +
53402 + if (match && (match->inode == oldinode)
53403 + && (match->device == olddevice)
53404 + && (match->mode & GR_DELETED)) {
53405 + if (match->prev == NULL) {
53406 + role->subj_hash[index] = match->next;
53407 + if (match->next != NULL)
53408 + match->next->prev = NULL;
53409 + } else {
53410 + match->prev->next = match->next;
53411 + if (match->next != NULL)
53412 + match->next->prev = match->prev;
53413 + }
53414 + match->prev = NULL;
53415 + match->next = NULL;
53416 + match->inode = newinode;
53417 + match->device = newdevice;
53418 + match->mode &= ~GR_DELETED;
53419 +
53420 + insert_acl_subj_label(match, role);
53421 + }
53422 +
53423 + return;
53424 +}
53425 +
53426 +static void
53427 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53428 + const ino_t newinode, const dev_t newdevice)
53429 +{
53430 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53431 + struct inodev_entry *match;
53432 +
53433 + match = inodev_set.i_hash[index];
53434 +
53435 + while (match && (match->nentry->inode != oldinode ||
53436 + match->nentry->device != olddevice || !match->nentry->deleted))
53437 + match = match->next;
53438 +
53439 + if (match && (match->nentry->inode == oldinode)
53440 + && (match->nentry->device == olddevice) &&
53441 + match->nentry->deleted) {
53442 + if (match->prev == NULL) {
53443 + inodev_set.i_hash[index] = match->next;
53444 + if (match->next != NULL)
53445 + match->next->prev = NULL;
53446 + } else {
53447 + match->prev->next = match->next;
53448 + if (match->next != NULL)
53449 + match->next->prev = match->prev;
53450 + }
53451 + match->prev = NULL;
53452 + match->next = NULL;
53453 + match->nentry->inode = newinode;
53454 + match->nentry->device = newdevice;
53455 + match->nentry->deleted = 0;
53456 +
53457 + insert_inodev_entry(match);
53458 + }
53459 +
53460 + return;
53461 +}
53462 +
53463 +static void
53464 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53465 +{
53466 + struct acl_subject_label *subj;
53467 + struct acl_role_label *role;
53468 + unsigned int x;
53469 +
53470 + FOR_EACH_ROLE_START(role)
53471 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53472 +
53473 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53474 + if ((subj->inode == ino) && (subj->device == dev)) {
53475 + subj->inode = ino;
53476 + subj->device = dev;
53477 + }
53478 + FOR_EACH_NESTED_SUBJECT_END(subj)
53479 + FOR_EACH_SUBJECT_START(role, subj, x)
53480 + update_acl_obj_label(matchn->inode, matchn->device,
53481 + ino, dev, subj);
53482 + FOR_EACH_SUBJECT_END(subj,x)
53483 + FOR_EACH_ROLE_END(role)
53484 +
53485 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53486 +
53487 + return;
53488 +}
53489 +
53490 +static void
53491 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53492 + const struct vfsmount *mnt)
53493 +{
53494 + ino_t ino = dentry->d_inode->i_ino;
53495 + dev_t dev = __get_dev(dentry);
53496 +
53497 + __do_handle_create(matchn, ino, dev);
53498 +
53499 + return;
53500 +}
53501 +
53502 +void
53503 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53504 +{
53505 + struct name_entry *matchn;
53506 +
53507 + if (unlikely(!(gr_status & GR_READY)))
53508 + return;
53509 +
53510 + preempt_disable();
53511 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53512 +
53513 + if (unlikely((unsigned long)matchn)) {
53514 + write_lock(&gr_inode_lock);
53515 + do_handle_create(matchn, dentry, mnt);
53516 + write_unlock(&gr_inode_lock);
53517 + }
53518 + preempt_enable();
53519 +
53520 + return;
53521 +}
53522 +
53523 +void
53524 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53525 +{
53526 + struct name_entry *matchn;
53527 +
53528 + if (unlikely(!(gr_status & GR_READY)))
53529 + return;
53530 +
53531 + preempt_disable();
53532 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53533 +
53534 + if (unlikely((unsigned long)matchn)) {
53535 + write_lock(&gr_inode_lock);
53536 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53537 + write_unlock(&gr_inode_lock);
53538 + }
53539 + preempt_enable();
53540 +
53541 + return;
53542 +}
53543 +
53544 +void
53545 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53546 + struct dentry *old_dentry,
53547 + struct dentry *new_dentry,
53548 + struct vfsmount *mnt, const __u8 replace)
53549 +{
53550 + struct name_entry *matchn;
53551 + struct inodev_entry *inodev;
53552 + struct inode *inode = new_dentry->d_inode;
53553 + ino_t old_ino = old_dentry->d_inode->i_ino;
53554 + dev_t old_dev = __get_dev(old_dentry);
53555 +
53556 + /* vfs_rename swaps the name and parent link for old_dentry and
53557 + new_dentry
53558 + at this point, old_dentry has the new name, parent link, and inode
53559 + for the renamed file
53560 + if a file is being replaced by a rename, new_dentry has the inode
53561 + and name for the replaced file
53562 + */
53563 +
53564 + if (unlikely(!(gr_status & GR_READY)))
53565 + return;
53566 +
53567 + preempt_disable();
53568 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53569 +
53570 + /* we wouldn't have to check d_inode if it weren't for
53571 + NFS silly-renaming
53572 + */
53573 +
53574 + write_lock(&gr_inode_lock);
53575 + if (unlikely(replace && inode)) {
53576 + ino_t new_ino = inode->i_ino;
53577 + dev_t new_dev = __get_dev(new_dentry);
53578 +
53579 + inodev = lookup_inodev_entry(new_ino, new_dev);
53580 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53581 + do_handle_delete(inodev, new_ino, new_dev);
53582 + }
53583 +
53584 + inodev = lookup_inodev_entry(old_ino, old_dev);
53585 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53586 + do_handle_delete(inodev, old_ino, old_dev);
53587 +
53588 + if (unlikely((unsigned long)matchn))
53589 + do_handle_create(matchn, old_dentry, mnt);
53590 +
53591 + write_unlock(&gr_inode_lock);
53592 + preempt_enable();
53593 +
53594 + return;
53595 +}
53596 +
53597 +static int
53598 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53599 + unsigned char **sum)
53600 +{
53601 + struct acl_role_label *r;
53602 + struct role_allowed_ip *ipp;
53603 + struct role_transition *trans;
53604 + unsigned int i;
53605 + int found = 0;
53606 + u32 curr_ip = current->signal->curr_ip;
53607 +
53608 + current->signal->saved_ip = curr_ip;
53609 +
53610 + /* check transition table */
53611 +
53612 + for (trans = current->role->transitions; trans; trans = trans->next) {
53613 + if (!strcmp(rolename, trans->rolename)) {
53614 + found = 1;
53615 + break;
53616 + }
53617 + }
53618 +
53619 + if (!found)
53620 + return 0;
53621 +
53622 + /* handle special roles that do not require authentication
53623 + and check ip */
53624 +
53625 + FOR_EACH_ROLE_START(r)
53626 + if (!strcmp(rolename, r->rolename) &&
53627 + (r->roletype & GR_ROLE_SPECIAL)) {
53628 + found = 0;
53629 + if (r->allowed_ips != NULL) {
53630 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53631 + if ((ntohl(curr_ip) & ipp->netmask) ==
53632 + (ntohl(ipp->addr) & ipp->netmask))
53633 + found = 1;
53634 + }
53635 + } else
53636 + found = 2;
53637 + if (!found)
53638 + return 0;
53639 +
53640 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53641 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53642 + *salt = NULL;
53643 + *sum = NULL;
53644 + return 1;
53645 + }
53646 + }
53647 + FOR_EACH_ROLE_END(r)
53648 +
53649 + for (i = 0; i < num_sprole_pws; i++) {
53650 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53651 + *salt = acl_special_roles[i]->salt;
53652 + *sum = acl_special_roles[i]->sum;
53653 + return 1;
53654 + }
53655 + }
53656 +
53657 + return 0;
53658 +}
53659 +
53660 +static void
53661 +assign_special_role(char *rolename)
53662 +{
53663 + struct acl_object_label *obj;
53664 + struct acl_role_label *r;
53665 + struct acl_role_label *assigned = NULL;
53666 + struct task_struct *tsk;
53667 + struct file *filp;
53668 +
53669 + FOR_EACH_ROLE_START(r)
53670 + if (!strcmp(rolename, r->rolename) &&
53671 + (r->roletype & GR_ROLE_SPECIAL)) {
53672 + assigned = r;
53673 + break;
53674 + }
53675 + FOR_EACH_ROLE_END(r)
53676 +
53677 + if (!assigned)
53678 + return;
53679 +
53680 + read_lock(&tasklist_lock);
53681 + read_lock(&grsec_exec_file_lock);
53682 +
53683 + tsk = current->real_parent;
53684 + if (tsk == NULL)
53685 + goto out_unlock;
53686 +
53687 + filp = tsk->exec_file;
53688 + if (filp == NULL)
53689 + goto out_unlock;
53690 +
53691 + tsk->is_writable = 0;
53692 +
53693 + tsk->acl_sp_role = 1;
53694 + tsk->acl_role_id = ++acl_sp_role_value;
53695 + tsk->role = assigned;
53696 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53697 +
53698 + /* ignore additional mmap checks for processes that are writable
53699 + by the default ACL */
53700 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53701 + if (unlikely(obj->mode & GR_WRITE))
53702 + tsk->is_writable = 1;
53703 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53704 + if (unlikely(obj->mode & GR_WRITE))
53705 + tsk->is_writable = 1;
53706 +
53707 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53708 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53709 +#endif
53710 +
53711 +out_unlock:
53712 + read_unlock(&grsec_exec_file_lock);
53713 + read_unlock(&tasklist_lock);
53714 + return;
53715 +}
53716 +
53717 +int gr_check_secure_terminal(struct task_struct *task)
53718 +{
53719 + struct task_struct *p, *p2, *p3;
53720 + struct files_struct *files;
53721 + struct fdtable *fdt;
53722 + struct file *our_file = NULL, *file;
53723 + int i;
53724 +
53725 + if (task->signal->tty == NULL)
53726 + return 1;
53727 +
53728 + files = get_files_struct(task);
53729 + if (files != NULL) {
53730 + rcu_read_lock();
53731 + fdt = files_fdtable(files);
53732 + for (i=0; i < fdt->max_fds; i++) {
53733 + file = fcheck_files(files, i);
53734 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53735 + get_file(file);
53736 + our_file = file;
53737 + }
53738 + }
53739 + rcu_read_unlock();
53740 + put_files_struct(files);
53741 + }
53742 +
53743 + if (our_file == NULL)
53744 + return 1;
53745 +
53746 + read_lock(&tasklist_lock);
53747 + do_each_thread(p2, p) {
53748 + files = get_files_struct(p);
53749 + if (files == NULL ||
53750 + (p->signal && p->signal->tty == task->signal->tty)) {
53751 + if (files != NULL)
53752 + put_files_struct(files);
53753 + continue;
53754 + }
53755 + rcu_read_lock();
53756 + fdt = files_fdtable(files);
53757 + for (i=0; i < fdt->max_fds; i++) {
53758 + file = fcheck_files(files, i);
53759 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53760 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53761 + p3 = task;
53762 + while (p3->pid > 0) {
53763 + if (p3 == p)
53764 + break;
53765 + p3 = p3->real_parent;
53766 + }
53767 + if (p3 == p)
53768 + break;
53769 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53770 + gr_handle_alertkill(p);
53771 + rcu_read_unlock();
53772 + put_files_struct(files);
53773 + read_unlock(&tasklist_lock);
53774 + fput(our_file);
53775 + return 0;
53776 + }
53777 + }
53778 + rcu_read_unlock();
53779 + put_files_struct(files);
53780 + } while_each_thread(p2, p);
53781 + read_unlock(&tasklist_lock);
53782 +
53783 + fput(our_file);
53784 + return 1;
53785 +}
53786 +
53787 +ssize_t
53788 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53789 +{
53790 + struct gr_arg_wrapper uwrap;
53791 + unsigned char *sprole_salt = NULL;
53792 + unsigned char *sprole_sum = NULL;
53793 + int error = sizeof (struct gr_arg_wrapper);
53794 + int error2 = 0;
53795 +
53796 + mutex_lock(&gr_dev_mutex);
53797 +
53798 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53799 + error = -EPERM;
53800 + goto out;
53801 + }
53802 +
53803 + if (count != sizeof (struct gr_arg_wrapper)) {
53804 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53805 + error = -EINVAL;
53806 + goto out;
53807 + }
53808 +
53809 +
53810 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53811 + gr_auth_expires = 0;
53812 + gr_auth_attempts = 0;
53813 + }
53814 +
53815 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53816 + error = -EFAULT;
53817 + goto out;
53818 + }
53819 +
53820 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53821 + error = -EINVAL;
53822 + goto out;
53823 + }
53824 +
53825 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53826 + error = -EFAULT;
53827 + goto out;
53828 + }
53829 +
53830 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53831 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53832 + time_after(gr_auth_expires, get_seconds())) {
53833 + error = -EBUSY;
53834 + goto out;
53835 + }
53836 +
53837 + /* if non-root trying to do anything other than use a special role,
53838 + do not attempt authentication, do not count towards authentication
53839 + locking
53840 + */
53841 +
53842 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53843 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53844 + current_uid()) {
53845 + error = -EPERM;
53846 + goto out;
53847 + }
53848 +
53849 + /* ensure pw and special role name are null terminated */
53850 +
53851 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53852 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53853 +
53854 + /* Okay.
53855 + * We have our enough of the argument structure..(we have yet
53856 + * to copy_from_user the tables themselves) . Copy the tables
53857 + * only if we need them, i.e. for loading operations. */
53858 +
53859 + switch (gr_usermode->mode) {
53860 + case GR_STATUS:
53861 + if (gr_status & GR_READY) {
53862 + error = 1;
53863 + if (!gr_check_secure_terminal(current))
53864 + error = 3;
53865 + } else
53866 + error = 2;
53867 + goto out;
53868 + case GR_SHUTDOWN:
53869 + if ((gr_status & GR_READY)
53870 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53871 + pax_open_kernel();
53872 + gr_status &= ~GR_READY;
53873 + pax_close_kernel();
53874 +
53875 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53876 + free_variables();
53877 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53878 + memset(gr_system_salt, 0, GR_SALT_LEN);
53879 + memset(gr_system_sum, 0, GR_SHA_LEN);
53880 + } else if (gr_status & GR_READY) {
53881 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53882 + error = -EPERM;
53883 + } else {
53884 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53885 + error = -EAGAIN;
53886 + }
53887 + break;
53888 + case GR_ENABLE:
53889 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53890 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53891 + else {
53892 + if (gr_status & GR_READY)
53893 + error = -EAGAIN;
53894 + else
53895 + error = error2;
53896 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53897 + }
53898 + break;
53899 + case GR_RELOAD:
53900 + if (!(gr_status & GR_READY)) {
53901 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53902 + error = -EAGAIN;
53903 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53904 + preempt_disable();
53905 +
53906 + pax_open_kernel();
53907 + gr_status &= ~GR_READY;
53908 + pax_close_kernel();
53909 +
53910 + free_variables();
53911 + if (!(error2 = gracl_init(gr_usermode))) {
53912 + preempt_enable();
53913 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53914 + } else {
53915 + preempt_enable();
53916 + error = error2;
53917 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53918 + }
53919 + } else {
53920 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53921 + error = -EPERM;
53922 + }
53923 + break;
53924 + case GR_SEGVMOD:
53925 + if (unlikely(!(gr_status & GR_READY))) {
53926 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53927 + error = -EAGAIN;
53928 + break;
53929 + }
53930 +
53931 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53932 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53933 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53934 + struct acl_subject_label *segvacl;
53935 + segvacl =
53936 + lookup_acl_subj_label(gr_usermode->segv_inode,
53937 + gr_usermode->segv_device,
53938 + current->role);
53939 + if (segvacl) {
53940 + segvacl->crashes = 0;
53941 + segvacl->expires = 0;
53942 + }
53943 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53944 + gr_remove_uid(gr_usermode->segv_uid);
53945 + }
53946 + } else {
53947 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53948 + error = -EPERM;
53949 + }
53950 + break;
53951 + case GR_SPROLE:
53952 + case GR_SPROLEPAM:
53953 + if (unlikely(!(gr_status & GR_READY))) {
53954 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53955 + error = -EAGAIN;
53956 + break;
53957 + }
53958 +
53959 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53960 + current->role->expires = 0;
53961 + current->role->auth_attempts = 0;
53962 + }
53963 +
53964 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53965 + time_after(current->role->expires, get_seconds())) {
53966 + error = -EBUSY;
53967 + goto out;
53968 + }
53969 +
53970 + if (lookup_special_role_auth
53971 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53972 + && ((!sprole_salt && !sprole_sum)
53973 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53974 + char *p = "";
53975 + assign_special_role(gr_usermode->sp_role);
53976 + read_lock(&tasklist_lock);
53977 + if (current->real_parent)
53978 + p = current->real_parent->role->rolename;
53979 + read_unlock(&tasklist_lock);
53980 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53981 + p, acl_sp_role_value);
53982 + } else {
53983 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53984 + error = -EPERM;
53985 + if(!(current->role->auth_attempts++))
53986 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53987 +
53988 + goto out;
53989 + }
53990 + break;
53991 + case GR_UNSPROLE:
53992 + if (unlikely(!(gr_status & GR_READY))) {
53993 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53994 + error = -EAGAIN;
53995 + break;
53996 + }
53997 +
53998 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53999 + char *p = "";
54000 + int i = 0;
54001 +
54002 + read_lock(&tasklist_lock);
54003 + if (current->real_parent) {
54004 + p = current->real_parent->role->rolename;
54005 + i = current->real_parent->acl_role_id;
54006 + }
54007 + read_unlock(&tasklist_lock);
54008 +
54009 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54010 + gr_set_acls(1);
54011 + } else {
54012 + error = -EPERM;
54013 + goto out;
54014 + }
54015 + break;
54016 + default:
54017 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54018 + error = -EINVAL;
54019 + break;
54020 + }
54021 +
54022 + if (error != -EPERM)
54023 + goto out;
54024 +
54025 + if(!(gr_auth_attempts++))
54026 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54027 +
54028 + out:
54029 + mutex_unlock(&gr_dev_mutex);
54030 + return error;
54031 +}
54032 +
54033 +/* must be called with
54034 + rcu_read_lock();
54035 + read_lock(&tasklist_lock);
54036 + read_lock(&grsec_exec_file_lock);
54037 +*/
54038 +int gr_apply_subject_to_task(struct task_struct *task)
54039 +{
54040 + struct acl_object_label *obj;
54041 + char *tmpname;
54042 + struct acl_subject_label *tmpsubj;
54043 + struct file *filp;
54044 + struct name_entry *nmatch;
54045 +
54046 + filp = task->exec_file;
54047 + if (filp == NULL)
54048 + return 0;
54049 +
54050 + /* the following is to apply the correct subject
54051 + on binaries running when the RBAC system
54052 + is enabled, when the binaries have been
54053 + replaced or deleted since their execution
54054 + -----
54055 + when the RBAC system starts, the inode/dev
54056 + from exec_file will be one the RBAC system
54057 + is unaware of. It only knows the inode/dev
54058 + of the present file on disk, or the absence
54059 + of it.
54060 + */
54061 + preempt_disable();
54062 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54063 +
54064 + nmatch = lookup_name_entry(tmpname);
54065 + preempt_enable();
54066 + tmpsubj = NULL;
54067 + if (nmatch) {
54068 + if (nmatch->deleted)
54069 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54070 + else
54071 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54072 + if (tmpsubj != NULL)
54073 + task->acl = tmpsubj;
54074 + }
54075 + if (tmpsubj == NULL)
54076 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54077 + task->role);
54078 + if (task->acl) {
54079 + task->is_writable = 0;
54080 + /* ignore additional mmap checks for processes that are writable
54081 + by the default ACL */
54082 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54083 + if (unlikely(obj->mode & GR_WRITE))
54084 + task->is_writable = 1;
54085 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54086 + if (unlikely(obj->mode & GR_WRITE))
54087 + task->is_writable = 1;
54088 +
54089 + gr_set_proc_res(task);
54090 +
54091 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54092 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54093 +#endif
54094 + } else {
54095 + return 1;
54096 + }
54097 +
54098 + return 0;
54099 +}
54100 +
54101 +int
54102 +gr_set_acls(const int type)
54103 +{
54104 + struct task_struct *task, *task2;
54105 + struct acl_role_label *role = current->role;
54106 + __u16 acl_role_id = current->acl_role_id;
54107 + const struct cred *cred;
54108 + int ret;
54109 +
54110 + rcu_read_lock();
54111 + read_lock(&tasklist_lock);
54112 + read_lock(&grsec_exec_file_lock);
54113 + do_each_thread(task2, task) {
54114 + /* check to see if we're called from the exit handler,
54115 + if so, only replace ACLs that have inherited the admin
54116 + ACL */
54117 +
54118 + if (type && (task->role != role ||
54119 + task->acl_role_id != acl_role_id))
54120 + continue;
54121 +
54122 + task->acl_role_id = 0;
54123 + task->acl_sp_role = 0;
54124 +
54125 + if (task->exec_file) {
54126 + cred = __task_cred(task);
54127 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54128 + ret = gr_apply_subject_to_task(task);
54129 + if (ret) {
54130 + read_unlock(&grsec_exec_file_lock);
54131 + read_unlock(&tasklist_lock);
54132 + rcu_read_unlock();
54133 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54134 + return ret;
54135 + }
54136 + } else {
54137 + // it's a kernel process
54138 + task->role = kernel_role;
54139 + task->acl = kernel_role->root_label;
54140 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54141 + task->acl->mode &= ~GR_PROCFIND;
54142 +#endif
54143 + }
54144 + } while_each_thread(task2, task);
54145 + read_unlock(&grsec_exec_file_lock);
54146 + read_unlock(&tasklist_lock);
54147 + rcu_read_unlock();
54148 +
54149 + return 0;
54150 +}
54151 +
54152 +void
54153 +gr_learn_resource(const struct task_struct *task,
54154 + const int res, const unsigned long wanted, const int gt)
54155 +{
54156 + struct acl_subject_label *acl;
54157 + const struct cred *cred;
54158 +
54159 + if (unlikely((gr_status & GR_READY) &&
54160 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54161 + goto skip_reslog;
54162 +
54163 +#ifdef CONFIG_GRKERNSEC_RESLOG
54164 + gr_log_resource(task, res, wanted, gt);
54165 +#endif
54166 + skip_reslog:
54167 +
54168 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54169 + return;
54170 +
54171 + acl = task->acl;
54172 +
54173 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54174 + !(acl->resmask & (1 << (unsigned short) res))))
54175 + return;
54176 +
54177 + if (wanted >= acl->res[res].rlim_cur) {
54178 + unsigned long res_add;
54179 +
54180 + res_add = wanted;
54181 + switch (res) {
54182 + case RLIMIT_CPU:
54183 + res_add += GR_RLIM_CPU_BUMP;
54184 + break;
54185 + case RLIMIT_FSIZE:
54186 + res_add += GR_RLIM_FSIZE_BUMP;
54187 + break;
54188 + case RLIMIT_DATA:
54189 + res_add += GR_RLIM_DATA_BUMP;
54190 + break;
54191 + case RLIMIT_STACK:
54192 + res_add += GR_RLIM_STACK_BUMP;
54193 + break;
54194 + case RLIMIT_CORE:
54195 + res_add += GR_RLIM_CORE_BUMP;
54196 + break;
54197 + case RLIMIT_RSS:
54198 + res_add += GR_RLIM_RSS_BUMP;
54199 + break;
54200 + case RLIMIT_NPROC:
54201 + res_add += GR_RLIM_NPROC_BUMP;
54202 + break;
54203 + case RLIMIT_NOFILE:
54204 + res_add += GR_RLIM_NOFILE_BUMP;
54205 + break;
54206 + case RLIMIT_MEMLOCK:
54207 + res_add += GR_RLIM_MEMLOCK_BUMP;
54208 + break;
54209 + case RLIMIT_AS:
54210 + res_add += GR_RLIM_AS_BUMP;
54211 + break;
54212 + case RLIMIT_LOCKS:
54213 + res_add += GR_RLIM_LOCKS_BUMP;
54214 + break;
54215 + case RLIMIT_SIGPENDING:
54216 + res_add += GR_RLIM_SIGPENDING_BUMP;
54217 + break;
54218 + case RLIMIT_MSGQUEUE:
54219 + res_add += GR_RLIM_MSGQUEUE_BUMP;
54220 + break;
54221 + case RLIMIT_NICE:
54222 + res_add += GR_RLIM_NICE_BUMP;
54223 + break;
54224 + case RLIMIT_RTPRIO:
54225 + res_add += GR_RLIM_RTPRIO_BUMP;
54226 + break;
54227 + case RLIMIT_RTTIME:
54228 + res_add += GR_RLIM_RTTIME_BUMP;
54229 + break;
54230 + }
54231 +
54232 + acl->res[res].rlim_cur = res_add;
54233 +
54234 + if (wanted > acl->res[res].rlim_max)
54235 + acl->res[res].rlim_max = res_add;
54236 +
54237 + /* only log the subject filename, since resource logging is supported for
54238 + single-subject learning only */
54239 + rcu_read_lock();
54240 + cred = __task_cred(task);
54241 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54242 + task->role->roletype, cred->uid, cred->gid, acl->filename,
54243 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54244 + "", (unsigned long) res, &task->signal->saved_ip);
54245 + rcu_read_unlock();
54246 + }
54247 +
54248 + return;
54249 +}
54250 +
54251 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54252 +void
54253 +pax_set_initial_flags(struct linux_binprm *bprm)
54254 +{
54255 + struct task_struct *task = current;
54256 + struct acl_subject_label *proc;
54257 + unsigned long flags;
54258 +
54259 + if (unlikely(!(gr_status & GR_READY)))
54260 + return;
54261 +
54262 + flags = pax_get_flags(task);
54263 +
54264 + proc = task->acl;
54265 +
54266 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54267 + flags &= ~MF_PAX_PAGEEXEC;
54268 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54269 + flags &= ~MF_PAX_SEGMEXEC;
54270 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54271 + flags &= ~MF_PAX_RANDMMAP;
54272 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54273 + flags &= ~MF_PAX_EMUTRAMP;
54274 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54275 + flags &= ~MF_PAX_MPROTECT;
54276 +
54277 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54278 + flags |= MF_PAX_PAGEEXEC;
54279 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54280 + flags |= MF_PAX_SEGMEXEC;
54281 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54282 + flags |= MF_PAX_RANDMMAP;
54283 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54284 + flags |= MF_PAX_EMUTRAMP;
54285 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54286 + flags |= MF_PAX_MPROTECT;
54287 +
54288 + pax_set_flags(task, flags);
54289 +
54290 + return;
54291 +}
54292 +#endif
54293 +
54294 +#ifdef CONFIG_SYSCTL
54295 +/* Eric Biederman likes breaking userland ABI and every inode-based security
54296 + system to save 35kb of memory */
54297 +
54298 +/* we modify the passed in filename, but adjust it back before returning */
54299 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54300 +{
54301 + struct name_entry *nmatch;
54302 + char *p, *lastp = NULL;
54303 + struct acl_object_label *obj = NULL, *tmp;
54304 + struct acl_subject_label *tmpsubj;
54305 + char c = '\0';
54306 +
54307 + read_lock(&gr_inode_lock);
54308 +
54309 + p = name + len - 1;
54310 + do {
54311 + nmatch = lookup_name_entry(name);
54312 + if (lastp != NULL)
54313 + *lastp = c;
54314 +
54315 + if (nmatch == NULL)
54316 + goto next_component;
54317 + tmpsubj = current->acl;
54318 + do {
54319 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54320 + if (obj != NULL) {
54321 + tmp = obj->globbed;
54322 + while (tmp) {
54323 + if (!glob_match(tmp->filename, name)) {
54324 + obj = tmp;
54325 + goto found_obj;
54326 + }
54327 + tmp = tmp->next;
54328 + }
54329 + goto found_obj;
54330 + }
54331 + } while ((tmpsubj = tmpsubj->parent_subject));
54332 +next_component:
54333 + /* end case */
54334 + if (p == name)
54335 + break;
54336 +
54337 + while (*p != '/')
54338 + p--;
54339 + if (p == name)
54340 + lastp = p + 1;
54341 + else {
54342 + lastp = p;
54343 + p--;
54344 + }
54345 + c = *lastp;
54346 + *lastp = '\0';
54347 + } while (1);
54348 +found_obj:
54349 + read_unlock(&gr_inode_lock);
54350 + /* obj returned will always be non-null */
54351 + return obj;
54352 +}
54353 +
54354 +/* returns 0 when allowing, non-zero on error
54355 + op of 0 is used for readdir, so we don't log the names of hidden files
54356 +*/
54357 +__u32
54358 +gr_handle_sysctl(const struct ctl_table *table, const int op)
54359 +{
54360 + struct ctl_table *tmp;
54361 + const char *proc_sys = "/proc/sys";
54362 + char *path;
54363 + struct acl_object_label *obj;
54364 + unsigned short len = 0, pos = 0, depth = 0, i;
54365 + __u32 err = 0;
54366 + __u32 mode = 0;
54367 +
54368 + if (unlikely(!(gr_status & GR_READY)))
54369 + return 0;
54370 +
54371 + /* for now, ignore operations on non-sysctl entries if it's not a
54372 + readdir*/
54373 + if (table->child != NULL && op != 0)
54374 + return 0;
54375 +
54376 + mode |= GR_FIND;
54377 + /* it's only a read if it's an entry, read on dirs is for readdir */
54378 + if (op & MAY_READ)
54379 + mode |= GR_READ;
54380 + if (op & MAY_WRITE)
54381 + mode |= GR_WRITE;
54382 +
54383 + preempt_disable();
54384 +
54385 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54386 +
54387 + /* it's only a read/write if it's an actual entry, not a dir
54388 + (which are opened for readdir)
54389 + */
54390 +
54391 + /* convert the requested sysctl entry into a pathname */
54392 +
54393 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54394 + len += strlen(tmp->procname);
54395 + len++;
54396 + depth++;
54397 + }
54398 +
54399 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54400 + /* deny */
54401 + goto out;
54402 + }
54403 +
54404 + memset(path, 0, PAGE_SIZE);
54405 +
54406 + memcpy(path, proc_sys, strlen(proc_sys));
54407 +
54408 + pos += strlen(proc_sys);
54409 +
54410 + for (; depth > 0; depth--) {
54411 + path[pos] = '/';
54412 + pos++;
54413 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54414 + if (depth == i) {
54415 + memcpy(path + pos, tmp->procname,
54416 + strlen(tmp->procname));
54417 + pos += strlen(tmp->procname);
54418 + }
54419 + i++;
54420 + }
54421 + }
54422 +
54423 + obj = gr_lookup_by_name(path, pos);
54424 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54425 +
54426 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54427 + ((err & mode) != mode))) {
54428 + __u32 new_mode = mode;
54429 +
54430 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54431 +
54432 + err = 0;
54433 + gr_log_learn_sysctl(path, new_mode);
54434 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54435 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54436 + err = -ENOENT;
54437 + } else if (!(err & GR_FIND)) {
54438 + err = -ENOENT;
54439 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54440 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54441 + path, (mode & GR_READ) ? " reading" : "",
54442 + (mode & GR_WRITE) ? " writing" : "");
54443 + err = -EACCES;
54444 + } else if ((err & mode) != mode) {
54445 + err = -EACCES;
54446 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54447 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54448 + path, (mode & GR_READ) ? " reading" : "",
54449 + (mode & GR_WRITE) ? " writing" : "");
54450 + err = 0;
54451 + } else
54452 + err = 0;
54453 +
54454 + out:
54455 + preempt_enable();
54456 +
54457 + return err;
54458 +}
54459 +#endif
54460 +
54461 +int
54462 +gr_handle_proc_ptrace(struct task_struct *task)
54463 +{
54464 + struct file *filp;
54465 + struct task_struct *tmp = task;
54466 + struct task_struct *curtemp = current;
54467 + __u32 retmode;
54468 +
54469 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54470 + if (unlikely(!(gr_status & GR_READY)))
54471 + return 0;
54472 +#endif
54473 +
54474 + read_lock(&tasklist_lock);
54475 + read_lock(&grsec_exec_file_lock);
54476 + filp = task->exec_file;
54477 +
54478 + while (tmp->pid > 0) {
54479 + if (tmp == curtemp)
54480 + break;
54481 + tmp = tmp->real_parent;
54482 + }
54483 +
54484 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54485 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54486 + read_unlock(&grsec_exec_file_lock);
54487 + read_unlock(&tasklist_lock);
54488 + return 1;
54489 + }
54490 +
54491 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54492 + if (!(gr_status & GR_READY)) {
54493 + read_unlock(&grsec_exec_file_lock);
54494 + read_unlock(&tasklist_lock);
54495 + return 0;
54496 + }
54497 +#endif
54498 +
54499 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54500 + read_unlock(&grsec_exec_file_lock);
54501 + read_unlock(&tasklist_lock);
54502 +
54503 + if (retmode & GR_NOPTRACE)
54504 + return 1;
54505 +
54506 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54507 + && (current->acl != task->acl || (current->acl != current->role->root_label
54508 + && current->pid != task->pid)))
54509 + return 1;
54510 +
54511 + return 0;
54512 +}
54513 +
54514 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54515 +{
54516 + if (unlikely(!(gr_status & GR_READY)))
54517 + return;
54518 +
54519 + if (!(current->role->roletype & GR_ROLE_GOD))
54520 + return;
54521 +
54522 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54523 + p->role->rolename, gr_task_roletype_to_char(p),
54524 + p->acl->filename);
54525 +}
54526 +
54527 +int
54528 +gr_handle_ptrace(struct task_struct *task, const long request)
54529 +{
54530 + struct task_struct *tmp = task;
54531 + struct task_struct *curtemp = current;
54532 + __u32 retmode;
54533 +
54534 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54535 + if (unlikely(!(gr_status & GR_READY)))
54536 + return 0;
54537 +#endif
54538 +
54539 + read_lock(&tasklist_lock);
54540 + while (tmp->pid > 0) {
54541 + if (tmp == curtemp)
54542 + break;
54543 + tmp = tmp->real_parent;
54544 + }
54545 +
54546 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54547 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54548 + read_unlock(&tasklist_lock);
54549 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54550 + return 1;
54551 + }
54552 + read_unlock(&tasklist_lock);
54553 +
54554 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54555 + if (!(gr_status & GR_READY))
54556 + return 0;
54557 +#endif
54558 +
54559 + read_lock(&grsec_exec_file_lock);
54560 + if (unlikely(!task->exec_file)) {
54561 + read_unlock(&grsec_exec_file_lock);
54562 + return 0;
54563 + }
54564 +
54565 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54566 + read_unlock(&grsec_exec_file_lock);
54567 +
54568 + if (retmode & GR_NOPTRACE) {
54569 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54570 + return 1;
54571 + }
54572 +
54573 + if (retmode & GR_PTRACERD) {
54574 + switch (request) {
54575 + case PTRACE_SEIZE:
54576 + case PTRACE_POKETEXT:
54577 + case PTRACE_POKEDATA:
54578 + case PTRACE_POKEUSR:
54579 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54580 + case PTRACE_SETREGS:
54581 + case PTRACE_SETFPREGS:
54582 +#endif
54583 +#ifdef CONFIG_X86
54584 + case PTRACE_SETFPXREGS:
54585 +#endif
54586 +#ifdef CONFIG_ALTIVEC
54587 + case PTRACE_SETVRREGS:
54588 +#endif
54589 + return 1;
54590 + default:
54591 + return 0;
54592 + }
54593 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54594 + !(current->role->roletype & GR_ROLE_GOD) &&
54595 + (current->acl != task->acl)) {
54596 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54597 + return 1;
54598 + }
54599 +
54600 + return 0;
54601 +}
54602 +
54603 +static int is_writable_mmap(const struct file *filp)
54604 +{
54605 + struct task_struct *task = current;
54606 + struct acl_object_label *obj, *obj2;
54607 +
54608 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54609 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54610 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54611 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54612 + task->role->root_label);
54613 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54614 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54615 + return 1;
54616 + }
54617 + }
54618 + return 0;
54619 +}
54620 +
54621 +int
54622 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54623 +{
54624 + __u32 mode;
54625 +
54626 + if (unlikely(!file || !(prot & PROT_EXEC)))
54627 + return 1;
54628 +
54629 + if (is_writable_mmap(file))
54630 + return 0;
54631 +
54632 + mode =
54633 + gr_search_file(file->f_path.dentry,
54634 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54635 + file->f_path.mnt);
54636 +
54637 + if (!gr_tpe_allow(file))
54638 + return 0;
54639 +
54640 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54641 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54642 + return 0;
54643 + } else if (unlikely(!(mode & GR_EXEC))) {
54644 + return 0;
54645 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54646 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54647 + return 1;
54648 + }
54649 +
54650 + return 1;
54651 +}
54652 +
54653 +int
54654 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54655 +{
54656 + __u32 mode;
54657 +
54658 + if (unlikely(!file || !(prot & PROT_EXEC)))
54659 + return 1;
54660 +
54661 + if (is_writable_mmap(file))
54662 + return 0;
54663 +
54664 + mode =
54665 + gr_search_file(file->f_path.dentry,
54666 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54667 + file->f_path.mnt);
54668 +
54669 + if (!gr_tpe_allow(file))
54670 + return 0;
54671 +
54672 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54673 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54674 + return 0;
54675 + } else if (unlikely(!(mode & GR_EXEC))) {
54676 + return 0;
54677 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54678 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54679 + return 1;
54680 + }
54681 +
54682 + return 1;
54683 +}
54684 +
54685 +void
54686 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54687 +{
54688 + unsigned long runtime;
54689 + unsigned long cputime;
54690 + unsigned int wday, cday;
54691 + __u8 whr, chr;
54692 + __u8 wmin, cmin;
54693 + __u8 wsec, csec;
54694 + struct timespec timeval;
54695 +
54696 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54697 + !(task->acl->mode & GR_PROCACCT)))
54698 + return;
54699 +
54700 + do_posix_clock_monotonic_gettime(&timeval);
54701 + runtime = timeval.tv_sec - task->start_time.tv_sec;
54702 + wday = runtime / (3600 * 24);
54703 + runtime -= wday * (3600 * 24);
54704 + whr = runtime / 3600;
54705 + runtime -= whr * 3600;
54706 + wmin = runtime / 60;
54707 + runtime -= wmin * 60;
54708 + wsec = runtime;
54709 +
54710 + cputime = (task->utime + task->stime) / HZ;
54711 + cday = cputime / (3600 * 24);
54712 + cputime -= cday * (3600 * 24);
54713 + chr = cputime / 3600;
54714 + cputime -= chr * 3600;
54715 + cmin = cputime / 60;
54716 + cputime -= cmin * 60;
54717 + csec = cputime;
54718 +
54719 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54720 +
54721 + return;
54722 +}
54723 +
54724 +void gr_set_kernel_label(struct task_struct *task)
54725 +{
54726 + if (gr_status & GR_READY) {
54727 + task->role = kernel_role;
54728 + task->acl = kernel_role->root_label;
54729 + }
54730 + return;
54731 +}
54732 +
54733 +#ifdef CONFIG_TASKSTATS
54734 +int gr_is_taskstats_denied(int pid)
54735 +{
54736 + struct task_struct *task;
54737 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54738 + const struct cred *cred;
54739 +#endif
54740 + int ret = 0;
54741 +
54742 + /* restrict taskstats viewing to un-chrooted root users
54743 + who have the 'view' subject flag if the RBAC system is enabled
54744 + */
54745 +
54746 + rcu_read_lock();
54747 + read_lock(&tasklist_lock);
54748 + task = find_task_by_vpid(pid);
54749 + if (task) {
54750 +#ifdef CONFIG_GRKERNSEC_CHROOT
54751 + if (proc_is_chrooted(task))
54752 + ret = -EACCES;
54753 +#endif
54754 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54755 + cred = __task_cred(task);
54756 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54757 + if (cred->uid != 0)
54758 + ret = -EACCES;
54759 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54760 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54761 + ret = -EACCES;
54762 +#endif
54763 +#endif
54764 + if (gr_status & GR_READY) {
54765 + if (!(task->acl->mode & GR_VIEW))
54766 + ret = -EACCES;
54767 + }
54768 + } else
54769 + ret = -ENOENT;
54770 +
54771 + read_unlock(&tasklist_lock);
54772 + rcu_read_unlock();
54773 +
54774 + return ret;
54775 +}
54776 +#endif
54777 +
54778 +/* AUXV entries are filled via a descendant of search_binary_handler
54779 + after we've already applied the subject for the target
54780 +*/
54781 +int gr_acl_enable_at_secure(void)
54782 +{
54783 + if (unlikely(!(gr_status & GR_READY)))
54784 + return 0;
54785 +
54786 + if (current->acl->mode & GR_ATSECURE)
54787 + return 1;
54788 +
54789 + return 0;
54790 +}
54791 +
54792 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54793 +{
54794 + struct task_struct *task = current;
54795 + struct dentry *dentry = file->f_path.dentry;
54796 + struct vfsmount *mnt = file->f_path.mnt;
54797 + struct acl_object_label *obj, *tmp;
54798 + struct acl_subject_label *subj;
54799 + unsigned int bufsize;
54800 + int is_not_root;
54801 + char *path;
54802 + dev_t dev = __get_dev(dentry);
54803 +
54804 + if (unlikely(!(gr_status & GR_READY)))
54805 + return 1;
54806 +
54807 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54808 + return 1;
54809 +
54810 + /* ignore Eric Biederman */
54811 + if (IS_PRIVATE(dentry->d_inode))
54812 + return 1;
54813 +
54814 + subj = task->acl;
54815 + do {
54816 + obj = lookup_acl_obj_label(ino, dev, subj);
54817 + if (obj != NULL)
54818 + return (obj->mode & GR_FIND) ? 1 : 0;
54819 + } while ((subj = subj->parent_subject));
54820 +
54821 + /* this is purely an optimization since we're looking for an object
54822 + for the directory we're doing a readdir on
54823 + if it's possible for any globbed object to match the entry we're
54824 + filling into the directory, then the object we find here will be
54825 + an anchor point with attached globbed objects
54826 + */
54827 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54828 + if (obj->globbed == NULL)
54829 + return (obj->mode & GR_FIND) ? 1 : 0;
54830 +
54831 + is_not_root = ((obj->filename[0] == '/') &&
54832 + (obj->filename[1] == '\0')) ? 0 : 1;
54833 + bufsize = PAGE_SIZE - namelen - is_not_root;
54834 +
54835 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54836 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54837 + return 1;
54838 +
54839 + preempt_disable();
54840 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54841 + bufsize);
54842 +
54843 + bufsize = strlen(path);
54844 +
54845 + /* if base is "/", don't append an additional slash */
54846 + if (is_not_root)
54847 + *(path + bufsize) = '/';
54848 + memcpy(path + bufsize + is_not_root, name, namelen);
54849 + *(path + bufsize + namelen + is_not_root) = '\0';
54850 +
54851 + tmp = obj->globbed;
54852 + while (tmp) {
54853 + if (!glob_match(tmp->filename, path)) {
54854 + preempt_enable();
54855 + return (tmp->mode & GR_FIND) ? 1 : 0;
54856 + }
54857 + tmp = tmp->next;
54858 + }
54859 + preempt_enable();
54860 + return (obj->mode & GR_FIND) ? 1 : 0;
54861 +}
54862 +
54863 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54864 +EXPORT_SYMBOL(gr_acl_is_enabled);
54865 +#endif
54866 +EXPORT_SYMBOL(gr_learn_resource);
54867 +EXPORT_SYMBOL(gr_set_kernel_label);
54868 +#ifdef CONFIG_SECURITY
54869 +EXPORT_SYMBOL(gr_check_user_change);
54870 +EXPORT_SYMBOL(gr_check_group_change);
54871 +#endif
54872 +
54873 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54874 new file mode 100644
54875 index 0000000..34fefda
54876 --- /dev/null
54877 +++ b/grsecurity/gracl_alloc.c
54878 @@ -0,0 +1,105 @@
54879 +#include <linux/kernel.h>
54880 +#include <linux/mm.h>
54881 +#include <linux/slab.h>
54882 +#include <linux/vmalloc.h>
54883 +#include <linux/gracl.h>
54884 +#include <linux/grsecurity.h>
54885 +
54886 +static unsigned long alloc_stack_next = 1;
54887 +static unsigned long alloc_stack_size = 1;
54888 +static void **alloc_stack;
54889 +
54890 +static __inline__ int
54891 +alloc_pop(void)
54892 +{
54893 + if (alloc_stack_next == 1)
54894 + return 0;
54895 +
54896 + kfree(alloc_stack[alloc_stack_next - 2]);
54897 +
54898 + alloc_stack_next--;
54899 +
54900 + return 1;
54901 +}
54902 +
54903 +static __inline__ int
54904 +alloc_push(void *buf)
54905 +{
54906 + if (alloc_stack_next >= alloc_stack_size)
54907 + return 1;
54908 +
54909 + alloc_stack[alloc_stack_next - 1] = buf;
54910 +
54911 + alloc_stack_next++;
54912 +
54913 + return 0;
54914 +}
54915 +
54916 +void *
54917 +acl_alloc(unsigned long len)
54918 +{
54919 + void *ret = NULL;
54920 +
54921 + if (!len || len > PAGE_SIZE)
54922 + goto out;
54923 +
54924 + ret = kmalloc(len, GFP_KERNEL);
54925 +
54926 + if (ret) {
54927 + if (alloc_push(ret)) {
54928 + kfree(ret);
54929 + ret = NULL;
54930 + }
54931 + }
54932 +
54933 +out:
54934 + return ret;
54935 +}
54936 +
54937 +void *
54938 +acl_alloc_num(unsigned long num, unsigned long len)
54939 +{
54940 + if (!len || (num > (PAGE_SIZE / len)))
54941 + return NULL;
54942 +
54943 + return acl_alloc(num * len);
54944 +}
54945 +
54946 +void
54947 +acl_free_all(void)
54948 +{
54949 + if (gr_acl_is_enabled() || !alloc_stack)
54950 + return;
54951 +
54952 + while (alloc_pop()) ;
54953 +
54954 + if (alloc_stack) {
54955 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54956 + kfree(alloc_stack);
54957 + else
54958 + vfree(alloc_stack);
54959 + }
54960 +
54961 + alloc_stack = NULL;
54962 + alloc_stack_size = 1;
54963 + alloc_stack_next = 1;
54964 +
54965 + return;
54966 +}
54967 +
54968 +int
54969 +acl_alloc_stack_init(unsigned long size)
54970 +{
54971 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54972 + alloc_stack =
54973 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54974 + else
54975 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54976 +
54977 + alloc_stack_size = size;
54978 +
54979 + if (!alloc_stack)
54980 + return 0;
54981 + else
54982 + return 1;
54983 +}
54984 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54985 new file mode 100644
54986 index 0000000..955ddfb
54987 --- /dev/null
54988 +++ b/grsecurity/gracl_cap.c
54989 @@ -0,0 +1,101 @@
54990 +#include <linux/kernel.h>
54991 +#include <linux/module.h>
54992 +#include <linux/sched.h>
54993 +#include <linux/gracl.h>
54994 +#include <linux/grsecurity.h>
54995 +#include <linux/grinternal.h>
54996 +
54997 +extern const char *captab_log[];
54998 +extern int captab_log_entries;
54999 +
55000 +int
55001 +gr_acl_is_capable(const int cap)
55002 +{
55003 + struct task_struct *task = current;
55004 + const struct cred *cred = current_cred();
55005 + struct acl_subject_label *curracl;
55006 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55007 + kernel_cap_t cap_audit = __cap_empty_set;
55008 +
55009 + if (!gr_acl_is_enabled())
55010 + return 1;
55011 +
55012 + curracl = task->acl;
55013 +
55014 + cap_drop = curracl->cap_lower;
55015 + cap_mask = curracl->cap_mask;
55016 + cap_audit = curracl->cap_invert_audit;
55017 +
55018 + while ((curracl = curracl->parent_subject)) {
55019 + /* if the cap isn't specified in the current computed mask but is specified in the
55020 + current level subject, and is lowered in the current level subject, then add
55021 + it to the set of dropped capabilities
55022 + otherwise, add the current level subject's mask to the current computed mask
55023 + */
55024 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55025 + cap_raise(cap_mask, cap);
55026 + if (cap_raised(curracl->cap_lower, cap))
55027 + cap_raise(cap_drop, cap);
55028 + if (cap_raised(curracl->cap_invert_audit, cap))
55029 + cap_raise(cap_audit, cap);
55030 + }
55031 + }
55032 +
55033 + if (!cap_raised(cap_drop, cap)) {
55034 + if (cap_raised(cap_audit, cap))
55035 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55036 + return 1;
55037 + }
55038 +
55039 + curracl = task->acl;
55040 +
55041 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55042 + && cap_raised(cred->cap_effective, cap)) {
55043 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55044 + task->role->roletype, cred->uid,
55045 + cred->gid, task->exec_file ?
55046 + gr_to_filename(task->exec_file->f_path.dentry,
55047 + task->exec_file->f_path.mnt) : curracl->filename,
55048 + curracl->filename, 0UL,
55049 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55050 + return 1;
55051 + }
55052 +
55053 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55054 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55055 + return 0;
55056 +}
55057 +
55058 +int
55059 +gr_acl_is_capable_nolog(const int cap)
55060 +{
55061 + struct acl_subject_label *curracl;
55062 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55063 +
55064 + if (!gr_acl_is_enabled())
55065 + return 1;
55066 +
55067 + curracl = current->acl;
55068 +
55069 + cap_drop = curracl->cap_lower;
55070 + cap_mask = curracl->cap_mask;
55071 +
55072 + while ((curracl = curracl->parent_subject)) {
55073 + /* if the cap isn't specified in the current computed mask but is specified in the
55074 + current level subject, and is lowered in the current level subject, then add
55075 + it to the set of dropped capabilities
55076 + otherwise, add the current level subject's mask to the current computed mask
55077 + */
55078 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55079 + cap_raise(cap_mask, cap);
55080 + if (cap_raised(curracl->cap_lower, cap))
55081 + cap_raise(cap_drop, cap);
55082 + }
55083 + }
55084 +
55085 + if (!cap_raised(cap_drop, cap))
55086 + return 1;
55087 +
55088 + return 0;
55089 +}
55090 +
55091 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55092 new file mode 100644
55093 index 0000000..4eda5c3
55094 --- /dev/null
55095 +++ b/grsecurity/gracl_fs.c
55096 @@ -0,0 +1,433 @@
55097 +#include <linux/kernel.h>
55098 +#include <linux/sched.h>
55099 +#include <linux/types.h>
55100 +#include <linux/fs.h>
55101 +#include <linux/file.h>
55102 +#include <linux/stat.h>
55103 +#include <linux/grsecurity.h>
55104 +#include <linux/grinternal.h>
55105 +#include <linux/gracl.h>
55106 +
55107 +__u32
55108 +gr_acl_handle_hidden_file(const struct dentry * dentry,
55109 + const struct vfsmount * mnt)
55110 +{
55111 + __u32 mode;
55112 +
55113 + if (unlikely(!dentry->d_inode))
55114 + return GR_FIND;
55115 +
55116 + mode =
55117 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55118 +
55119 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55120 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55121 + return mode;
55122 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55123 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55124 + return 0;
55125 + } else if (unlikely(!(mode & GR_FIND)))
55126 + return 0;
55127 +
55128 + return GR_FIND;
55129 +}
55130 +
55131 +__u32
55132 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55133 + int acc_mode)
55134 +{
55135 + __u32 reqmode = GR_FIND;
55136 + __u32 mode;
55137 +
55138 + if (unlikely(!dentry->d_inode))
55139 + return reqmode;
55140 +
55141 + if (acc_mode & MAY_APPEND)
55142 + reqmode |= GR_APPEND;
55143 + else if (acc_mode & MAY_WRITE)
55144 + reqmode |= GR_WRITE;
55145 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55146 + reqmode |= GR_READ;
55147 +
55148 + mode =
55149 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55150 + mnt);
55151 +
55152 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55153 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55154 + reqmode & GR_READ ? " reading" : "",
55155 + reqmode & GR_WRITE ? " writing" : reqmode &
55156 + GR_APPEND ? " appending" : "");
55157 + return reqmode;
55158 + } else
55159 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55160 + {
55161 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55162 + reqmode & GR_READ ? " reading" : "",
55163 + reqmode & GR_WRITE ? " writing" : reqmode &
55164 + GR_APPEND ? " appending" : "");
55165 + return 0;
55166 + } else if (unlikely((mode & reqmode) != reqmode))
55167 + return 0;
55168 +
55169 + return reqmode;
55170 +}
55171 +
55172 +__u32
55173 +gr_acl_handle_creat(const struct dentry * dentry,
55174 + const struct dentry * p_dentry,
55175 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55176 + const int imode)
55177 +{
55178 + __u32 reqmode = GR_WRITE | GR_CREATE;
55179 + __u32 mode;
55180 +
55181 + if (acc_mode & MAY_APPEND)
55182 + reqmode |= GR_APPEND;
55183 + // if a directory was required or the directory already exists, then
55184 + // don't count this open as a read
55185 + if ((acc_mode & MAY_READ) &&
55186 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55187 + reqmode |= GR_READ;
55188 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55189 + reqmode |= GR_SETID;
55190 +
55191 + mode =
55192 + gr_check_create(dentry, p_dentry, p_mnt,
55193 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55194 +
55195 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55196 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55197 + reqmode & GR_READ ? " reading" : "",
55198 + reqmode & GR_WRITE ? " writing" : reqmode &
55199 + GR_APPEND ? " appending" : "");
55200 + return reqmode;
55201 + } else
55202 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55203 + {
55204 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55205 + reqmode & GR_READ ? " reading" : "",
55206 + reqmode & GR_WRITE ? " writing" : reqmode &
55207 + GR_APPEND ? " appending" : "");
55208 + return 0;
55209 + } else if (unlikely((mode & reqmode) != reqmode))
55210 + return 0;
55211 +
55212 + return reqmode;
55213 +}
55214 +
55215 +__u32
55216 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55217 + const int fmode)
55218 +{
55219 + __u32 mode, reqmode = GR_FIND;
55220 +
55221 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55222 + reqmode |= GR_EXEC;
55223 + if (fmode & S_IWOTH)
55224 + reqmode |= GR_WRITE;
55225 + if (fmode & S_IROTH)
55226 + reqmode |= GR_READ;
55227 +
55228 + mode =
55229 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55230 + mnt);
55231 +
55232 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55233 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55234 + reqmode & GR_READ ? " reading" : "",
55235 + reqmode & GR_WRITE ? " writing" : "",
55236 + reqmode & GR_EXEC ? " executing" : "");
55237 + return reqmode;
55238 + } else
55239 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55240 + {
55241 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55242 + reqmode & GR_READ ? " reading" : "",
55243 + reqmode & GR_WRITE ? " writing" : "",
55244 + reqmode & GR_EXEC ? " executing" : "");
55245 + return 0;
55246 + } else if (unlikely((mode & reqmode) != reqmode))
55247 + return 0;
55248 +
55249 + return reqmode;
55250 +}
55251 +
55252 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55253 +{
55254 + __u32 mode;
55255 +
55256 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55257 +
55258 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55259 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55260 + return mode;
55261 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55262 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55263 + return 0;
55264 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55265 + return 0;
55266 +
55267 + return (reqmode);
55268 +}
55269 +
55270 +__u32
55271 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55272 +{
55273 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55274 +}
55275 +
55276 +__u32
55277 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55278 +{
55279 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55280 +}
55281 +
55282 +__u32
55283 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55284 +{
55285 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55286 +}
55287 +
55288 +__u32
55289 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55290 +{
55291 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55292 +}
55293 +
55294 +__u32
55295 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
55296 + mode_t mode)
55297 +{
55298 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55299 + return 1;
55300 +
55301 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55302 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55303 + GR_FCHMOD_ACL_MSG);
55304 + } else {
55305 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
55306 + }
55307 +}
55308 +
55309 +__u32
55310 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55311 + mode_t mode)
55312 +{
55313 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55314 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55315 + GR_CHMOD_ACL_MSG);
55316 + } else {
55317 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55318 + }
55319 +}
55320 +
55321 +__u32
55322 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55323 +{
55324 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55325 +}
55326 +
55327 +__u32
55328 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55329 +{
55330 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55331 +}
55332 +
55333 +__u32
55334 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55335 +{
55336 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55337 +}
55338 +
55339 +__u32
55340 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55341 +{
55342 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55343 + GR_UNIXCONNECT_ACL_MSG);
55344 +}
55345 +
55346 +/* hardlinks require at minimum create and link permission,
55347 + any additional privilege required is based on the
55348 + privilege of the file being linked to
55349 +*/
55350 +__u32
55351 +gr_acl_handle_link(const struct dentry * new_dentry,
55352 + const struct dentry * parent_dentry,
55353 + const struct vfsmount * parent_mnt,
55354 + const struct dentry * old_dentry,
55355 + const struct vfsmount * old_mnt, const char *to)
55356 +{
55357 + __u32 mode;
55358 + __u32 needmode = GR_CREATE | GR_LINK;
55359 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55360 +
55361 + mode =
55362 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55363 + old_mnt);
55364 +
55365 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55366 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55367 + return mode;
55368 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55369 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55370 + return 0;
55371 + } else if (unlikely((mode & needmode) != needmode))
55372 + return 0;
55373 +
55374 + return 1;
55375 +}
55376 +
55377 +__u32
55378 +gr_acl_handle_symlink(const struct dentry * new_dentry,
55379 + const struct dentry * parent_dentry,
55380 + const struct vfsmount * parent_mnt, const char *from)
55381 +{
55382 + __u32 needmode = GR_WRITE | GR_CREATE;
55383 + __u32 mode;
55384 +
55385 + mode =
55386 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
55387 + GR_CREATE | GR_AUDIT_CREATE |
55388 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55389 +
55390 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55391 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55392 + return mode;
55393 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55394 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55395 + return 0;
55396 + } else if (unlikely((mode & needmode) != needmode))
55397 + return 0;
55398 +
55399 + return (GR_WRITE | GR_CREATE);
55400 +}
55401 +
55402 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55403 +{
55404 + __u32 mode;
55405 +
55406 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55407 +
55408 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55409 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55410 + return mode;
55411 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55412 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55413 + return 0;
55414 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55415 + return 0;
55416 +
55417 + return (reqmode);
55418 +}
55419 +
55420 +__u32
55421 +gr_acl_handle_mknod(const struct dentry * new_dentry,
55422 + const struct dentry * parent_dentry,
55423 + const struct vfsmount * parent_mnt,
55424 + const int mode)
55425 +{
55426 + __u32 reqmode = GR_WRITE | GR_CREATE;
55427 + if (unlikely(mode & (S_ISUID | S_ISGID)))
55428 + reqmode |= GR_SETID;
55429 +
55430 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55431 + reqmode, GR_MKNOD_ACL_MSG);
55432 +}
55433 +
55434 +__u32
55435 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55436 + const struct dentry *parent_dentry,
55437 + const struct vfsmount *parent_mnt)
55438 +{
55439 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55440 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55441 +}
55442 +
55443 +#define RENAME_CHECK_SUCCESS(old, new) \
55444 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55445 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55446 +
55447 +int
55448 +gr_acl_handle_rename(struct dentry *new_dentry,
55449 + struct dentry *parent_dentry,
55450 + const struct vfsmount *parent_mnt,
55451 + struct dentry *old_dentry,
55452 + struct inode *old_parent_inode,
55453 + struct vfsmount *old_mnt, const char *newname)
55454 +{
55455 + __u32 comp1, comp2;
55456 + int error = 0;
55457 +
55458 + if (unlikely(!gr_acl_is_enabled()))
55459 + return 0;
55460 +
55461 + if (!new_dentry->d_inode) {
55462 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55463 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55464 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55465 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55466 + GR_DELETE | GR_AUDIT_DELETE |
55467 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55468 + GR_SUPPRESS, old_mnt);
55469 + } else {
55470 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55471 + GR_CREATE | GR_DELETE |
55472 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55473 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55474 + GR_SUPPRESS, parent_mnt);
55475 + comp2 =
55476 + gr_search_file(old_dentry,
55477 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55478 + GR_DELETE | GR_AUDIT_DELETE |
55479 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55480 + }
55481 +
55482 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55483 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55484 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55485 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55486 + && !(comp2 & GR_SUPPRESS)) {
55487 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55488 + error = -EACCES;
55489 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55490 + error = -EACCES;
55491 +
55492 + return error;
55493 +}
55494 +
55495 +void
55496 +gr_acl_handle_exit(void)
55497 +{
55498 + u16 id;
55499 + char *rolename;
55500 + struct file *exec_file;
55501 +
55502 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55503 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55504 + id = current->acl_role_id;
55505 + rolename = current->role->rolename;
55506 + gr_set_acls(1);
55507 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55508 + }
55509 +
55510 + write_lock(&grsec_exec_file_lock);
55511 + exec_file = current->exec_file;
55512 + current->exec_file = NULL;
55513 + write_unlock(&grsec_exec_file_lock);
55514 +
55515 + if (exec_file)
55516 + fput(exec_file);
55517 +}
55518 +
55519 +int
55520 +gr_acl_handle_procpidmem(const struct task_struct *task)
55521 +{
55522 + if (unlikely(!gr_acl_is_enabled()))
55523 + return 0;
55524 +
55525 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55526 + return -EACCES;
55527 +
55528 + return 0;
55529 +}
55530 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55531 new file mode 100644
55532 index 0000000..17050ca
55533 --- /dev/null
55534 +++ b/grsecurity/gracl_ip.c
55535 @@ -0,0 +1,381 @@
55536 +#include <linux/kernel.h>
55537 +#include <asm/uaccess.h>
55538 +#include <asm/errno.h>
55539 +#include <net/sock.h>
55540 +#include <linux/file.h>
55541 +#include <linux/fs.h>
55542 +#include <linux/net.h>
55543 +#include <linux/in.h>
55544 +#include <linux/skbuff.h>
55545 +#include <linux/ip.h>
55546 +#include <linux/udp.h>
55547 +#include <linux/types.h>
55548 +#include <linux/sched.h>
55549 +#include <linux/netdevice.h>
55550 +#include <linux/inetdevice.h>
55551 +#include <linux/gracl.h>
55552 +#include <linux/grsecurity.h>
55553 +#include <linux/grinternal.h>
55554 +
55555 +#define GR_BIND 0x01
55556 +#define GR_CONNECT 0x02
55557 +#define GR_INVERT 0x04
55558 +#define GR_BINDOVERRIDE 0x08
55559 +#define GR_CONNECTOVERRIDE 0x10
55560 +#define GR_SOCK_FAMILY 0x20
55561 +
55562 +static const char * gr_protocols[IPPROTO_MAX] = {
55563 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55564 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55565 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55566 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55567 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55568 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55569 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55570 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55571 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55572 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55573 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55574 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55575 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55576 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55577 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55578 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55579 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55580 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55581 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55582 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55583 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55584 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55585 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55586 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55587 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55588 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55589 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55590 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55591 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55592 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55593 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55594 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55595 + };
55596 +
55597 +static const char * gr_socktypes[SOCK_MAX] = {
55598 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55599 + "unknown:7", "unknown:8", "unknown:9", "packet"
55600 + };
55601 +
55602 +static const char * gr_sockfamilies[AF_MAX+1] = {
55603 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55604 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55605 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55606 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55607 + };
55608 +
55609 +const char *
55610 +gr_proto_to_name(unsigned char proto)
55611 +{
55612 + return gr_protocols[proto];
55613 +}
55614 +
55615 +const char *
55616 +gr_socktype_to_name(unsigned char type)
55617 +{
55618 + return gr_socktypes[type];
55619 +}
55620 +
55621 +const char *
55622 +gr_sockfamily_to_name(unsigned char family)
55623 +{
55624 + return gr_sockfamilies[family];
55625 +}
55626 +
55627 +int
55628 +gr_search_socket(const int domain, const int type, const int protocol)
55629 +{
55630 + struct acl_subject_label *curr;
55631 + const struct cred *cred = current_cred();
55632 +
55633 + if (unlikely(!gr_acl_is_enabled()))
55634 + goto exit;
55635 +
55636 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
55637 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55638 + goto exit; // let the kernel handle it
55639 +
55640 + curr = current->acl;
55641 +
55642 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55643 + /* the family is allowed, if this is PF_INET allow it only if
55644 + the extra sock type/protocol checks pass */
55645 + if (domain == PF_INET)
55646 + goto inet_check;
55647 + goto exit;
55648 + } else {
55649 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55650 + __u32 fakeip = 0;
55651 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55652 + current->role->roletype, cred->uid,
55653 + cred->gid, current->exec_file ?
55654 + gr_to_filename(current->exec_file->f_path.dentry,
55655 + current->exec_file->f_path.mnt) :
55656 + curr->filename, curr->filename,
55657 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55658 + &current->signal->saved_ip);
55659 + goto exit;
55660 + }
55661 + goto exit_fail;
55662 + }
55663 +
55664 +inet_check:
55665 + /* the rest of this checking is for IPv4 only */
55666 + if (!curr->ips)
55667 + goto exit;
55668 +
55669 + if ((curr->ip_type & (1 << type)) &&
55670 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55671 + goto exit;
55672 +
55673 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55674 + /* we don't place acls on raw sockets , and sometimes
55675 + dgram/ip sockets are opened for ioctl and not
55676 + bind/connect, so we'll fake a bind learn log */
55677 + if (type == SOCK_RAW || type == SOCK_PACKET) {
55678 + __u32 fakeip = 0;
55679 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55680 + current->role->roletype, cred->uid,
55681 + cred->gid, current->exec_file ?
55682 + gr_to_filename(current->exec_file->f_path.dentry,
55683 + current->exec_file->f_path.mnt) :
55684 + curr->filename, curr->filename,
55685 + &fakeip, 0, type,
55686 + protocol, GR_CONNECT, &current->signal->saved_ip);
55687 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55688 + __u32 fakeip = 0;
55689 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55690 + current->role->roletype, cred->uid,
55691 + cred->gid, current->exec_file ?
55692 + gr_to_filename(current->exec_file->f_path.dentry,
55693 + current->exec_file->f_path.mnt) :
55694 + curr->filename, curr->filename,
55695 + &fakeip, 0, type,
55696 + protocol, GR_BIND, &current->signal->saved_ip);
55697 + }
55698 + /* we'll log when they use connect or bind */
55699 + goto exit;
55700 + }
55701 +
55702 +exit_fail:
55703 + if (domain == PF_INET)
55704 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55705 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
55706 + else
55707 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55708 + gr_socktype_to_name(type), protocol);
55709 +
55710 + return 0;
55711 +exit:
55712 + return 1;
55713 +}
55714 +
55715 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55716 +{
55717 + if ((ip->mode & mode) &&
55718 + (ip_port >= ip->low) &&
55719 + (ip_port <= ip->high) &&
55720 + ((ntohl(ip_addr) & our_netmask) ==
55721 + (ntohl(our_addr) & our_netmask))
55722 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55723 + && (ip->type & (1 << type))) {
55724 + if (ip->mode & GR_INVERT)
55725 + return 2; // specifically denied
55726 + else
55727 + return 1; // allowed
55728 + }
55729 +
55730 + return 0; // not specifically allowed, may continue parsing
55731 +}
55732 +
55733 +static int
55734 +gr_search_connectbind(const int full_mode, struct sock *sk,
55735 + struct sockaddr_in *addr, const int type)
55736 +{
55737 + char iface[IFNAMSIZ] = {0};
55738 + struct acl_subject_label *curr;
55739 + struct acl_ip_label *ip;
55740 + struct inet_sock *isk;
55741 + struct net_device *dev;
55742 + struct in_device *idev;
55743 + unsigned long i;
55744 + int ret;
55745 + int mode = full_mode & (GR_BIND | GR_CONNECT);
55746 + __u32 ip_addr = 0;
55747 + __u32 our_addr;
55748 + __u32 our_netmask;
55749 + char *p;
55750 + __u16 ip_port = 0;
55751 + const struct cred *cred = current_cred();
55752 +
55753 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55754 + return 0;
55755 +
55756 + curr = current->acl;
55757 + isk = inet_sk(sk);
55758 +
55759 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55760 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55761 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55762 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55763 + struct sockaddr_in saddr;
55764 + int err;
55765 +
55766 + saddr.sin_family = AF_INET;
55767 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55768 + saddr.sin_port = isk->inet_sport;
55769 +
55770 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55771 + if (err)
55772 + return err;
55773 +
55774 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55775 + if (err)
55776 + return err;
55777 + }
55778 +
55779 + if (!curr->ips)
55780 + return 0;
55781 +
55782 + ip_addr = addr->sin_addr.s_addr;
55783 + ip_port = ntohs(addr->sin_port);
55784 +
55785 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55786 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55787 + current->role->roletype, cred->uid,
55788 + cred->gid, current->exec_file ?
55789 + gr_to_filename(current->exec_file->f_path.dentry,
55790 + current->exec_file->f_path.mnt) :
55791 + curr->filename, curr->filename,
55792 + &ip_addr, ip_port, type,
55793 + sk->sk_protocol, mode, &current->signal->saved_ip);
55794 + return 0;
55795 + }
55796 +
55797 + for (i = 0; i < curr->ip_num; i++) {
55798 + ip = *(curr->ips + i);
55799 + if (ip->iface != NULL) {
55800 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55801 + p = strchr(iface, ':');
55802 + if (p != NULL)
55803 + *p = '\0';
55804 + dev = dev_get_by_name(sock_net(sk), iface);
55805 + if (dev == NULL)
55806 + continue;
55807 + idev = in_dev_get(dev);
55808 + if (idev == NULL) {
55809 + dev_put(dev);
55810 + continue;
55811 + }
55812 + rcu_read_lock();
55813 + for_ifa(idev) {
55814 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55815 + our_addr = ifa->ifa_address;
55816 + our_netmask = 0xffffffff;
55817 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55818 + if (ret == 1) {
55819 + rcu_read_unlock();
55820 + in_dev_put(idev);
55821 + dev_put(dev);
55822 + return 0;
55823 + } else if (ret == 2) {
55824 + rcu_read_unlock();
55825 + in_dev_put(idev);
55826 + dev_put(dev);
55827 + goto denied;
55828 + }
55829 + }
55830 + } endfor_ifa(idev);
55831 + rcu_read_unlock();
55832 + in_dev_put(idev);
55833 + dev_put(dev);
55834 + } else {
55835 + our_addr = ip->addr;
55836 + our_netmask = ip->netmask;
55837 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55838 + if (ret == 1)
55839 + return 0;
55840 + else if (ret == 2)
55841 + goto denied;
55842 + }
55843 + }
55844 +
55845 +denied:
55846 + if (mode == GR_BIND)
55847 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55848 + else if (mode == GR_CONNECT)
55849 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55850 +
55851 + return -EACCES;
55852 +}
55853 +
55854 +int
55855 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55856 +{
55857 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55858 +}
55859 +
55860 +int
55861 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55862 +{
55863 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55864 +}
55865 +
55866 +int gr_search_listen(struct socket *sock)
55867 +{
55868 + struct sock *sk = sock->sk;
55869 + struct sockaddr_in addr;
55870 +
55871 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55872 + addr.sin_port = inet_sk(sk)->inet_sport;
55873 +
55874 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55875 +}
55876 +
55877 +int gr_search_accept(struct socket *sock)
55878 +{
55879 + struct sock *sk = sock->sk;
55880 + struct sockaddr_in addr;
55881 +
55882 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55883 + addr.sin_port = inet_sk(sk)->inet_sport;
55884 +
55885 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55886 +}
55887 +
55888 +int
55889 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55890 +{
55891 + if (addr)
55892 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55893 + else {
55894 + struct sockaddr_in sin;
55895 + const struct inet_sock *inet = inet_sk(sk);
55896 +
55897 + sin.sin_addr.s_addr = inet->inet_daddr;
55898 + sin.sin_port = inet->inet_dport;
55899 +
55900 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55901 + }
55902 +}
55903 +
55904 +int
55905 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55906 +{
55907 + struct sockaddr_in sin;
55908 +
55909 + if (unlikely(skb->len < sizeof (struct udphdr)))
55910 + return 0; // skip this packet
55911 +
55912 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55913 + sin.sin_port = udp_hdr(skb)->source;
55914 +
55915 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55916 +}
55917 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55918 new file mode 100644
55919 index 0000000..25f54ef
55920 --- /dev/null
55921 +++ b/grsecurity/gracl_learn.c
55922 @@ -0,0 +1,207 @@
55923 +#include <linux/kernel.h>
55924 +#include <linux/mm.h>
55925 +#include <linux/sched.h>
55926 +#include <linux/poll.h>
55927 +#include <linux/string.h>
55928 +#include <linux/file.h>
55929 +#include <linux/types.h>
55930 +#include <linux/vmalloc.h>
55931 +#include <linux/grinternal.h>
55932 +
55933 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55934 + size_t count, loff_t *ppos);
55935 +extern int gr_acl_is_enabled(void);
55936 +
55937 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55938 +static int gr_learn_attached;
55939 +
55940 +/* use a 512k buffer */
55941 +#define LEARN_BUFFER_SIZE (512 * 1024)
55942 +
55943 +static DEFINE_SPINLOCK(gr_learn_lock);
55944 +static DEFINE_MUTEX(gr_learn_user_mutex);
55945 +
55946 +/* we need to maintain two buffers, so that the kernel context of grlearn
55947 + uses a semaphore around the userspace copying, and the other kernel contexts
55948 + use a spinlock when copying into the buffer, since they cannot sleep
55949 +*/
55950 +static char *learn_buffer;
55951 +static char *learn_buffer_user;
55952 +static int learn_buffer_len;
55953 +static int learn_buffer_user_len;
55954 +
55955 +static ssize_t
55956 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55957 +{
55958 + DECLARE_WAITQUEUE(wait, current);
55959 + ssize_t retval = 0;
55960 +
55961 + add_wait_queue(&learn_wait, &wait);
55962 + set_current_state(TASK_INTERRUPTIBLE);
55963 + do {
55964 + mutex_lock(&gr_learn_user_mutex);
55965 + spin_lock(&gr_learn_lock);
55966 + if (learn_buffer_len)
55967 + break;
55968 + spin_unlock(&gr_learn_lock);
55969 + mutex_unlock(&gr_learn_user_mutex);
55970 + if (file->f_flags & O_NONBLOCK) {
55971 + retval = -EAGAIN;
55972 + goto out;
55973 + }
55974 + if (signal_pending(current)) {
55975 + retval = -ERESTARTSYS;
55976 + goto out;
55977 + }
55978 +
55979 + schedule();
55980 + } while (1);
55981 +
55982 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55983 + learn_buffer_user_len = learn_buffer_len;
55984 + retval = learn_buffer_len;
55985 + learn_buffer_len = 0;
55986 +
55987 + spin_unlock(&gr_learn_lock);
55988 +
55989 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55990 + retval = -EFAULT;
55991 +
55992 + mutex_unlock(&gr_learn_user_mutex);
55993 +out:
55994 + set_current_state(TASK_RUNNING);
55995 + remove_wait_queue(&learn_wait, &wait);
55996 + return retval;
55997 +}
55998 +
55999 +static unsigned int
56000 +poll_learn(struct file * file, poll_table * wait)
56001 +{
56002 + poll_wait(file, &learn_wait, wait);
56003 +
56004 + if (learn_buffer_len)
56005 + return (POLLIN | POLLRDNORM);
56006 +
56007 + return 0;
56008 +}
56009 +
56010 +void
56011 +gr_clear_learn_entries(void)
56012 +{
56013 + char *tmp;
56014 +
56015 + mutex_lock(&gr_learn_user_mutex);
56016 + spin_lock(&gr_learn_lock);
56017 + tmp = learn_buffer;
56018 + learn_buffer = NULL;
56019 + spin_unlock(&gr_learn_lock);
56020 + if (tmp)
56021 + vfree(tmp);
56022 + if (learn_buffer_user != NULL) {
56023 + vfree(learn_buffer_user);
56024 + learn_buffer_user = NULL;
56025 + }
56026 + learn_buffer_len = 0;
56027 + mutex_unlock(&gr_learn_user_mutex);
56028 +
56029 + return;
56030 +}
56031 +
56032 +void
56033 +gr_add_learn_entry(const char *fmt, ...)
56034 +{
56035 + va_list args;
56036 + unsigned int len;
56037 +
56038 + if (!gr_learn_attached)
56039 + return;
56040 +
56041 + spin_lock(&gr_learn_lock);
56042 +
56043 + /* leave a gap at the end so we know when it's "full" but don't have to
56044 + compute the exact length of the string we're trying to append
56045 + */
56046 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56047 + spin_unlock(&gr_learn_lock);
56048 + wake_up_interruptible(&learn_wait);
56049 + return;
56050 + }
56051 + if (learn_buffer == NULL) {
56052 + spin_unlock(&gr_learn_lock);
56053 + return;
56054 + }
56055 +
56056 + va_start(args, fmt);
56057 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56058 + va_end(args);
56059 +
56060 + learn_buffer_len += len + 1;
56061 +
56062 + spin_unlock(&gr_learn_lock);
56063 + wake_up_interruptible(&learn_wait);
56064 +
56065 + return;
56066 +}
56067 +
56068 +static int
56069 +open_learn(struct inode *inode, struct file *file)
56070 +{
56071 + if (file->f_mode & FMODE_READ && gr_learn_attached)
56072 + return -EBUSY;
56073 + if (file->f_mode & FMODE_READ) {
56074 + int retval = 0;
56075 + mutex_lock(&gr_learn_user_mutex);
56076 + if (learn_buffer == NULL)
56077 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56078 + if (learn_buffer_user == NULL)
56079 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56080 + if (learn_buffer == NULL) {
56081 + retval = -ENOMEM;
56082 + goto out_error;
56083 + }
56084 + if (learn_buffer_user == NULL) {
56085 + retval = -ENOMEM;
56086 + goto out_error;
56087 + }
56088 + learn_buffer_len = 0;
56089 + learn_buffer_user_len = 0;
56090 + gr_learn_attached = 1;
56091 +out_error:
56092 + mutex_unlock(&gr_learn_user_mutex);
56093 + return retval;
56094 + }
56095 + return 0;
56096 +}
56097 +
56098 +static int
56099 +close_learn(struct inode *inode, struct file *file)
56100 +{
56101 + if (file->f_mode & FMODE_READ) {
56102 + char *tmp = NULL;
56103 + mutex_lock(&gr_learn_user_mutex);
56104 + spin_lock(&gr_learn_lock);
56105 + tmp = learn_buffer;
56106 + learn_buffer = NULL;
56107 + spin_unlock(&gr_learn_lock);
56108 + if (tmp)
56109 + vfree(tmp);
56110 + if (learn_buffer_user != NULL) {
56111 + vfree(learn_buffer_user);
56112 + learn_buffer_user = NULL;
56113 + }
56114 + learn_buffer_len = 0;
56115 + learn_buffer_user_len = 0;
56116 + gr_learn_attached = 0;
56117 + mutex_unlock(&gr_learn_user_mutex);
56118 + }
56119 +
56120 + return 0;
56121 +}
56122 +
56123 +const struct file_operations grsec_fops = {
56124 + .read = read_learn,
56125 + .write = write_grsec_handler,
56126 + .open = open_learn,
56127 + .release = close_learn,
56128 + .poll = poll_learn,
56129 +};
56130 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56131 new file mode 100644
56132 index 0000000..39645c9
56133 --- /dev/null
56134 +++ b/grsecurity/gracl_res.c
56135 @@ -0,0 +1,68 @@
56136 +#include <linux/kernel.h>
56137 +#include <linux/sched.h>
56138 +#include <linux/gracl.h>
56139 +#include <linux/grinternal.h>
56140 +
56141 +static const char *restab_log[] = {
56142 + [RLIMIT_CPU] = "RLIMIT_CPU",
56143 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56144 + [RLIMIT_DATA] = "RLIMIT_DATA",
56145 + [RLIMIT_STACK] = "RLIMIT_STACK",
56146 + [RLIMIT_CORE] = "RLIMIT_CORE",
56147 + [RLIMIT_RSS] = "RLIMIT_RSS",
56148 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
56149 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56150 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56151 + [RLIMIT_AS] = "RLIMIT_AS",
56152 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56153 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56154 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56155 + [RLIMIT_NICE] = "RLIMIT_NICE",
56156 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56157 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56158 + [GR_CRASH_RES] = "RLIMIT_CRASH"
56159 +};
56160 +
56161 +void
56162 +gr_log_resource(const struct task_struct *task,
56163 + const int res, const unsigned long wanted, const int gt)
56164 +{
56165 + const struct cred *cred;
56166 + unsigned long rlim;
56167 +
56168 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
56169 + return;
56170 +
56171 + // not yet supported resource
56172 + if (unlikely(!restab_log[res]))
56173 + return;
56174 +
56175 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56176 + rlim = task_rlimit_max(task, res);
56177 + else
56178 + rlim = task_rlimit(task, res);
56179 +
56180 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56181 + return;
56182 +
56183 + rcu_read_lock();
56184 + cred = __task_cred(task);
56185 +
56186 + if (res == RLIMIT_NPROC &&
56187 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56188 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56189 + goto out_rcu_unlock;
56190 + else if (res == RLIMIT_MEMLOCK &&
56191 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56192 + goto out_rcu_unlock;
56193 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56194 + goto out_rcu_unlock;
56195 + rcu_read_unlock();
56196 +
56197 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56198 +
56199 + return;
56200 +out_rcu_unlock:
56201 + rcu_read_unlock();
56202 + return;
56203 +}
56204 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56205 new file mode 100644
56206 index 0000000..5556be3
56207 --- /dev/null
56208 +++ b/grsecurity/gracl_segv.c
56209 @@ -0,0 +1,299 @@
56210 +#include <linux/kernel.h>
56211 +#include <linux/mm.h>
56212 +#include <asm/uaccess.h>
56213 +#include <asm/errno.h>
56214 +#include <asm/mman.h>
56215 +#include <net/sock.h>
56216 +#include <linux/file.h>
56217 +#include <linux/fs.h>
56218 +#include <linux/net.h>
56219 +#include <linux/in.h>
56220 +#include <linux/slab.h>
56221 +#include <linux/types.h>
56222 +#include <linux/sched.h>
56223 +#include <linux/timer.h>
56224 +#include <linux/gracl.h>
56225 +#include <linux/grsecurity.h>
56226 +#include <linux/grinternal.h>
56227 +
56228 +static struct crash_uid *uid_set;
56229 +static unsigned short uid_used;
56230 +static DEFINE_SPINLOCK(gr_uid_lock);
56231 +extern rwlock_t gr_inode_lock;
56232 +extern struct acl_subject_label *
56233 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56234 + struct acl_role_label *role);
56235 +
56236 +#ifdef CONFIG_BTRFS_FS
56237 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56238 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56239 +#endif
56240 +
56241 +static inline dev_t __get_dev(const struct dentry *dentry)
56242 +{
56243 +#ifdef CONFIG_BTRFS_FS
56244 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56245 + return get_btrfs_dev_from_inode(dentry->d_inode);
56246 + else
56247 +#endif
56248 + return dentry->d_inode->i_sb->s_dev;
56249 +}
56250 +
56251 +int
56252 +gr_init_uidset(void)
56253 +{
56254 + uid_set =
56255 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56256 + uid_used = 0;
56257 +
56258 + return uid_set ? 1 : 0;
56259 +}
56260 +
56261 +void
56262 +gr_free_uidset(void)
56263 +{
56264 + if (uid_set)
56265 + kfree(uid_set);
56266 +
56267 + return;
56268 +}
56269 +
56270 +int
56271 +gr_find_uid(const uid_t uid)
56272 +{
56273 + struct crash_uid *tmp = uid_set;
56274 + uid_t buid;
56275 + int low = 0, high = uid_used - 1, mid;
56276 +
56277 + while (high >= low) {
56278 + mid = (low + high) >> 1;
56279 + buid = tmp[mid].uid;
56280 + if (buid == uid)
56281 + return mid;
56282 + if (buid > uid)
56283 + high = mid - 1;
56284 + if (buid < uid)
56285 + low = mid + 1;
56286 + }
56287 +
56288 + return -1;
56289 +}
56290 +
56291 +static __inline__ void
56292 +gr_insertsort(void)
56293 +{
56294 + unsigned short i, j;
56295 + struct crash_uid index;
56296 +
56297 + for (i = 1; i < uid_used; i++) {
56298 + index = uid_set[i];
56299 + j = i;
56300 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56301 + uid_set[j] = uid_set[j - 1];
56302 + j--;
56303 + }
56304 + uid_set[j] = index;
56305 + }
56306 +
56307 + return;
56308 +}
56309 +
56310 +static __inline__ void
56311 +gr_insert_uid(const uid_t uid, const unsigned long expires)
56312 +{
56313 + int loc;
56314 +
56315 + if (uid_used == GR_UIDTABLE_MAX)
56316 + return;
56317 +
56318 + loc = gr_find_uid(uid);
56319 +
56320 + if (loc >= 0) {
56321 + uid_set[loc].expires = expires;
56322 + return;
56323 + }
56324 +
56325 + uid_set[uid_used].uid = uid;
56326 + uid_set[uid_used].expires = expires;
56327 + uid_used++;
56328 +
56329 + gr_insertsort();
56330 +
56331 + return;
56332 +}
56333 +
56334 +void
56335 +gr_remove_uid(const unsigned short loc)
56336 +{
56337 + unsigned short i;
56338 +
56339 + for (i = loc + 1; i < uid_used; i++)
56340 + uid_set[i - 1] = uid_set[i];
56341 +
56342 + uid_used--;
56343 +
56344 + return;
56345 +}
56346 +
56347 +int
56348 +gr_check_crash_uid(const uid_t uid)
56349 +{
56350 + int loc;
56351 + int ret = 0;
56352 +
56353 + if (unlikely(!gr_acl_is_enabled()))
56354 + return 0;
56355 +
56356 + spin_lock(&gr_uid_lock);
56357 + loc = gr_find_uid(uid);
56358 +
56359 + if (loc < 0)
56360 + goto out_unlock;
56361 +
56362 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
56363 + gr_remove_uid(loc);
56364 + else
56365 + ret = 1;
56366 +
56367 +out_unlock:
56368 + spin_unlock(&gr_uid_lock);
56369 + return ret;
56370 +}
56371 +
56372 +static __inline__ int
56373 +proc_is_setxid(const struct cred *cred)
56374 +{
56375 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
56376 + cred->uid != cred->fsuid)
56377 + return 1;
56378 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56379 + cred->gid != cred->fsgid)
56380 + return 1;
56381 +
56382 + return 0;
56383 +}
56384 +
56385 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
56386 +
56387 +void
56388 +gr_handle_crash(struct task_struct *task, const int sig)
56389 +{
56390 + struct acl_subject_label *curr;
56391 + struct task_struct *tsk, *tsk2;
56392 + const struct cred *cred;
56393 + const struct cred *cred2;
56394 +
56395 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56396 + return;
56397 +
56398 + if (unlikely(!gr_acl_is_enabled()))
56399 + return;
56400 +
56401 + curr = task->acl;
56402 +
56403 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
56404 + return;
56405 +
56406 + if (time_before_eq(curr->expires, get_seconds())) {
56407 + curr->expires = 0;
56408 + curr->crashes = 0;
56409 + }
56410 +
56411 + curr->crashes++;
56412 +
56413 + if (!curr->expires)
56414 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56415 +
56416 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56417 + time_after(curr->expires, get_seconds())) {
56418 + rcu_read_lock();
56419 + cred = __task_cred(task);
56420 + if (cred->uid && proc_is_setxid(cred)) {
56421 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56422 + spin_lock(&gr_uid_lock);
56423 + gr_insert_uid(cred->uid, curr->expires);
56424 + spin_unlock(&gr_uid_lock);
56425 + curr->expires = 0;
56426 + curr->crashes = 0;
56427 + read_lock(&tasklist_lock);
56428 + do_each_thread(tsk2, tsk) {
56429 + cred2 = __task_cred(tsk);
56430 + if (tsk != task && cred2->uid == cred->uid)
56431 + gr_fake_force_sig(SIGKILL, tsk);
56432 + } while_each_thread(tsk2, tsk);
56433 + read_unlock(&tasklist_lock);
56434 + } else {
56435 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56436 + read_lock(&tasklist_lock);
56437 + read_lock(&grsec_exec_file_lock);
56438 + do_each_thread(tsk2, tsk) {
56439 + if (likely(tsk != task)) {
56440 + // if this thread has the same subject as the one that triggered
56441 + // RES_CRASH and it's the same binary, kill it
56442 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56443 + gr_fake_force_sig(SIGKILL, tsk);
56444 + }
56445 + } while_each_thread(tsk2, tsk);
56446 + read_unlock(&grsec_exec_file_lock);
56447 + read_unlock(&tasklist_lock);
56448 + }
56449 + rcu_read_unlock();
56450 + }
56451 +
56452 + return;
56453 +}
56454 +
56455 +int
56456 +gr_check_crash_exec(const struct file *filp)
56457 +{
56458 + struct acl_subject_label *curr;
56459 +
56460 + if (unlikely(!gr_acl_is_enabled()))
56461 + return 0;
56462 +
56463 + read_lock(&gr_inode_lock);
56464 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56465 + __get_dev(filp->f_path.dentry),
56466 + current->role);
56467 + read_unlock(&gr_inode_lock);
56468 +
56469 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56470 + (!curr->crashes && !curr->expires))
56471 + return 0;
56472 +
56473 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56474 + time_after(curr->expires, get_seconds()))
56475 + return 1;
56476 + else if (time_before_eq(curr->expires, get_seconds())) {
56477 + curr->crashes = 0;
56478 + curr->expires = 0;
56479 + }
56480 +
56481 + return 0;
56482 +}
56483 +
56484 +void
56485 +gr_handle_alertkill(struct task_struct *task)
56486 +{
56487 + struct acl_subject_label *curracl;
56488 + __u32 curr_ip;
56489 + struct task_struct *p, *p2;
56490 +
56491 + if (unlikely(!gr_acl_is_enabled()))
56492 + return;
56493 +
56494 + curracl = task->acl;
56495 + curr_ip = task->signal->curr_ip;
56496 +
56497 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56498 + read_lock(&tasklist_lock);
56499 + do_each_thread(p2, p) {
56500 + if (p->signal->curr_ip == curr_ip)
56501 + gr_fake_force_sig(SIGKILL, p);
56502 + } while_each_thread(p2, p);
56503 + read_unlock(&tasklist_lock);
56504 + } else if (curracl->mode & GR_KILLPROC)
56505 + gr_fake_force_sig(SIGKILL, task);
56506 +
56507 + return;
56508 +}
56509 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56510 new file mode 100644
56511 index 0000000..9d83a69
56512 --- /dev/null
56513 +++ b/grsecurity/gracl_shm.c
56514 @@ -0,0 +1,40 @@
56515 +#include <linux/kernel.h>
56516 +#include <linux/mm.h>
56517 +#include <linux/sched.h>
56518 +#include <linux/file.h>
56519 +#include <linux/ipc.h>
56520 +#include <linux/gracl.h>
56521 +#include <linux/grsecurity.h>
56522 +#include <linux/grinternal.h>
56523 +
56524 +int
56525 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56526 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56527 +{
56528 + struct task_struct *task;
56529 +
56530 + if (!gr_acl_is_enabled())
56531 + return 1;
56532 +
56533 + rcu_read_lock();
56534 + read_lock(&tasklist_lock);
56535 +
56536 + task = find_task_by_vpid(shm_cprid);
56537 +
56538 + if (unlikely(!task))
56539 + task = find_task_by_vpid(shm_lapid);
56540 +
56541 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56542 + (task->pid == shm_lapid)) &&
56543 + (task->acl->mode & GR_PROTSHM) &&
56544 + (task->acl != current->acl))) {
56545 + read_unlock(&tasklist_lock);
56546 + rcu_read_unlock();
56547 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56548 + return 0;
56549 + }
56550 + read_unlock(&tasklist_lock);
56551 + rcu_read_unlock();
56552 +
56553 + return 1;
56554 +}
56555 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56556 new file mode 100644
56557 index 0000000..bc0be01
56558 --- /dev/null
56559 +++ b/grsecurity/grsec_chdir.c
56560 @@ -0,0 +1,19 @@
56561 +#include <linux/kernel.h>
56562 +#include <linux/sched.h>
56563 +#include <linux/fs.h>
56564 +#include <linux/file.h>
56565 +#include <linux/grsecurity.h>
56566 +#include <linux/grinternal.h>
56567 +
56568 +void
56569 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56570 +{
56571 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56572 + if ((grsec_enable_chdir && grsec_enable_group &&
56573 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56574 + !grsec_enable_group)) {
56575 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56576 + }
56577 +#endif
56578 + return;
56579 +}
56580 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56581 new file mode 100644
56582 index 0000000..a2dc675
56583 --- /dev/null
56584 +++ b/grsecurity/grsec_chroot.c
56585 @@ -0,0 +1,351 @@
56586 +#include <linux/kernel.h>
56587 +#include <linux/module.h>
56588 +#include <linux/sched.h>
56589 +#include <linux/file.h>
56590 +#include <linux/fs.h>
56591 +#include <linux/mount.h>
56592 +#include <linux/types.h>
56593 +#include <linux/pid_namespace.h>
56594 +#include <linux/grsecurity.h>
56595 +#include <linux/grinternal.h>
56596 +
56597 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56598 +{
56599 +#ifdef CONFIG_GRKERNSEC
56600 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56601 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
56602 + task->gr_is_chrooted = 1;
56603 + else
56604 + task->gr_is_chrooted = 0;
56605 +
56606 + task->gr_chroot_dentry = path->dentry;
56607 +#endif
56608 + return;
56609 +}
56610 +
56611 +void gr_clear_chroot_entries(struct task_struct *task)
56612 +{
56613 +#ifdef CONFIG_GRKERNSEC
56614 + task->gr_is_chrooted = 0;
56615 + task->gr_chroot_dentry = NULL;
56616 +#endif
56617 + return;
56618 +}
56619 +
56620 +int
56621 +gr_handle_chroot_unix(const pid_t pid)
56622 +{
56623 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56624 + struct task_struct *p;
56625 +
56626 + if (unlikely(!grsec_enable_chroot_unix))
56627 + return 1;
56628 +
56629 + if (likely(!proc_is_chrooted(current)))
56630 + return 1;
56631 +
56632 + rcu_read_lock();
56633 + read_lock(&tasklist_lock);
56634 + p = find_task_by_vpid_unrestricted(pid);
56635 + if (unlikely(p && !have_same_root(current, p))) {
56636 + read_unlock(&tasklist_lock);
56637 + rcu_read_unlock();
56638 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56639 + return 0;
56640 + }
56641 + read_unlock(&tasklist_lock);
56642 + rcu_read_unlock();
56643 +#endif
56644 + return 1;
56645 +}
56646 +
56647 +int
56648 +gr_handle_chroot_nice(void)
56649 +{
56650 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56651 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56652 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56653 + return -EPERM;
56654 + }
56655 +#endif
56656 + return 0;
56657 +}
56658 +
56659 +int
56660 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56661 +{
56662 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56663 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56664 + && proc_is_chrooted(current)) {
56665 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56666 + return -EACCES;
56667 + }
56668 +#endif
56669 + return 0;
56670 +}
56671 +
56672 +int
56673 +gr_handle_chroot_rawio(const struct inode *inode)
56674 +{
56675 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56676 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56677 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56678 + return 1;
56679 +#endif
56680 + return 0;
56681 +}
56682 +
56683 +int
56684 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56685 +{
56686 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56687 + struct task_struct *p;
56688 + int ret = 0;
56689 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56690 + return ret;
56691 +
56692 + read_lock(&tasklist_lock);
56693 + do_each_pid_task(pid, type, p) {
56694 + if (!have_same_root(current, p)) {
56695 + ret = 1;
56696 + goto out;
56697 + }
56698 + } while_each_pid_task(pid, type, p);
56699 +out:
56700 + read_unlock(&tasklist_lock);
56701 + return ret;
56702 +#endif
56703 + return 0;
56704 +}
56705 +
56706 +int
56707 +gr_pid_is_chrooted(struct task_struct *p)
56708 +{
56709 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56710 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56711 + return 0;
56712 +
56713 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56714 + !have_same_root(current, p)) {
56715 + return 1;
56716 + }
56717 +#endif
56718 + return 0;
56719 +}
56720 +
56721 +EXPORT_SYMBOL(gr_pid_is_chrooted);
56722 +
56723 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56724 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56725 +{
56726 + struct path path, currentroot;
56727 + int ret = 0;
56728 +
56729 + path.dentry = (struct dentry *)u_dentry;
56730 + path.mnt = (struct vfsmount *)u_mnt;
56731 + get_fs_root(current->fs, &currentroot);
56732 + if (path_is_under(&path, &currentroot))
56733 + ret = 1;
56734 + path_put(&currentroot);
56735 +
56736 + return ret;
56737 +}
56738 +#endif
56739 +
56740 +int
56741 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56742 +{
56743 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56744 + if (!grsec_enable_chroot_fchdir)
56745 + return 1;
56746 +
56747 + if (!proc_is_chrooted(current))
56748 + return 1;
56749 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56750 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56751 + return 0;
56752 + }
56753 +#endif
56754 + return 1;
56755 +}
56756 +
56757 +int
56758 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56759 + const time_t shm_createtime)
56760 +{
56761 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56762 + struct task_struct *p;
56763 + time_t starttime;
56764 +
56765 + if (unlikely(!grsec_enable_chroot_shmat))
56766 + return 1;
56767 +
56768 + if (likely(!proc_is_chrooted(current)))
56769 + return 1;
56770 +
56771 + rcu_read_lock();
56772 + read_lock(&tasklist_lock);
56773 +
56774 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56775 + starttime = p->start_time.tv_sec;
56776 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56777 + if (have_same_root(current, p)) {
56778 + goto allow;
56779 + } else {
56780 + read_unlock(&tasklist_lock);
56781 + rcu_read_unlock();
56782 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56783 + return 0;
56784 + }
56785 + }
56786 + /* creator exited, pid reuse, fall through to next check */
56787 + }
56788 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56789 + if (unlikely(!have_same_root(current, p))) {
56790 + read_unlock(&tasklist_lock);
56791 + rcu_read_unlock();
56792 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56793 + return 0;
56794 + }
56795 + }
56796 +
56797 +allow:
56798 + read_unlock(&tasklist_lock);
56799 + rcu_read_unlock();
56800 +#endif
56801 + return 1;
56802 +}
56803 +
56804 +void
56805 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56806 +{
56807 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56808 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56809 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56810 +#endif
56811 + return;
56812 +}
56813 +
56814 +int
56815 +gr_handle_chroot_mknod(const struct dentry *dentry,
56816 + const struct vfsmount *mnt, const int mode)
56817 +{
56818 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56819 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56820 + proc_is_chrooted(current)) {
56821 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56822 + return -EPERM;
56823 + }
56824 +#endif
56825 + return 0;
56826 +}
56827 +
56828 +int
56829 +gr_handle_chroot_mount(const struct dentry *dentry,
56830 + const struct vfsmount *mnt, const char *dev_name)
56831 +{
56832 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56833 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56834 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56835 + return -EPERM;
56836 + }
56837 +#endif
56838 + return 0;
56839 +}
56840 +
56841 +int
56842 +gr_handle_chroot_pivot(void)
56843 +{
56844 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56845 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56846 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56847 + return -EPERM;
56848 + }
56849 +#endif
56850 + return 0;
56851 +}
56852 +
56853 +int
56854 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56855 +{
56856 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56857 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56858 + !gr_is_outside_chroot(dentry, mnt)) {
56859 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56860 + return -EPERM;
56861 + }
56862 +#endif
56863 + return 0;
56864 +}
56865 +
56866 +extern const char *captab_log[];
56867 +extern int captab_log_entries;
56868 +
56869 +int
56870 +gr_chroot_is_capable(const int cap)
56871 +{
56872 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56873 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56874 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56875 + if (cap_raised(chroot_caps, cap)) {
56876 + const struct cred *creds = current_cred();
56877 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
56878 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
56879 + }
56880 + return 0;
56881 + }
56882 + }
56883 +#endif
56884 + return 1;
56885 +}
56886 +
56887 +int
56888 +gr_chroot_is_capable_nolog(const int cap)
56889 +{
56890 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56891 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56892 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56893 + if (cap_raised(chroot_caps, cap)) {
56894 + return 0;
56895 + }
56896 + }
56897 +#endif
56898 + return 1;
56899 +}
56900 +
56901 +int
56902 +gr_handle_chroot_sysctl(const int op)
56903 +{
56904 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56905 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56906 + proc_is_chrooted(current))
56907 + return -EACCES;
56908 +#endif
56909 + return 0;
56910 +}
56911 +
56912 +void
56913 +gr_handle_chroot_chdir(struct path *path)
56914 +{
56915 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56916 + if (grsec_enable_chroot_chdir)
56917 + set_fs_pwd(current->fs, path);
56918 +#endif
56919 + return;
56920 +}
56921 +
56922 +int
56923 +gr_handle_chroot_chmod(const struct dentry *dentry,
56924 + const struct vfsmount *mnt, const int mode)
56925 +{
56926 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56927 + /* allow chmod +s on directories, but not files */
56928 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56929 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56930 + proc_is_chrooted(current)) {
56931 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56932 + return -EPERM;
56933 + }
56934 +#endif
56935 + return 0;
56936 +}
56937 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56938 new file mode 100644
56939 index 0000000..d81a586
56940 --- /dev/null
56941 +++ b/grsecurity/grsec_disabled.c
56942 @@ -0,0 +1,439 @@
56943 +#include <linux/kernel.h>
56944 +#include <linux/module.h>
56945 +#include <linux/sched.h>
56946 +#include <linux/file.h>
56947 +#include <linux/fs.h>
56948 +#include <linux/kdev_t.h>
56949 +#include <linux/net.h>
56950 +#include <linux/in.h>
56951 +#include <linux/ip.h>
56952 +#include <linux/skbuff.h>
56953 +#include <linux/sysctl.h>
56954 +
56955 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56956 +void
56957 +pax_set_initial_flags(struct linux_binprm *bprm)
56958 +{
56959 + return;
56960 +}
56961 +#endif
56962 +
56963 +#ifdef CONFIG_SYSCTL
56964 +__u32
56965 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56966 +{
56967 + return 0;
56968 +}
56969 +#endif
56970 +
56971 +#ifdef CONFIG_TASKSTATS
56972 +int gr_is_taskstats_denied(int pid)
56973 +{
56974 + return 0;
56975 +}
56976 +#endif
56977 +
56978 +int
56979 +gr_acl_is_enabled(void)
56980 +{
56981 + return 0;
56982 +}
56983 +
56984 +void
56985 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56986 +{
56987 + return;
56988 +}
56989 +
56990 +int
56991 +gr_handle_rawio(const struct inode *inode)
56992 +{
56993 + return 0;
56994 +}
56995 +
56996 +void
56997 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56998 +{
56999 + return;
57000 +}
57001 +
57002 +int
57003 +gr_handle_ptrace(struct task_struct *task, const long request)
57004 +{
57005 + return 0;
57006 +}
57007 +
57008 +int
57009 +gr_handle_proc_ptrace(struct task_struct *task)
57010 +{
57011 + return 0;
57012 +}
57013 +
57014 +void
57015 +gr_learn_resource(const struct task_struct *task,
57016 + const int res, const unsigned long wanted, const int gt)
57017 +{
57018 + return;
57019 +}
57020 +
57021 +int
57022 +gr_set_acls(const int type)
57023 +{
57024 + return 0;
57025 +}
57026 +
57027 +int
57028 +gr_check_hidden_task(const struct task_struct *tsk)
57029 +{
57030 + return 0;
57031 +}
57032 +
57033 +int
57034 +gr_check_protected_task(const struct task_struct *task)
57035 +{
57036 + return 0;
57037 +}
57038 +
57039 +int
57040 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57041 +{
57042 + return 0;
57043 +}
57044 +
57045 +void
57046 +gr_copy_label(struct task_struct *tsk)
57047 +{
57048 + return;
57049 +}
57050 +
57051 +void
57052 +gr_set_pax_flags(struct task_struct *task)
57053 +{
57054 + return;
57055 +}
57056 +
57057 +int
57058 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57059 + const int unsafe_share)
57060 +{
57061 + return 0;
57062 +}
57063 +
57064 +void
57065 +gr_handle_delete(const ino_t ino, const dev_t dev)
57066 +{
57067 + return;
57068 +}
57069 +
57070 +void
57071 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57072 +{
57073 + return;
57074 +}
57075 +
57076 +void
57077 +gr_handle_crash(struct task_struct *task, const int sig)
57078 +{
57079 + return;
57080 +}
57081 +
57082 +int
57083 +gr_check_crash_exec(const struct file *filp)
57084 +{
57085 + return 0;
57086 +}
57087 +
57088 +int
57089 +gr_check_crash_uid(const uid_t uid)
57090 +{
57091 + return 0;
57092 +}
57093 +
57094 +void
57095 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57096 + struct dentry *old_dentry,
57097 + struct dentry *new_dentry,
57098 + struct vfsmount *mnt, const __u8 replace)
57099 +{
57100 + return;
57101 +}
57102 +
57103 +int
57104 +gr_search_socket(const int family, const int type, const int protocol)
57105 +{
57106 + return 1;
57107 +}
57108 +
57109 +int
57110 +gr_search_connectbind(const int mode, const struct socket *sock,
57111 + const struct sockaddr_in *addr)
57112 +{
57113 + return 0;
57114 +}
57115 +
57116 +void
57117 +gr_handle_alertkill(struct task_struct *task)
57118 +{
57119 + return;
57120 +}
57121 +
57122 +__u32
57123 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57124 +{
57125 + return 1;
57126 +}
57127 +
57128 +__u32
57129 +gr_acl_handle_hidden_file(const struct dentry * dentry,
57130 + const struct vfsmount * mnt)
57131 +{
57132 + return 1;
57133 +}
57134 +
57135 +__u32
57136 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57137 + int acc_mode)
57138 +{
57139 + return 1;
57140 +}
57141 +
57142 +__u32
57143 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57144 +{
57145 + return 1;
57146 +}
57147 +
57148 +__u32
57149 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57150 +{
57151 + return 1;
57152 +}
57153 +
57154 +int
57155 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57156 + unsigned int *vm_flags)
57157 +{
57158 + return 1;
57159 +}
57160 +
57161 +__u32
57162 +gr_acl_handle_truncate(const struct dentry * dentry,
57163 + const struct vfsmount * mnt)
57164 +{
57165 + return 1;
57166 +}
57167 +
57168 +__u32
57169 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57170 +{
57171 + return 1;
57172 +}
57173 +
57174 +__u32
57175 +gr_acl_handle_access(const struct dentry * dentry,
57176 + const struct vfsmount * mnt, const int fmode)
57177 +{
57178 + return 1;
57179 +}
57180 +
57181 +__u32
57182 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
57183 + mode_t mode)
57184 +{
57185 + return 1;
57186 +}
57187 +
57188 +__u32
57189 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57190 + mode_t mode)
57191 +{
57192 + return 1;
57193 +}
57194 +
57195 +__u32
57196 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57197 +{
57198 + return 1;
57199 +}
57200 +
57201 +__u32
57202 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57203 +{
57204 + return 1;
57205 +}
57206 +
57207 +void
57208 +grsecurity_init(void)
57209 +{
57210 + return;
57211 +}
57212 +
57213 +__u32
57214 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57215 + const struct dentry * parent_dentry,
57216 + const struct vfsmount * parent_mnt,
57217 + const int mode)
57218 +{
57219 + return 1;
57220 +}
57221 +
57222 +__u32
57223 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57224 + const struct dentry * parent_dentry,
57225 + const struct vfsmount * parent_mnt)
57226 +{
57227 + return 1;
57228 +}
57229 +
57230 +__u32
57231 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57232 + const struct dentry * parent_dentry,
57233 + const struct vfsmount * parent_mnt, const char *from)
57234 +{
57235 + return 1;
57236 +}
57237 +
57238 +__u32
57239 +gr_acl_handle_link(const struct dentry * new_dentry,
57240 + const struct dentry * parent_dentry,
57241 + const struct vfsmount * parent_mnt,
57242 + const struct dentry * old_dentry,
57243 + const struct vfsmount * old_mnt, const char *to)
57244 +{
57245 + return 1;
57246 +}
57247 +
57248 +int
57249 +gr_acl_handle_rename(const struct dentry *new_dentry,
57250 + const struct dentry *parent_dentry,
57251 + const struct vfsmount *parent_mnt,
57252 + const struct dentry *old_dentry,
57253 + const struct inode *old_parent_inode,
57254 + const struct vfsmount *old_mnt, const char *newname)
57255 +{
57256 + return 0;
57257 +}
57258 +
57259 +int
57260 +gr_acl_handle_filldir(const struct file *file, const char *name,
57261 + const int namelen, const ino_t ino)
57262 +{
57263 + return 1;
57264 +}
57265 +
57266 +int
57267 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57268 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57269 +{
57270 + return 1;
57271 +}
57272 +
57273 +int
57274 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57275 +{
57276 + return 0;
57277 +}
57278 +
57279 +int
57280 +gr_search_accept(const struct socket *sock)
57281 +{
57282 + return 0;
57283 +}
57284 +
57285 +int
57286 +gr_search_listen(const struct socket *sock)
57287 +{
57288 + return 0;
57289 +}
57290 +
57291 +int
57292 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57293 +{
57294 + return 0;
57295 +}
57296 +
57297 +__u32
57298 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57299 +{
57300 + return 1;
57301 +}
57302 +
57303 +__u32
57304 +gr_acl_handle_creat(const struct dentry * dentry,
57305 + const struct dentry * p_dentry,
57306 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57307 + const int imode)
57308 +{
57309 + return 1;
57310 +}
57311 +
57312 +void
57313 +gr_acl_handle_exit(void)
57314 +{
57315 + return;
57316 +}
57317 +
57318 +int
57319 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57320 +{
57321 + return 1;
57322 +}
57323 +
57324 +void
57325 +gr_set_role_label(const uid_t uid, const gid_t gid)
57326 +{
57327 + return;
57328 +}
57329 +
57330 +int
57331 +gr_acl_handle_procpidmem(const struct task_struct *task)
57332 +{
57333 + return 0;
57334 +}
57335 +
57336 +int
57337 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57338 +{
57339 + return 0;
57340 +}
57341 +
57342 +int
57343 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57344 +{
57345 + return 0;
57346 +}
57347 +
57348 +void
57349 +gr_set_kernel_label(struct task_struct *task)
57350 +{
57351 + return;
57352 +}
57353 +
57354 +int
57355 +gr_check_user_change(int real, int effective, int fs)
57356 +{
57357 + return 0;
57358 +}
57359 +
57360 +int
57361 +gr_check_group_change(int real, int effective, int fs)
57362 +{
57363 + return 0;
57364 +}
57365 +
57366 +int gr_acl_enable_at_secure(void)
57367 +{
57368 + return 0;
57369 +}
57370 +
57371 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57372 +{
57373 + return dentry->d_inode->i_sb->s_dev;
57374 +}
57375 +
57376 +EXPORT_SYMBOL(gr_learn_resource);
57377 +EXPORT_SYMBOL(gr_set_kernel_label);
57378 +#ifdef CONFIG_SECURITY
57379 +EXPORT_SYMBOL(gr_check_user_change);
57380 +EXPORT_SYMBOL(gr_check_group_change);
57381 +#endif
57382 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57383 new file mode 100644
57384 index 0000000..2b05ada
57385 --- /dev/null
57386 +++ b/grsecurity/grsec_exec.c
57387 @@ -0,0 +1,146 @@
57388 +#include <linux/kernel.h>
57389 +#include <linux/sched.h>
57390 +#include <linux/file.h>
57391 +#include <linux/binfmts.h>
57392 +#include <linux/fs.h>
57393 +#include <linux/types.h>
57394 +#include <linux/grdefs.h>
57395 +#include <linux/grsecurity.h>
57396 +#include <linux/grinternal.h>
57397 +#include <linux/capability.h>
57398 +#include <linux/module.h>
57399 +
57400 +#include <asm/uaccess.h>
57401 +
57402 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57403 +static char gr_exec_arg_buf[132];
57404 +static DEFINE_MUTEX(gr_exec_arg_mutex);
57405 +#endif
57406 +
57407 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57408 +
57409 +void
57410 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57411 +{
57412 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57413 + char *grarg = gr_exec_arg_buf;
57414 + unsigned int i, x, execlen = 0;
57415 + char c;
57416 +
57417 + if (!((grsec_enable_execlog && grsec_enable_group &&
57418 + in_group_p(grsec_audit_gid))
57419 + || (grsec_enable_execlog && !grsec_enable_group)))
57420 + return;
57421 +
57422 + mutex_lock(&gr_exec_arg_mutex);
57423 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57424 +
57425 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57426 + const char __user *p;
57427 + unsigned int len;
57428 +
57429 + p = get_user_arg_ptr(argv, i);
57430 + if (IS_ERR(p))
57431 + goto log;
57432 +
57433 + len = strnlen_user(p, 128 - execlen);
57434 + if (len > 128 - execlen)
57435 + len = 128 - execlen;
57436 + else if (len > 0)
57437 + len--;
57438 + if (copy_from_user(grarg + execlen, p, len))
57439 + goto log;
57440 +
57441 + /* rewrite unprintable characters */
57442 + for (x = 0; x < len; x++) {
57443 + c = *(grarg + execlen + x);
57444 + if (c < 32 || c > 126)
57445 + *(grarg + execlen + x) = ' ';
57446 + }
57447 +
57448 + execlen += len;
57449 + *(grarg + execlen) = ' ';
57450 + *(grarg + execlen + 1) = '\0';
57451 + execlen++;
57452 + }
57453 +
57454 + log:
57455 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57456 + bprm->file->f_path.mnt, grarg);
57457 + mutex_unlock(&gr_exec_arg_mutex);
57458 +#endif
57459 + return;
57460 +}
57461 +
57462 +#ifdef CONFIG_GRKERNSEC
57463 +extern int gr_acl_is_capable(const int cap);
57464 +extern int gr_acl_is_capable_nolog(const int cap);
57465 +extern int gr_chroot_is_capable(const int cap);
57466 +extern int gr_chroot_is_capable_nolog(const int cap);
57467 +#endif
57468 +
57469 +const char *captab_log[] = {
57470 + "CAP_CHOWN",
57471 + "CAP_DAC_OVERRIDE",
57472 + "CAP_DAC_READ_SEARCH",
57473 + "CAP_FOWNER",
57474 + "CAP_FSETID",
57475 + "CAP_KILL",
57476 + "CAP_SETGID",
57477 + "CAP_SETUID",
57478 + "CAP_SETPCAP",
57479 + "CAP_LINUX_IMMUTABLE",
57480 + "CAP_NET_BIND_SERVICE",
57481 + "CAP_NET_BROADCAST",
57482 + "CAP_NET_ADMIN",
57483 + "CAP_NET_RAW",
57484 + "CAP_IPC_LOCK",
57485 + "CAP_IPC_OWNER",
57486 + "CAP_SYS_MODULE",
57487 + "CAP_SYS_RAWIO",
57488 + "CAP_SYS_CHROOT",
57489 + "CAP_SYS_PTRACE",
57490 + "CAP_SYS_PACCT",
57491 + "CAP_SYS_ADMIN",
57492 + "CAP_SYS_BOOT",
57493 + "CAP_SYS_NICE",
57494 + "CAP_SYS_RESOURCE",
57495 + "CAP_SYS_TIME",
57496 + "CAP_SYS_TTY_CONFIG",
57497 + "CAP_MKNOD",
57498 + "CAP_LEASE",
57499 + "CAP_AUDIT_WRITE",
57500 + "CAP_AUDIT_CONTROL",
57501 + "CAP_SETFCAP",
57502 + "CAP_MAC_OVERRIDE",
57503 + "CAP_MAC_ADMIN",
57504 + "CAP_SYSLOG",
57505 + "CAP_WAKE_ALARM"
57506 +};
57507 +
57508 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57509 +
57510 +int gr_is_capable(const int cap)
57511 +{
57512 +#ifdef CONFIG_GRKERNSEC
57513 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57514 + return 1;
57515 + return 0;
57516 +#else
57517 + return 1;
57518 +#endif
57519 +}
57520 +
57521 +int gr_is_capable_nolog(const int cap)
57522 +{
57523 +#ifdef CONFIG_GRKERNSEC
57524 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57525 + return 1;
57526 + return 0;
57527 +#else
57528 + return 1;
57529 +#endif
57530 +}
57531 +
57532 +EXPORT_SYMBOL(gr_is_capable);
57533 +EXPORT_SYMBOL(gr_is_capable_nolog);
57534 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57535 new file mode 100644
57536 index 0000000..d3ee748
57537 --- /dev/null
57538 +++ b/grsecurity/grsec_fifo.c
57539 @@ -0,0 +1,24 @@
57540 +#include <linux/kernel.h>
57541 +#include <linux/sched.h>
57542 +#include <linux/fs.h>
57543 +#include <linux/file.h>
57544 +#include <linux/grinternal.h>
57545 +
57546 +int
57547 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57548 + const struct dentry *dir, const int flag, const int acc_mode)
57549 +{
57550 +#ifdef CONFIG_GRKERNSEC_FIFO
57551 + const struct cred *cred = current_cred();
57552 +
57553 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57554 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57555 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57556 + (cred->fsuid != dentry->d_inode->i_uid)) {
57557 + if (!inode_permission(dentry->d_inode, acc_mode))
57558 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57559 + return -EACCES;
57560 + }
57561 +#endif
57562 + return 0;
57563 +}
57564 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57565 new file mode 100644
57566 index 0000000..8ca18bf
57567 --- /dev/null
57568 +++ b/grsecurity/grsec_fork.c
57569 @@ -0,0 +1,23 @@
57570 +#include <linux/kernel.h>
57571 +#include <linux/sched.h>
57572 +#include <linux/grsecurity.h>
57573 +#include <linux/grinternal.h>
57574 +#include <linux/errno.h>
57575 +
57576 +void
57577 +gr_log_forkfail(const int retval)
57578 +{
57579 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57580 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57581 + switch (retval) {
57582 + case -EAGAIN:
57583 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57584 + break;
57585 + case -ENOMEM:
57586 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57587 + break;
57588 + }
57589 + }
57590 +#endif
57591 + return;
57592 +}
57593 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57594 new file mode 100644
57595 index 0000000..356ef00
57596 --- /dev/null
57597 +++ b/grsecurity/grsec_init.c
57598 @@ -0,0 +1,269 @@
57599 +#include <linux/kernel.h>
57600 +#include <linux/sched.h>
57601 +#include <linux/mm.h>
57602 +#include <linux/gracl.h>
57603 +#include <linux/slab.h>
57604 +#include <linux/vmalloc.h>
57605 +#include <linux/percpu.h>
57606 +#include <linux/module.h>
57607 +
57608 +int grsec_enable_brute;
57609 +int grsec_enable_link;
57610 +int grsec_enable_dmesg;
57611 +int grsec_enable_harden_ptrace;
57612 +int grsec_enable_fifo;
57613 +int grsec_enable_execlog;
57614 +int grsec_enable_signal;
57615 +int grsec_enable_forkfail;
57616 +int grsec_enable_audit_ptrace;
57617 +int grsec_enable_time;
57618 +int grsec_enable_audit_textrel;
57619 +int grsec_enable_group;
57620 +int grsec_audit_gid;
57621 +int grsec_enable_chdir;
57622 +int grsec_enable_mount;
57623 +int grsec_enable_rofs;
57624 +int grsec_enable_chroot_findtask;
57625 +int grsec_enable_chroot_mount;
57626 +int grsec_enable_chroot_shmat;
57627 +int grsec_enable_chroot_fchdir;
57628 +int grsec_enable_chroot_double;
57629 +int grsec_enable_chroot_pivot;
57630 +int grsec_enable_chroot_chdir;
57631 +int grsec_enable_chroot_chmod;
57632 +int grsec_enable_chroot_mknod;
57633 +int grsec_enable_chroot_nice;
57634 +int grsec_enable_chroot_execlog;
57635 +int grsec_enable_chroot_caps;
57636 +int grsec_enable_chroot_sysctl;
57637 +int grsec_enable_chroot_unix;
57638 +int grsec_enable_tpe;
57639 +int grsec_tpe_gid;
57640 +int grsec_enable_blackhole;
57641 +#ifdef CONFIG_IPV6_MODULE
57642 +EXPORT_SYMBOL(grsec_enable_blackhole);
57643 +#endif
57644 +int grsec_lastack_retries;
57645 +int grsec_enable_tpe_all;
57646 +int grsec_enable_tpe_invert;
57647 +int grsec_enable_socket_all;
57648 +int grsec_socket_all_gid;
57649 +int grsec_enable_socket_client;
57650 +int grsec_socket_client_gid;
57651 +int grsec_enable_socket_server;
57652 +int grsec_socket_server_gid;
57653 +int grsec_resource_logging;
57654 +int grsec_disable_privio;
57655 +int grsec_enable_log_rwxmaps;
57656 +int grsec_lock;
57657 +
57658 +DEFINE_SPINLOCK(grsec_alert_lock);
57659 +unsigned long grsec_alert_wtime = 0;
57660 +unsigned long grsec_alert_fyet = 0;
57661 +
57662 +DEFINE_SPINLOCK(grsec_audit_lock);
57663 +
57664 +DEFINE_RWLOCK(grsec_exec_file_lock);
57665 +
57666 +char *gr_shared_page[4];
57667 +
57668 +char *gr_alert_log_fmt;
57669 +char *gr_audit_log_fmt;
57670 +char *gr_alert_log_buf;
57671 +char *gr_audit_log_buf;
57672 +
57673 +extern struct gr_arg *gr_usermode;
57674 +extern unsigned char *gr_system_salt;
57675 +extern unsigned char *gr_system_sum;
57676 +
57677 +void __init
57678 +grsecurity_init(void)
57679 +{
57680 + int j;
57681 + /* create the per-cpu shared pages */
57682 +
57683 +#ifdef CONFIG_X86
57684 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57685 +#endif
57686 +
57687 + for (j = 0; j < 4; j++) {
57688 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57689 + if (gr_shared_page[j] == NULL) {
57690 + panic("Unable to allocate grsecurity shared page");
57691 + return;
57692 + }
57693 + }
57694 +
57695 + /* allocate log buffers */
57696 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57697 + if (!gr_alert_log_fmt) {
57698 + panic("Unable to allocate grsecurity alert log format buffer");
57699 + return;
57700 + }
57701 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57702 + if (!gr_audit_log_fmt) {
57703 + panic("Unable to allocate grsecurity audit log format buffer");
57704 + return;
57705 + }
57706 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57707 + if (!gr_alert_log_buf) {
57708 + panic("Unable to allocate grsecurity alert log buffer");
57709 + return;
57710 + }
57711 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57712 + if (!gr_audit_log_buf) {
57713 + panic("Unable to allocate grsecurity audit log buffer");
57714 + return;
57715 + }
57716 +
57717 + /* allocate memory for authentication structure */
57718 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57719 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57720 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57721 +
57722 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57723 + panic("Unable to allocate grsecurity authentication structure");
57724 + return;
57725 + }
57726 +
57727 +
57728 +#ifdef CONFIG_GRKERNSEC_IO
57729 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57730 + grsec_disable_privio = 1;
57731 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57732 + grsec_disable_privio = 1;
57733 +#else
57734 + grsec_disable_privio = 0;
57735 +#endif
57736 +#endif
57737 +
57738 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57739 + /* for backward compatibility, tpe_invert always defaults to on if
57740 + enabled in the kernel
57741 + */
57742 + grsec_enable_tpe_invert = 1;
57743 +#endif
57744 +
57745 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57746 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57747 + grsec_lock = 1;
57748 +#endif
57749 +
57750 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57751 + grsec_enable_audit_textrel = 1;
57752 +#endif
57753 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57754 + grsec_enable_log_rwxmaps = 1;
57755 +#endif
57756 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57757 + grsec_enable_group = 1;
57758 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57759 +#endif
57760 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57761 + grsec_enable_chdir = 1;
57762 +#endif
57763 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57764 + grsec_enable_harden_ptrace = 1;
57765 +#endif
57766 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57767 + grsec_enable_mount = 1;
57768 +#endif
57769 +#ifdef CONFIG_GRKERNSEC_LINK
57770 + grsec_enable_link = 1;
57771 +#endif
57772 +#ifdef CONFIG_GRKERNSEC_BRUTE
57773 + grsec_enable_brute = 1;
57774 +#endif
57775 +#ifdef CONFIG_GRKERNSEC_DMESG
57776 + grsec_enable_dmesg = 1;
57777 +#endif
57778 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57779 + grsec_enable_blackhole = 1;
57780 + grsec_lastack_retries = 4;
57781 +#endif
57782 +#ifdef CONFIG_GRKERNSEC_FIFO
57783 + grsec_enable_fifo = 1;
57784 +#endif
57785 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57786 + grsec_enable_execlog = 1;
57787 +#endif
57788 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57789 + grsec_enable_signal = 1;
57790 +#endif
57791 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57792 + grsec_enable_forkfail = 1;
57793 +#endif
57794 +#ifdef CONFIG_GRKERNSEC_TIME
57795 + grsec_enable_time = 1;
57796 +#endif
57797 +#ifdef CONFIG_GRKERNSEC_RESLOG
57798 + grsec_resource_logging = 1;
57799 +#endif
57800 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57801 + grsec_enable_chroot_findtask = 1;
57802 +#endif
57803 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57804 + grsec_enable_chroot_unix = 1;
57805 +#endif
57806 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57807 + grsec_enable_chroot_mount = 1;
57808 +#endif
57809 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57810 + grsec_enable_chroot_fchdir = 1;
57811 +#endif
57812 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57813 + grsec_enable_chroot_shmat = 1;
57814 +#endif
57815 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57816 + grsec_enable_audit_ptrace = 1;
57817 +#endif
57818 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57819 + grsec_enable_chroot_double = 1;
57820 +#endif
57821 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57822 + grsec_enable_chroot_pivot = 1;
57823 +#endif
57824 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57825 + grsec_enable_chroot_chdir = 1;
57826 +#endif
57827 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57828 + grsec_enable_chroot_chmod = 1;
57829 +#endif
57830 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57831 + grsec_enable_chroot_mknod = 1;
57832 +#endif
57833 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57834 + grsec_enable_chroot_nice = 1;
57835 +#endif
57836 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57837 + grsec_enable_chroot_execlog = 1;
57838 +#endif
57839 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57840 + grsec_enable_chroot_caps = 1;
57841 +#endif
57842 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57843 + grsec_enable_chroot_sysctl = 1;
57844 +#endif
57845 +#ifdef CONFIG_GRKERNSEC_TPE
57846 + grsec_enable_tpe = 1;
57847 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57848 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57849 + grsec_enable_tpe_all = 1;
57850 +#endif
57851 +#endif
57852 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57853 + grsec_enable_socket_all = 1;
57854 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57855 +#endif
57856 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57857 + grsec_enable_socket_client = 1;
57858 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57859 +#endif
57860 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57861 + grsec_enable_socket_server = 1;
57862 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57863 +#endif
57864 +#endif
57865 +
57866 + return;
57867 +}
57868 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57869 new file mode 100644
57870 index 0000000..3efe141
57871 --- /dev/null
57872 +++ b/grsecurity/grsec_link.c
57873 @@ -0,0 +1,43 @@
57874 +#include <linux/kernel.h>
57875 +#include <linux/sched.h>
57876 +#include <linux/fs.h>
57877 +#include <linux/file.h>
57878 +#include <linux/grinternal.h>
57879 +
57880 +int
57881 +gr_handle_follow_link(const struct inode *parent,
57882 + const struct inode *inode,
57883 + const struct dentry *dentry, const struct vfsmount *mnt)
57884 +{
57885 +#ifdef CONFIG_GRKERNSEC_LINK
57886 + const struct cred *cred = current_cred();
57887 +
57888 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57889 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57890 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57891 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57892 + return -EACCES;
57893 + }
57894 +#endif
57895 + return 0;
57896 +}
57897 +
57898 +int
57899 +gr_handle_hardlink(const struct dentry *dentry,
57900 + const struct vfsmount *mnt,
57901 + struct inode *inode, const int mode, const char *to)
57902 +{
57903 +#ifdef CONFIG_GRKERNSEC_LINK
57904 + const struct cred *cred = current_cred();
57905 +
57906 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57907 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57908 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57909 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57910 + !capable(CAP_FOWNER) && cred->uid) {
57911 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57912 + return -EPERM;
57913 + }
57914 +#endif
57915 + return 0;
57916 +}
57917 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57918 new file mode 100644
57919 index 0000000..a45d2e9
57920 --- /dev/null
57921 +++ b/grsecurity/grsec_log.c
57922 @@ -0,0 +1,322 @@
57923 +#include <linux/kernel.h>
57924 +#include <linux/sched.h>
57925 +#include <linux/file.h>
57926 +#include <linux/tty.h>
57927 +#include <linux/fs.h>
57928 +#include <linux/grinternal.h>
57929 +
57930 +#ifdef CONFIG_TREE_PREEMPT_RCU
57931 +#define DISABLE_PREEMPT() preempt_disable()
57932 +#define ENABLE_PREEMPT() preempt_enable()
57933 +#else
57934 +#define DISABLE_PREEMPT()
57935 +#define ENABLE_PREEMPT()
57936 +#endif
57937 +
57938 +#define BEGIN_LOCKS(x) \
57939 + DISABLE_PREEMPT(); \
57940 + rcu_read_lock(); \
57941 + read_lock(&tasklist_lock); \
57942 + read_lock(&grsec_exec_file_lock); \
57943 + if (x != GR_DO_AUDIT) \
57944 + spin_lock(&grsec_alert_lock); \
57945 + else \
57946 + spin_lock(&grsec_audit_lock)
57947 +
57948 +#define END_LOCKS(x) \
57949 + if (x != GR_DO_AUDIT) \
57950 + spin_unlock(&grsec_alert_lock); \
57951 + else \
57952 + spin_unlock(&grsec_audit_lock); \
57953 + read_unlock(&grsec_exec_file_lock); \
57954 + read_unlock(&tasklist_lock); \
57955 + rcu_read_unlock(); \
57956 + ENABLE_PREEMPT(); \
57957 + if (x == GR_DONT_AUDIT) \
57958 + gr_handle_alertkill(current)
57959 +
57960 +enum {
57961 + FLOODING,
57962 + NO_FLOODING
57963 +};
57964 +
57965 +extern char *gr_alert_log_fmt;
57966 +extern char *gr_audit_log_fmt;
57967 +extern char *gr_alert_log_buf;
57968 +extern char *gr_audit_log_buf;
57969 +
57970 +static int gr_log_start(int audit)
57971 +{
57972 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57973 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57974 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57975 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57976 + unsigned long curr_secs = get_seconds();
57977 +
57978 + if (audit == GR_DO_AUDIT)
57979 + goto set_fmt;
57980 +
57981 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57982 + grsec_alert_wtime = curr_secs;
57983 + grsec_alert_fyet = 0;
57984 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57985 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57986 + grsec_alert_fyet++;
57987 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57988 + grsec_alert_wtime = curr_secs;
57989 + grsec_alert_fyet++;
57990 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57991 + return FLOODING;
57992 + }
57993 + else return FLOODING;
57994 +
57995 +set_fmt:
57996 +#endif
57997 + memset(buf, 0, PAGE_SIZE);
57998 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57999 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58000 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58001 + } else if (current->signal->curr_ip) {
58002 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58003 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58004 + } else if (gr_acl_is_enabled()) {
58005 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58006 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58007 + } else {
58008 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
58009 + strcpy(buf, fmt);
58010 + }
58011 +
58012 + return NO_FLOODING;
58013 +}
58014 +
58015 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58016 + __attribute__ ((format (printf, 2, 0)));
58017 +
58018 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58019 +{
58020 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58021 + unsigned int len = strlen(buf);
58022 +
58023 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58024 +
58025 + return;
58026 +}
58027 +
58028 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58029 + __attribute__ ((format (printf, 2, 3)));
58030 +
58031 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58032 +{
58033 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58034 + unsigned int len = strlen(buf);
58035 + va_list ap;
58036 +
58037 + va_start(ap, msg);
58038 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58039 + va_end(ap);
58040 +
58041 + return;
58042 +}
58043 +
58044 +static void gr_log_end(int audit, int append_default)
58045 +{
58046 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58047 +
58048 + if (append_default) {
58049 + unsigned int len = strlen(buf);
58050 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58051 + }
58052 +
58053 + printk("%s\n", buf);
58054 +
58055 + return;
58056 +}
58057 +
58058 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58059 +{
58060 + int logtype;
58061 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58062 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58063 + void *voidptr = NULL;
58064 + int num1 = 0, num2 = 0;
58065 + unsigned long ulong1 = 0, ulong2 = 0;
58066 + struct dentry *dentry = NULL;
58067 + struct vfsmount *mnt = NULL;
58068 + struct file *file = NULL;
58069 + struct task_struct *task = NULL;
58070 + const struct cred *cred, *pcred;
58071 + va_list ap;
58072 +
58073 + BEGIN_LOCKS(audit);
58074 + logtype = gr_log_start(audit);
58075 + if (logtype == FLOODING) {
58076 + END_LOCKS(audit);
58077 + return;
58078 + }
58079 + va_start(ap, argtypes);
58080 + switch (argtypes) {
58081 + case GR_TTYSNIFF:
58082 + task = va_arg(ap, struct task_struct *);
58083 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58084 + break;
58085 + case GR_SYSCTL_HIDDEN:
58086 + str1 = va_arg(ap, char *);
58087 + gr_log_middle_varargs(audit, msg, result, str1);
58088 + break;
58089 + case GR_RBAC:
58090 + dentry = va_arg(ap, struct dentry *);
58091 + mnt = va_arg(ap, struct vfsmount *);
58092 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58093 + break;
58094 + case GR_RBAC_STR:
58095 + dentry = va_arg(ap, struct dentry *);
58096 + mnt = va_arg(ap, struct vfsmount *);
58097 + str1 = va_arg(ap, char *);
58098 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58099 + break;
58100 + case GR_STR_RBAC:
58101 + str1 = va_arg(ap, char *);
58102 + dentry = va_arg(ap, struct dentry *);
58103 + mnt = va_arg(ap, struct vfsmount *);
58104 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58105 + break;
58106 + case GR_RBAC_MODE2:
58107 + dentry = va_arg(ap, struct dentry *);
58108 + mnt = va_arg(ap, struct vfsmount *);
58109 + str1 = va_arg(ap, char *);
58110 + str2 = va_arg(ap, char *);
58111 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58112 + break;
58113 + case GR_RBAC_MODE3:
58114 + dentry = va_arg(ap, struct dentry *);
58115 + mnt = va_arg(ap, struct vfsmount *);
58116 + str1 = va_arg(ap, char *);
58117 + str2 = va_arg(ap, char *);
58118 + str3 = va_arg(ap, char *);
58119 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58120 + break;
58121 + case GR_FILENAME:
58122 + dentry = va_arg(ap, struct dentry *);
58123 + mnt = va_arg(ap, struct vfsmount *);
58124 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58125 + break;
58126 + case GR_STR_FILENAME:
58127 + str1 = va_arg(ap, char *);
58128 + dentry = va_arg(ap, struct dentry *);
58129 + mnt = va_arg(ap, struct vfsmount *);
58130 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58131 + break;
58132 + case GR_FILENAME_STR:
58133 + dentry = va_arg(ap, struct dentry *);
58134 + mnt = va_arg(ap, struct vfsmount *);
58135 + str1 = va_arg(ap, char *);
58136 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58137 + break;
58138 + case GR_FILENAME_TWO_INT:
58139 + dentry = va_arg(ap, struct dentry *);
58140 + mnt = va_arg(ap, struct vfsmount *);
58141 + num1 = va_arg(ap, int);
58142 + num2 = va_arg(ap, int);
58143 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58144 + break;
58145 + case GR_FILENAME_TWO_INT_STR:
58146 + dentry = va_arg(ap, struct dentry *);
58147 + mnt = va_arg(ap, struct vfsmount *);
58148 + num1 = va_arg(ap, int);
58149 + num2 = va_arg(ap, int);
58150 + str1 = va_arg(ap, char *);
58151 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58152 + break;
58153 + case GR_TEXTREL:
58154 + file = va_arg(ap, struct file *);
58155 + ulong1 = va_arg(ap, unsigned long);
58156 + ulong2 = va_arg(ap, unsigned long);
58157 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58158 + break;
58159 + case GR_PTRACE:
58160 + task = va_arg(ap, struct task_struct *);
58161 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58162 + break;
58163 + case GR_RESOURCE:
58164 + task = va_arg(ap, struct task_struct *);
58165 + cred = __task_cred(task);
58166 + pcred = __task_cred(task->real_parent);
58167 + ulong1 = va_arg(ap, unsigned long);
58168 + str1 = va_arg(ap, char *);
58169 + ulong2 = va_arg(ap, unsigned long);
58170 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58171 + break;
58172 + case GR_CAP:
58173 + task = va_arg(ap, struct task_struct *);
58174 + cred = __task_cred(task);
58175 + pcred = __task_cred(task->real_parent);
58176 + str1 = va_arg(ap, char *);
58177 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58178 + break;
58179 + case GR_SIG:
58180 + str1 = va_arg(ap, char *);
58181 + voidptr = va_arg(ap, void *);
58182 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58183 + break;
58184 + case GR_SIG2:
58185 + task = va_arg(ap, struct task_struct *);
58186 + cred = __task_cred(task);
58187 + pcred = __task_cred(task->real_parent);
58188 + num1 = va_arg(ap, int);
58189 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58190 + break;
58191 + case GR_CRASH1:
58192 + task = va_arg(ap, struct task_struct *);
58193 + cred = __task_cred(task);
58194 + pcred = __task_cred(task->real_parent);
58195 + ulong1 = va_arg(ap, unsigned long);
58196 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58197 + break;
58198 + case GR_CRASH2:
58199 + task = va_arg(ap, struct task_struct *);
58200 + cred = __task_cred(task);
58201 + pcred = __task_cred(task->real_parent);
58202 + ulong1 = va_arg(ap, unsigned long);
58203 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58204 + break;
58205 + case GR_RWXMAP:
58206 + file = va_arg(ap, struct file *);
58207 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58208 + break;
58209 + case GR_PSACCT:
58210 + {
58211 + unsigned int wday, cday;
58212 + __u8 whr, chr;
58213 + __u8 wmin, cmin;
58214 + __u8 wsec, csec;
58215 + char cur_tty[64] = { 0 };
58216 + char parent_tty[64] = { 0 };
58217 +
58218 + task = va_arg(ap, struct task_struct *);
58219 + wday = va_arg(ap, unsigned int);
58220 + cday = va_arg(ap, unsigned int);
58221 + whr = va_arg(ap, int);
58222 + chr = va_arg(ap, int);
58223 + wmin = va_arg(ap, int);
58224 + cmin = va_arg(ap, int);
58225 + wsec = va_arg(ap, int);
58226 + csec = va_arg(ap, int);
58227 + ulong1 = va_arg(ap, unsigned long);
58228 + cred = __task_cred(task);
58229 + pcred = __task_cred(task->real_parent);
58230 +
58231 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58232 + }
58233 + break;
58234 + default:
58235 + gr_log_middle(audit, msg, ap);
58236 + }
58237 + va_end(ap);
58238 + // these don't need DEFAULTSECARGS printed on the end
58239 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58240 + gr_log_end(audit, 0);
58241 + else
58242 + gr_log_end(audit, 1);
58243 + END_LOCKS(audit);
58244 +}
58245 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58246 new file mode 100644
58247 index 0000000..6c0416b
58248 --- /dev/null
58249 +++ b/grsecurity/grsec_mem.c
58250 @@ -0,0 +1,33 @@
58251 +#include <linux/kernel.h>
58252 +#include <linux/sched.h>
58253 +#include <linux/mm.h>
58254 +#include <linux/mman.h>
58255 +#include <linux/grinternal.h>
58256 +
58257 +void
58258 +gr_handle_ioperm(void)
58259 +{
58260 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58261 + return;
58262 +}
58263 +
58264 +void
58265 +gr_handle_iopl(void)
58266 +{
58267 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58268 + return;
58269 +}
58270 +
58271 +void
58272 +gr_handle_mem_readwrite(u64 from, u64 to)
58273 +{
58274 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58275 + return;
58276 +}
58277 +
58278 +void
58279 +gr_handle_vm86(void)
58280 +{
58281 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58282 + return;
58283 +}
58284 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58285 new file mode 100644
58286 index 0000000..2131422
58287 --- /dev/null
58288 +++ b/grsecurity/grsec_mount.c
58289 @@ -0,0 +1,62 @@
58290 +#include <linux/kernel.h>
58291 +#include <linux/sched.h>
58292 +#include <linux/mount.h>
58293 +#include <linux/grsecurity.h>
58294 +#include <linux/grinternal.h>
58295 +
58296 +void
58297 +gr_log_remount(const char *devname, const int retval)
58298 +{
58299 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58300 + if (grsec_enable_mount && (retval >= 0))
58301 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58302 +#endif
58303 + return;
58304 +}
58305 +
58306 +void
58307 +gr_log_unmount(const char *devname, const int retval)
58308 +{
58309 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58310 + if (grsec_enable_mount && (retval >= 0))
58311 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58312 +#endif
58313 + return;
58314 +}
58315 +
58316 +void
58317 +gr_log_mount(const char *from, const char *to, const int retval)
58318 +{
58319 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58320 + if (grsec_enable_mount && (retval >= 0))
58321 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58322 +#endif
58323 + return;
58324 +}
58325 +
58326 +int
58327 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58328 +{
58329 +#ifdef CONFIG_GRKERNSEC_ROFS
58330 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58331 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58332 + return -EPERM;
58333 + } else
58334 + return 0;
58335 +#endif
58336 + return 0;
58337 +}
58338 +
58339 +int
58340 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58341 +{
58342 +#ifdef CONFIG_GRKERNSEC_ROFS
58343 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58344 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58345 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58346 + return -EPERM;
58347 + } else
58348 + return 0;
58349 +#endif
58350 + return 0;
58351 +}
58352 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58353 new file mode 100644
58354 index 0000000..a3b12a0
58355 --- /dev/null
58356 +++ b/grsecurity/grsec_pax.c
58357 @@ -0,0 +1,36 @@
58358 +#include <linux/kernel.h>
58359 +#include <linux/sched.h>
58360 +#include <linux/mm.h>
58361 +#include <linux/file.h>
58362 +#include <linux/grinternal.h>
58363 +#include <linux/grsecurity.h>
58364 +
58365 +void
58366 +gr_log_textrel(struct vm_area_struct * vma)
58367 +{
58368 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58369 + if (grsec_enable_audit_textrel)
58370 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58371 +#endif
58372 + return;
58373 +}
58374 +
58375 +void
58376 +gr_log_rwxmmap(struct file *file)
58377 +{
58378 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58379 + if (grsec_enable_log_rwxmaps)
58380 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58381 +#endif
58382 + return;
58383 +}
58384 +
58385 +void
58386 +gr_log_rwxmprotect(struct file *file)
58387 +{
58388 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58389 + if (grsec_enable_log_rwxmaps)
58390 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58391 +#endif
58392 + return;
58393 +}
58394 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58395 new file mode 100644
58396 index 0000000..472c1d6
58397 --- /dev/null
58398 +++ b/grsecurity/grsec_ptrace.c
58399 @@ -0,0 +1,14 @@
58400 +#include <linux/kernel.h>
58401 +#include <linux/sched.h>
58402 +#include <linux/grinternal.h>
58403 +#include <linux/grsecurity.h>
58404 +
58405 +void
58406 +gr_audit_ptrace(struct task_struct *task)
58407 +{
58408 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58409 + if (grsec_enable_audit_ptrace)
58410 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58411 +#endif
58412 + return;
58413 +}
58414 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58415 new file mode 100644
58416 index 0000000..cf090b3
58417 --- /dev/null
58418 +++ b/grsecurity/grsec_sig.c
58419 @@ -0,0 +1,206 @@
58420 +#include <linux/kernel.h>
58421 +#include <linux/sched.h>
58422 +#include <linux/delay.h>
58423 +#include <linux/grsecurity.h>
58424 +#include <linux/grinternal.h>
58425 +#include <linux/hardirq.h>
58426 +
58427 +char *signames[] = {
58428 + [SIGSEGV] = "Segmentation fault",
58429 + [SIGILL] = "Illegal instruction",
58430 + [SIGABRT] = "Abort",
58431 + [SIGBUS] = "Invalid alignment/Bus error"
58432 +};
58433 +
58434 +void
58435 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58436 +{
58437 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58438 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58439 + (sig == SIGABRT) || (sig == SIGBUS))) {
58440 + if (t->pid == current->pid) {
58441 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58442 + } else {
58443 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58444 + }
58445 + }
58446 +#endif
58447 + return;
58448 +}
58449 +
58450 +int
58451 +gr_handle_signal(const struct task_struct *p, const int sig)
58452 +{
58453 +#ifdef CONFIG_GRKERNSEC
58454 + if (current->pid > 1 && gr_check_protected_task(p)) {
58455 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58456 + return -EPERM;
58457 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58458 + return -EPERM;
58459 + }
58460 +#endif
58461 + return 0;
58462 +}
58463 +
58464 +#ifdef CONFIG_GRKERNSEC
58465 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58466 +
58467 +int gr_fake_force_sig(int sig, struct task_struct *t)
58468 +{
58469 + unsigned long int flags;
58470 + int ret, blocked, ignored;
58471 + struct k_sigaction *action;
58472 +
58473 + spin_lock_irqsave(&t->sighand->siglock, flags);
58474 + action = &t->sighand->action[sig-1];
58475 + ignored = action->sa.sa_handler == SIG_IGN;
58476 + blocked = sigismember(&t->blocked, sig);
58477 + if (blocked || ignored) {
58478 + action->sa.sa_handler = SIG_DFL;
58479 + if (blocked) {
58480 + sigdelset(&t->blocked, sig);
58481 + recalc_sigpending_and_wake(t);
58482 + }
58483 + }
58484 + if (action->sa.sa_handler == SIG_DFL)
58485 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58486 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58487 +
58488 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58489 +
58490 + return ret;
58491 +}
58492 +#endif
58493 +
58494 +#ifdef CONFIG_GRKERNSEC_BRUTE
58495 +#define GR_USER_BAN_TIME (15 * 60)
58496 +
58497 +static int __get_dumpable(unsigned long mm_flags)
58498 +{
58499 + int ret;
58500 +
58501 + ret = mm_flags & MMF_DUMPABLE_MASK;
58502 + return (ret >= 2) ? 2 : ret;
58503 +}
58504 +#endif
58505 +
58506 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58507 +{
58508 +#ifdef CONFIG_GRKERNSEC_BRUTE
58509 + uid_t uid = 0;
58510 +
58511 + if (!grsec_enable_brute)
58512 + return;
58513 +
58514 + rcu_read_lock();
58515 + read_lock(&tasklist_lock);
58516 + read_lock(&grsec_exec_file_lock);
58517 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58518 + p->real_parent->brute = 1;
58519 + else {
58520 + const struct cred *cred = __task_cred(p), *cred2;
58521 + struct task_struct *tsk, *tsk2;
58522 +
58523 + if (!__get_dumpable(mm_flags) && cred->uid) {
58524 + struct user_struct *user;
58525 +
58526 + uid = cred->uid;
58527 +
58528 + /* this is put upon execution past expiration */
58529 + user = find_user(uid);
58530 + if (user == NULL)
58531 + goto unlock;
58532 + user->banned = 1;
58533 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58534 + if (user->ban_expires == ~0UL)
58535 + user->ban_expires--;
58536 +
58537 + do_each_thread(tsk2, tsk) {
58538 + cred2 = __task_cred(tsk);
58539 + if (tsk != p && cred2->uid == uid)
58540 + gr_fake_force_sig(SIGKILL, tsk);
58541 + } while_each_thread(tsk2, tsk);
58542 + }
58543 + }
58544 +unlock:
58545 + read_unlock(&grsec_exec_file_lock);
58546 + read_unlock(&tasklist_lock);
58547 + rcu_read_unlock();
58548 +
58549 + if (uid)
58550 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58551 +
58552 +#endif
58553 + return;
58554 +}
58555 +
58556 +void gr_handle_brute_check(void)
58557 +{
58558 +#ifdef CONFIG_GRKERNSEC_BRUTE
58559 + if (current->brute)
58560 + msleep(30 * 1000);
58561 +#endif
58562 + return;
58563 +}
58564 +
58565 +void gr_handle_kernel_exploit(void)
58566 +{
58567 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58568 + const struct cred *cred;
58569 + struct task_struct *tsk, *tsk2;
58570 + struct user_struct *user;
58571 + uid_t uid;
58572 +
58573 + if (in_irq() || in_serving_softirq() || in_nmi())
58574 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58575 +
58576 + uid = current_uid();
58577 +
58578 + if (uid == 0)
58579 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
58580 + else {
58581 + /* kill all the processes of this user, hold a reference
58582 + to their creds struct, and prevent them from creating
58583 + another process until system reset
58584 + */
58585 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58586 + /* we intentionally leak this ref */
58587 + user = get_uid(current->cred->user);
58588 + if (user) {
58589 + user->banned = 1;
58590 + user->ban_expires = ~0UL;
58591 + }
58592 +
58593 + read_lock(&tasklist_lock);
58594 + do_each_thread(tsk2, tsk) {
58595 + cred = __task_cred(tsk);
58596 + if (cred->uid == uid)
58597 + gr_fake_force_sig(SIGKILL, tsk);
58598 + } while_each_thread(tsk2, tsk);
58599 + read_unlock(&tasklist_lock);
58600 + }
58601 +#endif
58602 +}
58603 +
58604 +int __gr_process_user_ban(struct user_struct *user)
58605 +{
58606 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58607 + if (unlikely(user->banned)) {
58608 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58609 + user->banned = 0;
58610 + user->ban_expires = 0;
58611 + free_uid(user);
58612 + } else
58613 + return -EPERM;
58614 + }
58615 +#endif
58616 + return 0;
58617 +}
58618 +
58619 +int gr_process_user_ban(void)
58620 +{
58621 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58622 + return __gr_process_user_ban(current->cred->user);
58623 +#endif
58624 + return 0;
58625 +}
58626 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58627 new file mode 100644
58628 index 0000000..4030d57
58629 --- /dev/null
58630 +++ b/grsecurity/grsec_sock.c
58631 @@ -0,0 +1,244 @@
58632 +#include <linux/kernel.h>
58633 +#include <linux/module.h>
58634 +#include <linux/sched.h>
58635 +#include <linux/file.h>
58636 +#include <linux/net.h>
58637 +#include <linux/in.h>
58638 +#include <linux/ip.h>
58639 +#include <net/sock.h>
58640 +#include <net/inet_sock.h>
58641 +#include <linux/grsecurity.h>
58642 +#include <linux/grinternal.h>
58643 +#include <linux/gracl.h>
58644 +
58645 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58646 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58647 +
58648 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
58649 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
58650 +
58651 +#ifdef CONFIG_UNIX_MODULE
58652 +EXPORT_SYMBOL(gr_acl_handle_unix);
58653 +EXPORT_SYMBOL(gr_acl_handle_mknod);
58654 +EXPORT_SYMBOL(gr_handle_chroot_unix);
58655 +EXPORT_SYMBOL(gr_handle_create);
58656 +#endif
58657 +
58658 +#ifdef CONFIG_GRKERNSEC
58659 +#define gr_conn_table_size 32749
58660 +struct conn_table_entry {
58661 + struct conn_table_entry *next;
58662 + struct signal_struct *sig;
58663 +};
58664 +
58665 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58666 +DEFINE_SPINLOCK(gr_conn_table_lock);
58667 +
58668 +extern const char * gr_socktype_to_name(unsigned char type);
58669 +extern const char * gr_proto_to_name(unsigned char proto);
58670 +extern const char * gr_sockfamily_to_name(unsigned char family);
58671 +
58672 +static __inline__ int
58673 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58674 +{
58675 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58676 +}
58677 +
58678 +static __inline__ int
58679 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58680 + __u16 sport, __u16 dport)
58681 +{
58682 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58683 + sig->gr_sport == sport && sig->gr_dport == dport))
58684 + return 1;
58685 + else
58686 + return 0;
58687 +}
58688 +
58689 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58690 +{
58691 + struct conn_table_entry **match;
58692 + unsigned int index;
58693 +
58694 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58695 + sig->gr_sport, sig->gr_dport,
58696 + gr_conn_table_size);
58697 +
58698 + newent->sig = sig;
58699 +
58700 + match = &gr_conn_table[index];
58701 + newent->next = *match;
58702 + *match = newent;
58703 +
58704 + return;
58705 +}
58706 +
58707 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58708 +{
58709 + struct conn_table_entry *match, *last = NULL;
58710 + unsigned int index;
58711 +
58712 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58713 + sig->gr_sport, sig->gr_dport,
58714 + gr_conn_table_size);
58715 +
58716 + match = gr_conn_table[index];
58717 + while (match && !conn_match(match->sig,
58718 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58719 + sig->gr_dport)) {
58720 + last = match;
58721 + match = match->next;
58722 + }
58723 +
58724 + if (match) {
58725 + if (last)
58726 + last->next = match->next;
58727 + else
58728 + gr_conn_table[index] = NULL;
58729 + kfree(match);
58730 + }
58731 +
58732 + return;
58733 +}
58734 +
58735 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58736 + __u16 sport, __u16 dport)
58737 +{
58738 + struct conn_table_entry *match;
58739 + unsigned int index;
58740 +
58741 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58742 +
58743 + match = gr_conn_table[index];
58744 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58745 + match = match->next;
58746 +
58747 + if (match)
58748 + return match->sig;
58749 + else
58750 + return NULL;
58751 +}
58752 +
58753 +#endif
58754 +
58755 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58756 +{
58757 +#ifdef CONFIG_GRKERNSEC
58758 + struct signal_struct *sig = task->signal;
58759 + struct conn_table_entry *newent;
58760 +
58761 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58762 + if (newent == NULL)
58763 + return;
58764 + /* no bh lock needed since we are called with bh disabled */
58765 + spin_lock(&gr_conn_table_lock);
58766 + gr_del_task_from_ip_table_nolock(sig);
58767 + sig->gr_saddr = inet->inet_rcv_saddr;
58768 + sig->gr_daddr = inet->inet_daddr;
58769 + sig->gr_sport = inet->inet_sport;
58770 + sig->gr_dport = inet->inet_dport;
58771 + gr_add_to_task_ip_table_nolock(sig, newent);
58772 + spin_unlock(&gr_conn_table_lock);
58773 +#endif
58774 + return;
58775 +}
58776 +
58777 +void gr_del_task_from_ip_table(struct task_struct *task)
58778 +{
58779 +#ifdef CONFIG_GRKERNSEC
58780 + spin_lock_bh(&gr_conn_table_lock);
58781 + gr_del_task_from_ip_table_nolock(task->signal);
58782 + spin_unlock_bh(&gr_conn_table_lock);
58783 +#endif
58784 + return;
58785 +}
58786 +
58787 +void
58788 +gr_attach_curr_ip(const struct sock *sk)
58789 +{
58790 +#ifdef CONFIG_GRKERNSEC
58791 + struct signal_struct *p, *set;
58792 + const struct inet_sock *inet = inet_sk(sk);
58793 +
58794 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58795 + return;
58796 +
58797 + set = current->signal;
58798 +
58799 + spin_lock_bh(&gr_conn_table_lock);
58800 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58801 + inet->inet_dport, inet->inet_sport);
58802 + if (unlikely(p != NULL)) {
58803 + set->curr_ip = p->curr_ip;
58804 + set->used_accept = 1;
58805 + gr_del_task_from_ip_table_nolock(p);
58806 + spin_unlock_bh(&gr_conn_table_lock);
58807 + return;
58808 + }
58809 + spin_unlock_bh(&gr_conn_table_lock);
58810 +
58811 + set->curr_ip = inet->inet_daddr;
58812 + set->used_accept = 1;
58813 +#endif
58814 + return;
58815 +}
58816 +
58817 +int
58818 +gr_handle_sock_all(const int family, const int type, const int protocol)
58819 +{
58820 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58821 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58822 + (family != AF_UNIX)) {
58823 + if (family == AF_INET)
58824 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58825 + else
58826 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58827 + return -EACCES;
58828 + }
58829 +#endif
58830 + return 0;
58831 +}
58832 +
58833 +int
58834 +gr_handle_sock_server(const struct sockaddr *sck)
58835 +{
58836 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58837 + if (grsec_enable_socket_server &&
58838 + in_group_p(grsec_socket_server_gid) &&
58839 + sck && (sck->sa_family != AF_UNIX) &&
58840 + (sck->sa_family != AF_LOCAL)) {
58841 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58842 + return -EACCES;
58843 + }
58844 +#endif
58845 + return 0;
58846 +}
58847 +
58848 +int
58849 +gr_handle_sock_server_other(const struct sock *sck)
58850 +{
58851 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58852 + if (grsec_enable_socket_server &&
58853 + in_group_p(grsec_socket_server_gid) &&
58854 + sck && (sck->sk_family != AF_UNIX) &&
58855 + (sck->sk_family != AF_LOCAL)) {
58856 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58857 + return -EACCES;
58858 + }
58859 +#endif
58860 + return 0;
58861 +}
58862 +
58863 +int
58864 +gr_handle_sock_client(const struct sockaddr *sck)
58865 +{
58866 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58867 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58868 + sck && (sck->sa_family != AF_UNIX) &&
58869 + (sck->sa_family != AF_LOCAL)) {
58870 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58871 + return -EACCES;
58872 + }
58873 +#endif
58874 + return 0;
58875 +}
58876 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58877 new file mode 100644
58878 index 0000000..174668f
58879 --- /dev/null
58880 +++ b/grsecurity/grsec_sysctl.c
58881 @@ -0,0 +1,433 @@
58882 +#include <linux/kernel.h>
58883 +#include <linux/sched.h>
58884 +#include <linux/sysctl.h>
58885 +#include <linux/grsecurity.h>
58886 +#include <linux/grinternal.h>
58887 +
58888 +int
58889 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58890 +{
58891 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58892 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58893 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58894 + return -EACCES;
58895 + }
58896 +#endif
58897 + return 0;
58898 +}
58899 +
58900 +#ifdef CONFIG_GRKERNSEC_ROFS
58901 +static int __maybe_unused one = 1;
58902 +#endif
58903 +
58904 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58905 +struct ctl_table grsecurity_table[] = {
58906 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58907 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58908 +#ifdef CONFIG_GRKERNSEC_IO
58909 + {
58910 + .procname = "disable_priv_io",
58911 + .data = &grsec_disable_privio,
58912 + .maxlen = sizeof(int),
58913 + .mode = 0600,
58914 + .proc_handler = &proc_dointvec,
58915 + },
58916 +#endif
58917 +#endif
58918 +#ifdef CONFIG_GRKERNSEC_LINK
58919 + {
58920 + .procname = "linking_restrictions",
58921 + .data = &grsec_enable_link,
58922 + .maxlen = sizeof(int),
58923 + .mode = 0600,
58924 + .proc_handler = &proc_dointvec,
58925 + },
58926 +#endif
58927 +#ifdef CONFIG_GRKERNSEC_BRUTE
58928 + {
58929 + .procname = "deter_bruteforce",
58930 + .data = &grsec_enable_brute,
58931 + .maxlen = sizeof(int),
58932 + .mode = 0600,
58933 + .proc_handler = &proc_dointvec,
58934 + },
58935 +#endif
58936 +#ifdef CONFIG_GRKERNSEC_FIFO
58937 + {
58938 + .procname = "fifo_restrictions",
58939 + .data = &grsec_enable_fifo,
58940 + .maxlen = sizeof(int),
58941 + .mode = 0600,
58942 + .proc_handler = &proc_dointvec,
58943 + },
58944 +#endif
58945 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58946 + {
58947 + .procname = "ip_blackhole",
58948 + .data = &grsec_enable_blackhole,
58949 + .maxlen = sizeof(int),
58950 + .mode = 0600,
58951 + .proc_handler = &proc_dointvec,
58952 + },
58953 + {
58954 + .procname = "lastack_retries",
58955 + .data = &grsec_lastack_retries,
58956 + .maxlen = sizeof(int),
58957 + .mode = 0600,
58958 + .proc_handler = &proc_dointvec,
58959 + },
58960 +#endif
58961 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58962 + {
58963 + .procname = "exec_logging",
58964 + .data = &grsec_enable_execlog,
58965 + .maxlen = sizeof(int),
58966 + .mode = 0600,
58967 + .proc_handler = &proc_dointvec,
58968 + },
58969 +#endif
58970 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58971 + {
58972 + .procname = "rwxmap_logging",
58973 + .data = &grsec_enable_log_rwxmaps,
58974 + .maxlen = sizeof(int),
58975 + .mode = 0600,
58976 + .proc_handler = &proc_dointvec,
58977 + },
58978 +#endif
58979 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58980 + {
58981 + .procname = "signal_logging",
58982 + .data = &grsec_enable_signal,
58983 + .maxlen = sizeof(int),
58984 + .mode = 0600,
58985 + .proc_handler = &proc_dointvec,
58986 + },
58987 +#endif
58988 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58989 + {
58990 + .procname = "forkfail_logging",
58991 + .data = &grsec_enable_forkfail,
58992 + .maxlen = sizeof(int),
58993 + .mode = 0600,
58994 + .proc_handler = &proc_dointvec,
58995 + },
58996 +#endif
58997 +#ifdef CONFIG_GRKERNSEC_TIME
58998 + {
58999 + .procname = "timechange_logging",
59000 + .data = &grsec_enable_time,
59001 + .maxlen = sizeof(int),
59002 + .mode = 0600,
59003 + .proc_handler = &proc_dointvec,
59004 + },
59005 +#endif
59006 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59007 + {
59008 + .procname = "chroot_deny_shmat",
59009 + .data = &grsec_enable_chroot_shmat,
59010 + .maxlen = sizeof(int),
59011 + .mode = 0600,
59012 + .proc_handler = &proc_dointvec,
59013 + },
59014 +#endif
59015 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59016 + {
59017 + .procname = "chroot_deny_unix",
59018 + .data = &grsec_enable_chroot_unix,
59019 + .maxlen = sizeof(int),
59020 + .mode = 0600,
59021 + .proc_handler = &proc_dointvec,
59022 + },
59023 +#endif
59024 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59025 + {
59026 + .procname = "chroot_deny_mount",
59027 + .data = &grsec_enable_chroot_mount,
59028 + .maxlen = sizeof(int),
59029 + .mode = 0600,
59030 + .proc_handler = &proc_dointvec,
59031 + },
59032 +#endif
59033 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59034 + {
59035 + .procname = "chroot_deny_fchdir",
59036 + .data = &grsec_enable_chroot_fchdir,
59037 + .maxlen = sizeof(int),
59038 + .mode = 0600,
59039 + .proc_handler = &proc_dointvec,
59040 + },
59041 +#endif
59042 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59043 + {
59044 + .procname = "chroot_deny_chroot",
59045 + .data = &grsec_enable_chroot_double,
59046 + .maxlen = sizeof(int),
59047 + .mode = 0600,
59048 + .proc_handler = &proc_dointvec,
59049 + },
59050 +#endif
59051 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59052 + {
59053 + .procname = "chroot_deny_pivot",
59054 + .data = &grsec_enable_chroot_pivot,
59055 + .maxlen = sizeof(int),
59056 + .mode = 0600,
59057 + .proc_handler = &proc_dointvec,
59058 + },
59059 +#endif
59060 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59061 + {
59062 + .procname = "chroot_enforce_chdir",
59063 + .data = &grsec_enable_chroot_chdir,
59064 + .maxlen = sizeof(int),
59065 + .mode = 0600,
59066 + .proc_handler = &proc_dointvec,
59067 + },
59068 +#endif
59069 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59070 + {
59071 + .procname = "chroot_deny_chmod",
59072 + .data = &grsec_enable_chroot_chmod,
59073 + .maxlen = sizeof(int),
59074 + .mode = 0600,
59075 + .proc_handler = &proc_dointvec,
59076 + },
59077 +#endif
59078 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59079 + {
59080 + .procname = "chroot_deny_mknod",
59081 + .data = &grsec_enable_chroot_mknod,
59082 + .maxlen = sizeof(int),
59083 + .mode = 0600,
59084 + .proc_handler = &proc_dointvec,
59085 + },
59086 +#endif
59087 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59088 + {
59089 + .procname = "chroot_restrict_nice",
59090 + .data = &grsec_enable_chroot_nice,
59091 + .maxlen = sizeof(int),
59092 + .mode = 0600,
59093 + .proc_handler = &proc_dointvec,
59094 + },
59095 +#endif
59096 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59097 + {
59098 + .procname = "chroot_execlog",
59099 + .data = &grsec_enable_chroot_execlog,
59100 + .maxlen = sizeof(int),
59101 + .mode = 0600,
59102 + .proc_handler = &proc_dointvec,
59103 + },
59104 +#endif
59105 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59106 + {
59107 + .procname = "chroot_caps",
59108 + .data = &grsec_enable_chroot_caps,
59109 + .maxlen = sizeof(int),
59110 + .mode = 0600,
59111 + .proc_handler = &proc_dointvec,
59112 + },
59113 +#endif
59114 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59115 + {
59116 + .procname = "chroot_deny_sysctl",
59117 + .data = &grsec_enable_chroot_sysctl,
59118 + .maxlen = sizeof(int),
59119 + .mode = 0600,
59120 + .proc_handler = &proc_dointvec,
59121 + },
59122 +#endif
59123 +#ifdef CONFIG_GRKERNSEC_TPE
59124 + {
59125 + .procname = "tpe",
59126 + .data = &grsec_enable_tpe,
59127 + .maxlen = sizeof(int),
59128 + .mode = 0600,
59129 + .proc_handler = &proc_dointvec,
59130 + },
59131 + {
59132 + .procname = "tpe_gid",
59133 + .data = &grsec_tpe_gid,
59134 + .maxlen = sizeof(int),
59135 + .mode = 0600,
59136 + .proc_handler = &proc_dointvec,
59137 + },
59138 +#endif
59139 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59140 + {
59141 + .procname = "tpe_invert",
59142 + .data = &grsec_enable_tpe_invert,
59143 + .maxlen = sizeof(int),
59144 + .mode = 0600,
59145 + .proc_handler = &proc_dointvec,
59146 + },
59147 +#endif
59148 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59149 + {
59150 + .procname = "tpe_restrict_all",
59151 + .data = &grsec_enable_tpe_all,
59152 + .maxlen = sizeof(int),
59153 + .mode = 0600,
59154 + .proc_handler = &proc_dointvec,
59155 + },
59156 +#endif
59157 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59158 + {
59159 + .procname = "socket_all",
59160 + .data = &grsec_enable_socket_all,
59161 + .maxlen = sizeof(int),
59162 + .mode = 0600,
59163 + .proc_handler = &proc_dointvec,
59164 + },
59165 + {
59166 + .procname = "socket_all_gid",
59167 + .data = &grsec_socket_all_gid,
59168 + .maxlen = sizeof(int),
59169 + .mode = 0600,
59170 + .proc_handler = &proc_dointvec,
59171 + },
59172 +#endif
59173 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59174 + {
59175 + .procname = "socket_client",
59176 + .data = &grsec_enable_socket_client,
59177 + .maxlen = sizeof(int),
59178 + .mode = 0600,
59179 + .proc_handler = &proc_dointvec,
59180 + },
59181 + {
59182 + .procname = "socket_client_gid",
59183 + .data = &grsec_socket_client_gid,
59184 + .maxlen = sizeof(int),
59185 + .mode = 0600,
59186 + .proc_handler = &proc_dointvec,
59187 + },
59188 +#endif
59189 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59190 + {
59191 + .procname = "socket_server",
59192 + .data = &grsec_enable_socket_server,
59193 + .maxlen = sizeof(int),
59194 + .mode = 0600,
59195 + .proc_handler = &proc_dointvec,
59196 + },
59197 + {
59198 + .procname = "socket_server_gid",
59199 + .data = &grsec_socket_server_gid,
59200 + .maxlen = sizeof(int),
59201 + .mode = 0600,
59202 + .proc_handler = &proc_dointvec,
59203 + },
59204 +#endif
59205 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59206 + {
59207 + .procname = "audit_group",
59208 + .data = &grsec_enable_group,
59209 + .maxlen = sizeof(int),
59210 + .mode = 0600,
59211 + .proc_handler = &proc_dointvec,
59212 + },
59213 + {
59214 + .procname = "audit_gid",
59215 + .data = &grsec_audit_gid,
59216 + .maxlen = sizeof(int),
59217 + .mode = 0600,
59218 + .proc_handler = &proc_dointvec,
59219 + },
59220 +#endif
59221 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59222 + {
59223 + .procname = "audit_chdir",
59224 + .data = &grsec_enable_chdir,
59225 + .maxlen = sizeof(int),
59226 + .mode = 0600,
59227 + .proc_handler = &proc_dointvec,
59228 + },
59229 +#endif
59230 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59231 + {
59232 + .procname = "audit_mount",
59233 + .data = &grsec_enable_mount,
59234 + .maxlen = sizeof(int),
59235 + .mode = 0600,
59236 + .proc_handler = &proc_dointvec,
59237 + },
59238 +#endif
59239 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59240 + {
59241 + .procname = "audit_textrel",
59242 + .data = &grsec_enable_audit_textrel,
59243 + .maxlen = sizeof(int),
59244 + .mode = 0600,
59245 + .proc_handler = &proc_dointvec,
59246 + },
59247 +#endif
59248 +#ifdef CONFIG_GRKERNSEC_DMESG
59249 + {
59250 + .procname = "dmesg",
59251 + .data = &grsec_enable_dmesg,
59252 + .maxlen = sizeof(int),
59253 + .mode = 0600,
59254 + .proc_handler = &proc_dointvec,
59255 + },
59256 +#endif
59257 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59258 + {
59259 + .procname = "chroot_findtask",
59260 + .data = &grsec_enable_chroot_findtask,
59261 + .maxlen = sizeof(int),
59262 + .mode = 0600,
59263 + .proc_handler = &proc_dointvec,
59264 + },
59265 +#endif
59266 +#ifdef CONFIG_GRKERNSEC_RESLOG
59267 + {
59268 + .procname = "resource_logging",
59269 + .data = &grsec_resource_logging,
59270 + .maxlen = sizeof(int),
59271 + .mode = 0600,
59272 + .proc_handler = &proc_dointvec,
59273 + },
59274 +#endif
59275 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59276 + {
59277 + .procname = "audit_ptrace",
59278 + .data = &grsec_enable_audit_ptrace,
59279 + .maxlen = sizeof(int),
59280 + .mode = 0600,
59281 + .proc_handler = &proc_dointvec,
59282 + },
59283 +#endif
59284 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59285 + {
59286 + .procname = "harden_ptrace",
59287 + .data = &grsec_enable_harden_ptrace,
59288 + .maxlen = sizeof(int),
59289 + .mode = 0600,
59290 + .proc_handler = &proc_dointvec,
59291 + },
59292 +#endif
59293 + {
59294 + .procname = "grsec_lock",
59295 + .data = &grsec_lock,
59296 + .maxlen = sizeof(int),
59297 + .mode = 0600,
59298 + .proc_handler = &proc_dointvec,
59299 + },
59300 +#endif
59301 +#ifdef CONFIG_GRKERNSEC_ROFS
59302 + {
59303 + .procname = "romount_protect",
59304 + .data = &grsec_enable_rofs,
59305 + .maxlen = sizeof(int),
59306 + .mode = 0600,
59307 + .proc_handler = &proc_dointvec_minmax,
59308 + .extra1 = &one,
59309 + .extra2 = &one,
59310 + },
59311 +#endif
59312 + { }
59313 +};
59314 +#endif
59315 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59316 new file mode 100644
59317 index 0000000..0dc13c3
59318 --- /dev/null
59319 +++ b/grsecurity/grsec_time.c
59320 @@ -0,0 +1,16 @@
59321 +#include <linux/kernel.h>
59322 +#include <linux/sched.h>
59323 +#include <linux/grinternal.h>
59324 +#include <linux/module.h>
59325 +
59326 +void
59327 +gr_log_timechange(void)
59328 +{
59329 +#ifdef CONFIG_GRKERNSEC_TIME
59330 + if (grsec_enable_time)
59331 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59332 +#endif
59333 + return;
59334 +}
59335 +
59336 +EXPORT_SYMBOL(gr_log_timechange);
59337 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59338 new file mode 100644
59339 index 0000000..4a78774
59340 --- /dev/null
59341 +++ b/grsecurity/grsec_tpe.c
59342 @@ -0,0 +1,39 @@
59343 +#include <linux/kernel.h>
59344 +#include <linux/sched.h>
59345 +#include <linux/file.h>
59346 +#include <linux/fs.h>
59347 +#include <linux/grinternal.h>
59348 +
59349 +extern int gr_acl_tpe_check(void);
59350 +
59351 +int
59352 +gr_tpe_allow(const struct file *file)
59353 +{
59354 +#ifdef CONFIG_GRKERNSEC
59355 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59356 + const struct cred *cred = current_cred();
59357 +
59358 + if (cred->uid && ((grsec_enable_tpe &&
59359 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59360 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
59361 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
59362 +#else
59363 + in_group_p(grsec_tpe_gid)
59364 +#endif
59365 + ) || gr_acl_tpe_check()) &&
59366 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
59367 + (inode->i_mode & S_IWOTH))))) {
59368 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59369 + return 0;
59370 + }
59371 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59372 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
59373 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
59374 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
59375 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59376 + return 0;
59377 + }
59378 +#endif
59379 +#endif
59380 + return 1;
59381 +}
59382 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59383 new file mode 100644
59384 index 0000000..9f7b1ac
59385 --- /dev/null
59386 +++ b/grsecurity/grsum.c
59387 @@ -0,0 +1,61 @@
59388 +#include <linux/err.h>
59389 +#include <linux/kernel.h>
59390 +#include <linux/sched.h>
59391 +#include <linux/mm.h>
59392 +#include <linux/scatterlist.h>
59393 +#include <linux/crypto.h>
59394 +#include <linux/gracl.h>
59395 +
59396 +
59397 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59398 +#error "crypto and sha256 must be built into the kernel"
59399 +#endif
59400 +
59401 +int
59402 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59403 +{
59404 + char *p;
59405 + struct crypto_hash *tfm;
59406 + struct hash_desc desc;
59407 + struct scatterlist sg;
59408 + unsigned char temp_sum[GR_SHA_LEN];
59409 + volatile int retval = 0;
59410 + volatile int dummy = 0;
59411 + unsigned int i;
59412 +
59413 + sg_init_table(&sg, 1);
59414 +
59415 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59416 + if (IS_ERR(tfm)) {
59417 + /* should never happen, since sha256 should be built in */
59418 + return 1;
59419 + }
59420 +
59421 + desc.tfm = tfm;
59422 + desc.flags = 0;
59423 +
59424 + crypto_hash_init(&desc);
59425 +
59426 + p = salt;
59427 + sg_set_buf(&sg, p, GR_SALT_LEN);
59428 + crypto_hash_update(&desc, &sg, sg.length);
59429 +
59430 + p = entry->pw;
59431 + sg_set_buf(&sg, p, strlen(p));
59432 +
59433 + crypto_hash_update(&desc, &sg, sg.length);
59434 +
59435 + crypto_hash_final(&desc, temp_sum);
59436 +
59437 + memset(entry->pw, 0, GR_PW_LEN);
59438 +
59439 + for (i = 0; i < GR_SHA_LEN; i++)
59440 + if (sum[i] != temp_sum[i])
59441 + retval = 1;
59442 + else
59443 + dummy = 1; // waste a cycle
59444 +
59445 + crypto_free_hash(tfm);
59446 +
59447 + return retval;
59448 +}
59449 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59450 index 6cd5b64..f620d2d 100644
59451 --- a/include/acpi/acpi_bus.h
59452 +++ b/include/acpi/acpi_bus.h
59453 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59454 acpi_op_bind bind;
59455 acpi_op_unbind unbind;
59456 acpi_op_notify notify;
59457 -};
59458 +} __no_const;
59459
59460 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59461
59462 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59463 index b7babf0..71e4e74 100644
59464 --- a/include/asm-generic/atomic-long.h
59465 +++ b/include/asm-generic/atomic-long.h
59466 @@ -22,6 +22,12 @@
59467
59468 typedef atomic64_t atomic_long_t;
59469
59470 +#ifdef CONFIG_PAX_REFCOUNT
59471 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59472 +#else
59473 +typedef atomic64_t atomic_long_unchecked_t;
59474 +#endif
59475 +
59476 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59477
59478 static inline long atomic_long_read(atomic_long_t *l)
59479 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59480 return (long)atomic64_read(v);
59481 }
59482
59483 +#ifdef CONFIG_PAX_REFCOUNT
59484 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59485 +{
59486 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59487 +
59488 + return (long)atomic64_read_unchecked(v);
59489 +}
59490 +#endif
59491 +
59492 static inline void atomic_long_set(atomic_long_t *l, long i)
59493 {
59494 atomic64_t *v = (atomic64_t *)l;
59495 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59496 atomic64_set(v, i);
59497 }
59498
59499 +#ifdef CONFIG_PAX_REFCOUNT
59500 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59501 +{
59502 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59503 +
59504 + atomic64_set_unchecked(v, i);
59505 +}
59506 +#endif
59507 +
59508 static inline void atomic_long_inc(atomic_long_t *l)
59509 {
59510 atomic64_t *v = (atomic64_t *)l;
59511 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59512 atomic64_inc(v);
59513 }
59514
59515 +#ifdef CONFIG_PAX_REFCOUNT
59516 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59517 +{
59518 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59519 +
59520 + atomic64_inc_unchecked(v);
59521 +}
59522 +#endif
59523 +
59524 static inline void atomic_long_dec(atomic_long_t *l)
59525 {
59526 atomic64_t *v = (atomic64_t *)l;
59527 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59528 atomic64_dec(v);
59529 }
59530
59531 +#ifdef CONFIG_PAX_REFCOUNT
59532 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59533 +{
59534 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59535 +
59536 + atomic64_dec_unchecked(v);
59537 +}
59538 +#endif
59539 +
59540 static inline void atomic_long_add(long i, atomic_long_t *l)
59541 {
59542 atomic64_t *v = (atomic64_t *)l;
59543 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59544 atomic64_add(i, v);
59545 }
59546
59547 +#ifdef CONFIG_PAX_REFCOUNT
59548 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59549 +{
59550 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59551 +
59552 + atomic64_add_unchecked(i, v);
59553 +}
59554 +#endif
59555 +
59556 static inline void atomic_long_sub(long i, atomic_long_t *l)
59557 {
59558 atomic64_t *v = (atomic64_t *)l;
59559 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59560 atomic64_sub(i, v);
59561 }
59562
59563 +#ifdef CONFIG_PAX_REFCOUNT
59564 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59565 +{
59566 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59567 +
59568 + atomic64_sub_unchecked(i, v);
59569 +}
59570 +#endif
59571 +
59572 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59573 {
59574 atomic64_t *v = (atomic64_t *)l;
59575 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59576 return (long)atomic64_inc_return(v);
59577 }
59578
59579 +#ifdef CONFIG_PAX_REFCOUNT
59580 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59581 +{
59582 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59583 +
59584 + return (long)atomic64_inc_return_unchecked(v);
59585 +}
59586 +#endif
59587 +
59588 static inline long atomic_long_dec_return(atomic_long_t *l)
59589 {
59590 atomic64_t *v = (atomic64_t *)l;
59591 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59592
59593 typedef atomic_t atomic_long_t;
59594
59595 +#ifdef CONFIG_PAX_REFCOUNT
59596 +typedef atomic_unchecked_t atomic_long_unchecked_t;
59597 +#else
59598 +typedef atomic_t atomic_long_unchecked_t;
59599 +#endif
59600 +
59601 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59602 static inline long atomic_long_read(atomic_long_t *l)
59603 {
59604 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59605 return (long)atomic_read(v);
59606 }
59607
59608 +#ifdef CONFIG_PAX_REFCOUNT
59609 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59610 +{
59611 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59612 +
59613 + return (long)atomic_read_unchecked(v);
59614 +}
59615 +#endif
59616 +
59617 static inline void atomic_long_set(atomic_long_t *l, long i)
59618 {
59619 atomic_t *v = (atomic_t *)l;
59620 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59621 atomic_set(v, i);
59622 }
59623
59624 +#ifdef CONFIG_PAX_REFCOUNT
59625 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59626 +{
59627 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59628 +
59629 + atomic_set_unchecked(v, i);
59630 +}
59631 +#endif
59632 +
59633 static inline void atomic_long_inc(atomic_long_t *l)
59634 {
59635 atomic_t *v = (atomic_t *)l;
59636 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59637 atomic_inc(v);
59638 }
59639
59640 +#ifdef CONFIG_PAX_REFCOUNT
59641 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59642 +{
59643 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59644 +
59645 + atomic_inc_unchecked(v);
59646 +}
59647 +#endif
59648 +
59649 static inline void atomic_long_dec(atomic_long_t *l)
59650 {
59651 atomic_t *v = (atomic_t *)l;
59652 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59653 atomic_dec(v);
59654 }
59655
59656 +#ifdef CONFIG_PAX_REFCOUNT
59657 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59658 +{
59659 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59660 +
59661 + atomic_dec_unchecked(v);
59662 +}
59663 +#endif
59664 +
59665 static inline void atomic_long_add(long i, atomic_long_t *l)
59666 {
59667 atomic_t *v = (atomic_t *)l;
59668 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59669 atomic_add(i, v);
59670 }
59671
59672 +#ifdef CONFIG_PAX_REFCOUNT
59673 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59674 +{
59675 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59676 +
59677 + atomic_add_unchecked(i, v);
59678 +}
59679 +#endif
59680 +
59681 static inline void atomic_long_sub(long i, atomic_long_t *l)
59682 {
59683 atomic_t *v = (atomic_t *)l;
59684 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59685 atomic_sub(i, v);
59686 }
59687
59688 +#ifdef CONFIG_PAX_REFCOUNT
59689 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59690 +{
59691 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59692 +
59693 + atomic_sub_unchecked(i, v);
59694 +}
59695 +#endif
59696 +
59697 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59698 {
59699 atomic_t *v = (atomic_t *)l;
59700 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59701 return (long)atomic_inc_return(v);
59702 }
59703
59704 +#ifdef CONFIG_PAX_REFCOUNT
59705 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59706 +{
59707 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59708 +
59709 + return (long)atomic_inc_return_unchecked(v);
59710 +}
59711 +#endif
59712 +
59713 static inline long atomic_long_dec_return(atomic_long_t *l)
59714 {
59715 atomic_t *v = (atomic_t *)l;
59716 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59717
59718 #endif /* BITS_PER_LONG == 64 */
59719
59720 +#ifdef CONFIG_PAX_REFCOUNT
59721 +static inline void pax_refcount_needs_these_functions(void)
59722 +{
59723 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59724 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59725 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59726 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59727 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59728 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59729 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59730 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59731 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59732 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59733 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59734 +
59735 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59736 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59737 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59738 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59739 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59740 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59741 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59742 +}
59743 +#else
59744 +#define atomic_read_unchecked(v) atomic_read(v)
59745 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59746 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59747 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59748 +#define atomic_inc_unchecked(v) atomic_inc(v)
59749 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59750 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59751 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59752 +#define atomic_dec_unchecked(v) atomic_dec(v)
59753 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59754 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59755 +
59756 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59757 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59758 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59759 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59760 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59761 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59762 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59763 +#endif
59764 +
59765 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59766 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59767 index b18ce4f..2ee2843 100644
59768 --- a/include/asm-generic/atomic64.h
59769 +++ b/include/asm-generic/atomic64.h
59770 @@ -16,6 +16,8 @@ typedef struct {
59771 long long counter;
59772 } atomic64_t;
59773
59774 +typedef atomic64_t atomic64_unchecked_t;
59775 +
59776 #define ATOMIC64_INIT(i) { (i) }
59777
59778 extern long long atomic64_read(const atomic64_t *v);
59779 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59780 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59781 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59782
59783 +#define atomic64_read_unchecked(v) atomic64_read(v)
59784 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59785 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59786 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59787 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59788 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59789 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59790 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59791 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59792 +
59793 #endif /* _ASM_GENERIC_ATOMIC64_H */
59794 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59795 index 1bfcfe5..e04c5c9 100644
59796 --- a/include/asm-generic/cache.h
59797 +++ b/include/asm-generic/cache.h
59798 @@ -6,7 +6,7 @@
59799 * cache lines need to provide their own cache.h.
59800 */
59801
59802 -#define L1_CACHE_SHIFT 5
59803 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59804 +#define L1_CACHE_SHIFT 5UL
59805 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59806
59807 #endif /* __ASM_GENERIC_CACHE_H */
59808 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
59809 index 1ca3efc..e3dc852 100644
59810 --- a/include/asm-generic/int-l64.h
59811 +++ b/include/asm-generic/int-l64.h
59812 @@ -46,6 +46,8 @@ typedef unsigned int u32;
59813 typedef signed long s64;
59814 typedef unsigned long u64;
59815
59816 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
59817 +
59818 #define S8_C(x) x
59819 #define U8_C(x) x ## U
59820 #define S16_C(x) x
59821 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
59822 index f394147..b6152b9 100644
59823 --- a/include/asm-generic/int-ll64.h
59824 +++ b/include/asm-generic/int-ll64.h
59825 @@ -51,6 +51,8 @@ typedef unsigned int u32;
59826 typedef signed long long s64;
59827 typedef unsigned long long u64;
59828
59829 +typedef unsigned long long intoverflow_t;
59830 +
59831 #define S8_C(x) x
59832 #define U8_C(x) x ## U
59833 #define S16_C(x) x
59834 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59835 index 0232ccb..13d9165 100644
59836 --- a/include/asm-generic/kmap_types.h
59837 +++ b/include/asm-generic/kmap_types.h
59838 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59839 KMAP_D(17) KM_NMI,
59840 KMAP_D(18) KM_NMI_PTE,
59841 KMAP_D(19) KM_KDB,
59842 +KMAP_D(20) KM_CLEARPAGE,
59843 /*
59844 * Remember to update debug_kmap_atomic() when adding new kmap types!
59845 */
59846 -KMAP_D(20) KM_TYPE_NR
59847 +KMAP_D(21) KM_TYPE_NR
59848 };
59849
59850 #undef KMAP_D
59851 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59852 index 725612b..9cc513a 100644
59853 --- a/include/asm-generic/pgtable-nopmd.h
59854 +++ b/include/asm-generic/pgtable-nopmd.h
59855 @@ -1,14 +1,19 @@
59856 #ifndef _PGTABLE_NOPMD_H
59857 #define _PGTABLE_NOPMD_H
59858
59859 -#ifndef __ASSEMBLY__
59860 -
59861 #include <asm-generic/pgtable-nopud.h>
59862
59863 -struct mm_struct;
59864 -
59865 #define __PAGETABLE_PMD_FOLDED
59866
59867 +#define PMD_SHIFT PUD_SHIFT
59868 +#define PTRS_PER_PMD 1
59869 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59870 +#define PMD_MASK (~(PMD_SIZE-1))
59871 +
59872 +#ifndef __ASSEMBLY__
59873 +
59874 +struct mm_struct;
59875 +
59876 /*
59877 * Having the pmd type consist of a pud gets the size right, and allows
59878 * us to conceptually access the pud entry that this pmd is folded into
59879 @@ -16,11 +21,6 @@ struct mm_struct;
59880 */
59881 typedef struct { pud_t pud; } pmd_t;
59882
59883 -#define PMD_SHIFT PUD_SHIFT
59884 -#define PTRS_PER_PMD 1
59885 -#define PMD_SIZE (1UL << PMD_SHIFT)
59886 -#define PMD_MASK (~(PMD_SIZE-1))
59887 -
59888 /*
59889 * The "pud_xxx()" functions here are trivial for a folded two-level
59890 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59891 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59892 index 810431d..ccc3638 100644
59893 --- a/include/asm-generic/pgtable-nopud.h
59894 +++ b/include/asm-generic/pgtable-nopud.h
59895 @@ -1,10 +1,15 @@
59896 #ifndef _PGTABLE_NOPUD_H
59897 #define _PGTABLE_NOPUD_H
59898
59899 -#ifndef __ASSEMBLY__
59900 -
59901 #define __PAGETABLE_PUD_FOLDED
59902
59903 +#define PUD_SHIFT PGDIR_SHIFT
59904 +#define PTRS_PER_PUD 1
59905 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59906 +#define PUD_MASK (~(PUD_SIZE-1))
59907 +
59908 +#ifndef __ASSEMBLY__
59909 +
59910 /*
59911 * Having the pud type consist of a pgd gets the size right, and allows
59912 * us to conceptually access the pgd entry that this pud is folded into
59913 @@ -12,11 +17,6 @@
59914 */
59915 typedef struct { pgd_t pgd; } pud_t;
59916
59917 -#define PUD_SHIFT PGDIR_SHIFT
59918 -#define PTRS_PER_PUD 1
59919 -#define PUD_SIZE (1UL << PUD_SHIFT)
59920 -#define PUD_MASK (~(PUD_SIZE-1))
59921 -
59922 /*
59923 * The "pgd_xxx()" functions here are trivial for a folded two-level
59924 * setup: the pud is never bad, and a pud always exists (as it's folded
59925 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59926 index 76bff2b..c7a14e2 100644
59927 --- a/include/asm-generic/pgtable.h
59928 +++ b/include/asm-generic/pgtable.h
59929 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
59930 #endif /* __HAVE_ARCH_PMD_WRITE */
59931 #endif
59932
59933 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59934 +static inline unsigned long pax_open_kernel(void) { return 0; }
59935 +#endif
59936 +
59937 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59938 +static inline unsigned long pax_close_kernel(void) { return 0; }
59939 +#endif
59940 +
59941 #endif /* !__ASSEMBLY__ */
59942
59943 #endif /* _ASM_GENERIC_PGTABLE_H */
59944 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59945 index db22d13..1f2e3e1 100644
59946 --- a/include/asm-generic/vmlinux.lds.h
59947 +++ b/include/asm-generic/vmlinux.lds.h
59948 @@ -217,6 +217,7 @@
59949 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59950 VMLINUX_SYMBOL(__start_rodata) = .; \
59951 *(.rodata) *(.rodata.*) \
59952 + *(.data..read_only) \
59953 *(__vermagic) /* Kernel version magic */ \
59954 . = ALIGN(8); \
59955 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59956 @@ -723,17 +724,18 @@
59957 * section in the linker script will go there too. @phdr should have
59958 * a leading colon.
59959 *
59960 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59961 + * Note that this macros defines per_cpu_load as an absolute symbol.
59962 * If there is no need to put the percpu section at a predetermined
59963 * address, use PERCPU_SECTION.
59964 */
59965 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59966 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59967 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59968 + per_cpu_load = .; \
59969 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59970 - LOAD_OFFSET) { \
59971 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59972 PERCPU_INPUT(cacheline) \
59973 } phdr \
59974 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59975 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59976
59977 /**
59978 * PERCPU_SECTION - define output section for percpu area, simple version
59979 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59980 index 9b7c2bb..76b7d1e 100644
59981 --- a/include/drm/drmP.h
59982 +++ b/include/drm/drmP.h
59983 @@ -73,6 +73,7 @@
59984 #include <linux/workqueue.h>
59985 #include <linux/poll.h>
59986 #include <asm/pgalloc.h>
59987 +#include <asm/local.h>
59988 #include "drm.h"
59989
59990 #include <linux/idr.h>
59991 @@ -1035,7 +1036,7 @@ struct drm_device {
59992
59993 /** \name Usage Counters */
59994 /*@{ */
59995 - int open_count; /**< Outstanding files open */
59996 + local_t open_count; /**< Outstanding files open */
59997 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59998 atomic_t vma_count; /**< Outstanding vma areas open */
59999 int buf_use; /**< Buffers in use -- cannot alloc */
60000 @@ -1046,7 +1047,7 @@ struct drm_device {
60001 /*@{ */
60002 unsigned long counters;
60003 enum drm_stat_type types[15];
60004 - atomic_t counts[15];
60005 + atomic_unchecked_t counts[15];
60006 /*@} */
60007
60008 struct list_head filelist;
60009 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60010 index 73b0712..0b7ef2f 100644
60011 --- a/include/drm/drm_crtc_helper.h
60012 +++ b/include/drm/drm_crtc_helper.h
60013 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60014
60015 /* disable crtc when not in use - more explicit than dpms off */
60016 void (*disable)(struct drm_crtc *crtc);
60017 -};
60018 +} __no_const;
60019
60020 struct drm_encoder_helper_funcs {
60021 void (*dpms)(struct drm_encoder *encoder, int mode);
60022 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60023 struct drm_connector *connector);
60024 /* disable encoder when not in use - more explicit than dpms off */
60025 void (*disable)(struct drm_encoder *encoder);
60026 -};
60027 +} __no_const;
60028
60029 struct drm_connector_helper_funcs {
60030 int (*get_modes)(struct drm_connector *connector);
60031 diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
60032 index c4961ea..53dfa109 100644
60033 --- a/include/drm/drm_mode.h
60034 +++ b/include/drm/drm_mode.h
60035 @@ -233,6 +233,8 @@ struct drm_mode_fb_cmd {
60036 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
60037 #define DRM_MODE_FB_DIRTY_FLAGS 0x03
60038
60039 +#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
60040 +
60041 /*
60042 * Mark a region of a framebuffer as dirty.
60043 *
60044 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60045 index 26c1f78..6722682 100644
60046 --- a/include/drm/ttm/ttm_memory.h
60047 +++ b/include/drm/ttm/ttm_memory.h
60048 @@ -47,7 +47,7 @@
60049
60050 struct ttm_mem_shrink {
60051 int (*do_shrink) (struct ttm_mem_shrink *);
60052 -};
60053 +} __no_const;
60054
60055 /**
60056 * struct ttm_mem_global - Global memory accounting structure.
60057 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60058 index e86dfca..40cc55f 100644
60059 --- a/include/linux/a.out.h
60060 +++ b/include/linux/a.out.h
60061 @@ -39,6 +39,14 @@ enum machine_type {
60062 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60063 };
60064
60065 +/* Constants for the N_FLAGS field */
60066 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60067 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60068 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60069 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60070 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60071 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60072 +
60073 #if !defined (N_MAGIC)
60074 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60075 #endif
60076 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60077 index 49a83ca..df96b54 100644
60078 --- a/include/linux/atmdev.h
60079 +++ b/include/linux/atmdev.h
60080 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60081 #endif
60082
60083 struct k_atm_aal_stats {
60084 -#define __HANDLE_ITEM(i) atomic_t i
60085 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60086 __AAL_STAT_ITEMS
60087 #undef __HANDLE_ITEM
60088 };
60089 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60090 index fd88a39..f4d0bad 100644
60091 --- a/include/linux/binfmts.h
60092 +++ b/include/linux/binfmts.h
60093 @@ -88,6 +88,7 @@ struct linux_binfmt {
60094 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60095 int (*load_shlib)(struct file *);
60096 int (*core_dump)(struct coredump_params *cprm);
60097 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60098 unsigned long min_coredump; /* minimal dump size */
60099 };
60100
60101 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60102 index 7fbaa91..5e6a460 100644
60103 --- a/include/linux/blkdev.h
60104 +++ b/include/linux/blkdev.h
60105 @@ -1321,7 +1321,7 @@ struct block_device_operations {
60106 /* this callback is with swap_lock and sometimes page table lock held */
60107 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60108 struct module *owner;
60109 -};
60110 +} __do_const;
60111
60112 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60113 unsigned long);
60114 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60115 index 8e9e4bc..88bd457 100644
60116 --- a/include/linux/blktrace_api.h
60117 +++ b/include/linux/blktrace_api.h
60118 @@ -162,7 +162,7 @@ struct blk_trace {
60119 struct dentry *dir;
60120 struct dentry *dropped_file;
60121 struct dentry *msg_file;
60122 - atomic_t dropped;
60123 + atomic_unchecked_t dropped;
60124 };
60125
60126 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60127 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60128 index 83195fb..0b0f77d 100644
60129 --- a/include/linux/byteorder/little_endian.h
60130 +++ b/include/linux/byteorder/little_endian.h
60131 @@ -42,51 +42,51 @@
60132
60133 static inline __le64 __cpu_to_le64p(const __u64 *p)
60134 {
60135 - return (__force __le64)*p;
60136 + return (__force const __le64)*p;
60137 }
60138 static inline __u64 __le64_to_cpup(const __le64 *p)
60139 {
60140 - return (__force __u64)*p;
60141 + return (__force const __u64)*p;
60142 }
60143 static inline __le32 __cpu_to_le32p(const __u32 *p)
60144 {
60145 - return (__force __le32)*p;
60146 + return (__force const __le32)*p;
60147 }
60148 static inline __u32 __le32_to_cpup(const __le32 *p)
60149 {
60150 - return (__force __u32)*p;
60151 + return (__force const __u32)*p;
60152 }
60153 static inline __le16 __cpu_to_le16p(const __u16 *p)
60154 {
60155 - return (__force __le16)*p;
60156 + return (__force const __le16)*p;
60157 }
60158 static inline __u16 __le16_to_cpup(const __le16 *p)
60159 {
60160 - return (__force __u16)*p;
60161 + return (__force const __u16)*p;
60162 }
60163 static inline __be64 __cpu_to_be64p(const __u64 *p)
60164 {
60165 - return (__force __be64)__swab64p(p);
60166 + return (__force const __be64)__swab64p(p);
60167 }
60168 static inline __u64 __be64_to_cpup(const __be64 *p)
60169 {
60170 - return __swab64p((__u64 *)p);
60171 + return __swab64p((const __u64 *)p);
60172 }
60173 static inline __be32 __cpu_to_be32p(const __u32 *p)
60174 {
60175 - return (__force __be32)__swab32p(p);
60176 + return (__force const __be32)__swab32p(p);
60177 }
60178 static inline __u32 __be32_to_cpup(const __be32 *p)
60179 {
60180 - return __swab32p((__u32 *)p);
60181 + return __swab32p((const __u32 *)p);
60182 }
60183 static inline __be16 __cpu_to_be16p(const __u16 *p)
60184 {
60185 - return (__force __be16)__swab16p(p);
60186 + return (__force const __be16)__swab16p(p);
60187 }
60188 static inline __u16 __be16_to_cpup(const __be16 *p)
60189 {
60190 - return __swab16p((__u16 *)p);
60191 + return __swab16p((const __u16 *)p);
60192 }
60193 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60194 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60195 diff --git a/include/linux/cache.h b/include/linux/cache.h
60196 index 4c57065..4307975 100644
60197 --- a/include/linux/cache.h
60198 +++ b/include/linux/cache.h
60199 @@ -16,6 +16,10 @@
60200 #define __read_mostly
60201 #endif
60202
60203 +#ifndef __read_only
60204 +#define __read_only __read_mostly
60205 +#endif
60206 +
60207 #ifndef ____cacheline_aligned
60208 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60209 #endif
60210 diff --git a/include/linux/capability.h b/include/linux/capability.h
60211 index c421123..e343179 100644
60212 --- a/include/linux/capability.h
60213 +++ b/include/linux/capability.h
60214 @@ -547,6 +547,9 @@ extern bool capable(int cap);
60215 extern bool ns_capable(struct user_namespace *ns, int cap);
60216 extern bool task_ns_capable(struct task_struct *t, int cap);
60217 extern bool nsown_capable(int cap);
60218 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
60219 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60220 +extern bool capable_nolog(int cap);
60221
60222 /* audit system wants to get cap info from files as well */
60223 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60224 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60225 index 04ffb2e..6799180 100644
60226 --- a/include/linux/cleancache.h
60227 +++ b/include/linux/cleancache.h
60228 @@ -31,7 +31,7 @@ struct cleancache_ops {
60229 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60230 void (*flush_inode)(int, struct cleancache_filekey);
60231 void (*flush_fs)(int);
60232 -};
60233 +} __no_const;
60234
60235 extern struct cleancache_ops
60236 cleancache_register_ops(struct cleancache_ops *ops);
60237 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60238 index dfadc96..c0e70c1 100644
60239 --- a/include/linux/compiler-gcc4.h
60240 +++ b/include/linux/compiler-gcc4.h
60241 @@ -31,6 +31,12 @@
60242
60243
60244 #if __GNUC_MINOR__ >= 5
60245 +
60246 +#ifdef CONSTIFY_PLUGIN
60247 +#define __no_const __attribute__((no_const))
60248 +#define __do_const __attribute__((do_const))
60249 +#endif
60250 +
60251 /*
60252 * Mark a position in code as unreachable. This can be used to
60253 * suppress control flow warnings after asm blocks that transfer
60254 @@ -46,6 +52,11 @@
60255 #define __noclone __attribute__((__noclone__))
60256
60257 #endif
60258 +
60259 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60260 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60261 +#define __bos0(ptr) __bos((ptr), 0)
60262 +#define __bos1(ptr) __bos((ptr), 1)
60263 #endif
60264
60265 #if __GNUC_MINOR__ > 0
60266 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60267 index 320d6c9..8573a1c 100644
60268 --- a/include/linux/compiler.h
60269 +++ b/include/linux/compiler.h
60270 @@ -5,31 +5,62 @@
60271
60272 #ifdef __CHECKER__
60273 # define __user __attribute__((noderef, address_space(1)))
60274 +# define __force_user __force __user
60275 # define __kernel __attribute__((address_space(0)))
60276 +# define __force_kernel __force __kernel
60277 # define __safe __attribute__((safe))
60278 # define __force __attribute__((force))
60279 # define __nocast __attribute__((nocast))
60280 # define __iomem __attribute__((noderef, address_space(2)))
60281 +# define __force_iomem __force __iomem
60282 # define __acquires(x) __attribute__((context(x,0,1)))
60283 # define __releases(x) __attribute__((context(x,1,0)))
60284 # define __acquire(x) __context__(x,1)
60285 # define __release(x) __context__(x,-1)
60286 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60287 # define __percpu __attribute__((noderef, address_space(3)))
60288 +# define __force_percpu __force __percpu
60289 #ifdef CONFIG_SPARSE_RCU_POINTER
60290 # define __rcu __attribute__((noderef, address_space(4)))
60291 +# define __force_rcu __force __rcu
60292 #else
60293 # define __rcu
60294 +# define __force_rcu
60295 #endif
60296 extern void __chk_user_ptr(const volatile void __user *);
60297 extern void __chk_io_ptr(const volatile void __iomem *);
60298 +#elif defined(CHECKER_PLUGIN)
60299 +//# define __user
60300 +//# define __force_user
60301 +//# define __kernel
60302 +//# define __force_kernel
60303 +# define __safe
60304 +# define __force
60305 +# define __nocast
60306 +# define __iomem
60307 +# define __force_iomem
60308 +# define __chk_user_ptr(x) (void)0
60309 +# define __chk_io_ptr(x) (void)0
60310 +# define __builtin_warning(x, y...) (1)
60311 +# define __acquires(x)
60312 +# define __releases(x)
60313 +# define __acquire(x) (void)0
60314 +# define __release(x) (void)0
60315 +# define __cond_lock(x,c) (c)
60316 +# define __percpu
60317 +# define __force_percpu
60318 +# define __rcu
60319 +# define __force_rcu
60320 #else
60321 # define __user
60322 +# define __force_user
60323 # define __kernel
60324 +# define __force_kernel
60325 # define __safe
60326 # define __force
60327 # define __nocast
60328 # define __iomem
60329 +# define __force_iomem
60330 # define __chk_user_ptr(x) (void)0
60331 # define __chk_io_ptr(x) (void)0
60332 # define __builtin_warning(x, y...) (1)
60333 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60334 # define __release(x) (void)0
60335 # define __cond_lock(x,c) (c)
60336 # define __percpu
60337 +# define __force_percpu
60338 # define __rcu
60339 +# define __force_rcu
60340 #endif
60341
60342 #ifdef __KERNEL__
60343 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60344 # define __attribute_const__ /* unimplemented */
60345 #endif
60346
60347 +#ifndef __no_const
60348 +# define __no_const
60349 +#endif
60350 +
60351 +#ifndef __do_const
60352 +# define __do_const
60353 +#endif
60354 +
60355 /*
60356 * Tell gcc if a function is cold. The compiler will assume any path
60357 * directly leading to the call is unlikely.
60358 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60359 #define __cold
60360 #endif
60361
60362 +#ifndef __alloc_size
60363 +#define __alloc_size(...)
60364 +#endif
60365 +
60366 +#ifndef __bos
60367 +#define __bos(ptr, arg)
60368 +#endif
60369 +
60370 +#ifndef __bos0
60371 +#define __bos0(ptr)
60372 +#endif
60373 +
60374 +#ifndef __bos1
60375 +#define __bos1(ptr)
60376 +#endif
60377 +
60378 /* Simple shorthand for a section definition */
60379 #ifndef __section
60380 # define __section(S) __attribute__ ((__section__(#S)))
60381 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60382 * use is to mediate communication between process-level code and irq/NMI
60383 * handlers, all running on the same CPU.
60384 */
60385 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60386 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60387 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60388
60389 #endif /* __LINUX_COMPILER_H */
60390 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60391 index e9eaec5..bfeb9bb 100644
60392 --- a/include/linux/cpuset.h
60393 +++ b/include/linux/cpuset.h
60394 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
60395 * nodemask.
60396 */
60397 smp_mb();
60398 - --ACCESS_ONCE(current->mems_allowed_change_disable);
60399 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60400 }
60401
60402 static inline void set_mems_allowed(nodemask_t nodemask)
60403 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60404 index e5e468e..f079672 100644
60405 --- a/include/linux/crypto.h
60406 +++ b/include/linux/crypto.h
60407 @@ -361,7 +361,7 @@ struct cipher_tfm {
60408 const u8 *key, unsigned int keylen);
60409 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60410 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60411 -};
60412 +} __no_const;
60413
60414 struct hash_tfm {
60415 int (*init)(struct hash_desc *desc);
60416 @@ -382,13 +382,13 @@ struct compress_tfm {
60417 int (*cot_decompress)(struct crypto_tfm *tfm,
60418 const u8 *src, unsigned int slen,
60419 u8 *dst, unsigned int *dlen);
60420 -};
60421 +} __no_const;
60422
60423 struct rng_tfm {
60424 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60425 unsigned int dlen);
60426 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60427 -};
60428 +} __no_const;
60429
60430 #define crt_ablkcipher crt_u.ablkcipher
60431 #define crt_aead crt_u.aead
60432 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60433 index 7925bf0..d5143d2 100644
60434 --- a/include/linux/decompress/mm.h
60435 +++ b/include/linux/decompress/mm.h
60436 @@ -77,7 +77,7 @@ static void free(void *where)
60437 * warnings when not needed (indeed large_malloc / large_free are not
60438 * needed by inflate */
60439
60440 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60441 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60442 #define free(a) kfree(a)
60443
60444 #define large_malloc(a) vmalloc(a)
60445 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60446 index 347fdc3..cd01657 100644
60447 --- a/include/linux/dma-mapping.h
60448 +++ b/include/linux/dma-mapping.h
60449 @@ -42,7 +42,7 @@ struct dma_map_ops {
60450 int (*dma_supported)(struct device *dev, u64 mask);
60451 int (*set_dma_mask)(struct device *dev, u64 mask);
60452 int is_phys;
60453 -};
60454 +} __do_const;
60455
60456 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60457
60458 diff --git a/include/linux/efi.h b/include/linux/efi.h
60459 index 2362a0b..cfaf8fcc 100644
60460 --- a/include/linux/efi.h
60461 +++ b/include/linux/efi.h
60462 @@ -446,7 +446,7 @@ struct efivar_operations {
60463 efi_get_variable_t *get_variable;
60464 efi_get_next_variable_t *get_next_variable;
60465 efi_set_variable_t *set_variable;
60466 -};
60467 +} __no_const;
60468
60469 struct efivars {
60470 /*
60471 diff --git a/include/linux/elf.h b/include/linux/elf.h
60472 index 110821c..cb14c08 100644
60473 --- a/include/linux/elf.h
60474 +++ b/include/linux/elf.h
60475 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
60476 #define PT_GNU_EH_FRAME 0x6474e550
60477
60478 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60479 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60480 +
60481 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60482 +
60483 +/* Constants for the e_flags field */
60484 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60485 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60486 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60487 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60488 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60489 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60490
60491 /*
60492 * Extended Numbering
60493 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
60494 #define DT_DEBUG 21
60495 #define DT_TEXTREL 22
60496 #define DT_JMPREL 23
60497 +#define DT_FLAGS 30
60498 + #define DF_TEXTREL 0x00000004
60499 #define DT_ENCODING 32
60500 #define OLD_DT_LOOS 0x60000000
60501 #define DT_LOOS 0x6000000d
60502 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
60503 #define PF_W 0x2
60504 #define PF_X 0x1
60505
60506 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60507 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60508 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60509 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60510 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60511 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60512 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60513 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60514 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60515 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60516 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60517 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60518 +
60519 typedef struct elf32_phdr{
60520 Elf32_Word p_type;
60521 Elf32_Off p_offset;
60522 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
60523 #define EI_OSABI 7
60524 #define EI_PAD 8
60525
60526 +#define EI_PAX 14
60527 +
60528 #define ELFMAG0 0x7f /* EI_MAG */
60529 #define ELFMAG1 'E'
60530 #define ELFMAG2 'L'
60531 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
60532 #define elf_note elf32_note
60533 #define elf_addr_t Elf32_Off
60534 #define Elf_Half Elf32_Half
60535 +#define elf_dyn Elf32_Dyn
60536
60537 #else
60538
60539 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
60540 #define elf_note elf64_note
60541 #define elf_addr_t Elf64_Off
60542 #define Elf_Half Elf64_Half
60543 +#define elf_dyn Elf64_Dyn
60544
60545 #endif
60546
60547 diff --git a/include/linux/filter.h b/include/linux/filter.h
60548 index 741956f..f02f482 100644
60549 --- a/include/linux/filter.h
60550 +++ b/include/linux/filter.h
60551 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60552
60553 struct sk_buff;
60554 struct sock;
60555 +struct bpf_jit_work;
60556
60557 struct sk_filter
60558 {
60559 @@ -141,6 +142,9 @@ struct sk_filter
60560 unsigned int len; /* Number of filter blocks */
60561 unsigned int (*bpf_func)(const struct sk_buff *skb,
60562 const struct sock_filter *filter);
60563 +#ifdef CONFIG_BPF_JIT
60564 + struct bpf_jit_work *work;
60565 +#endif
60566 struct rcu_head rcu;
60567 struct sock_filter insns[0];
60568 };
60569 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60570 index 84ccf8e..2e9b14c 100644
60571 --- a/include/linux/firewire.h
60572 +++ b/include/linux/firewire.h
60573 @@ -428,7 +428,7 @@ struct fw_iso_context {
60574 union {
60575 fw_iso_callback_t sc;
60576 fw_iso_mc_callback_t mc;
60577 - } callback;
60578 + } __no_const callback;
60579 void *callback_data;
60580 };
60581
60582 diff --git a/include/linux/fs.h b/include/linux/fs.h
60583 index 277f497..9be66a4 100644
60584 --- a/include/linux/fs.h
60585 +++ b/include/linux/fs.h
60586 @@ -1588,7 +1588,8 @@ struct file_operations {
60587 int (*setlease)(struct file *, long, struct file_lock **);
60588 long (*fallocate)(struct file *file, int mode, loff_t offset,
60589 loff_t len);
60590 -};
60591 +} __do_const;
60592 +typedef struct file_operations __no_const file_operations_no_const;
60593
60594 struct inode_operations {
60595 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60596 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60597 index 003dc0f..3c4ea97 100644
60598 --- a/include/linux/fs_struct.h
60599 +++ b/include/linux/fs_struct.h
60600 @@ -6,7 +6,7 @@
60601 #include <linux/seqlock.h>
60602
60603 struct fs_struct {
60604 - int users;
60605 + atomic_t users;
60606 spinlock_t lock;
60607 seqcount_t seq;
60608 int umask;
60609 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60610 index af095b5..cf1220c 100644
60611 --- a/include/linux/fscache-cache.h
60612 +++ b/include/linux/fscache-cache.h
60613 @@ -102,7 +102,7 @@ struct fscache_operation {
60614 fscache_operation_release_t release;
60615 };
60616
60617 -extern atomic_t fscache_op_debug_id;
60618 +extern atomic_unchecked_t fscache_op_debug_id;
60619 extern void fscache_op_work_func(struct work_struct *work);
60620
60621 extern void fscache_enqueue_operation(struct fscache_operation *);
60622 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60623 {
60624 INIT_WORK(&op->work, fscache_op_work_func);
60625 atomic_set(&op->usage, 1);
60626 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60627 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60628 op->processor = processor;
60629 op->release = release;
60630 INIT_LIST_HEAD(&op->pend_link);
60631 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60632 index 2a53f10..0187fdf 100644
60633 --- a/include/linux/fsnotify.h
60634 +++ b/include/linux/fsnotify.h
60635 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60636 */
60637 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60638 {
60639 - return kstrdup(name, GFP_KERNEL);
60640 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60641 }
60642
60643 /*
60644 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60645 index 96efa67..1261547 100644
60646 --- a/include/linux/ftrace_event.h
60647 +++ b/include/linux/ftrace_event.h
60648 @@ -97,7 +97,7 @@ struct trace_event_functions {
60649 trace_print_func raw;
60650 trace_print_func hex;
60651 trace_print_func binary;
60652 -};
60653 +} __no_const;
60654
60655 struct trace_event {
60656 struct hlist_node node;
60657 @@ -252,7 +252,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60658 extern int trace_add_event_call(struct ftrace_event_call *call);
60659 extern void trace_remove_event_call(struct ftrace_event_call *call);
60660
60661 -#define is_signed_type(type) (((type)(-1)) < 0)
60662 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60663
60664 int trace_set_clr_event(const char *system, const char *event, int set);
60665
60666 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60667 index 02fa469..a15f279 100644
60668 --- a/include/linux/genhd.h
60669 +++ b/include/linux/genhd.h
60670 @@ -184,7 +184,7 @@ struct gendisk {
60671 struct kobject *slave_dir;
60672
60673 struct timer_rand_state *random;
60674 - atomic_t sync_io; /* RAID */
60675 + atomic_unchecked_t sync_io; /* RAID */
60676 struct disk_events *ev;
60677 #ifdef CONFIG_BLK_DEV_INTEGRITY
60678 struct blk_integrity *integrity;
60679 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60680 new file mode 100644
60681 index 0000000..0dc3943
60682 --- /dev/null
60683 +++ b/include/linux/gracl.h
60684 @@ -0,0 +1,317 @@
60685 +#ifndef GR_ACL_H
60686 +#define GR_ACL_H
60687 +
60688 +#include <linux/grdefs.h>
60689 +#include <linux/resource.h>
60690 +#include <linux/capability.h>
60691 +#include <linux/dcache.h>
60692 +#include <asm/resource.h>
60693 +
60694 +/* Major status information */
60695 +
60696 +#define GR_VERSION "grsecurity 2.2.2"
60697 +#define GRSECURITY_VERSION 0x2202
60698 +
60699 +enum {
60700 + GR_SHUTDOWN = 0,
60701 + GR_ENABLE = 1,
60702 + GR_SPROLE = 2,
60703 + GR_RELOAD = 3,
60704 + GR_SEGVMOD = 4,
60705 + GR_STATUS = 5,
60706 + GR_UNSPROLE = 6,
60707 + GR_PASSSET = 7,
60708 + GR_SPROLEPAM = 8,
60709 +};
60710 +
60711 +/* Password setup definitions
60712 + * kernel/grhash.c */
60713 +enum {
60714 + GR_PW_LEN = 128,
60715 + GR_SALT_LEN = 16,
60716 + GR_SHA_LEN = 32,
60717 +};
60718 +
60719 +enum {
60720 + GR_SPROLE_LEN = 64,
60721 +};
60722 +
60723 +enum {
60724 + GR_NO_GLOB = 0,
60725 + GR_REG_GLOB,
60726 + GR_CREATE_GLOB
60727 +};
60728 +
60729 +#define GR_NLIMITS 32
60730 +
60731 +/* Begin Data Structures */
60732 +
60733 +struct sprole_pw {
60734 + unsigned char *rolename;
60735 + unsigned char salt[GR_SALT_LEN];
60736 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60737 +};
60738 +
60739 +struct name_entry {
60740 + __u32 key;
60741 + ino_t inode;
60742 + dev_t device;
60743 + char *name;
60744 + __u16 len;
60745 + __u8 deleted;
60746 + struct name_entry *prev;
60747 + struct name_entry *next;
60748 +};
60749 +
60750 +struct inodev_entry {
60751 + struct name_entry *nentry;
60752 + struct inodev_entry *prev;
60753 + struct inodev_entry *next;
60754 +};
60755 +
60756 +struct acl_role_db {
60757 + struct acl_role_label **r_hash;
60758 + __u32 r_size;
60759 +};
60760 +
60761 +struct inodev_db {
60762 + struct inodev_entry **i_hash;
60763 + __u32 i_size;
60764 +};
60765 +
60766 +struct name_db {
60767 + struct name_entry **n_hash;
60768 + __u32 n_size;
60769 +};
60770 +
60771 +struct crash_uid {
60772 + uid_t uid;
60773 + unsigned long expires;
60774 +};
60775 +
60776 +struct gr_hash_struct {
60777 + void **table;
60778 + void **nametable;
60779 + void *first;
60780 + __u32 table_size;
60781 + __u32 used_size;
60782 + int type;
60783 +};
60784 +
60785 +/* Userspace Grsecurity ACL data structures */
60786 +
60787 +struct acl_subject_label {
60788 + char *filename;
60789 + ino_t inode;
60790 + dev_t device;
60791 + __u32 mode;
60792 + kernel_cap_t cap_mask;
60793 + kernel_cap_t cap_lower;
60794 + kernel_cap_t cap_invert_audit;
60795 +
60796 + struct rlimit res[GR_NLIMITS];
60797 + __u32 resmask;
60798 +
60799 + __u8 user_trans_type;
60800 + __u8 group_trans_type;
60801 + uid_t *user_transitions;
60802 + gid_t *group_transitions;
60803 + __u16 user_trans_num;
60804 + __u16 group_trans_num;
60805 +
60806 + __u32 sock_families[2];
60807 + __u32 ip_proto[8];
60808 + __u32 ip_type;
60809 + struct acl_ip_label **ips;
60810 + __u32 ip_num;
60811 + __u32 inaddr_any_override;
60812 +
60813 + __u32 crashes;
60814 + unsigned long expires;
60815 +
60816 + struct acl_subject_label *parent_subject;
60817 + struct gr_hash_struct *hash;
60818 + struct acl_subject_label *prev;
60819 + struct acl_subject_label *next;
60820 +
60821 + struct acl_object_label **obj_hash;
60822 + __u32 obj_hash_size;
60823 + __u16 pax_flags;
60824 +};
60825 +
60826 +struct role_allowed_ip {
60827 + __u32 addr;
60828 + __u32 netmask;
60829 +
60830 + struct role_allowed_ip *prev;
60831 + struct role_allowed_ip *next;
60832 +};
60833 +
60834 +struct role_transition {
60835 + char *rolename;
60836 +
60837 + struct role_transition *prev;
60838 + struct role_transition *next;
60839 +};
60840 +
60841 +struct acl_role_label {
60842 + char *rolename;
60843 + uid_t uidgid;
60844 + __u16 roletype;
60845 +
60846 + __u16 auth_attempts;
60847 + unsigned long expires;
60848 +
60849 + struct acl_subject_label *root_label;
60850 + struct gr_hash_struct *hash;
60851 +
60852 + struct acl_role_label *prev;
60853 + struct acl_role_label *next;
60854 +
60855 + struct role_transition *transitions;
60856 + struct role_allowed_ip *allowed_ips;
60857 + uid_t *domain_children;
60858 + __u16 domain_child_num;
60859 +
60860 + struct acl_subject_label **subj_hash;
60861 + __u32 subj_hash_size;
60862 +};
60863 +
60864 +struct user_acl_role_db {
60865 + struct acl_role_label **r_table;
60866 + __u32 num_pointers; /* Number of allocations to track */
60867 + __u32 num_roles; /* Number of roles */
60868 + __u32 num_domain_children; /* Number of domain children */
60869 + __u32 num_subjects; /* Number of subjects */
60870 + __u32 num_objects; /* Number of objects */
60871 +};
60872 +
60873 +struct acl_object_label {
60874 + char *filename;
60875 + ino_t inode;
60876 + dev_t device;
60877 + __u32 mode;
60878 +
60879 + struct acl_subject_label *nested;
60880 + struct acl_object_label *globbed;
60881 +
60882 + /* next two structures not used */
60883 +
60884 + struct acl_object_label *prev;
60885 + struct acl_object_label *next;
60886 +};
60887 +
60888 +struct acl_ip_label {
60889 + char *iface;
60890 + __u32 addr;
60891 + __u32 netmask;
60892 + __u16 low, high;
60893 + __u8 mode;
60894 + __u32 type;
60895 + __u32 proto[8];
60896 +
60897 + /* next two structures not used */
60898 +
60899 + struct acl_ip_label *prev;
60900 + struct acl_ip_label *next;
60901 +};
60902 +
60903 +struct gr_arg {
60904 + struct user_acl_role_db role_db;
60905 + unsigned char pw[GR_PW_LEN];
60906 + unsigned char salt[GR_SALT_LEN];
60907 + unsigned char sum[GR_SHA_LEN];
60908 + unsigned char sp_role[GR_SPROLE_LEN];
60909 + struct sprole_pw *sprole_pws;
60910 + dev_t segv_device;
60911 + ino_t segv_inode;
60912 + uid_t segv_uid;
60913 + __u16 num_sprole_pws;
60914 + __u16 mode;
60915 +};
60916 +
60917 +struct gr_arg_wrapper {
60918 + struct gr_arg *arg;
60919 + __u32 version;
60920 + __u32 size;
60921 +};
60922 +
60923 +struct subject_map {
60924 + struct acl_subject_label *user;
60925 + struct acl_subject_label *kernel;
60926 + struct subject_map *prev;
60927 + struct subject_map *next;
60928 +};
60929 +
60930 +struct acl_subj_map_db {
60931 + struct subject_map **s_hash;
60932 + __u32 s_size;
60933 +};
60934 +
60935 +/* End Data Structures Section */
60936 +
60937 +/* Hash functions generated by empirical testing by Brad Spengler
60938 + Makes good use of the low bits of the inode. Generally 0-1 times
60939 + in loop for successful match. 0-3 for unsuccessful match.
60940 + Shift/add algorithm with modulus of table size and an XOR*/
60941 +
60942 +static __inline__ unsigned int
60943 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60944 +{
60945 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60946 +}
60947 +
60948 + static __inline__ unsigned int
60949 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60950 +{
60951 + return ((const unsigned long)userp % sz);
60952 +}
60953 +
60954 +static __inline__ unsigned int
60955 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60956 +{
60957 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60958 +}
60959 +
60960 +static __inline__ unsigned int
60961 +nhash(const char *name, const __u16 len, const unsigned int sz)
60962 +{
60963 + return full_name_hash((const unsigned char *)name, len) % sz;
60964 +}
60965 +
60966 +#define FOR_EACH_ROLE_START(role) \
60967 + role = role_list; \
60968 + while (role) {
60969 +
60970 +#define FOR_EACH_ROLE_END(role) \
60971 + role = role->prev; \
60972 + }
60973 +
60974 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60975 + subj = NULL; \
60976 + iter = 0; \
60977 + while (iter < role->subj_hash_size) { \
60978 + if (subj == NULL) \
60979 + subj = role->subj_hash[iter]; \
60980 + if (subj == NULL) { \
60981 + iter++; \
60982 + continue; \
60983 + }
60984 +
60985 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60986 + subj = subj->next; \
60987 + if (subj == NULL) \
60988 + iter++; \
60989 + }
60990 +
60991 +
60992 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60993 + subj = role->hash->first; \
60994 + while (subj != NULL) {
60995 +
60996 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60997 + subj = subj->next; \
60998 + }
60999 +
61000 +#endif
61001 +
61002 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61003 new file mode 100644
61004 index 0000000..323ecf2
61005 --- /dev/null
61006 +++ b/include/linux/gralloc.h
61007 @@ -0,0 +1,9 @@
61008 +#ifndef __GRALLOC_H
61009 +#define __GRALLOC_H
61010 +
61011 +void acl_free_all(void);
61012 +int acl_alloc_stack_init(unsigned long size);
61013 +void *acl_alloc(unsigned long len);
61014 +void *acl_alloc_num(unsigned long num, unsigned long len);
61015 +
61016 +#endif
61017 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61018 new file mode 100644
61019 index 0000000..b30e9bc
61020 --- /dev/null
61021 +++ b/include/linux/grdefs.h
61022 @@ -0,0 +1,140 @@
61023 +#ifndef GRDEFS_H
61024 +#define GRDEFS_H
61025 +
61026 +/* Begin grsecurity status declarations */
61027 +
61028 +enum {
61029 + GR_READY = 0x01,
61030 + GR_STATUS_INIT = 0x00 // disabled state
61031 +};
61032 +
61033 +/* Begin ACL declarations */
61034 +
61035 +/* Role flags */
61036 +
61037 +enum {
61038 + GR_ROLE_USER = 0x0001,
61039 + GR_ROLE_GROUP = 0x0002,
61040 + GR_ROLE_DEFAULT = 0x0004,
61041 + GR_ROLE_SPECIAL = 0x0008,
61042 + GR_ROLE_AUTH = 0x0010,
61043 + GR_ROLE_NOPW = 0x0020,
61044 + GR_ROLE_GOD = 0x0040,
61045 + GR_ROLE_LEARN = 0x0080,
61046 + GR_ROLE_TPE = 0x0100,
61047 + GR_ROLE_DOMAIN = 0x0200,
61048 + GR_ROLE_PAM = 0x0400,
61049 + GR_ROLE_PERSIST = 0x0800
61050 +};
61051 +
61052 +/* ACL Subject and Object mode flags */
61053 +enum {
61054 + GR_DELETED = 0x80000000
61055 +};
61056 +
61057 +/* ACL Object-only mode flags */
61058 +enum {
61059 + GR_READ = 0x00000001,
61060 + GR_APPEND = 0x00000002,
61061 + GR_WRITE = 0x00000004,
61062 + GR_EXEC = 0x00000008,
61063 + GR_FIND = 0x00000010,
61064 + GR_INHERIT = 0x00000020,
61065 + GR_SETID = 0x00000040,
61066 + GR_CREATE = 0x00000080,
61067 + GR_DELETE = 0x00000100,
61068 + GR_LINK = 0x00000200,
61069 + GR_AUDIT_READ = 0x00000400,
61070 + GR_AUDIT_APPEND = 0x00000800,
61071 + GR_AUDIT_WRITE = 0x00001000,
61072 + GR_AUDIT_EXEC = 0x00002000,
61073 + GR_AUDIT_FIND = 0x00004000,
61074 + GR_AUDIT_INHERIT= 0x00008000,
61075 + GR_AUDIT_SETID = 0x00010000,
61076 + GR_AUDIT_CREATE = 0x00020000,
61077 + GR_AUDIT_DELETE = 0x00040000,
61078 + GR_AUDIT_LINK = 0x00080000,
61079 + GR_PTRACERD = 0x00100000,
61080 + GR_NOPTRACE = 0x00200000,
61081 + GR_SUPPRESS = 0x00400000,
61082 + GR_NOLEARN = 0x00800000,
61083 + GR_INIT_TRANSFER= 0x01000000
61084 +};
61085 +
61086 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61087 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61088 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61089 +
61090 +/* ACL subject-only mode flags */
61091 +enum {
61092 + GR_KILL = 0x00000001,
61093 + GR_VIEW = 0x00000002,
61094 + GR_PROTECTED = 0x00000004,
61095 + GR_LEARN = 0x00000008,
61096 + GR_OVERRIDE = 0x00000010,
61097 + /* just a placeholder, this mode is only used in userspace */
61098 + GR_DUMMY = 0x00000020,
61099 + GR_PROTSHM = 0x00000040,
61100 + GR_KILLPROC = 0x00000080,
61101 + GR_KILLIPPROC = 0x00000100,
61102 + /* just a placeholder, this mode is only used in userspace */
61103 + GR_NOTROJAN = 0x00000200,
61104 + GR_PROTPROCFD = 0x00000400,
61105 + GR_PROCACCT = 0x00000800,
61106 + GR_RELAXPTRACE = 0x00001000,
61107 + GR_NESTED = 0x00002000,
61108 + GR_INHERITLEARN = 0x00004000,
61109 + GR_PROCFIND = 0x00008000,
61110 + GR_POVERRIDE = 0x00010000,
61111 + GR_KERNELAUTH = 0x00020000,
61112 + GR_ATSECURE = 0x00040000,
61113 + GR_SHMEXEC = 0x00080000
61114 +};
61115 +
61116 +enum {
61117 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61118 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61119 + GR_PAX_ENABLE_MPROTECT = 0x0004,
61120 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
61121 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61122 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61123 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61124 + GR_PAX_DISABLE_MPROTECT = 0x0400,
61125 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
61126 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61127 +};
61128 +
61129 +enum {
61130 + GR_ID_USER = 0x01,
61131 + GR_ID_GROUP = 0x02,
61132 +};
61133 +
61134 +enum {
61135 + GR_ID_ALLOW = 0x01,
61136 + GR_ID_DENY = 0x02,
61137 +};
61138 +
61139 +#define GR_CRASH_RES 31
61140 +#define GR_UIDTABLE_MAX 500
61141 +
61142 +/* begin resource learning section */
61143 +enum {
61144 + GR_RLIM_CPU_BUMP = 60,
61145 + GR_RLIM_FSIZE_BUMP = 50000,
61146 + GR_RLIM_DATA_BUMP = 10000,
61147 + GR_RLIM_STACK_BUMP = 1000,
61148 + GR_RLIM_CORE_BUMP = 10000,
61149 + GR_RLIM_RSS_BUMP = 500000,
61150 + GR_RLIM_NPROC_BUMP = 1,
61151 + GR_RLIM_NOFILE_BUMP = 5,
61152 + GR_RLIM_MEMLOCK_BUMP = 50000,
61153 + GR_RLIM_AS_BUMP = 500000,
61154 + GR_RLIM_LOCKS_BUMP = 2,
61155 + GR_RLIM_SIGPENDING_BUMP = 5,
61156 + GR_RLIM_MSGQUEUE_BUMP = 10000,
61157 + GR_RLIM_NICE_BUMP = 1,
61158 + GR_RLIM_RTPRIO_BUMP = 1,
61159 + GR_RLIM_RTTIME_BUMP = 1000000
61160 +};
61161 +
61162 +#endif
61163 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61164 new file mode 100644
61165 index 0000000..60cda84
61166 --- /dev/null
61167 +++ b/include/linux/grinternal.h
61168 @@ -0,0 +1,220 @@
61169 +#ifndef __GRINTERNAL_H
61170 +#define __GRINTERNAL_H
61171 +
61172 +#ifdef CONFIG_GRKERNSEC
61173 +
61174 +#include <linux/fs.h>
61175 +#include <linux/mnt_namespace.h>
61176 +#include <linux/nsproxy.h>
61177 +#include <linux/gracl.h>
61178 +#include <linux/grdefs.h>
61179 +#include <linux/grmsg.h>
61180 +
61181 +void gr_add_learn_entry(const char *fmt, ...)
61182 + __attribute__ ((format (printf, 1, 2)));
61183 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61184 + const struct vfsmount *mnt);
61185 +__u32 gr_check_create(const struct dentry *new_dentry,
61186 + const struct dentry *parent,
61187 + const struct vfsmount *mnt, const __u32 mode);
61188 +int gr_check_protected_task(const struct task_struct *task);
61189 +__u32 to_gr_audit(const __u32 reqmode);
61190 +int gr_set_acls(const int type);
61191 +int gr_apply_subject_to_task(struct task_struct *task);
61192 +int gr_acl_is_enabled(void);
61193 +char gr_roletype_to_char(void);
61194 +
61195 +void gr_handle_alertkill(struct task_struct *task);
61196 +char *gr_to_filename(const struct dentry *dentry,
61197 + const struct vfsmount *mnt);
61198 +char *gr_to_filename1(const struct dentry *dentry,
61199 + const struct vfsmount *mnt);
61200 +char *gr_to_filename2(const struct dentry *dentry,
61201 + const struct vfsmount *mnt);
61202 +char *gr_to_filename3(const struct dentry *dentry,
61203 + const struct vfsmount *mnt);
61204 +
61205 +extern int grsec_enable_harden_ptrace;
61206 +extern int grsec_enable_link;
61207 +extern int grsec_enable_fifo;
61208 +extern int grsec_enable_execve;
61209 +extern int grsec_enable_shm;
61210 +extern int grsec_enable_execlog;
61211 +extern int grsec_enable_signal;
61212 +extern int grsec_enable_audit_ptrace;
61213 +extern int grsec_enable_forkfail;
61214 +extern int grsec_enable_time;
61215 +extern int grsec_enable_rofs;
61216 +extern int grsec_enable_chroot_shmat;
61217 +extern int grsec_enable_chroot_mount;
61218 +extern int grsec_enable_chroot_double;
61219 +extern int grsec_enable_chroot_pivot;
61220 +extern int grsec_enable_chroot_chdir;
61221 +extern int grsec_enable_chroot_chmod;
61222 +extern int grsec_enable_chroot_mknod;
61223 +extern int grsec_enable_chroot_fchdir;
61224 +extern int grsec_enable_chroot_nice;
61225 +extern int grsec_enable_chroot_execlog;
61226 +extern int grsec_enable_chroot_caps;
61227 +extern int grsec_enable_chroot_sysctl;
61228 +extern int grsec_enable_chroot_unix;
61229 +extern int grsec_enable_tpe;
61230 +extern int grsec_tpe_gid;
61231 +extern int grsec_enable_tpe_all;
61232 +extern int grsec_enable_tpe_invert;
61233 +extern int grsec_enable_socket_all;
61234 +extern int grsec_socket_all_gid;
61235 +extern int grsec_enable_socket_client;
61236 +extern int grsec_socket_client_gid;
61237 +extern int grsec_enable_socket_server;
61238 +extern int grsec_socket_server_gid;
61239 +extern int grsec_audit_gid;
61240 +extern int grsec_enable_group;
61241 +extern int grsec_enable_audit_textrel;
61242 +extern int grsec_enable_log_rwxmaps;
61243 +extern int grsec_enable_mount;
61244 +extern int grsec_enable_chdir;
61245 +extern int grsec_resource_logging;
61246 +extern int grsec_enable_blackhole;
61247 +extern int grsec_lastack_retries;
61248 +extern int grsec_enable_brute;
61249 +extern int grsec_lock;
61250 +
61251 +extern spinlock_t grsec_alert_lock;
61252 +extern unsigned long grsec_alert_wtime;
61253 +extern unsigned long grsec_alert_fyet;
61254 +
61255 +extern spinlock_t grsec_audit_lock;
61256 +
61257 +extern rwlock_t grsec_exec_file_lock;
61258 +
61259 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61260 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61261 + (tsk)->exec_file->f_vfsmnt) : "/")
61262 +
61263 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61264 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61265 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61266 +
61267 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61268 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61269 + (tsk)->exec_file->f_vfsmnt) : "/")
61270 +
61271 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61272 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61273 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61274 +
61275 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61276 +
61277 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61278 +
61279 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61280 + (task)->pid, (cred)->uid, \
61281 + (cred)->euid, (cred)->gid, (cred)->egid, \
61282 + gr_parent_task_fullpath(task), \
61283 + (task)->real_parent->comm, (task)->real_parent->pid, \
61284 + (pcred)->uid, (pcred)->euid, \
61285 + (pcred)->gid, (pcred)->egid
61286 +
61287 +#define GR_CHROOT_CAPS {{ \
61288 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61289 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61290 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61291 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61292 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61293 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61294 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61295 +
61296 +#define security_learn(normal_msg,args...) \
61297 +({ \
61298 + read_lock(&grsec_exec_file_lock); \
61299 + gr_add_learn_entry(normal_msg "\n", ## args); \
61300 + read_unlock(&grsec_exec_file_lock); \
61301 +})
61302 +
61303 +enum {
61304 + GR_DO_AUDIT,
61305 + GR_DONT_AUDIT,
61306 + /* used for non-audit messages that we shouldn't kill the task on */
61307 + GR_DONT_AUDIT_GOOD
61308 +};
61309 +
61310 +enum {
61311 + GR_TTYSNIFF,
61312 + GR_RBAC,
61313 + GR_RBAC_STR,
61314 + GR_STR_RBAC,
61315 + GR_RBAC_MODE2,
61316 + GR_RBAC_MODE3,
61317 + GR_FILENAME,
61318 + GR_SYSCTL_HIDDEN,
61319 + GR_NOARGS,
61320 + GR_ONE_INT,
61321 + GR_ONE_INT_TWO_STR,
61322 + GR_ONE_STR,
61323 + GR_STR_INT,
61324 + GR_TWO_STR_INT,
61325 + GR_TWO_INT,
61326 + GR_TWO_U64,
61327 + GR_THREE_INT,
61328 + GR_FIVE_INT_TWO_STR,
61329 + GR_TWO_STR,
61330 + GR_THREE_STR,
61331 + GR_FOUR_STR,
61332 + GR_STR_FILENAME,
61333 + GR_FILENAME_STR,
61334 + GR_FILENAME_TWO_INT,
61335 + GR_FILENAME_TWO_INT_STR,
61336 + GR_TEXTREL,
61337 + GR_PTRACE,
61338 + GR_RESOURCE,
61339 + GR_CAP,
61340 + GR_SIG,
61341 + GR_SIG2,
61342 + GR_CRASH1,
61343 + GR_CRASH2,
61344 + GR_PSACCT,
61345 + GR_RWXMAP
61346 +};
61347 +
61348 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61349 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61350 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61351 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61352 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61353 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61354 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61355 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61356 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61357 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61358 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61359 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61360 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61361 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61362 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61363 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61364 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61365 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61366 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61367 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61368 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61369 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61370 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61371 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61372 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61373 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61374 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61375 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61376 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61377 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61378 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61379 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61380 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61381 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61382 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61383 +
61384 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61385 +
61386 +#endif
61387 +
61388 +#endif
61389 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61390 new file mode 100644
61391 index 0000000..9d5fd4a
61392 --- /dev/null
61393 +++ b/include/linux/grmsg.h
61394 @@ -0,0 +1,108 @@
61395 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61396 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61397 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61398 +#define GR_STOPMOD_MSG "denied modification of module state by "
61399 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61400 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61401 +#define GR_IOPERM_MSG "denied use of ioperm() by "
61402 +#define GR_IOPL_MSG "denied use of iopl() by "
61403 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61404 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61405 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61406 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61407 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61408 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61409 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61410 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61411 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61412 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61413 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61414 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61415 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61416 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61417 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61418 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61419 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61420 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61421 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61422 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61423 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61424 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61425 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61426 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61427 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61428 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61429 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
61430 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61431 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61432 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61433 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61434 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61435 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61436 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61437 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61438 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
61439 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61440 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61441 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61442 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61443 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61444 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61445 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61446 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61447 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
61448 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61449 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61450 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61451 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61452 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61453 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61454 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61455 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61456 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61457 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61458 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61459 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61460 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61461 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61462 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61463 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61464 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61465 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61466 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61467 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
61468 +#define GR_NICE_CHROOT_MSG "denied priority change by "
61469 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61470 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61471 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61472 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61473 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61474 +#define GR_TIME_MSG "time set by "
61475 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61476 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61477 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61478 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61479 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61480 +#define GR_BIND_MSG "denied bind() by "
61481 +#define GR_CONNECT_MSG "denied connect() by "
61482 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61483 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61484 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61485 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61486 +#define GR_CAP_ACL_MSG "use of %s denied for "
61487 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61488 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61489 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61490 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61491 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61492 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61493 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61494 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61495 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61496 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61497 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61498 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61499 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61500 +#define GR_VM86_MSG "denied use of vm86 by "
61501 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61502 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61503 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61504 new file mode 100644
61505 index 0000000..bd25f72
61506 --- /dev/null
61507 +++ b/include/linux/grsecurity.h
61508 @@ -0,0 +1,228 @@
61509 +#ifndef GR_SECURITY_H
61510 +#define GR_SECURITY_H
61511 +#include <linux/fs.h>
61512 +#include <linux/fs_struct.h>
61513 +#include <linux/binfmts.h>
61514 +#include <linux/gracl.h>
61515 +
61516 +/* notify of brain-dead configs */
61517 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61518 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61519 +#endif
61520 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61521 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61522 +#endif
61523 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61524 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61525 +#endif
61526 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61527 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61528 +#endif
61529 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61530 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61531 +#endif
61532 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61533 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
61534 +#endif
61535 +
61536 +#include <linux/compat.h>
61537 +
61538 +struct user_arg_ptr {
61539 +#ifdef CONFIG_COMPAT
61540 + bool is_compat;
61541 +#endif
61542 + union {
61543 + const char __user *const __user *native;
61544 +#ifdef CONFIG_COMPAT
61545 + compat_uptr_t __user *compat;
61546 +#endif
61547 + } ptr;
61548 +};
61549 +
61550 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61551 +void gr_handle_brute_check(void);
61552 +void gr_handle_kernel_exploit(void);
61553 +int gr_process_user_ban(void);
61554 +
61555 +char gr_roletype_to_char(void);
61556 +
61557 +int gr_acl_enable_at_secure(void);
61558 +
61559 +int gr_check_user_change(int real, int effective, int fs);
61560 +int gr_check_group_change(int real, int effective, int fs);
61561 +
61562 +void gr_del_task_from_ip_table(struct task_struct *p);
61563 +
61564 +int gr_pid_is_chrooted(struct task_struct *p);
61565 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61566 +int gr_handle_chroot_nice(void);
61567 +int gr_handle_chroot_sysctl(const int op);
61568 +int gr_handle_chroot_setpriority(struct task_struct *p,
61569 + const int niceval);
61570 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61571 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61572 + const struct vfsmount *mnt);
61573 +void gr_handle_chroot_chdir(struct path *path);
61574 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61575 + const struct vfsmount *mnt, const int mode);
61576 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61577 + const struct vfsmount *mnt, const int mode);
61578 +int gr_handle_chroot_mount(const struct dentry *dentry,
61579 + const struct vfsmount *mnt,
61580 + const char *dev_name);
61581 +int gr_handle_chroot_pivot(void);
61582 +int gr_handle_chroot_unix(const pid_t pid);
61583 +
61584 +int gr_handle_rawio(const struct inode *inode);
61585 +
61586 +void gr_handle_ioperm(void);
61587 +void gr_handle_iopl(void);
61588 +
61589 +int gr_tpe_allow(const struct file *file);
61590 +
61591 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61592 +void gr_clear_chroot_entries(struct task_struct *task);
61593 +
61594 +void gr_log_forkfail(const int retval);
61595 +void gr_log_timechange(void);
61596 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61597 +void gr_log_chdir(const struct dentry *dentry,
61598 + const struct vfsmount *mnt);
61599 +void gr_log_chroot_exec(const struct dentry *dentry,
61600 + const struct vfsmount *mnt);
61601 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61602 +void gr_log_remount(const char *devname, const int retval);
61603 +void gr_log_unmount(const char *devname, const int retval);
61604 +void gr_log_mount(const char *from, const char *to, const int retval);
61605 +void gr_log_textrel(struct vm_area_struct *vma);
61606 +void gr_log_rwxmmap(struct file *file);
61607 +void gr_log_rwxmprotect(struct file *file);
61608 +
61609 +int gr_handle_follow_link(const struct inode *parent,
61610 + const struct inode *inode,
61611 + const struct dentry *dentry,
61612 + const struct vfsmount *mnt);
61613 +int gr_handle_fifo(const struct dentry *dentry,
61614 + const struct vfsmount *mnt,
61615 + const struct dentry *dir, const int flag,
61616 + const int acc_mode);
61617 +int gr_handle_hardlink(const struct dentry *dentry,
61618 + const struct vfsmount *mnt,
61619 + struct inode *inode,
61620 + const int mode, const char *to);
61621 +
61622 +int gr_is_capable(const int cap);
61623 +int gr_is_capable_nolog(const int cap);
61624 +void gr_learn_resource(const struct task_struct *task, const int limit,
61625 + const unsigned long wanted, const int gt);
61626 +void gr_copy_label(struct task_struct *tsk);
61627 +void gr_handle_crash(struct task_struct *task, const int sig);
61628 +int gr_handle_signal(const struct task_struct *p, const int sig);
61629 +int gr_check_crash_uid(const uid_t uid);
61630 +int gr_check_protected_task(const struct task_struct *task);
61631 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61632 +int gr_acl_handle_mmap(const struct file *file,
61633 + const unsigned long prot);
61634 +int gr_acl_handle_mprotect(const struct file *file,
61635 + const unsigned long prot);
61636 +int gr_check_hidden_task(const struct task_struct *tsk);
61637 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61638 + const struct vfsmount *mnt);
61639 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61640 + const struct vfsmount *mnt);
61641 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61642 + const struct vfsmount *mnt, const int fmode);
61643 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
61644 + const struct vfsmount *mnt, mode_t mode);
61645 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61646 + const struct vfsmount *mnt, mode_t mode);
61647 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61648 + const struct vfsmount *mnt);
61649 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61650 + const struct vfsmount *mnt);
61651 +int gr_handle_ptrace(struct task_struct *task, const long request);
61652 +int gr_handle_proc_ptrace(struct task_struct *task);
61653 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61654 + const struct vfsmount *mnt);
61655 +int gr_check_crash_exec(const struct file *filp);
61656 +int gr_acl_is_enabled(void);
61657 +void gr_set_kernel_label(struct task_struct *task);
61658 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61659 + const gid_t gid);
61660 +int gr_set_proc_label(const struct dentry *dentry,
61661 + const struct vfsmount *mnt,
61662 + const int unsafe_share);
61663 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61664 + const struct vfsmount *mnt);
61665 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61666 + const struct vfsmount *mnt, int acc_mode);
61667 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61668 + const struct dentry *p_dentry,
61669 + const struct vfsmount *p_mnt,
61670 + int open_flags, int acc_mode, const int imode);
61671 +void gr_handle_create(const struct dentry *dentry,
61672 + const struct vfsmount *mnt);
61673 +void gr_handle_proc_create(const struct dentry *dentry,
61674 + const struct inode *inode);
61675 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61676 + const struct dentry *parent_dentry,
61677 + const struct vfsmount *parent_mnt,
61678 + const int mode);
61679 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61680 + const struct dentry *parent_dentry,
61681 + const struct vfsmount *parent_mnt);
61682 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61683 + const struct vfsmount *mnt);
61684 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61685 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61686 + const struct vfsmount *mnt);
61687 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61688 + const struct dentry *parent_dentry,
61689 + const struct vfsmount *parent_mnt,
61690 + const char *from);
61691 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61692 + const struct dentry *parent_dentry,
61693 + const struct vfsmount *parent_mnt,
61694 + const struct dentry *old_dentry,
61695 + const struct vfsmount *old_mnt, const char *to);
61696 +int gr_acl_handle_rename(struct dentry *new_dentry,
61697 + struct dentry *parent_dentry,
61698 + const struct vfsmount *parent_mnt,
61699 + struct dentry *old_dentry,
61700 + struct inode *old_parent_inode,
61701 + struct vfsmount *old_mnt, const char *newname);
61702 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61703 + struct dentry *old_dentry,
61704 + struct dentry *new_dentry,
61705 + struct vfsmount *mnt, const __u8 replace);
61706 +__u32 gr_check_link(const struct dentry *new_dentry,
61707 + const struct dentry *parent_dentry,
61708 + const struct vfsmount *parent_mnt,
61709 + const struct dentry *old_dentry,
61710 + const struct vfsmount *old_mnt);
61711 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61712 + const unsigned int namelen, const ino_t ino);
61713 +
61714 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61715 + const struct vfsmount *mnt);
61716 +void gr_acl_handle_exit(void);
61717 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61718 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61719 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61720 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61721 +void gr_audit_ptrace(struct task_struct *task);
61722 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61723 +
61724 +#ifdef CONFIG_GRKERNSEC
61725 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61726 +void gr_handle_vm86(void);
61727 +void gr_handle_mem_readwrite(u64 from, u64 to);
61728 +
61729 +extern int grsec_enable_dmesg;
61730 +extern int grsec_disable_privio;
61731 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61732 +extern int grsec_enable_chroot_findtask;
61733 +#endif
61734 +#endif
61735 +
61736 +#endif
61737 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61738 new file mode 100644
61739 index 0000000..e7ffaaf
61740 --- /dev/null
61741 +++ b/include/linux/grsock.h
61742 @@ -0,0 +1,19 @@
61743 +#ifndef __GRSOCK_H
61744 +#define __GRSOCK_H
61745 +
61746 +extern void gr_attach_curr_ip(const struct sock *sk);
61747 +extern int gr_handle_sock_all(const int family, const int type,
61748 + const int protocol);
61749 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61750 +extern int gr_handle_sock_server_other(const struct sock *sck);
61751 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61752 +extern int gr_search_connect(struct socket * sock,
61753 + struct sockaddr_in * addr);
61754 +extern int gr_search_bind(struct socket * sock,
61755 + struct sockaddr_in * addr);
61756 +extern int gr_search_listen(struct socket * sock);
61757 +extern int gr_search_accept(struct socket * sock);
61758 +extern int gr_search_socket(const int domain, const int type,
61759 + const int protocol);
61760 +
61761 +#endif
61762 diff --git a/include/linux/hid.h b/include/linux/hid.h
61763 index 9cf8e7a..5ec94d0 100644
61764 --- a/include/linux/hid.h
61765 +++ b/include/linux/hid.h
61766 @@ -676,7 +676,7 @@ struct hid_ll_driver {
61767 unsigned int code, int value);
61768
61769 int (*parse)(struct hid_device *hdev);
61770 -};
61771 +} __no_const;
61772
61773 #define PM_HINT_FULLON 1<<5
61774 #define PM_HINT_NORMAL 1<<1
61775 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61776 index 3a93f73..b19d0b3 100644
61777 --- a/include/linux/highmem.h
61778 +++ b/include/linux/highmem.h
61779 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61780 kunmap_atomic(kaddr, KM_USER0);
61781 }
61782
61783 +static inline void sanitize_highpage(struct page *page)
61784 +{
61785 + void *kaddr;
61786 + unsigned long flags;
61787 +
61788 + local_irq_save(flags);
61789 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
61790 + clear_page(kaddr);
61791 + kunmap_atomic(kaddr, KM_CLEARPAGE);
61792 + local_irq_restore(flags);
61793 +}
61794 +
61795 static inline void zero_user_segments(struct page *page,
61796 unsigned start1, unsigned end1,
61797 unsigned start2, unsigned end2)
61798 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61799 index a6c652e..1f5878f 100644
61800 --- a/include/linux/i2c.h
61801 +++ b/include/linux/i2c.h
61802 @@ -346,6 +346,7 @@ struct i2c_algorithm {
61803 /* To determine what the adapter supports */
61804 u32 (*functionality) (struct i2c_adapter *);
61805 };
61806 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61807
61808 /*
61809 * i2c_adapter is the structure used to identify a physical i2c bus along
61810 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61811 index a6deef4..c56a7f2 100644
61812 --- a/include/linux/i2o.h
61813 +++ b/include/linux/i2o.h
61814 @@ -564,7 +564,7 @@ struct i2o_controller {
61815 struct i2o_device *exec; /* Executive */
61816 #if BITS_PER_LONG == 64
61817 spinlock_t context_list_lock; /* lock for context_list */
61818 - atomic_t context_list_counter; /* needed for unique contexts */
61819 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61820 struct list_head context_list; /* list of context id's
61821 and pointers */
61822 #endif
61823 diff --git a/include/linux/init.h b/include/linux/init.h
61824 index 9146f39..885354d 100644
61825 --- a/include/linux/init.h
61826 +++ b/include/linux/init.h
61827 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
61828
61829 /* Each module must use one module_init(). */
61830 #define module_init(initfn) \
61831 - static inline initcall_t __inittest(void) \
61832 + static inline __used initcall_t __inittest(void) \
61833 { return initfn; } \
61834 int init_module(void) __attribute__((alias(#initfn)));
61835
61836 /* This is only required if you want to be unloadable. */
61837 #define module_exit(exitfn) \
61838 - static inline exitcall_t __exittest(void) \
61839 + static inline __used exitcall_t __exittest(void) \
61840 { return exitfn; } \
61841 void cleanup_module(void) __attribute__((alias(#exitfn)));
61842
61843 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61844 index d14e058..4162929 100644
61845 --- a/include/linux/init_task.h
61846 +++ b/include/linux/init_task.h
61847 @@ -126,6 +126,12 @@ extern struct cred init_cred;
61848 # define INIT_PERF_EVENTS(tsk)
61849 #endif
61850
61851 +#ifdef CONFIG_X86
61852 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61853 +#else
61854 +#define INIT_TASK_THREAD_INFO
61855 +#endif
61856 +
61857 /*
61858 * INIT_TASK is used to set up the first task table, touch at
61859 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61860 @@ -164,6 +170,7 @@ extern struct cred init_cred;
61861 RCU_INIT_POINTER(.cred, &init_cred), \
61862 .comm = "swapper", \
61863 .thread = INIT_THREAD, \
61864 + INIT_TASK_THREAD_INFO \
61865 .fs = &init_fs, \
61866 .files = &init_files, \
61867 .signal = &init_signals, \
61868 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61869 index 9310c69..6ebb244 100644
61870 --- a/include/linux/intel-iommu.h
61871 +++ b/include/linux/intel-iommu.h
61872 @@ -296,7 +296,7 @@ struct iommu_flush {
61873 u8 fm, u64 type);
61874 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61875 unsigned int size_order, u64 type);
61876 -};
61877 +} __no_const;
61878
61879 enum {
61880 SR_DMAR_FECTL_REG,
61881 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61882 index f51a81b..adfcb44 100644
61883 --- a/include/linux/interrupt.h
61884 +++ b/include/linux/interrupt.h
61885 @@ -425,7 +425,7 @@ enum
61886 /* map softirq index to softirq name. update 'softirq_to_name' in
61887 * kernel/softirq.c when adding a new softirq.
61888 */
61889 -extern char *softirq_to_name[NR_SOFTIRQS];
61890 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61891
61892 /* softirq mask and active fields moved to irq_cpustat_t in
61893 * asm/hardirq.h to get better cache usage. KAO
61894 @@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61895
61896 struct softirq_action
61897 {
61898 - void (*action)(struct softirq_action *);
61899 + void (*action)(void);
61900 };
61901
61902 asmlinkage void do_softirq(void);
61903 asmlinkage void __do_softirq(void);
61904 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61905 +extern void open_softirq(int nr, void (*action)(void));
61906 extern void softirq_init(void);
61907 static inline void __raise_softirq_irqoff(unsigned int nr)
61908 {
61909 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61910 index 0df513b..fe901a2 100644
61911 --- a/include/linux/kallsyms.h
61912 +++ b/include/linux/kallsyms.h
61913 @@ -15,7 +15,8 @@
61914
61915 struct module;
61916
61917 -#ifdef CONFIG_KALLSYMS
61918 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61919 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61920 /* Lookup the address for a symbol. Returns 0 if not found. */
61921 unsigned long kallsyms_lookup_name(const char *name);
61922
61923 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61924 /* Stupid that this does nothing, but I didn't create this mess. */
61925 #define __print_symbol(fmt, addr)
61926 #endif /*CONFIG_KALLSYMS*/
61927 +#else /* when included by kallsyms.c, vsnprintf.c, or
61928 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61929 +extern void __print_symbol(const char *fmt, unsigned long address);
61930 +extern int sprint_backtrace(char *buffer, unsigned long address);
61931 +extern int sprint_symbol(char *buffer, unsigned long address);
61932 +const char *kallsyms_lookup(unsigned long addr,
61933 + unsigned long *symbolsize,
61934 + unsigned long *offset,
61935 + char **modname, char *namebuf);
61936 +#endif
61937
61938 /* This macro allows us to keep printk typechecking */
61939 static void __check_printsym_format(const char *fmt, ...)
61940 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61941 index fa39183..40160be 100644
61942 --- a/include/linux/kgdb.h
61943 +++ b/include/linux/kgdb.h
61944 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61945 extern int kgdb_io_module_registered;
61946
61947 extern atomic_t kgdb_setting_breakpoint;
61948 -extern atomic_t kgdb_cpu_doing_single_step;
61949 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61950
61951 extern struct task_struct *kgdb_usethread;
61952 extern struct task_struct *kgdb_contthread;
61953 @@ -251,7 +251,7 @@ struct kgdb_arch {
61954 void (*disable_hw_break)(struct pt_regs *regs);
61955 void (*remove_all_hw_break)(void);
61956 void (*correct_hw_break)(void);
61957 -};
61958 +} __do_const;
61959
61960 /**
61961 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61962 @@ -276,7 +276,7 @@ struct kgdb_io {
61963 void (*pre_exception) (void);
61964 void (*post_exception) (void);
61965 int is_console;
61966 -};
61967 +} __do_const;
61968
61969 extern struct kgdb_arch arch_kgdb_ops;
61970
61971 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61972 index 0da38cf..d23f05f 100644
61973 --- a/include/linux/kmod.h
61974 +++ b/include/linux/kmod.h
61975 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61976 * usually useless though. */
61977 extern int __request_module(bool wait, const char *name, ...) \
61978 __attribute__((format(printf, 2, 3)));
61979 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
61980 + __attribute__((format(printf, 3, 4)));
61981 #define request_module(mod...) __request_module(true, mod)
61982 #define request_module_nowait(mod...) __request_module(false, mod)
61983 #define try_then_request_module(x, mod...) \
61984 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61985 index eabb21a..3f030f4 100644
61986 --- a/include/linux/kvm_host.h
61987 +++ b/include/linux/kvm_host.h
61988 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61989 void vcpu_load(struct kvm_vcpu *vcpu);
61990 void vcpu_put(struct kvm_vcpu *vcpu);
61991
61992 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61993 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61994 struct module *module);
61995 void kvm_exit(void);
61996
61997 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61998 struct kvm_guest_debug *dbg);
61999 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62000
62001 -int kvm_arch_init(void *opaque);
62002 +int kvm_arch_init(const void *opaque);
62003 void kvm_arch_exit(void);
62004
62005 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62006 diff --git a/include/linux/libata.h b/include/linux/libata.h
62007 index efd6f98..5f5fd37 100644
62008 --- a/include/linux/libata.h
62009 +++ b/include/linux/libata.h
62010 @@ -909,7 +909,7 @@ struct ata_port_operations {
62011 * fields must be pointers.
62012 */
62013 const struct ata_port_operations *inherits;
62014 -};
62015 +} __do_const;
62016
62017 struct ata_port_info {
62018 unsigned long flags;
62019 diff --git a/include/linux/mca.h b/include/linux/mca.h
62020 index 3797270..7765ede 100644
62021 --- a/include/linux/mca.h
62022 +++ b/include/linux/mca.h
62023 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62024 int region);
62025 void * (*mca_transform_memory)(struct mca_device *,
62026 void *memory);
62027 -};
62028 +} __no_const;
62029
62030 struct mca_bus {
62031 u64 default_dma_mask;
62032 diff --git a/include/linux/memory.h b/include/linux/memory.h
62033 index 935699b..11042cc 100644
62034 --- a/include/linux/memory.h
62035 +++ b/include/linux/memory.h
62036 @@ -144,7 +144,7 @@ struct memory_accessor {
62037 size_t count);
62038 ssize_t (*write)(struct memory_accessor *, const char *buf,
62039 off_t offset, size_t count);
62040 -};
62041 +} __no_const;
62042
62043 /*
62044 * Kernel text modification mutex, used for code patching. Users of this lock
62045 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62046 index 896b5e4..1159ad0 100644
62047 --- a/include/linux/mfd/abx500.h
62048 +++ b/include/linux/mfd/abx500.h
62049 @@ -234,6 +234,7 @@ struct abx500_ops {
62050 int (*event_registers_startup_state_get) (struct device *, u8 *);
62051 int (*startup_irq_enabled) (struct device *, unsigned int);
62052 };
62053 +typedef struct abx500_ops __no_const abx500_ops_no_const;
62054
62055 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62056 void abx500_remove_ops(struct device *dev);
62057 diff --git a/include/linux/mm.h b/include/linux/mm.h
62058 index fedc5f0..7cedb6d 100644
62059 --- a/include/linux/mm.h
62060 +++ b/include/linux/mm.h
62061 @@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void *objp);
62062
62063 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62064 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62065 +
62066 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62067 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62068 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62069 +#else
62070 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62071 +#endif
62072 +
62073 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62074 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62075
62076 @@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
62077 int set_page_dirty_lock(struct page *page);
62078 int clear_page_dirty_for_io(struct page *page);
62079
62080 -/* Is the vma a continuation of the stack vma above it? */
62081 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62082 -{
62083 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62084 -}
62085 -
62086 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
62087 - unsigned long addr)
62088 -{
62089 - return (vma->vm_flags & VM_GROWSDOWN) &&
62090 - (vma->vm_start == addr) &&
62091 - !vma_growsdown(vma->vm_prev, addr);
62092 -}
62093 -
62094 -/* Is the vma a continuation of the stack vma below it? */
62095 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62096 -{
62097 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62098 -}
62099 -
62100 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
62101 - unsigned long addr)
62102 -{
62103 - return (vma->vm_flags & VM_GROWSUP) &&
62104 - (vma->vm_end == addr) &&
62105 - !vma_growsup(vma->vm_next, addr);
62106 -}
62107 -
62108 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62109 unsigned long old_addr, struct vm_area_struct *new_vma,
62110 unsigned long new_addr, unsigned long len);
62111 @@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
62112 }
62113 #endif
62114
62115 +#ifdef CONFIG_MMU
62116 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62117 +#else
62118 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62119 +{
62120 + return __pgprot(0);
62121 +}
62122 +#endif
62123 +
62124 int vma_wants_writenotify(struct vm_area_struct *vma);
62125
62126 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62127 @@ -1417,6 +1405,7 @@ out:
62128 }
62129
62130 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62131 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62132
62133 extern unsigned long do_brk(unsigned long, unsigned long);
62134
62135 @@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62136 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62137 struct vm_area_struct **pprev);
62138
62139 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62140 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62141 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62142 +
62143 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62144 NULL if none. Assume start_addr < end_addr. */
62145 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62146 @@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
62147 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
62148 }
62149
62150 -#ifdef CONFIG_MMU
62151 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
62152 -#else
62153 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62154 -{
62155 - return __pgprot(0);
62156 -}
62157 -#endif
62158 -
62159 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62160 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62161 unsigned long pfn, unsigned long size, pgprot_t);
62162 @@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long pfn);
62163 extern int sysctl_memory_failure_early_kill;
62164 extern int sysctl_memory_failure_recovery;
62165 extern void shake_page(struct page *p, int access);
62166 -extern atomic_long_t mce_bad_pages;
62167 +extern atomic_long_unchecked_t mce_bad_pages;
62168 extern int soft_offline_page(struct page *page, int flags);
62169
62170 extern void dump_page(struct page *page);
62171 @@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
62172 unsigned int pages_per_huge_page);
62173 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
62174
62175 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62176 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62177 +#else
62178 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62179 +#endif
62180 +
62181 #endif /* __KERNEL__ */
62182 #endif /* _LINUX_MM_H */
62183 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62184 index 10a2f62..c8fa287 100644
62185 --- a/include/linux/mm_types.h
62186 +++ b/include/linux/mm_types.h
62187 @@ -230,6 +230,8 @@ struct vm_area_struct {
62188 #ifdef CONFIG_NUMA
62189 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62190 #endif
62191 +
62192 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62193 };
62194
62195 struct core_thread {
62196 @@ -362,6 +364,24 @@ struct mm_struct {
62197 #ifdef CONFIG_CPUMASK_OFFSTACK
62198 struct cpumask cpumask_allocation;
62199 #endif
62200 +
62201 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62202 + unsigned long pax_flags;
62203 +#endif
62204 +
62205 +#ifdef CONFIG_PAX_DLRESOLVE
62206 + unsigned long call_dl_resolve;
62207 +#endif
62208 +
62209 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62210 + unsigned long call_syscall;
62211 +#endif
62212 +
62213 +#ifdef CONFIG_PAX_ASLR
62214 + unsigned long delta_mmap; /* randomized offset */
62215 + unsigned long delta_stack; /* randomized offset */
62216 +#endif
62217 +
62218 };
62219
62220 static inline void mm_init_cpumask(struct mm_struct *mm)
62221 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62222 index 1d1b1e1..2a13c78 100644
62223 --- a/include/linux/mmu_notifier.h
62224 +++ b/include/linux/mmu_notifier.h
62225 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62226 */
62227 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62228 ({ \
62229 - pte_t __pte; \
62230 + pte_t ___pte; \
62231 struct vm_area_struct *___vma = __vma; \
62232 unsigned long ___address = __address; \
62233 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62234 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62235 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62236 - __pte; \
62237 + ___pte; \
62238 })
62239
62240 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62241 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62242 index be1ac8d..26868ce 100644
62243 --- a/include/linux/mmzone.h
62244 +++ b/include/linux/mmzone.h
62245 @@ -356,7 +356,7 @@ struct zone {
62246 unsigned long flags; /* zone flags, see below */
62247
62248 /* Zone statistics */
62249 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62250 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62251
62252 /*
62253 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62254 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62255 index ae28e93..1ac2233 100644
62256 --- a/include/linux/mod_devicetable.h
62257 +++ b/include/linux/mod_devicetable.h
62258 @@ -12,7 +12,7 @@
62259 typedef unsigned long kernel_ulong_t;
62260 #endif
62261
62262 -#define PCI_ANY_ID (~0)
62263 +#define PCI_ANY_ID ((__u16)~0)
62264
62265 struct pci_device_id {
62266 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62267 @@ -131,7 +131,7 @@ struct usb_device_id {
62268 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62269 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62270
62271 -#define HID_ANY_ID (~0)
62272 +#define HID_ANY_ID (~0U)
62273
62274 struct hid_device_id {
62275 __u16 bus;
62276 diff --git a/include/linux/module.h b/include/linux/module.h
62277 index 1c30087..fc2a442 100644
62278 --- a/include/linux/module.h
62279 +++ b/include/linux/module.h
62280 @@ -16,6 +16,7 @@
62281 #include <linux/kobject.h>
62282 #include <linux/moduleparam.h>
62283 #include <linux/tracepoint.h>
62284 +#include <linux/fs.h>
62285
62286 #include <linux/percpu.h>
62287 #include <asm/module.h>
62288 @@ -327,19 +328,16 @@ struct module
62289 int (*init)(void);
62290
62291 /* If this is non-NULL, vfree after init() returns */
62292 - void *module_init;
62293 + void *module_init_rx, *module_init_rw;
62294
62295 /* Here is the actual code + data, vfree'd on unload. */
62296 - void *module_core;
62297 + void *module_core_rx, *module_core_rw;
62298
62299 /* Here are the sizes of the init and core sections */
62300 - unsigned int init_size, core_size;
62301 + unsigned int init_size_rw, core_size_rw;
62302
62303 /* The size of the executable code in each section. */
62304 - unsigned int init_text_size, core_text_size;
62305 -
62306 - /* Size of RO sections of the module (text+rodata) */
62307 - unsigned int init_ro_size, core_ro_size;
62308 + unsigned int init_size_rx, core_size_rx;
62309
62310 /* Arch-specific module values */
62311 struct mod_arch_specific arch;
62312 @@ -395,6 +393,10 @@ struct module
62313 #ifdef CONFIG_EVENT_TRACING
62314 struct ftrace_event_call **trace_events;
62315 unsigned int num_trace_events;
62316 + struct file_operations trace_id;
62317 + struct file_operations trace_enable;
62318 + struct file_operations trace_format;
62319 + struct file_operations trace_filter;
62320 #endif
62321 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62322 unsigned int num_ftrace_callsites;
62323 @@ -445,16 +447,46 @@ bool is_module_address(unsigned long addr);
62324 bool is_module_percpu_address(unsigned long addr);
62325 bool is_module_text_address(unsigned long addr);
62326
62327 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62328 +{
62329 +
62330 +#ifdef CONFIG_PAX_KERNEXEC
62331 + if (ktla_ktva(addr) >= (unsigned long)start &&
62332 + ktla_ktva(addr) < (unsigned long)start + size)
62333 + return 1;
62334 +#endif
62335 +
62336 + return ((void *)addr >= start && (void *)addr < start + size);
62337 +}
62338 +
62339 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62340 +{
62341 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62342 +}
62343 +
62344 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62345 +{
62346 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62347 +}
62348 +
62349 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62350 +{
62351 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62352 +}
62353 +
62354 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62355 +{
62356 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62357 +}
62358 +
62359 static inline int within_module_core(unsigned long addr, struct module *mod)
62360 {
62361 - return (unsigned long)mod->module_core <= addr &&
62362 - addr < (unsigned long)mod->module_core + mod->core_size;
62363 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62364 }
62365
62366 static inline int within_module_init(unsigned long addr, struct module *mod)
62367 {
62368 - return (unsigned long)mod->module_init <= addr &&
62369 - addr < (unsigned long)mod->module_init + mod->init_size;
62370 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62371 }
62372
62373 /* Search for module by name: must hold module_mutex. */
62374 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62375 index b2be02e..6a9fdb1 100644
62376 --- a/include/linux/moduleloader.h
62377 +++ b/include/linux/moduleloader.h
62378 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62379 sections. Returns NULL on failure. */
62380 void *module_alloc(unsigned long size);
62381
62382 +#ifdef CONFIG_PAX_KERNEXEC
62383 +void *module_alloc_exec(unsigned long size);
62384 +#else
62385 +#define module_alloc_exec(x) module_alloc(x)
62386 +#endif
62387 +
62388 /* Free memory returned from module_alloc. */
62389 void module_free(struct module *mod, void *module_region);
62390
62391 +#ifdef CONFIG_PAX_KERNEXEC
62392 +void module_free_exec(struct module *mod, void *module_region);
62393 +#else
62394 +#define module_free_exec(x, y) module_free((x), (y))
62395 +#endif
62396 +
62397 /* Apply the given relocation to the (simplified) ELF. Return -error
62398 or 0. */
62399 int apply_relocate(Elf_Shdr *sechdrs,
62400 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62401 index ddaae98..3c70938 100644
62402 --- a/include/linux/moduleparam.h
62403 +++ b/include/linux/moduleparam.h
62404 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock(void)
62405 * @len is usually just sizeof(string).
62406 */
62407 #define module_param_string(name, string, len, perm) \
62408 - static const struct kparam_string __param_string_##name \
62409 + static const struct kparam_string __param_string_##name __used \
62410 = { len, string }; \
62411 __module_param_call(MODULE_PARAM_PREFIX, name, \
62412 &param_ops_string, \
62413 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
62414 * module_param_named() for why this might be necessary.
62415 */
62416 #define module_param_array_named(name, array, type, nump, perm) \
62417 - static const struct kparam_array __param_arr_##name \
62418 + static const struct kparam_array __param_arr_##name __used \
62419 = { .max = ARRAY_SIZE(array), .num = nump, \
62420 .ops = &param_ops_##type, \
62421 .elemsize = sizeof(array[0]), .elem = array }; \
62422 diff --git a/include/linux/namei.h b/include/linux/namei.h
62423 index ffc0213..2c1f2cb 100644
62424 --- a/include/linux/namei.h
62425 +++ b/include/linux/namei.h
62426 @@ -24,7 +24,7 @@ struct nameidata {
62427 unsigned seq;
62428 int last_type;
62429 unsigned depth;
62430 - char *saved_names[MAX_NESTED_LINKS + 1];
62431 + const char *saved_names[MAX_NESTED_LINKS + 1];
62432
62433 /* Intent data */
62434 union {
62435 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62436 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62437 extern void unlock_rename(struct dentry *, struct dentry *);
62438
62439 -static inline void nd_set_link(struct nameidata *nd, char *path)
62440 +static inline void nd_set_link(struct nameidata *nd, const char *path)
62441 {
62442 nd->saved_names[nd->depth] = path;
62443 }
62444
62445 -static inline char *nd_get_link(struct nameidata *nd)
62446 +static inline const char *nd_get_link(const struct nameidata *nd)
62447 {
62448 return nd->saved_names[nd->depth];
62449 }
62450 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62451 index ddee79b..67af106 100644
62452 --- a/include/linux/netdevice.h
62453 +++ b/include/linux/netdevice.h
62454 @@ -944,6 +944,7 @@ struct net_device_ops {
62455 int (*ndo_set_features)(struct net_device *dev,
62456 u32 features);
62457 };
62458 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62459
62460 /*
62461 * The DEVICE structure.
62462 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62463 new file mode 100644
62464 index 0000000..33f4af8
62465 --- /dev/null
62466 +++ b/include/linux/netfilter/xt_gradm.h
62467 @@ -0,0 +1,9 @@
62468 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62469 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62470 +
62471 +struct xt_gradm_mtinfo {
62472 + __u16 flags;
62473 + __u16 invflags;
62474 +};
62475 +
62476 +#endif
62477 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62478 index c65a18a..0c05f3a 100644
62479 --- a/include/linux/of_pdt.h
62480 +++ b/include/linux/of_pdt.h
62481 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62482
62483 /* return 0 on success; fill in 'len' with number of bytes in path */
62484 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62485 -};
62486 +} __no_const;
62487
62488 extern void *prom_early_alloc(unsigned long size);
62489
62490 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62491 index 49c8727..34d2ae1 100644
62492 --- a/include/linux/oprofile.h
62493 +++ b/include/linux/oprofile.h
62494 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62495 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62496 char const * name, ulong * val);
62497
62498 -/** Create a file for read-only access to an atomic_t. */
62499 +/** Create a file for read-only access to an atomic_unchecked_t. */
62500 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62501 - char const * name, atomic_t * val);
62502 + char const * name, atomic_unchecked_t * val);
62503
62504 /** create a directory */
62505 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62506 diff --git a/include/linux/padata.h b/include/linux/padata.h
62507 index 4633b2f..988bc08 100644
62508 --- a/include/linux/padata.h
62509 +++ b/include/linux/padata.h
62510 @@ -129,7 +129,7 @@ struct parallel_data {
62511 struct padata_instance *pinst;
62512 struct padata_parallel_queue __percpu *pqueue;
62513 struct padata_serial_queue __percpu *squeue;
62514 - atomic_t seq_nr;
62515 + atomic_unchecked_t seq_nr;
62516 atomic_t reorder_objects;
62517 atomic_t refcnt;
62518 unsigned int max_seq_nr;
62519 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62520 index c816075..cd28c4d 100644
62521 --- a/include/linux/perf_event.h
62522 +++ b/include/linux/perf_event.h
62523 @@ -745,8 +745,8 @@ struct perf_event {
62524
62525 enum perf_event_active_state state;
62526 unsigned int attach_state;
62527 - local64_t count;
62528 - atomic64_t child_count;
62529 + local64_t count; /* PaX: fix it one day */
62530 + atomic64_unchecked_t child_count;
62531
62532 /*
62533 * These are the total time in nanoseconds that the event
62534 @@ -797,8 +797,8 @@ struct perf_event {
62535 * These accumulate total time (in nanoseconds) that children
62536 * events have been enabled and running, respectively.
62537 */
62538 - atomic64_t child_total_time_enabled;
62539 - atomic64_t child_total_time_running;
62540 + atomic64_unchecked_t child_total_time_enabled;
62541 + atomic64_unchecked_t child_total_time_running;
62542
62543 /*
62544 * Protect attach/detach and child_list:
62545 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62546 index 77257c9..51d473a 100644
62547 --- a/include/linux/pipe_fs_i.h
62548 +++ b/include/linux/pipe_fs_i.h
62549 @@ -46,9 +46,9 @@ struct pipe_buffer {
62550 struct pipe_inode_info {
62551 wait_queue_head_t wait;
62552 unsigned int nrbufs, curbuf, buffers;
62553 - unsigned int readers;
62554 - unsigned int writers;
62555 - unsigned int waiting_writers;
62556 + atomic_t readers;
62557 + atomic_t writers;
62558 + atomic_t waiting_writers;
62559 unsigned int r_counter;
62560 unsigned int w_counter;
62561 struct page *tmp_page;
62562 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62563 index daac05d..c6802ce 100644
62564 --- a/include/linux/pm_runtime.h
62565 +++ b/include/linux/pm_runtime.h
62566 @@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62567
62568 static inline void pm_runtime_mark_last_busy(struct device *dev)
62569 {
62570 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62571 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62572 }
62573
62574 #else /* !CONFIG_PM_RUNTIME */
62575 diff --git a/include/linux/poison.h b/include/linux/poison.h
62576 index 79159de..f1233a9 100644
62577 --- a/include/linux/poison.h
62578 +++ b/include/linux/poison.h
62579 @@ -19,8 +19,8 @@
62580 * under normal circumstances, used to verify that nobody uses
62581 * non-initialized list entries.
62582 */
62583 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62584 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62585 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62586 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62587
62588 /********** include/linux/timer.h **********/
62589 /*
62590 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62591 index 58969b2..ead129b 100644
62592 --- a/include/linux/preempt.h
62593 +++ b/include/linux/preempt.h
62594 @@ -123,7 +123,7 @@ struct preempt_ops {
62595 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62596 void (*sched_out)(struct preempt_notifier *notifier,
62597 struct task_struct *next);
62598 -};
62599 +} __no_const;
62600
62601 /**
62602 * preempt_notifier - key for installing preemption notifiers
62603 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62604 index 643b96c..ef55a9c 100644
62605 --- a/include/linux/proc_fs.h
62606 +++ b/include/linux/proc_fs.h
62607 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
62608 return proc_create_data(name, mode, parent, proc_fops, NULL);
62609 }
62610
62611 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
62612 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62613 +{
62614 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62615 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62616 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62617 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62618 +#else
62619 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62620 +#endif
62621 +}
62622 +
62623 +
62624 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62625 mode_t mode, struct proc_dir_entry *base,
62626 read_proc_t *read_proc, void * data)
62627 @@ -258,7 +271,7 @@ union proc_op {
62628 int (*proc_show)(struct seq_file *m,
62629 struct pid_namespace *ns, struct pid *pid,
62630 struct task_struct *task);
62631 -};
62632 +} __no_const;
62633
62634 struct ctl_table_header;
62635 struct ctl_table;
62636 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62637 index 800f113..af90cc8 100644
62638 --- a/include/linux/ptrace.h
62639 +++ b/include/linux/ptrace.h
62640 @@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_struct *child);
62641 extern void exit_ptrace(struct task_struct *tracer);
62642 #define PTRACE_MODE_READ 1
62643 #define PTRACE_MODE_ATTACH 2
62644 -/* Returns 0 on success, -errno on denial. */
62645 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
62646 /* Returns true on success, false on denial. */
62647 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
62648 +/* Returns true on success, false on denial. */
62649 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
62650
62651 static inline int ptrace_reparented(struct task_struct *child)
62652 {
62653 diff --git a/include/linux/random.h b/include/linux/random.h
62654 index d13059f..2eaafaa 100644
62655 --- a/include/linux/random.h
62656 +++ b/include/linux/random.h
62657 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62658
62659 u32 prandom32(struct rnd_state *);
62660
62661 +static inline unsigned long pax_get_random_long(void)
62662 +{
62663 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62664 +}
62665 +
62666 /*
62667 * Handle minimum values for seeds
62668 */
62669 static inline u32 __seed(u32 x, u32 m)
62670 {
62671 - return (x < m) ? x + m : x;
62672 + return (x <= m) ? x + m + 1 : x;
62673 }
62674
62675 /**
62676 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62677 index e0879a7..a12f962 100644
62678 --- a/include/linux/reboot.h
62679 +++ b/include/linux/reboot.h
62680 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62681 * Architecture-specific implementations of sys_reboot commands.
62682 */
62683
62684 -extern void machine_restart(char *cmd);
62685 -extern void machine_halt(void);
62686 -extern void machine_power_off(void);
62687 +extern void machine_restart(char *cmd) __noreturn;
62688 +extern void machine_halt(void) __noreturn;
62689 +extern void machine_power_off(void) __noreturn;
62690
62691 extern void machine_shutdown(void);
62692 struct pt_regs;
62693 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62694 */
62695
62696 extern void kernel_restart_prepare(char *cmd);
62697 -extern void kernel_restart(char *cmd);
62698 -extern void kernel_halt(void);
62699 -extern void kernel_power_off(void);
62700 +extern void kernel_restart(char *cmd) __noreturn;
62701 +extern void kernel_halt(void) __noreturn;
62702 +extern void kernel_power_off(void) __noreturn;
62703
62704 extern int C_A_D; /* for sysctl */
62705 void ctrl_alt_del(void);
62706 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62707 * Emergency restart, callable from an interrupt handler.
62708 */
62709
62710 -extern void emergency_restart(void);
62711 +extern void emergency_restart(void) __noreturn;
62712 #include <asm/emergency-restart.h>
62713
62714 #endif
62715 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
62716 index 96d465f..b084e05 100644
62717 --- a/include/linux/reiserfs_fs.h
62718 +++ b/include/linux/reiserfs_fs.h
62719 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
62720 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
62721
62722 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62723 -#define get_generation(s) atomic_read (&fs_generation(s))
62724 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62725 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62726 #define __fs_changed(gen,s) (gen != get_generation (s))
62727 #define fs_changed(gen,s) \
62728 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62729 index 52c83b6..18ed7eb 100644
62730 --- a/include/linux/reiserfs_fs_sb.h
62731 +++ b/include/linux/reiserfs_fs_sb.h
62732 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
62733 /* Comment? -Hans */
62734 wait_queue_head_t s_wait;
62735 /* To be obsoleted soon by per buffer seals.. -Hans */
62736 - atomic_t s_generation_counter; // increased by one every time the
62737 + atomic_unchecked_t s_generation_counter; // increased by one every time the
62738 // tree gets re-balanced
62739 unsigned long s_properties; /* File system properties. Currently holds
62740 on-disk FS format */
62741 diff --git a/include/linux/relay.h b/include/linux/relay.h
62742 index 14a86bc..17d0700 100644
62743 --- a/include/linux/relay.h
62744 +++ b/include/linux/relay.h
62745 @@ -159,7 +159,7 @@ struct rchan_callbacks
62746 * The callback should return 0 if successful, negative if not.
62747 */
62748 int (*remove_buf_file)(struct dentry *dentry);
62749 -};
62750 +} __no_const;
62751
62752 /*
62753 * CONFIG_RELAY kernel API, kernel/relay.c
62754 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62755 index c6c6084..5bf1212 100644
62756 --- a/include/linux/rfkill.h
62757 +++ b/include/linux/rfkill.h
62758 @@ -147,6 +147,7 @@ struct rfkill_ops {
62759 void (*query)(struct rfkill *rfkill, void *data);
62760 int (*set_block)(void *data, bool blocked);
62761 };
62762 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62763
62764 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62765 /**
62766 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62767 index 2148b12..519b820 100644
62768 --- a/include/linux/rmap.h
62769 +++ b/include/linux/rmap.h
62770 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62771 void anon_vma_init(void); /* create anon_vma_cachep */
62772 int anon_vma_prepare(struct vm_area_struct *);
62773 void unlink_anon_vmas(struct vm_area_struct *);
62774 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62775 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62776 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62777 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62778 void __anon_vma_link(struct vm_area_struct *);
62779
62780 static inline void anon_vma_merge(struct vm_area_struct *vma,
62781 diff --git a/include/linux/sched.h b/include/linux/sched.h
62782 index 41d0237..5a64056 100644
62783 --- a/include/linux/sched.h
62784 +++ b/include/linux/sched.h
62785 @@ -100,6 +100,7 @@ struct bio_list;
62786 struct fs_struct;
62787 struct perf_event_context;
62788 struct blk_plug;
62789 +struct linux_binprm;
62790
62791 /*
62792 * List of flags we want to share for kernel threads,
62793 @@ -380,10 +381,13 @@ struct user_namespace;
62794 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62795
62796 extern int sysctl_max_map_count;
62797 +extern unsigned long sysctl_heap_stack_gap;
62798
62799 #include <linux/aio.h>
62800
62801 #ifdef CONFIG_MMU
62802 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62803 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62804 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62805 extern unsigned long
62806 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62807 @@ -629,6 +633,17 @@ struct signal_struct {
62808 #ifdef CONFIG_TASKSTATS
62809 struct taskstats *stats;
62810 #endif
62811 +
62812 +#ifdef CONFIG_GRKERNSEC
62813 + u32 curr_ip;
62814 + u32 saved_ip;
62815 + u32 gr_saddr;
62816 + u32 gr_daddr;
62817 + u16 gr_sport;
62818 + u16 gr_dport;
62819 + u8 used_accept:1;
62820 +#endif
62821 +
62822 #ifdef CONFIG_AUDIT
62823 unsigned audit_tty;
62824 struct tty_audit_buf *tty_audit_buf;
62825 @@ -710,6 +725,11 @@ struct user_struct {
62826 struct key *session_keyring; /* UID's default session keyring */
62827 #endif
62828
62829 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62830 + unsigned int banned;
62831 + unsigned long ban_expires;
62832 +#endif
62833 +
62834 /* Hash table maintenance information */
62835 struct hlist_node uidhash_node;
62836 uid_t uid;
62837 @@ -1340,8 +1360,8 @@ struct task_struct {
62838 struct list_head thread_group;
62839
62840 struct completion *vfork_done; /* for vfork() */
62841 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62842 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62843 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62844 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62845
62846 cputime_t utime, stime, utimescaled, stimescaled;
62847 cputime_t gtime;
62848 @@ -1357,13 +1377,6 @@ struct task_struct {
62849 struct task_cputime cputime_expires;
62850 struct list_head cpu_timers[3];
62851
62852 -/* process credentials */
62853 - const struct cred __rcu *real_cred; /* objective and real subjective task
62854 - * credentials (COW) */
62855 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62856 - * credentials (COW) */
62857 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62858 -
62859 char comm[TASK_COMM_LEN]; /* executable name excluding path
62860 - access with [gs]et_task_comm (which lock
62861 it with task_lock())
62862 @@ -1380,8 +1393,16 @@ struct task_struct {
62863 #endif
62864 /* CPU-specific state of this task */
62865 struct thread_struct thread;
62866 +/* thread_info moved to task_struct */
62867 +#ifdef CONFIG_X86
62868 + struct thread_info tinfo;
62869 +#endif
62870 /* filesystem information */
62871 struct fs_struct *fs;
62872 +
62873 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62874 + * credentials (COW) */
62875 +
62876 /* open file information */
62877 struct files_struct *files;
62878 /* namespaces */
62879 @@ -1428,6 +1449,11 @@ struct task_struct {
62880 struct rt_mutex_waiter *pi_blocked_on;
62881 #endif
62882
62883 +/* process credentials */
62884 + const struct cred __rcu *real_cred; /* objective and real subjective task
62885 + * credentials (COW) */
62886 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62887 +
62888 #ifdef CONFIG_DEBUG_MUTEXES
62889 /* mutex deadlock detection */
62890 struct mutex_waiter *blocked_on;
62891 @@ -1537,6 +1563,21 @@ struct task_struct {
62892 unsigned long default_timer_slack_ns;
62893
62894 struct list_head *scm_work_list;
62895 +
62896 +#ifdef CONFIG_GRKERNSEC
62897 + /* grsecurity */
62898 + struct dentry *gr_chroot_dentry;
62899 + struct acl_subject_label *acl;
62900 + struct acl_role_label *role;
62901 + struct file *exec_file;
62902 + u16 acl_role_id;
62903 + /* is this the task that authenticated to the special role */
62904 + u8 acl_sp_role;
62905 + u8 is_writable;
62906 + u8 brute;
62907 + u8 gr_is_chrooted;
62908 +#endif
62909 +
62910 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62911 /* Index of current stored address in ret_stack */
62912 int curr_ret_stack;
62913 @@ -1571,6 +1612,57 @@ struct task_struct {
62914 #endif
62915 };
62916
62917 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62918 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62919 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62920 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62921 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62922 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62923 +
62924 +#ifdef CONFIG_PAX_SOFTMODE
62925 +extern int pax_softmode;
62926 +#endif
62927 +
62928 +extern int pax_check_flags(unsigned long *);
62929 +
62930 +/* if tsk != current then task_lock must be held on it */
62931 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62932 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62933 +{
62934 + if (likely(tsk->mm))
62935 + return tsk->mm->pax_flags;
62936 + else
62937 + return 0UL;
62938 +}
62939 +
62940 +/* if tsk != current then task_lock must be held on it */
62941 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62942 +{
62943 + if (likely(tsk->mm)) {
62944 + tsk->mm->pax_flags = flags;
62945 + return 0;
62946 + }
62947 + return -EINVAL;
62948 +}
62949 +#endif
62950 +
62951 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62952 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62953 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62954 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62955 +#endif
62956 +
62957 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62958 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62959 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62960 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
62961 +
62962 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62963 +extern void pax_track_stack(void);
62964 +#else
62965 +static inline void pax_track_stack(void) {}
62966 +#endif
62967 +
62968 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62969 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62970
62971 @@ -2074,7 +2166,9 @@ void yield(void);
62972 extern struct exec_domain default_exec_domain;
62973
62974 union thread_union {
62975 +#ifndef CONFIG_X86
62976 struct thread_info thread_info;
62977 +#endif
62978 unsigned long stack[THREAD_SIZE/sizeof(long)];
62979 };
62980
62981 @@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
62982 */
62983
62984 extern struct task_struct *find_task_by_vpid(pid_t nr);
62985 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62986 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62987 struct pid_namespace *ns);
62988
62989 @@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62990 extern void exit_itimers(struct signal_struct *);
62991 extern void flush_itimer_signals(void);
62992
62993 -extern NORET_TYPE void do_group_exit(int);
62994 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
62995
62996 extern void daemonize(const char *, ...);
62997 extern int allow_signal(int);
62998 @@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62999
63000 #endif
63001
63002 -static inline int object_is_on_stack(void *obj)
63003 +static inline int object_starts_on_stack(void *obj)
63004 {
63005 - void *stack = task_stack_page(current);
63006 + const void *stack = task_stack_page(current);
63007
63008 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63009 }
63010
63011 +#ifdef CONFIG_PAX_USERCOPY
63012 +extern int object_is_on_stack(const void *obj, unsigned long len);
63013 +#endif
63014 +
63015 extern void thread_info_cache_init(void);
63016
63017 #ifdef CONFIG_DEBUG_STACK_USAGE
63018 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63019 index 899fbb4..1cb4138 100644
63020 --- a/include/linux/screen_info.h
63021 +++ b/include/linux/screen_info.h
63022 @@ -43,7 +43,8 @@ struct screen_info {
63023 __u16 pages; /* 0x32 */
63024 __u16 vesa_attributes; /* 0x34 */
63025 __u32 capabilities; /* 0x36 */
63026 - __u8 _reserved[6]; /* 0x3a */
63027 + __u16 vesapm_size; /* 0x3a */
63028 + __u8 _reserved[4]; /* 0x3c */
63029 } __attribute__((packed));
63030
63031 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63032 diff --git a/include/linux/security.h b/include/linux/security.h
63033 index ebd2a53..2d949ae 100644
63034 --- a/include/linux/security.h
63035 +++ b/include/linux/security.h
63036 @@ -36,6 +36,7 @@
63037 #include <linux/key.h>
63038 #include <linux/xfrm.h>
63039 #include <linux/slab.h>
63040 +#include <linux/grsecurity.h>
63041 #include <net/flow.h>
63042
63043 /* Maximum number of letters for an LSM name string */
63044 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63045 index be720cd..a0e1b94 100644
63046 --- a/include/linux/seq_file.h
63047 +++ b/include/linux/seq_file.h
63048 @@ -33,6 +33,7 @@ struct seq_operations {
63049 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63050 int (*show) (struct seq_file *m, void *v);
63051 };
63052 +typedef struct seq_operations __no_const seq_operations_no_const;
63053
63054 #define SEQ_SKIP 1
63055
63056 diff --git a/include/linux/shm.h b/include/linux/shm.h
63057 index 92808b8..c28cac4 100644
63058 --- a/include/linux/shm.h
63059 +++ b/include/linux/shm.h
63060 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63061
63062 /* The task created the shm object. NULL if the task is dead. */
63063 struct task_struct *shm_creator;
63064 +#ifdef CONFIG_GRKERNSEC
63065 + time_t shm_createtime;
63066 + pid_t shm_lapid;
63067 +#endif
63068 };
63069
63070 /* shm_mode upper byte flags */
63071 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63072 index 0f96646..cfb757a 100644
63073 --- a/include/linux/skbuff.h
63074 +++ b/include/linux/skbuff.h
63075 @@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63076 */
63077 static inline int skb_queue_empty(const struct sk_buff_head *list)
63078 {
63079 - return list->next == (struct sk_buff *)list;
63080 + return list->next == (const struct sk_buff *)list;
63081 }
63082
63083 /**
63084 @@ -623,7 +623,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63085 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63086 const struct sk_buff *skb)
63087 {
63088 - return skb->next == (struct sk_buff *)list;
63089 + return skb->next == (const struct sk_buff *)list;
63090 }
63091
63092 /**
63093 @@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63094 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63095 const struct sk_buff *skb)
63096 {
63097 - return skb->prev == (struct sk_buff *)list;
63098 + return skb->prev == (const struct sk_buff *)list;
63099 }
63100
63101 /**
63102 @@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63103 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63104 */
63105 #ifndef NET_SKB_PAD
63106 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63107 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63108 #endif
63109
63110 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63111 diff --git a/include/linux/slab.h b/include/linux/slab.h
63112 index 573c809..e84c132 100644
63113 --- a/include/linux/slab.h
63114 +++ b/include/linux/slab.h
63115 @@ -11,12 +11,20 @@
63116
63117 #include <linux/gfp.h>
63118 #include <linux/types.h>
63119 +#include <linux/err.h>
63120
63121 /*
63122 * Flags to pass to kmem_cache_create().
63123 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63124 */
63125 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63126 +
63127 +#ifdef CONFIG_PAX_USERCOPY
63128 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63129 +#else
63130 +#define SLAB_USERCOPY 0x00000000UL
63131 +#endif
63132 +
63133 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63134 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63135 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63136 @@ -87,10 +95,13 @@
63137 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63138 * Both make kfree a no-op.
63139 */
63140 -#define ZERO_SIZE_PTR ((void *)16)
63141 +#define ZERO_SIZE_PTR \
63142 +({ \
63143 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63144 + (void *)(-MAX_ERRNO-1L); \
63145 +})
63146
63147 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63148 - (unsigned long)ZERO_SIZE_PTR)
63149 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63150
63151 /*
63152 * struct kmem_cache related prototypes
63153 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
63154 void kfree(const void *);
63155 void kzfree(const void *);
63156 size_t ksize(const void *);
63157 +void check_object_size(const void *ptr, unsigned long n, bool to);
63158
63159 /*
63160 * Allocator specific definitions. These are mainly used to establish optimized
63161 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
63162
63163 void __init kmem_cache_init_late(void);
63164
63165 +#define kmalloc(x, y) \
63166 +({ \
63167 + void *___retval; \
63168 + intoverflow_t ___x = (intoverflow_t)x; \
63169 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
63170 + ___retval = NULL; \
63171 + else \
63172 + ___retval = kmalloc((size_t)___x, (y)); \
63173 + ___retval; \
63174 +})
63175 +
63176 +#define kmalloc_node(x, y, z) \
63177 +({ \
63178 + void *___retval; \
63179 + intoverflow_t ___x = (intoverflow_t)x; \
63180 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
63181 + ___retval = NULL; \
63182 + else \
63183 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
63184 + ___retval; \
63185 +})
63186 +
63187 +#define kzalloc(x, y) \
63188 +({ \
63189 + void *___retval; \
63190 + intoverflow_t ___x = (intoverflow_t)x; \
63191 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
63192 + ___retval = NULL; \
63193 + else \
63194 + ___retval = kzalloc((size_t)___x, (y)); \
63195 + ___retval; \
63196 +})
63197 +
63198 +#define __krealloc(x, y, z) \
63199 +({ \
63200 + void *___retval; \
63201 + intoverflow_t ___y = (intoverflow_t)y; \
63202 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
63203 + ___retval = NULL; \
63204 + else \
63205 + ___retval = __krealloc((x), (size_t)___y, (z)); \
63206 + ___retval; \
63207 +})
63208 +
63209 +#define krealloc(x, y, z) \
63210 +({ \
63211 + void *___retval; \
63212 + intoverflow_t ___y = (intoverflow_t)y; \
63213 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
63214 + ___retval = NULL; \
63215 + else \
63216 + ___retval = krealloc((x), (size_t)___y, (z)); \
63217 + ___retval; \
63218 +})
63219 +
63220 #endif /* _LINUX_SLAB_H */
63221 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63222 index d00e0ba..1b3bf7b 100644
63223 --- a/include/linux/slab_def.h
63224 +++ b/include/linux/slab_def.h
63225 @@ -68,10 +68,10 @@ struct kmem_cache {
63226 unsigned long node_allocs;
63227 unsigned long node_frees;
63228 unsigned long node_overflow;
63229 - atomic_t allochit;
63230 - atomic_t allocmiss;
63231 - atomic_t freehit;
63232 - atomic_t freemiss;
63233 + atomic_unchecked_t allochit;
63234 + atomic_unchecked_t allocmiss;
63235 + atomic_unchecked_t freehit;
63236 + atomic_unchecked_t freemiss;
63237
63238 /*
63239 * If debugging is enabled, then the allocator can add additional
63240 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63241 index f58d641..c56bf9c 100644
63242 --- a/include/linux/slub_def.h
63243 +++ b/include/linux/slub_def.h
63244 @@ -85,7 +85,7 @@ struct kmem_cache {
63245 struct kmem_cache_order_objects max;
63246 struct kmem_cache_order_objects min;
63247 gfp_t allocflags; /* gfp flags to use on each alloc */
63248 - int refcount; /* Refcount for slab cache destroy */
63249 + atomic_t refcount; /* Refcount for slab cache destroy */
63250 void (*ctor)(void *);
63251 int inuse; /* Offset to metadata */
63252 int align; /* Alignment */
63253 @@ -211,7 +211,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63254 }
63255
63256 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63257 -void *__kmalloc(size_t size, gfp_t flags);
63258 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
63259
63260 static __always_inline void *
63261 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63262 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63263 index de8832d..0147b46 100644
63264 --- a/include/linux/sonet.h
63265 +++ b/include/linux/sonet.h
63266 @@ -61,7 +61,7 @@ struct sonet_stats {
63267 #include <linux/atomic.h>
63268
63269 struct k_sonet_stats {
63270 -#define __HANDLE_ITEM(i) atomic_t i
63271 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63272 __SONET_ITEMS
63273 #undef __HANDLE_ITEM
63274 };
63275 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63276 index db7bcaf..1aca77e 100644
63277 --- a/include/linux/sunrpc/clnt.h
63278 +++ b/include/linux/sunrpc/clnt.h
63279 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63280 {
63281 switch (sap->sa_family) {
63282 case AF_INET:
63283 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
63284 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63285 case AF_INET6:
63286 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63287 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63288 }
63289 return 0;
63290 }
63291 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63292 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63293 const struct sockaddr *src)
63294 {
63295 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63296 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63297 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63298
63299 dsin->sin_family = ssin->sin_family;
63300 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63301 if (sa->sa_family != AF_INET6)
63302 return 0;
63303
63304 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63305 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63306 }
63307
63308 #endif /* __KERNEL__ */
63309 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63310 index e775689..9e206d9 100644
63311 --- a/include/linux/sunrpc/sched.h
63312 +++ b/include/linux/sunrpc/sched.h
63313 @@ -105,6 +105,7 @@ struct rpc_call_ops {
63314 void (*rpc_call_done)(struct rpc_task *, void *);
63315 void (*rpc_release)(void *);
63316 };
63317 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63318
63319 struct rpc_task_setup {
63320 struct rpc_task *task;
63321 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63322 index c14fe86..393245e 100644
63323 --- a/include/linux/sunrpc/svc_rdma.h
63324 +++ b/include/linux/sunrpc/svc_rdma.h
63325 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63326 extern unsigned int svcrdma_max_requests;
63327 extern unsigned int svcrdma_max_req_size;
63328
63329 -extern atomic_t rdma_stat_recv;
63330 -extern atomic_t rdma_stat_read;
63331 -extern atomic_t rdma_stat_write;
63332 -extern atomic_t rdma_stat_sq_starve;
63333 -extern atomic_t rdma_stat_rq_starve;
63334 -extern atomic_t rdma_stat_rq_poll;
63335 -extern atomic_t rdma_stat_rq_prod;
63336 -extern atomic_t rdma_stat_sq_poll;
63337 -extern atomic_t rdma_stat_sq_prod;
63338 +extern atomic_unchecked_t rdma_stat_recv;
63339 +extern atomic_unchecked_t rdma_stat_read;
63340 +extern atomic_unchecked_t rdma_stat_write;
63341 +extern atomic_unchecked_t rdma_stat_sq_starve;
63342 +extern atomic_unchecked_t rdma_stat_rq_starve;
63343 +extern atomic_unchecked_t rdma_stat_rq_poll;
63344 +extern atomic_unchecked_t rdma_stat_rq_prod;
63345 +extern atomic_unchecked_t rdma_stat_sq_poll;
63346 +extern atomic_unchecked_t rdma_stat_sq_prod;
63347
63348 #define RPCRDMA_VERSION 1
63349
63350 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63351 index 11684d9..0d245eb 100644
63352 --- a/include/linux/sysctl.h
63353 +++ b/include/linux/sysctl.h
63354 @@ -155,7 +155,11 @@ enum
63355 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63356 };
63357
63358 -
63359 +#ifdef CONFIG_PAX_SOFTMODE
63360 +enum {
63361 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63362 +};
63363 +#endif
63364
63365 /* CTL_VM names: */
63366 enum
63367 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63368
63369 extern int proc_dostring(struct ctl_table *, int,
63370 void __user *, size_t *, loff_t *);
63371 +extern int proc_dostring_modpriv(struct ctl_table *, int,
63372 + void __user *, size_t *, loff_t *);
63373 extern int proc_dointvec(struct ctl_table *, int,
63374 void __user *, size_t *, loff_t *);
63375 extern int proc_dointvec_minmax(struct ctl_table *, int,
63376 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63377 index ff7dc08..893e1bd 100644
63378 --- a/include/linux/tty_ldisc.h
63379 +++ b/include/linux/tty_ldisc.h
63380 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63381
63382 struct module *owner;
63383
63384 - int refcount;
63385 + atomic_t refcount;
63386 };
63387
63388 struct tty_ldisc {
63389 diff --git a/include/linux/types.h b/include/linux/types.h
63390 index 176da8c..e45e473 100644
63391 --- a/include/linux/types.h
63392 +++ b/include/linux/types.h
63393 @@ -213,10 +213,26 @@ typedef struct {
63394 int counter;
63395 } atomic_t;
63396
63397 +#ifdef CONFIG_PAX_REFCOUNT
63398 +typedef struct {
63399 + int counter;
63400 +} atomic_unchecked_t;
63401 +#else
63402 +typedef atomic_t atomic_unchecked_t;
63403 +#endif
63404 +
63405 #ifdef CONFIG_64BIT
63406 typedef struct {
63407 long counter;
63408 } atomic64_t;
63409 +
63410 +#ifdef CONFIG_PAX_REFCOUNT
63411 +typedef struct {
63412 + long counter;
63413 +} atomic64_unchecked_t;
63414 +#else
63415 +typedef atomic64_t atomic64_unchecked_t;
63416 +#endif
63417 #endif
63418
63419 struct list_head {
63420 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63421 index 5ca0951..ab496a5 100644
63422 --- a/include/linux/uaccess.h
63423 +++ b/include/linux/uaccess.h
63424 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63425 long ret; \
63426 mm_segment_t old_fs = get_fs(); \
63427 \
63428 - set_fs(KERNEL_DS); \
63429 pagefault_disable(); \
63430 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63431 - pagefault_enable(); \
63432 + set_fs(KERNEL_DS); \
63433 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63434 set_fs(old_fs); \
63435 + pagefault_enable(); \
63436 ret; \
63437 })
63438
63439 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63440 index 99c1b4d..bb94261 100644
63441 --- a/include/linux/unaligned/access_ok.h
63442 +++ b/include/linux/unaligned/access_ok.h
63443 @@ -6,32 +6,32 @@
63444
63445 static inline u16 get_unaligned_le16(const void *p)
63446 {
63447 - return le16_to_cpup((__le16 *)p);
63448 + return le16_to_cpup((const __le16 *)p);
63449 }
63450
63451 static inline u32 get_unaligned_le32(const void *p)
63452 {
63453 - return le32_to_cpup((__le32 *)p);
63454 + return le32_to_cpup((const __le32 *)p);
63455 }
63456
63457 static inline u64 get_unaligned_le64(const void *p)
63458 {
63459 - return le64_to_cpup((__le64 *)p);
63460 + return le64_to_cpup((const __le64 *)p);
63461 }
63462
63463 static inline u16 get_unaligned_be16(const void *p)
63464 {
63465 - return be16_to_cpup((__be16 *)p);
63466 + return be16_to_cpup((const __be16 *)p);
63467 }
63468
63469 static inline u32 get_unaligned_be32(const void *p)
63470 {
63471 - return be32_to_cpup((__be32 *)p);
63472 + return be32_to_cpup((const __be32 *)p);
63473 }
63474
63475 static inline u64 get_unaligned_be64(const void *p)
63476 {
63477 - return be64_to_cpup((__be64 *)p);
63478 + return be64_to_cpup((const __be64 *)p);
63479 }
63480
63481 static inline void put_unaligned_le16(u16 val, void *p)
63482 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63483 index cf97b5b..40ebc87 100644
63484 --- a/include/linux/vermagic.h
63485 +++ b/include/linux/vermagic.h
63486 @@ -26,9 +26,35 @@
63487 #define MODULE_ARCH_VERMAGIC ""
63488 #endif
63489
63490 +#ifdef CONFIG_PAX_REFCOUNT
63491 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63492 +#else
63493 +#define MODULE_PAX_REFCOUNT ""
63494 +#endif
63495 +
63496 +#ifdef CONSTIFY_PLUGIN
63497 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63498 +#else
63499 +#define MODULE_CONSTIFY_PLUGIN ""
63500 +#endif
63501 +
63502 +#ifdef STACKLEAK_PLUGIN
63503 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63504 +#else
63505 +#define MODULE_STACKLEAK_PLUGIN ""
63506 +#endif
63507 +
63508 +#ifdef CONFIG_GRKERNSEC
63509 +#define MODULE_GRSEC "GRSEC "
63510 +#else
63511 +#define MODULE_GRSEC ""
63512 +#endif
63513 +
63514 #define VERMAGIC_STRING \
63515 UTS_RELEASE " " \
63516 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63517 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63518 - MODULE_ARCH_VERMAGIC
63519 + MODULE_ARCH_VERMAGIC \
63520 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63521 + MODULE_GRSEC
63522
63523 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63524 index 687fb11..b342358 100644
63525 --- a/include/linux/vmalloc.h
63526 +++ b/include/linux/vmalloc.h
63527 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63528 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63529 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63530 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63531 +
63532 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63533 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63534 +#endif
63535 +
63536 /* bits [20..32] reserved for arch specific ioremap internals */
63537
63538 /*
63539 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
63540 # endif
63541 #endif
63542
63543 +#define vmalloc(x) \
63544 +({ \
63545 + void *___retval; \
63546 + intoverflow_t ___x = (intoverflow_t)x; \
63547 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
63548 + ___retval = NULL; \
63549 + else \
63550 + ___retval = vmalloc((unsigned long)___x); \
63551 + ___retval; \
63552 +})
63553 +
63554 +#define vzalloc(x) \
63555 +({ \
63556 + void *___retval; \
63557 + intoverflow_t ___x = (intoverflow_t)x; \
63558 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
63559 + ___retval = NULL; \
63560 + else \
63561 + ___retval = vzalloc((unsigned long)___x); \
63562 + ___retval; \
63563 +})
63564 +
63565 +#define __vmalloc(x, y, z) \
63566 +({ \
63567 + void *___retval; \
63568 + intoverflow_t ___x = (intoverflow_t)x; \
63569 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
63570 + ___retval = NULL; \
63571 + else \
63572 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
63573 + ___retval; \
63574 +})
63575 +
63576 +#define vmalloc_user(x) \
63577 +({ \
63578 + void *___retval; \
63579 + intoverflow_t ___x = (intoverflow_t)x; \
63580 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
63581 + ___retval = NULL; \
63582 + else \
63583 + ___retval = vmalloc_user((unsigned long)___x); \
63584 + ___retval; \
63585 +})
63586 +
63587 +#define vmalloc_exec(x) \
63588 +({ \
63589 + void *___retval; \
63590 + intoverflow_t ___x = (intoverflow_t)x; \
63591 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
63592 + ___retval = NULL; \
63593 + else \
63594 + ___retval = vmalloc_exec((unsigned long)___x); \
63595 + ___retval; \
63596 +})
63597 +
63598 +#define vmalloc_node(x, y) \
63599 +({ \
63600 + void *___retval; \
63601 + intoverflow_t ___x = (intoverflow_t)x; \
63602 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
63603 + ___retval = NULL; \
63604 + else \
63605 + ___retval = vmalloc_node((unsigned long)___x, (y));\
63606 + ___retval; \
63607 +})
63608 +
63609 +#define vzalloc_node(x, y) \
63610 +({ \
63611 + void *___retval; \
63612 + intoverflow_t ___x = (intoverflow_t)x; \
63613 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
63614 + ___retval = NULL; \
63615 + else \
63616 + ___retval = vzalloc_node((unsigned long)___x, (y));\
63617 + ___retval; \
63618 +})
63619 +
63620 +#define vmalloc_32(x) \
63621 +({ \
63622 + void *___retval; \
63623 + intoverflow_t ___x = (intoverflow_t)x; \
63624 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
63625 + ___retval = NULL; \
63626 + else \
63627 + ___retval = vmalloc_32((unsigned long)___x); \
63628 + ___retval; \
63629 +})
63630 +
63631 +#define vmalloc_32_user(x) \
63632 +({ \
63633 +void *___retval; \
63634 + intoverflow_t ___x = (intoverflow_t)x; \
63635 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
63636 + ___retval = NULL; \
63637 + else \
63638 + ___retval = vmalloc_32_user((unsigned long)___x);\
63639 + ___retval; \
63640 +})
63641 +
63642 #endif /* _LINUX_VMALLOC_H */
63643 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63644 index 65efb92..137adbb 100644
63645 --- a/include/linux/vmstat.h
63646 +++ b/include/linux/vmstat.h
63647 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63648 /*
63649 * Zone based page accounting with per cpu differentials.
63650 */
63651 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63652 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63653
63654 static inline void zone_page_state_add(long x, struct zone *zone,
63655 enum zone_stat_item item)
63656 {
63657 - atomic_long_add(x, &zone->vm_stat[item]);
63658 - atomic_long_add(x, &vm_stat[item]);
63659 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63660 + atomic_long_add_unchecked(x, &vm_stat[item]);
63661 }
63662
63663 static inline unsigned long global_page_state(enum zone_stat_item item)
63664 {
63665 - long x = atomic_long_read(&vm_stat[item]);
63666 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63667 #ifdef CONFIG_SMP
63668 if (x < 0)
63669 x = 0;
63670 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63671 static inline unsigned long zone_page_state(struct zone *zone,
63672 enum zone_stat_item item)
63673 {
63674 - long x = atomic_long_read(&zone->vm_stat[item]);
63675 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63676 #ifdef CONFIG_SMP
63677 if (x < 0)
63678 x = 0;
63679 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63680 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63681 enum zone_stat_item item)
63682 {
63683 - long x = atomic_long_read(&zone->vm_stat[item]);
63684 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63685
63686 #ifdef CONFIG_SMP
63687 int cpu;
63688 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63689
63690 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63691 {
63692 - atomic_long_inc(&zone->vm_stat[item]);
63693 - atomic_long_inc(&vm_stat[item]);
63694 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63695 + atomic_long_inc_unchecked(&vm_stat[item]);
63696 }
63697
63698 static inline void __inc_zone_page_state(struct page *page,
63699 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63700
63701 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63702 {
63703 - atomic_long_dec(&zone->vm_stat[item]);
63704 - atomic_long_dec(&vm_stat[item]);
63705 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63706 + atomic_long_dec_unchecked(&vm_stat[item]);
63707 }
63708
63709 static inline void __dec_zone_page_state(struct page *page,
63710 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63711 index 4aeff96..b378cdc 100644
63712 --- a/include/media/saa7146_vv.h
63713 +++ b/include/media/saa7146_vv.h
63714 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63715 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63716
63717 /* the extension can override this */
63718 - struct v4l2_ioctl_ops ops;
63719 + v4l2_ioctl_ops_no_const ops;
63720 /* pointer to the saa7146 core ops */
63721 const struct v4l2_ioctl_ops *core_ops;
63722
63723 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63724 index c7c40f1..4f01585 100644
63725 --- a/include/media/v4l2-dev.h
63726 +++ b/include/media/v4l2-dev.h
63727 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63728
63729
63730 struct v4l2_file_operations {
63731 - struct module *owner;
63732 + struct module * const owner;
63733 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63734 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63735 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63736 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
63737 int (*open) (struct file *);
63738 int (*release) (struct file *);
63739 };
63740 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63741
63742 /*
63743 * Newer version of video_device, handled by videodev2.c
63744 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63745 index dd9f1e7..8c4dd86 100644
63746 --- a/include/media/v4l2-ioctl.h
63747 +++ b/include/media/v4l2-ioctl.h
63748 @@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
63749 long (*vidioc_default) (struct file *file, void *fh,
63750 bool valid_prio, int cmd, void *arg);
63751 };
63752 -
63753 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63754
63755 /* v4l debugging and diagnostics */
63756
63757 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63758 index c5dedd8..a93b07b 100644
63759 --- a/include/net/caif/caif_hsi.h
63760 +++ b/include/net/caif/caif_hsi.h
63761 @@ -94,7 +94,7 @@ struct cfhsi_drv {
63762 void (*rx_done_cb) (struct cfhsi_drv *drv);
63763 void (*wake_up_cb) (struct cfhsi_drv *drv);
63764 void (*wake_down_cb) (struct cfhsi_drv *drv);
63765 -};
63766 +} __no_const;
63767
63768 /* Structure implemented by HSI device. */
63769 struct cfhsi_dev {
63770 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63771 index 9e5425b..8136ffc 100644
63772 --- a/include/net/caif/cfctrl.h
63773 +++ b/include/net/caif/cfctrl.h
63774 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63775 void (*radioset_rsp)(void);
63776 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63777 struct cflayer *client_layer);
63778 -};
63779 +} __no_const;
63780
63781 /* Link Setup Parameters for CAIF-Links. */
63782 struct cfctrl_link_param {
63783 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63784 struct cfctrl {
63785 struct cfsrvl serv;
63786 struct cfctrl_rsp res;
63787 - atomic_t req_seq_no;
63788 - atomic_t rsp_seq_no;
63789 + atomic_unchecked_t req_seq_no;
63790 + atomic_unchecked_t rsp_seq_no;
63791 struct list_head list;
63792 /* Protects from simultaneous access to first_req list */
63793 spinlock_t info_list_lock;
63794 diff --git a/include/net/flow.h b/include/net/flow.h
63795 index a094477..bc91db1 100644
63796 --- a/include/net/flow.h
63797 +++ b/include/net/flow.h
63798 @@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63799 u8 dir, flow_resolve_t resolver, void *ctx);
63800
63801 extern void flow_cache_flush(void);
63802 -extern atomic_t flow_cache_genid;
63803 +extern atomic_unchecked_t flow_cache_genid;
63804
63805 #endif
63806 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63807 index 78c83e6..db3518d 100644
63808 --- a/include/net/inetpeer.h
63809 +++ b/include/net/inetpeer.h
63810 @@ -47,8 +47,8 @@ struct inet_peer {
63811 */
63812 union {
63813 struct {
63814 - atomic_t rid; /* Frag reception counter */
63815 - atomic_t ip_id_count; /* IP ID for the next packet */
63816 + atomic_unchecked_t rid; /* Frag reception counter */
63817 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63818 __u32 tcp_ts;
63819 __u32 tcp_ts_stamp;
63820 };
63821 @@ -112,11 +112,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63822 more++;
63823 inet_peer_refcheck(p);
63824 do {
63825 - old = atomic_read(&p->ip_id_count);
63826 + old = atomic_read_unchecked(&p->ip_id_count);
63827 new = old + more;
63828 if (!new)
63829 new = 1;
63830 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63831 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63832 return new;
63833 }
63834
63835 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63836 index 10422ef..662570f 100644
63837 --- a/include/net/ip_fib.h
63838 +++ b/include/net/ip_fib.h
63839 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63840
63841 #define FIB_RES_SADDR(net, res) \
63842 ((FIB_RES_NH(res).nh_saddr_genid == \
63843 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63844 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63845 FIB_RES_NH(res).nh_saddr : \
63846 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63847 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63848 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63849 index 8fa4430..05dd772 100644
63850 --- a/include/net/ip_vs.h
63851 +++ b/include/net/ip_vs.h
63852 @@ -509,7 +509,7 @@ struct ip_vs_conn {
63853 struct ip_vs_conn *control; /* Master control connection */
63854 atomic_t n_control; /* Number of controlled ones */
63855 struct ip_vs_dest *dest; /* real server */
63856 - atomic_t in_pkts; /* incoming packet counter */
63857 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63858
63859 /* packet transmitter for different forwarding methods. If it
63860 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63861 @@ -647,7 +647,7 @@ struct ip_vs_dest {
63862 __be16 port; /* port number of the server */
63863 union nf_inet_addr addr; /* IP address of the server */
63864 volatile unsigned flags; /* dest status flags */
63865 - atomic_t conn_flags; /* flags to copy to conn */
63866 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63867 atomic_t weight; /* server weight */
63868
63869 atomic_t refcnt; /* reference counter */
63870 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63871 index 69b610a..fe3962c 100644
63872 --- a/include/net/irda/ircomm_core.h
63873 +++ b/include/net/irda/ircomm_core.h
63874 @@ -51,7 +51,7 @@ typedef struct {
63875 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63876 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63877 struct ircomm_info *);
63878 -} call_t;
63879 +} __no_const call_t;
63880
63881 struct ircomm_cb {
63882 irda_queue_t queue;
63883 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63884 index 59ba38bc..d515662 100644
63885 --- a/include/net/irda/ircomm_tty.h
63886 +++ b/include/net/irda/ircomm_tty.h
63887 @@ -35,6 +35,7 @@
63888 #include <linux/termios.h>
63889 #include <linux/timer.h>
63890 #include <linux/tty.h> /* struct tty_struct */
63891 +#include <asm/local.h>
63892
63893 #include <net/irda/irias_object.h>
63894 #include <net/irda/ircomm_core.h>
63895 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63896 unsigned short close_delay;
63897 unsigned short closing_wait; /* time to wait before closing */
63898
63899 - int open_count;
63900 - int blocked_open; /* # of blocked opens */
63901 + local_t open_count;
63902 + local_t blocked_open; /* # of blocked opens */
63903
63904 /* Protect concurent access to :
63905 * o self->open_count
63906 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63907 index f82a1e8..82d81e8 100644
63908 --- a/include/net/iucv/af_iucv.h
63909 +++ b/include/net/iucv/af_iucv.h
63910 @@ -87,7 +87,7 @@ struct iucv_sock {
63911 struct iucv_sock_list {
63912 struct hlist_head head;
63913 rwlock_t lock;
63914 - atomic_t autobind_name;
63915 + atomic_unchecked_t autobind_name;
63916 };
63917
63918 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63919 diff --git a/include/net/lapb.h b/include/net/lapb.h
63920 index 96cb5dd..25e8d4f 100644
63921 --- a/include/net/lapb.h
63922 +++ b/include/net/lapb.h
63923 @@ -95,7 +95,7 @@ struct lapb_cb {
63924 struct sk_buff_head write_queue;
63925 struct sk_buff_head ack_queue;
63926 unsigned char window;
63927 - struct lapb_register_struct callbacks;
63928 + struct lapb_register_struct *callbacks;
63929
63930 /* FRMR control information */
63931 struct lapb_frame frmr_data;
63932 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63933 index 2720884..3aa5c25 100644
63934 --- a/include/net/neighbour.h
63935 +++ b/include/net/neighbour.h
63936 @@ -122,7 +122,7 @@ struct neigh_ops {
63937 void (*error_report)(struct neighbour *, struct sk_buff *);
63938 int (*output)(struct neighbour *, struct sk_buff *);
63939 int (*connected_output)(struct neighbour *, struct sk_buff *);
63940 -};
63941 +} __do_const;
63942
63943 struct pneigh_entry {
63944 struct pneigh_entry *next;
63945 diff --git a/include/net/netlink.h b/include/net/netlink.h
63946 index 98c1854..d4add7b 100644
63947 --- a/include/net/netlink.h
63948 +++ b/include/net/netlink.h
63949 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63950 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63951 {
63952 if (mark)
63953 - skb_trim(skb, (unsigned char *) mark - skb->data);
63954 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63955 }
63956
63957 /**
63958 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63959 index d786b4f..4c3dd41 100644
63960 --- a/include/net/netns/ipv4.h
63961 +++ b/include/net/netns/ipv4.h
63962 @@ -56,8 +56,8 @@ struct netns_ipv4 {
63963
63964 unsigned int sysctl_ping_group_range[2];
63965
63966 - atomic_t rt_genid;
63967 - atomic_t dev_addr_genid;
63968 + atomic_unchecked_t rt_genid;
63969 + atomic_unchecked_t dev_addr_genid;
63970
63971 #ifdef CONFIG_IP_MROUTE
63972 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63973 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63974 index 6a72a58..e6a127d 100644
63975 --- a/include/net/sctp/sctp.h
63976 +++ b/include/net/sctp/sctp.h
63977 @@ -318,9 +318,9 @@ do { \
63978
63979 #else /* SCTP_DEBUG */
63980
63981 -#define SCTP_DEBUG_PRINTK(whatever...)
63982 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63983 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63984 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63985 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63986 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63987 #define SCTP_ENABLE_DEBUG
63988 #define SCTP_DISABLE_DEBUG
63989 #define SCTP_ASSERT(expr, str, func)
63990 diff --git a/include/net/sock.h b/include/net/sock.h
63991 index 8e4062f..77b041e 100644
63992 --- a/include/net/sock.h
63993 +++ b/include/net/sock.h
63994 @@ -278,7 +278,7 @@ struct sock {
63995 #ifdef CONFIG_RPS
63996 __u32 sk_rxhash;
63997 #endif
63998 - atomic_t sk_drops;
63999 + atomic_unchecked_t sk_drops;
64000 int sk_rcvbuf;
64001
64002 struct sk_filter __rcu *sk_filter;
64003 @@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
64004 }
64005
64006 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64007 - char __user *from, char *to,
64008 + char __user *from, unsigned char *to,
64009 int copy, int offset)
64010 {
64011 if (skb->ip_summed == CHECKSUM_NONE) {
64012 diff --git a/include/net/tcp.h b/include/net/tcp.h
64013 index acc620a..f4d99c6 100644
64014 --- a/include/net/tcp.h
64015 +++ b/include/net/tcp.h
64016 @@ -1401,8 +1401,8 @@ enum tcp_seq_states {
64017 struct tcp_seq_afinfo {
64018 char *name;
64019 sa_family_t family;
64020 - struct file_operations seq_fops;
64021 - struct seq_operations seq_ops;
64022 + file_operations_no_const seq_fops;
64023 + seq_operations_no_const seq_ops;
64024 };
64025
64026 struct tcp_iter_state {
64027 diff --git a/include/net/udp.h b/include/net/udp.h
64028 index 67ea6fc..e42aee8 100644
64029 --- a/include/net/udp.h
64030 +++ b/include/net/udp.h
64031 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
64032 char *name;
64033 sa_family_t family;
64034 struct udp_table *udp_table;
64035 - struct file_operations seq_fops;
64036 - struct seq_operations seq_ops;
64037 + file_operations_no_const seq_fops;
64038 + seq_operations_no_const seq_ops;
64039 };
64040
64041 struct udp_iter_state {
64042 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64043 index b203e14..1df3991 100644
64044 --- a/include/net/xfrm.h
64045 +++ b/include/net/xfrm.h
64046 @@ -505,7 +505,7 @@ struct xfrm_policy {
64047 struct timer_list timer;
64048
64049 struct flow_cache_object flo;
64050 - atomic_t genid;
64051 + atomic_unchecked_t genid;
64052 u32 priority;
64053 u32 index;
64054 struct xfrm_mark mark;
64055 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64056 index 2d0191c..a55797d 100644
64057 --- a/include/rdma/iw_cm.h
64058 +++ b/include/rdma/iw_cm.h
64059 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
64060 int backlog);
64061
64062 int (*destroy_listen)(struct iw_cm_id *cm_id);
64063 -};
64064 +} __no_const;
64065
64066 /**
64067 * iw_create_cm_id - Create an IW CM identifier.
64068 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64069 index 7d96829..4ba78d3 100644
64070 --- a/include/scsi/libfc.h
64071 +++ b/include/scsi/libfc.h
64072 @@ -758,6 +758,7 @@ struct libfc_function_template {
64073 */
64074 void (*disc_stop_final) (struct fc_lport *);
64075 };
64076 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64077
64078 /**
64079 * struct fc_disc - Discovery context
64080 @@ -861,7 +862,7 @@ struct fc_lport {
64081 struct fc_vport *vport;
64082
64083 /* Operational Information */
64084 - struct libfc_function_template tt;
64085 + libfc_function_template_no_const tt;
64086 u8 link_up;
64087 u8 qfull;
64088 enum fc_lport_state state;
64089 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64090 index d371c3c..e228a8c 100644
64091 --- a/include/scsi/scsi_device.h
64092 +++ b/include/scsi/scsi_device.h
64093 @@ -161,9 +161,9 @@ struct scsi_device {
64094 unsigned int max_device_blocked; /* what device_blocked counts down from */
64095 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64096
64097 - atomic_t iorequest_cnt;
64098 - atomic_t iodone_cnt;
64099 - atomic_t ioerr_cnt;
64100 + atomic_unchecked_t iorequest_cnt;
64101 + atomic_unchecked_t iodone_cnt;
64102 + atomic_unchecked_t ioerr_cnt;
64103
64104 struct device sdev_gendev,
64105 sdev_dev;
64106 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64107 index 2a65167..91e01f8 100644
64108 --- a/include/scsi/scsi_transport_fc.h
64109 +++ b/include/scsi/scsi_transport_fc.h
64110 @@ -711,7 +711,7 @@ struct fc_function_template {
64111 unsigned long show_host_system_hostname:1;
64112
64113 unsigned long disable_target_scan:1;
64114 -};
64115 +} __do_const;
64116
64117
64118 /**
64119 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64120 index 030b87c..98a6954 100644
64121 --- a/include/sound/ak4xxx-adda.h
64122 +++ b/include/sound/ak4xxx-adda.h
64123 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64124 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64125 unsigned char val);
64126 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64127 -};
64128 +} __no_const;
64129
64130 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64131
64132 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64133 index 8c05e47..2b5df97 100644
64134 --- a/include/sound/hwdep.h
64135 +++ b/include/sound/hwdep.h
64136 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64137 struct snd_hwdep_dsp_status *status);
64138 int (*dsp_load)(struct snd_hwdep *hw,
64139 struct snd_hwdep_dsp_image *image);
64140 -};
64141 +} __no_const;
64142
64143 struct snd_hwdep {
64144 struct snd_card *card;
64145 diff --git a/include/sound/info.h b/include/sound/info.h
64146 index 4e94cf1..76748b1 100644
64147 --- a/include/sound/info.h
64148 +++ b/include/sound/info.h
64149 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
64150 struct snd_info_buffer *buffer);
64151 void (*write)(struct snd_info_entry *entry,
64152 struct snd_info_buffer *buffer);
64153 -};
64154 +} __no_const;
64155
64156 struct snd_info_entry_ops {
64157 int (*open)(struct snd_info_entry *entry,
64158 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64159 index 57e71fa..a2c7534 100644
64160 --- a/include/sound/pcm.h
64161 +++ b/include/sound/pcm.h
64162 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
64163 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64164 int (*ack)(struct snd_pcm_substream *substream);
64165 };
64166 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64167
64168 /*
64169 *
64170 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64171 index af1b49e..a5d55a5 100644
64172 --- a/include/sound/sb16_csp.h
64173 +++ b/include/sound/sb16_csp.h
64174 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64175 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64176 int (*csp_stop) (struct snd_sb_csp * p);
64177 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64178 -};
64179 +} __no_const;
64180
64181 /*
64182 * CSP private data
64183 diff --git a/include/sound/soc.h b/include/sound/soc.h
64184 index aa19f5a..a5b8208 100644
64185 --- a/include/sound/soc.h
64186 +++ b/include/sound/soc.h
64187 @@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
64188 /* platform IO - used for platform DAPM */
64189 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64190 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64191 -};
64192 +} __do_const;
64193
64194 struct snd_soc_platform {
64195 const char *name;
64196 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64197 index 444cd6b..3327cc5 100644
64198 --- a/include/sound/ymfpci.h
64199 +++ b/include/sound/ymfpci.h
64200 @@ -358,7 +358,7 @@ struct snd_ymfpci {
64201 spinlock_t reg_lock;
64202 spinlock_t voice_lock;
64203 wait_queue_head_t interrupt_sleep;
64204 - atomic_t interrupt_sleep_count;
64205 + atomic_unchecked_t interrupt_sleep_count;
64206 struct snd_info_entry *proc_entry;
64207 const struct firmware *dsp_microcode;
64208 const struct firmware *controller_microcode;
64209 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64210 index 2704065..e10f3ef 100644
64211 --- a/include/target/target_core_base.h
64212 +++ b/include/target/target_core_base.h
64213 @@ -356,7 +356,7 @@ struct t10_reservation_ops {
64214 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64215 int (*t10_pr_register)(struct se_cmd *);
64216 int (*t10_pr_clear)(struct se_cmd *);
64217 -};
64218 +} __no_const;
64219
64220 struct t10_reservation {
64221 /* Reservation effects all target ports */
64222 @@ -496,8 +496,8 @@ struct se_cmd {
64223 atomic_t t_task_cdbs_left;
64224 atomic_t t_task_cdbs_ex_left;
64225 atomic_t t_task_cdbs_timeout_left;
64226 - atomic_t t_task_cdbs_sent;
64227 - atomic_t t_transport_aborted;
64228 + atomic_unchecked_t t_task_cdbs_sent;
64229 + atomic_unchecked_t t_transport_aborted;
64230 atomic_t t_transport_active;
64231 atomic_t t_transport_complete;
64232 atomic_t t_transport_queue_active;
64233 @@ -744,7 +744,7 @@ struct se_device {
64234 atomic_t active_cmds;
64235 atomic_t simple_cmds;
64236 atomic_t depth_left;
64237 - atomic_t dev_ordered_id;
64238 + atomic_unchecked_t dev_ordered_id;
64239 atomic_t dev_tur_active;
64240 atomic_t execute_tasks;
64241 atomic_t dev_status_thr_count;
64242 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64243 index 1c09820..7f5ec79 100644
64244 --- a/include/trace/events/irq.h
64245 +++ b/include/trace/events/irq.h
64246 @@ -36,7 +36,7 @@ struct softirq_action;
64247 */
64248 TRACE_EVENT(irq_handler_entry,
64249
64250 - TP_PROTO(int irq, struct irqaction *action),
64251 + TP_PROTO(int irq, const struct irqaction *action),
64252
64253 TP_ARGS(irq, action),
64254
64255 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64256 */
64257 TRACE_EVENT(irq_handler_exit,
64258
64259 - TP_PROTO(int irq, struct irqaction *action, int ret),
64260 + TP_PROTO(int irq, const struct irqaction *action, int ret),
64261
64262 TP_ARGS(irq, action, ret),
64263
64264 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64265 index 69d485a..dd0bee7 100644
64266 --- a/include/video/udlfb.h
64267 +++ b/include/video/udlfb.h
64268 @@ -51,10 +51,10 @@ struct dlfb_data {
64269 int base8;
64270 u32 pseudo_palette[256];
64271 /* blit-only rendering path metrics, exposed through sysfs */
64272 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64273 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64274 - atomic_t bytes_sent; /* to usb, after compression including overhead */
64275 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64276 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64277 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64278 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64279 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64280 };
64281
64282 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64283 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64284 index 0993a22..32ba2fe 100644
64285 --- a/include/video/uvesafb.h
64286 +++ b/include/video/uvesafb.h
64287 @@ -177,6 +177,7 @@ struct uvesafb_par {
64288 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64289 u8 pmi_setpal; /* PMI for palette changes */
64290 u16 *pmi_base; /* protected mode interface location */
64291 + u8 *pmi_code; /* protected mode code location */
64292 void *pmi_start;
64293 void *pmi_pal;
64294 u8 *vbe_state_orig; /*
64295 diff --git a/init/Kconfig b/init/Kconfig
64296 index d627783..693a9f3 100644
64297 --- a/init/Kconfig
64298 +++ b/init/Kconfig
64299 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
64300
64301 config COMPAT_BRK
64302 bool "Disable heap randomization"
64303 - default y
64304 + default n
64305 help
64306 Randomizing heap placement makes heap exploits harder, but it
64307 also breaks ancient binaries (including anything libc5 based).
64308 diff --git a/init/do_mounts.c b/init/do_mounts.c
64309 index c0851a8..4f8977d 100644
64310 --- a/init/do_mounts.c
64311 +++ b/init/do_mounts.c
64312 @@ -287,11 +287,11 @@ static void __init get_fs_names(char *page)
64313
64314 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64315 {
64316 - int err = sys_mount(name, "/root", fs, flags, data);
64317 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64318 if (err)
64319 return err;
64320
64321 - sys_chdir((const char __user __force *)"/root");
64322 + sys_chdir((const char __force_user*)"/root");
64323 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
64324 printk(KERN_INFO
64325 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
64326 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...)
64327 va_start(args, fmt);
64328 vsprintf(buf, fmt, args);
64329 va_end(args);
64330 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64331 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64332 if (fd >= 0) {
64333 sys_ioctl(fd, FDEJECT, 0);
64334 sys_close(fd);
64335 }
64336 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64337 - fd = sys_open("/dev/console", O_RDWR, 0);
64338 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64339 if (fd >= 0) {
64340 sys_ioctl(fd, TCGETS, (long)&termios);
64341 termios.c_lflag &= ~ICANON;
64342 sys_ioctl(fd, TCSETSF, (long)&termios);
64343 - sys_read(fd, &c, 1);
64344 + sys_read(fd, (char __user *)&c, 1);
64345 termios.c_lflag |= ICANON;
64346 sys_ioctl(fd, TCSETSF, (long)&termios);
64347 sys_close(fd);
64348 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
64349 mount_root();
64350 out:
64351 devtmpfs_mount("dev");
64352 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64353 - sys_chroot((const char __user __force *)".");
64354 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64355 + sys_chroot((const char __force_user *)".");
64356 }
64357 diff --git a/init/do_mounts.h b/init/do_mounts.h
64358 index f5b978a..69dbfe8 100644
64359 --- a/init/do_mounts.h
64360 +++ b/init/do_mounts.h
64361 @@ -15,15 +15,15 @@ extern int root_mountflags;
64362
64363 static inline int create_dev(char *name, dev_t dev)
64364 {
64365 - sys_unlink(name);
64366 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64367 + sys_unlink((char __force_user *)name);
64368 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64369 }
64370
64371 #if BITS_PER_LONG == 32
64372 static inline u32 bstat(char *name)
64373 {
64374 struct stat64 stat;
64375 - if (sys_stat64(name, &stat) != 0)
64376 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64377 return 0;
64378 if (!S_ISBLK(stat.st_mode))
64379 return 0;
64380 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64381 static inline u32 bstat(char *name)
64382 {
64383 struct stat stat;
64384 - if (sys_newstat(name, &stat) != 0)
64385 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64386 return 0;
64387 if (!S_ISBLK(stat.st_mode))
64388 return 0;
64389 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64390 index 3098a38..253064e 100644
64391 --- a/init/do_mounts_initrd.c
64392 +++ b/init/do_mounts_initrd.c
64393 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
64394 create_dev("/dev/root.old", Root_RAM0);
64395 /* mount initrd on rootfs' /root */
64396 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64397 - sys_mkdir("/old", 0700);
64398 - root_fd = sys_open("/", 0, 0);
64399 - old_fd = sys_open("/old", 0, 0);
64400 + sys_mkdir((const char __force_user *)"/old", 0700);
64401 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64402 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64403 /* move initrd over / and chdir/chroot in initrd root */
64404 - sys_chdir("/root");
64405 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64406 - sys_chroot(".");
64407 + sys_chdir((const char __force_user *)"/root");
64408 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64409 + sys_chroot((const char __force_user *)".");
64410
64411 /*
64412 * In case that a resume from disk is carried out by linuxrc or one of
64413 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
64414
64415 /* move initrd to rootfs' /old */
64416 sys_fchdir(old_fd);
64417 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64418 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64419 /* switch root and cwd back to / of rootfs */
64420 sys_fchdir(root_fd);
64421 - sys_chroot(".");
64422 + sys_chroot((const char __force_user *)".");
64423 sys_close(old_fd);
64424 sys_close(root_fd);
64425
64426 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64427 - sys_chdir("/old");
64428 + sys_chdir((const char __force_user *)"/old");
64429 return;
64430 }
64431
64432 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
64433 mount_root();
64434
64435 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64436 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64437 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64438 if (!error)
64439 printk("okay\n");
64440 else {
64441 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64442 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64443 if (error == -ENOENT)
64444 printk("/initrd does not exist. Ignored.\n");
64445 else
64446 printk("failed\n");
64447 printk(KERN_NOTICE "Unmounting old root\n");
64448 - sys_umount("/old", MNT_DETACH);
64449 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64450 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64451 if (fd < 0) {
64452 error = fd;
64453 @@ -116,11 +116,11 @@ int __init initrd_load(void)
64454 * mounted in the normal path.
64455 */
64456 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64457 - sys_unlink("/initrd.image");
64458 + sys_unlink((const char __force_user *)"/initrd.image");
64459 handle_initrd();
64460 return 1;
64461 }
64462 }
64463 - sys_unlink("/initrd.image");
64464 + sys_unlink((const char __force_user *)"/initrd.image");
64465 return 0;
64466 }
64467 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64468 index 32c4799..c27ee74 100644
64469 --- a/init/do_mounts_md.c
64470 +++ b/init/do_mounts_md.c
64471 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64472 partitioned ? "_d" : "", minor,
64473 md_setup_args[ent].device_names);
64474
64475 - fd = sys_open(name, 0, 0);
64476 + fd = sys_open((char __force_user *)name, 0, 0);
64477 if (fd < 0) {
64478 printk(KERN_ERR "md: open failed - cannot start "
64479 "array %s\n", name);
64480 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64481 * array without it
64482 */
64483 sys_close(fd);
64484 - fd = sys_open(name, 0, 0);
64485 + fd = sys_open((char __force_user *)name, 0, 0);
64486 sys_ioctl(fd, BLKRRPART, 0);
64487 }
64488 sys_close(fd);
64489 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64490
64491 wait_for_device_probe();
64492
64493 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64494 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64495 if (fd >= 0) {
64496 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64497 sys_close(fd);
64498 diff --git a/init/initramfs.c b/init/initramfs.c
64499 index 2531811..040d4d4 100644
64500 --- a/init/initramfs.c
64501 +++ b/init/initramfs.c
64502 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64503 }
64504 }
64505
64506 -static long __init do_utime(char __user *filename, time_t mtime)
64507 +static long __init do_utime(__force char __user *filename, time_t mtime)
64508 {
64509 struct timespec t[2];
64510
64511 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64512 struct dir_entry *de, *tmp;
64513 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64514 list_del(&de->list);
64515 - do_utime(de->name, de->mtime);
64516 + do_utime((char __force_user *)de->name, de->mtime);
64517 kfree(de->name);
64518 kfree(de);
64519 }
64520 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64521 if (nlink >= 2) {
64522 char *old = find_link(major, minor, ino, mode, collected);
64523 if (old)
64524 - return (sys_link(old, collected) < 0) ? -1 : 1;
64525 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64526 }
64527 return 0;
64528 }
64529 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
64530 {
64531 struct stat st;
64532
64533 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64534 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64535 if (S_ISDIR(st.st_mode))
64536 - sys_rmdir(path);
64537 + sys_rmdir((char __force_user *)path);
64538 else
64539 - sys_unlink(path);
64540 + sys_unlink((char __force_user *)path);
64541 }
64542 }
64543
64544 @@ -305,7 +305,7 @@ static int __init do_name(void)
64545 int openflags = O_WRONLY|O_CREAT;
64546 if (ml != 1)
64547 openflags |= O_TRUNC;
64548 - wfd = sys_open(collected, openflags, mode);
64549 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64550
64551 if (wfd >= 0) {
64552 sys_fchown(wfd, uid, gid);
64553 @@ -317,17 +317,17 @@ static int __init do_name(void)
64554 }
64555 }
64556 } else if (S_ISDIR(mode)) {
64557 - sys_mkdir(collected, mode);
64558 - sys_chown(collected, uid, gid);
64559 - sys_chmod(collected, mode);
64560 + sys_mkdir((char __force_user *)collected, mode);
64561 + sys_chown((char __force_user *)collected, uid, gid);
64562 + sys_chmod((char __force_user *)collected, mode);
64563 dir_add(collected, mtime);
64564 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64565 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64566 if (maybe_link() == 0) {
64567 - sys_mknod(collected, mode, rdev);
64568 - sys_chown(collected, uid, gid);
64569 - sys_chmod(collected, mode);
64570 - do_utime(collected, mtime);
64571 + sys_mknod((char __force_user *)collected, mode, rdev);
64572 + sys_chown((char __force_user *)collected, uid, gid);
64573 + sys_chmod((char __force_user *)collected, mode);
64574 + do_utime((char __force_user *)collected, mtime);
64575 }
64576 }
64577 return 0;
64578 @@ -336,15 +336,15 @@ static int __init do_name(void)
64579 static int __init do_copy(void)
64580 {
64581 if (count >= body_len) {
64582 - sys_write(wfd, victim, body_len);
64583 + sys_write(wfd, (char __force_user *)victim, body_len);
64584 sys_close(wfd);
64585 - do_utime(vcollected, mtime);
64586 + do_utime((char __force_user *)vcollected, mtime);
64587 kfree(vcollected);
64588 eat(body_len);
64589 state = SkipIt;
64590 return 0;
64591 } else {
64592 - sys_write(wfd, victim, count);
64593 + sys_write(wfd, (char __force_user *)victim, count);
64594 body_len -= count;
64595 eat(count);
64596 return 1;
64597 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64598 {
64599 collected[N_ALIGN(name_len) + body_len] = '\0';
64600 clean_path(collected, 0);
64601 - sys_symlink(collected + N_ALIGN(name_len), collected);
64602 - sys_lchown(collected, uid, gid);
64603 - do_utime(collected, mtime);
64604 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64605 + sys_lchown((char __force_user *)collected, uid, gid);
64606 + do_utime((char __force_user *)collected, mtime);
64607 state = SkipIt;
64608 next_state = Reset;
64609 return 0;
64610 diff --git a/init/main.c b/init/main.c
64611 index 03b408d..5777f59 100644
64612 --- a/init/main.c
64613 +++ b/init/main.c
64614 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
64615 extern void tc_init(void);
64616 #endif
64617
64618 +extern void grsecurity_init(void);
64619 +
64620 /*
64621 * Debug helper: via this flag we know that we are in 'early bootup code'
64622 * where only the boot processor is running with IRQ disabled. This means
64623 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
64624
64625 __setup("reset_devices", set_reset_devices);
64626
64627 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64628 +extern char pax_enter_kernel_user[];
64629 +extern char pax_exit_kernel_user[];
64630 +extern pgdval_t clone_pgd_mask;
64631 +#endif
64632 +
64633 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64634 +static int __init setup_pax_nouderef(char *str)
64635 +{
64636 +#ifdef CONFIG_X86_32
64637 + unsigned int cpu;
64638 + struct desc_struct *gdt;
64639 +
64640 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
64641 + gdt = get_cpu_gdt_table(cpu);
64642 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64643 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64644 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64645 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64646 + }
64647 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64648 +#else
64649 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64650 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64651 + clone_pgd_mask = ~(pgdval_t)0UL;
64652 +#endif
64653 +
64654 + return 0;
64655 +}
64656 +early_param("pax_nouderef", setup_pax_nouderef);
64657 +#endif
64658 +
64659 +#ifdef CONFIG_PAX_SOFTMODE
64660 +int pax_softmode;
64661 +
64662 +static int __init setup_pax_softmode(char *str)
64663 +{
64664 + get_option(&str, &pax_softmode);
64665 + return 1;
64666 +}
64667 +__setup("pax_softmode=", setup_pax_softmode);
64668 +#endif
64669 +
64670 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64671 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64672 static const char *panic_later, *panic_param;
64673 @@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64674 {
64675 int count = preempt_count();
64676 int ret;
64677 + const char *msg1 = "", *msg2 = "";
64678
64679 if (initcall_debug)
64680 ret = do_one_initcall_debug(fn);
64681 @@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64682 sprintf(msgbuf, "error code %d ", ret);
64683
64684 if (preempt_count() != count) {
64685 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64686 + msg1 = " preemption imbalance";
64687 preempt_count() = count;
64688 }
64689 if (irqs_disabled()) {
64690 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64691 + msg2 = " disabled interrupts";
64692 local_irq_enable();
64693 }
64694 - if (msgbuf[0]) {
64695 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64696 + if (msgbuf[0] || *msg1 || *msg2) {
64697 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64698 }
64699
64700 return ret;
64701 @@ -817,7 +863,7 @@ static int __init kernel_init(void * unused)
64702 do_basic_setup();
64703
64704 /* Open the /dev/console on the rootfs, this should never fail */
64705 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64706 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64707 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64708
64709 (void) sys_dup(0);
64710 @@ -830,11 +876,13 @@ static int __init kernel_init(void * unused)
64711 if (!ramdisk_execute_command)
64712 ramdisk_execute_command = "/init";
64713
64714 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64715 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64716 ramdisk_execute_command = NULL;
64717 prepare_namespace();
64718 }
64719
64720 + grsecurity_init();
64721 +
64722 /*
64723 * Ok, we have completed the initial bootup, and
64724 * we're essentially up and running. Get rid of the
64725 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64726 index ed049ea..6442f7f 100644
64727 --- a/ipc/mqueue.c
64728 +++ b/ipc/mqueue.c
64729 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64730 mq_bytes = (mq_msg_tblsz +
64731 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64732
64733 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64734 spin_lock(&mq_lock);
64735 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64736 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
64737 diff --git a/ipc/msg.c b/ipc/msg.c
64738 index 7385de2..a8180e0 100644
64739 --- a/ipc/msg.c
64740 +++ b/ipc/msg.c
64741 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64742 return security_msg_queue_associate(msq, msgflg);
64743 }
64744
64745 +static struct ipc_ops msg_ops = {
64746 + .getnew = newque,
64747 + .associate = msg_security,
64748 + .more_checks = NULL
64749 +};
64750 +
64751 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64752 {
64753 struct ipc_namespace *ns;
64754 - struct ipc_ops msg_ops;
64755 struct ipc_params msg_params;
64756
64757 ns = current->nsproxy->ipc_ns;
64758
64759 - msg_ops.getnew = newque;
64760 - msg_ops.associate = msg_security;
64761 - msg_ops.more_checks = NULL;
64762 -
64763 msg_params.key = key;
64764 msg_params.flg = msgflg;
64765
64766 diff --git a/ipc/sem.c b/ipc/sem.c
64767 index c8e00f8..1135c4e 100644
64768 --- a/ipc/sem.c
64769 +++ b/ipc/sem.c
64770 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64771 return 0;
64772 }
64773
64774 +static struct ipc_ops sem_ops = {
64775 + .getnew = newary,
64776 + .associate = sem_security,
64777 + .more_checks = sem_more_checks
64778 +};
64779 +
64780 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64781 {
64782 struct ipc_namespace *ns;
64783 - struct ipc_ops sem_ops;
64784 struct ipc_params sem_params;
64785
64786 ns = current->nsproxy->ipc_ns;
64787 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64788 if (nsems < 0 || nsems > ns->sc_semmsl)
64789 return -EINVAL;
64790
64791 - sem_ops.getnew = newary;
64792 - sem_ops.associate = sem_security;
64793 - sem_ops.more_checks = sem_more_checks;
64794 -
64795 sem_params.key = key;
64796 sem_params.flg = semflg;
64797 sem_params.u.nsems = nsems;
64798 @@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
64799 int nsems;
64800 struct list_head tasks;
64801
64802 + pax_track_stack();
64803 +
64804 sma = sem_lock_check(ns, semid);
64805 if (IS_ERR(sma))
64806 return PTR_ERR(sma);
64807 @@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
64808 struct ipc_namespace *ns;
64809 struct list_head tasks;
64810
64811 + pax_track_stack();
64812 +
64813 ns = current->nsproxy->ipc_ns;
64814
64815 if (nsops < 1 || semid < 0)
64816 diff --git a/ipc/shm.c b/ipc/shm.c
64817 index 02ecf2c..c8f5627 100644
64818 --- a/ipc/shm.c
64819 +++ b/ipc/shm.c
64820 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64821 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64822 #endif
64823
64824 +#ifdef CONFIG_GRKERNSEC
64825 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64826 + const time_t shm_createtime, const uid_t cuid,
64827 + const int shmid);
64828 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64829 + const time_t shm_createtime);
64830 +#endif
64831 +
64832 void shm_init_ns(struct ipc_namespace *ns)
64833 {
64834 ns->shm_ctlmax = SHMMAX;
64835 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64836 shp->shm_lprid = 0;
64837 shp->shm_atim = shp->shm_dtim = 0;
64838 shp->shm_ctim = get_seconds();
64839 +#ifdef CONFIG_GRKERNSEC
64840 + {
64841 + struct timespec timeval;
64842 + do_posix_clock_monotonic_gettime(&timeval);
64843 +
64844 + shp->shm_createtime = timeval.tv_sec;
64845 + }
64846 +#endif
64847 shp->shm_segsz = size;
64848 shp->shm_nattch = 0;
64849 shp->shm_file = file;
64850 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64851 return 0;
64852 }
64853
64854 +static struct ipc_ops shm_ops = {
64855 + .getnew = newseg,
64856 + .associate = shm_security,
64857 + .more_checks = shm_more_checks
64858 +};
64859 +
64860 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64861 {
64862 struct ipc_namespace *ns;
64863 - struct ipc_ops shm_ops;
64864 struct ipc_params shm_params;
64865
64866 ns = current->nsproxy->ipc_ns;
64867
64868 - shm_ops.getnew = newseg;
64869 - shm_ops.associate = shm_security;
64870 - shm_ops.more_checks = shm_more_checks;
64871 -
64872 shm_params.key = key;
64873 shm_params.flg = shmflg;
64874 shm_params.u.size = size;
64875 @@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
64876 case SHM_LOCK:
64877 case SHM_UNLOCK:
64878 {
64879 - struct file *uninitialized_var(shm_file);
64880 -
64881 lru_add_drain_all(); /* drain pagevecs to lru lists */
64882
64883 shp = shm_lock_check(ns, shmid);
64884 @@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64885 if (err)
64886 goto out_unlock;
64887
64888 +#ifdef CONFIG_GRKERNSEC
64889 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64890 + shp->shm_perm.cuid, shmid) ||
64891 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64892 + err = -EACCES;
64893 + goto out_unlock;
64894 + }
64895 +#endif
64896 +
64897 path = shp->shm_file->f_path;
64898 path_get(&path);
64899 shp->shm_nattch++;
64900 +#ifdef CONFIG_GRKERNSEC
64901 + shp->shm_lapid = current->pid;
64902 +#endif
64903 size = i_size_read(path.dentry->d_inode);
64904 shm_unlock(shp);
64905
64906 diff --git a/kernel/acct.c b/kernel/acct.c
64907 index fa7eb3d..7faf116 100644
64908 --- a/kernel/acct.c
64909 +++ b/kernel/acct.c
64910 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64911 */
64912 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64913 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64914 - file->f_op->write(file, (char *)&ac,
64915 + file->f_op->write(file, (char __force_user *)&ac,
64916 sizeof(acct_t), &file->f_pos);
64917 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64918 set_fs(fs);
64919 diff --git a/kernel/audit.c b/kernel/audit.c
64920 index 0a1355c..dca420f 100644
64921 --- a/kernel/audit.c
64922 +++ b/kernel/audit.c
64923 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64924 3) suppressed due to audit_rate_limit
64925 4) suppressed due to audit_backlog_limit
64926 */
64927 -static atomic_t audit_lost = ATOMIC_INIT(0);
64928 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64929
64930 /* The netlink socket. */
64931 static struct sock *audit_sock;
64932 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64933 unsigned long now;
64934 int print;
64935
64936 - atomic_inc(&audit_lost);
64937 + atomic_inc_unchecked(&audit_lost);
64938
64939 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64940
64941 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64942 printk(KERN_WARNING
64943 "audit: audit_lost=%d audit_rate_limit=%d "
64944 "audit_backlog_limit=%d\n",
64945 - atomic_read(&audit_lost),
64946 + atomic_read_unchecked(&audit_lost),
64947 audit_rate_limit,
64948 audit_backlog_limit);
64949 audit_panic(message);
64950 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64951 status_set.pid = audit_pid;
64952 status_set.rate_limit = audit_rate_limit;
64953 status_set.backlog_limit = audit_backlog_limit;
64954 - status_set.lost = atomic_read(&audit_lost);
64955 + status_set.lost = atomic_read_unchecked(&audit_lost);
64956 status_set.backlog = skb_queue_len(&audit_skb_queue);
64957 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64958 &status_set, sizeof(status_set));
64959 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64960 index ce4b054..8139ed7 100644
64961 --- a/kernel/auditsc.c
64962 +++ b/kernel/auditsc.c
64963 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64964 }
64965
64966 /* global counter which is incremented every time something logs in */
64967 -static atomic_t session_id = ATOMIC_INIT(0);
64968 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64969
64970 /**
64971 * audit_set_loginuid - set a task's audit_context loginuid
64972 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
64973 */
64974 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
64975 {
64976 - unsigned int sessionid = atomic_inc_return(&session_id);
64977 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
64978 struct audit_context *context = task->audit_context;
64979
64980 if (context && context->in_syscall) {
64981 diff --git a/kernel/capability.c b/kernel/capability.c
64982 index 283c529..36ac81e 100644
64983 --- a/kernel/capability.c
64984 +++ b/kernel/capability.c
64985 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64986 * before modification is attempted and the application
64987 * fails.
64988 */
64989 + if (tocopy > ARRAY_SIZE(kdata))
64990 + return -EFAULT;
64991 +
64992 if (copy_to_user(dataptr, kdata, tocopy
64993 * sizeof(struct __user_cap_data_struct))) {
64994 return -EFAULT;
64995 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64996 BUG();
64997 }
64998
64999 - if (security_capable(ns, current_cred(), cap) == 0) {
65000 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
65001 current->flags |= PF_SUPERPRIV;
65002 return true;
65003 }
65004 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
65005 }
65006 EXPORT_SYMBOL(ns_capable);
65007
65008 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
65009 +{
65010 + if (unlikely(!cap_valid(cap))) {
65011 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65012 + BUG();
65013 + }
65014 +
65015 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
65016 + current->flags |= PF_SUPERPRIV;
65017 + return true;
65018 + }
65019 + return false;
65020 +}
65021 +EXPORT_SYMBOL(ns_capable_nolog);
65022 +
65023 +bool capable_nolog(int cap)
65024 +{
65025 + return ns_capable_nolog(&init_user_ns, cap);
65026 +}
65027 +EXPORT_SYMBOL(capable_nolog);
65028 +
65029 /**
65030 * task_ns_capable - Determine whether current task has a superior
65031 * capability targeted at a specific task's user namespace.
65032 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
65033 }
65034 EXPORT_SYMBOL(task_ns_capable);
65035
65036 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
65037 +{
65038 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
65039 +}
65040 +EXPORT_SYMBOL(task_ns_capable_nolog);
65041 +
65042 /**
65043 * nsown_capable - Check superior capability to one's own user_ns
65044 * @cap: The capability in question
65045 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
65046 index 1d2b6ce..87bf267 100644
65047 --- a/kernel/cgroup.c
65048 +++ b/kernel/cgroup.c
65049 @@ -595,6 +595,8 @@ static struct css_set *find_css_set(
65050 struct hlist_head *hhead;
65051 struct cg_cgroup_link *link;
65052
65053 + pax_track_stack();
65054 +
65055 /* First see if we already have a cgroup group that matches
65056 * the desired set */
65057 read_lock(&css_set_lock);
65058 diff --git a/kernel/compat.c b/kernel/compat.c
65059 index e2435ee..8e82199 100644
65060 --- a/kernel/compat.c
65061 +++ b/kernel/compat.c
65062 @@ -13,6 +13,7 @@
65063
65064 #include <linux/linkage.h>
65065 #include <linux/compat.h>
65066 +#include <linux/module.h>
65067 #include <linux/errno.h>
65068 #include <linux/time.h>
65069 #include <linux/signal.h>
65070 @@ -167,7 +168,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65071 mm_segment_t oldfs;
65072 long ret;
65073
65074 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65075 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65076 oldfs = get_fs();
65077 set_fs(KERNEL_DS);
65078 ret = hrtimer_nanosleep_restart(restart);
65079 @@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65080 oldfs = get_fs();
65081 set_fs(KERNEL_DS);
65082 ret = hrtimer_nanosleep(&tu,
65083 - rmtp ? (struct timespec __user *)&rmt : NULL,
65084 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
65085 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65086 set_fs(oldfs);
65087
65088 @@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65089 mm_segment_t old_fs = get_fs();
65090
65091 set_fs(KERNEL_DS);
65092 - ret = sys_sigpending((old_sigset_t __user *) &s);
65093 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
65094 set_fs(old_fs);
65095 if (ret == 0)
65096 ret = put_user(s, set);
65097 @@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
65098 old_fs = get_fs();
65099 set_fs(KERNEL_DS);
65100 ret = sys_sigprocmask(how,
65101 - set ? (old_sigset_t __user *) &s : NULL,
65102 - oset ? (old_sigset_t __user *) &s : NULL);
65103 + set ? (old_sigset_t __force_user *) &s : NULL,
65104 + oset ? (old_sigset_t __force_user *) &s : NULL);
65105 set_fs(old_fs);
65106 if (ret == 0)
65107 if (oset)
65108 @@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65109 mm_segment_t old_fs = get_fs();
65110
65111 set_fs(KERNEL_DS);
65112 - ret = sys_old_getrlimit(resource, &r);
65113 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65114 set_fs(old_fs);
65115
65116 if (!ret) {
65117 @@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65118 mm_segment_t old_fs = get_fs();
65119
65120 set_fs(KERNEL_DS);
65121 - ret = sys_getrusage(who, (struct rusage __user *) &r);
65122 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65123 set_fs(old_fs);
65124
65125 if (ret)
65126 @@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65127 set_fs (KERNEL_DS);
65128 ret = sys_wait4(pid,
65129 (stat_addr ?
65130 - (unsigned int __user *) &status : NULL),
65131 - options, (struct rusage __user *) &r);
65132 + (unsigned int __force_user *) &status : NULL),
65133 + options, (struct rusage __force_user *) &r);
65134 set_fs (old_fs);
65135
65136 if (ret > 0) {
65137 @@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65138 memset(&info, 0, sizeof(info));
65139
65140 set_fs(KERNEL_DS);
65141 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65142 - uru ? (struct rusage __user *)&ru : NULL);
65143 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65144 + uru ? (struct rusage __force_user *)&ru : NULL);
65145 set_fs(old_fs);
65146
65147 if ((ret < 0) || (info.si_signo == 0))
65148 @@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65149 oldfs = get_fs();
65150 set_fs(KERNEL_DS);
65151 err = sys_timer_settime(timer_id, flags,
65152 - (struct itimerspec __user *) &newts,
65153 - (struct itimerspec __user *) &oldts);
65154 + (struct itimerspec __force_user *) &newts,
65155 + (struct itimerspec __force_user *) &oldts);
65156 set_fs(oldfs);
65157 if (!err && old && put_compat_itimerspec(old, &oldts))
65158 return -EFAULT;
65159 @@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65160 oldfs = get_fs();
65161 set_fs(KERNEL_DS);
65162 err = sys_timer_gettime(timer_id,
65163 - (struct itimerspec __user *) &ts);
65164 + (struct itimerspec __force_user *) &ts);
65165 set_fs(oldfs);
65166 if (!err && put_compat_itimerspec(setting, &ts))
65167 return -EFAULT;
65168 @@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65169 oldfs = get_fs();
65170 set_fs(KERNEL_DS);
65171 err = sys_clock_settime(which_clock,
65172 - (struct timespec __user *) &ts);
65173 + (struct timespec __force_user *) &ts);
65174 set_fs(oldfs);
65175 return err;
65176 }
65177 @@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65178 oldfs = get_fs();
65179 set_fs(KERNEL_DS);
65180 err = sys_clock_gettime(which_clock,
65181 - (struct timespec __user *) &ts);
65182 + (struct timespec __force_user *) &ts);
65183 set_fs(oldfs);
65184 if (!err && put_compat_timespec(&ts, tp))
65185 return -EFAULT;
65186 @@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65187
65188 oldfs = get_fs();
65189 set_fs(KERNEL_DS);
65190 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65191 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65192 set_fs(oldfs);
65193
65194 err = compat_put_timex(utp, &txc);
65195 @@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65196 oldfs = get_fs();
65197 set_fs(KERNEL_DS);
65198 err = sys_clock_getres(which_clock,
65199 - (struct timespec __user *) &ts);
65200 + (struct timespec __force_user *) &ts);
65201 set_fs(oldfs);
65202 if (!err && tp && put_compat_timespec(&ts, tp))
65203 return -EFAULT;
65204 @@ -729,9 +730,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65205 long err;
65206 mm_segment_t oldfs;
65207 struct timespec tu;
65208 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65209 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65210
65211 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65212 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65213 oldfs = get_fs();
65214 set_fs(KERNEL_DS);
65215 err = clock_nanosleep_restart(restart);
65216 @@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65217 oldfs = get_fs();
65218 set_fs(KERNEL_DS);
65219 err = sys_clock_nanosleep(which_clock, flags,
65220 - (struct timespec __user *) &in,
65221 - (struct timespec __user *) &out);
65222 + (struct timespec __force_user *) &in,
65223 + (struct timespec __force_user *) &out);
65224 set_fs(oldfs);
65225
65226 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65227 diff --git a/kernel/configs.c b/kernel/configs.c
65228 index 42e8fa0..9e7406b 100644
65229 --- a/kernel/configs.c
65230 +++ b/kernel/configs.c
65231 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65232 struct proc_dir_entry *entry;
65233
65234 /* create the current config file */
65235 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65236 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65237 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65238 + &ikconfig_file_ops);
65239 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65240 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65241 + &ikconfig_file_ops);
65242 +#endif
65243 +#else
65244 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65245 &ikconfig_file_ops);
65246 +#endif
65247 +
65248 if (!entry)
65249 return -ENOMEM;
65250
65251 diff --git a/kernel/cred.c b/kernel/cred.c
65252 index 8ef31f5..f63d997 100644
65253 --- a/kernel/cred.c
65254 +++ b/kernel/cred.c
65255 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
65256 */
65257 void __put_cred(struct cred *cred)
65258 {
65259 + pax_track_stack();
65260 +
65261 kdebug("__put_cred(%p{%d,%d})", cred,
65262 atomic_read(&cred->usage),
65263 read_cred_subscribers(cred));
65264 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
65265 {
65266 struct cred *cred;
65267
65268 + pax_track_stack();
65269 +
65270 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
65271 atomic_read(&tsk->cred->usage),
65272 read_cred_subscribers(tsk->cred));
65273 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct task_struct *task)
65274 {
65275 const struct cred *cred;
65276
65277 + pax_track_stack();
65278 +
65279 rcu_read_lock();
65280
65281 do {
65282 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
65283 {
65284 struct cred *new;
65285
65286 + pax_track_stack();
65287 +
65288 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
65289 if (!new)
65290 return NULL;
65291 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
65292 const struct cred *old;
65293 struct cred *new;
65294
65295 + pax_track_stack();
65296 +
65297 validate_process_creds();
65298
65299 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65300 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
65301 struct thread_group_cred *tgcred = NULL;
65302 struct cred *new;
65303
65304 + pax_track_stack();
65305 +
65306 #ifdef CONFIG_KEYS
65307 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
65308 if (!tgcred)
65309 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
65310 struct cred *new;
65311 int ret;
65312
65313 + pax_track_stack();
65314 +
65315 if (
65316 #ifdef CONFIG_KEYS
65317 !p->cred->thread_keyring &&
65318 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
65319 struct task_struct *task = current;
65320 const struct cred *old = task->real_cred;
65321
65322 + pax_track_stack();
65323 +
65324 kdebug("commit_creds(%p{%d,%d})", new,
65325 atomic_read(&new->usage),
65326 read_cred_subscribers(new));
65327 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
65328
65329 get_cred(new); /* we will require a ref for the subj creds too */
65330
65331 + gr_set_role_label(task, new->uid, new->gid);
65332 +
65333 /* dumpability changes */
65334 if (old->euid != new->euid ||
65335 old->egid != new->egid ||
65336 @@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
65337 */
65338 void abort_creds(struct cred *new)
65339 {
65340 + pax_track_stack();
65341 +
65342 kdebug("abort_creds(%p{%d,%d})", new,
65343 atomic_read(&new->usage),
65344 read_cred_subscribers(new));
65345 @@ -572,6 +592,8 @@ const struct cred *override_creds(const struct cred *new)
65346 {
65347 const struct cred *old = current->cred;
65348
65349 + pax_track_stack();
65350 +
65351 kdebug("override_creds(%p{%d,%d})", new,
65352 atomic_read(&new->usage),
65353 read_cred_subscribers(new));
65354 @@ -601,6 +623,8 @@ void revert_creds(const struct cred *old)
65355 {
65356 const struct cred *override = current->cred;
65357
65358 + pax_track_stack();
65359 +
65360 kdebug("revert_creds(%p{%d,%d})", old,
65361 atomic_read(&old->usage),
65362 read_cred_subscribers(old));
65363 @@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
65364 const struct cred *old;
65365 struct cred *new;
65366
65367 + pax_track_stack();
65368 +
65369 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65370 if (!new)
65371 return NULL;
65372 @@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
65373 */
65374 int set_security_override(struct cred *new, u32 secid)
65375 {
65376 + pax_track_stack();
65377 +
65378 return security_kernel_act_as(new, secid);
65379 }
65380 EXPORT_SYMBOL(set_security_override);
65381 @@ -720,6 +748,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
65382 u32 secid;
65383 int ret;
65384
65385 + pax_track_stack();
65386 +
65387 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
65388 if (ret < 0)
65389 return ret;
65390 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65391 index 0d7c087..01b8cef 100644
65392 --- a/kernel/debug/debug_core.c
65393 +++ b/kernel/debug/debug_core.c
65394 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65395 */
65396 static atomic_t masters_in_kgdb;
65397 static atomic_t slaves_in_kgdb;
65398 -static atomic_t kgdb_break_tasklet_var;
65399 +static atomic_unchecked_t kgdb_break_tasklet_var;
65400 atomic_t kgdb_setting_breakpoint;
65401
65402 struct task_struct *kgdb_usethread;
65403 @@ -129,7 +129,7 @@ int kgdb_single_step;
65404 static pid_t kgdb_sstep_pid;
65405
65406 /* to keep track of the CPU which is doing the single stepping*/
65407 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65408 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65409
65410 /*
65411 * If you are debugging a problem where roundup (the collection of
65412 @@ -542,7 +542,7 @@ return_normal:
65413 * kernel will only try for the value of sstep_tries before
65414 * giving up and continuing on.
65415 */
65416 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65417 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65418 (kgdb_info[cpu].task &&
65419 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65420 atomic_set(&kgdb_active, -1);
65421 @@ -636,8 +636,8 @@ cpu_master_loop:
65422 }
65423
65424 kgdb_restore:
65425 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65426 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65427 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65428 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65429 if (kgdb_info[sstep_cpu].task)
65430 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65431 else
65432 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
65433 static void kgdb_tasklet_bpt(unsigned long ing)
65434 {
65435 kgdb_breakpoint();
65436 - atomic_set(&kgdb_break_tasklet_var, 0);
65437 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65438 }
65439
65440 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65441
65442 void kgdb_schedule_breakpoint(void)
65443 {
65444 - if (atomic_read(&kgdb_break_tasklet_var) ||
65445 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65446 atomic_read(&kgdb_active) != -1 ||
65447 atomic_read(&kgdb_setting_breakpoint))
65448 return;
65449 - atomic_inc(&kgdb_break_tasklet_var);
65450 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65451 tasklet_schedule(&kgdb_tasklet_breakpoint);
65452 }
65453 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65454 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65455 index 63786e7..0780cac 100644
65456 --- a/kernel/debug/kdb/kdb_main.c
65457 +++ b/kernel/debug/kdb/kdb_main.c
65458 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
65459 list_for_each_entry(mod, kdb_modules, list) {
65460
65461 kdb_printf("%-20s%8u 0x%p ", mod->name,
65462 - mod->core_size, (void *)mod);
65463 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65464 #ifdef CONFIG_MODULE_UNLOAD
65465 kdb_printf("%4d ", module_refcount(mod));
65466 #endif
65467 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
65468 kdb_printf(" (Loading)");
65469 else
65470 kdb_printf(" (Live)");
65471 - kdb_printf(" 0x%p", mod->module_core);
65472 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65473
65474 #ifdef CONFIG_MODULE_UNLOAD
65475 {
65476 diff --git a/kernel/events/core.c b/kernel/events/core.c
65477 index 0f85778..0d43716 100644
65478 --- a/kernel/events/core.c
65479 +++ b/kernel/events/core.c
65480 @@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65481 return 0;
65482 }
65483
65484 -static atomic64_t perf_event_id;
65485 +static atomic64_unchecked_t perf_event_id;
65486
65487 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65488 enum event_type_t event_type);
65489 @@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info)
65490
65491 static inline u64 perf_event_count(struct perf_event *event)
65492 {
65493 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65494 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65495 }
65496
65497 static u64 perf_event_read(struct perf_event *event)
65498 @@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65499 mutex_lock(&event->child_mutex);
65500 total += perf_event_read(event);
65501 *enabled += event->total_time_enabled +
65502 - atomic64_read(&event->child_total_time_enabled);
65503 + atomic64_read_unchecked(&event->child_total_time_enabled);
65504 *running += event->total_time_running +
65505 - atomic64_read(&event->child_total_time_running);
65506 + atomic64_read_unchecked(&event->child_total_time_running);
65507
65508 list_for_each_entry(child, &event->child_list, child_list) {
65509 total += perf_event_read(child);
65510 @@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct perf_event *event)
65511 userpg->offset -= local64_read(&event->hw.prev_count);
65512
65513 userpg->time_enabled = enabled +
65514 - atomic64_read(&event->child_total_time_enabled);
65515 + atomic64_read_unchecked(&event->child_total_time_enabled);
65516
65517 userpg->time_running = running +
65518 - atomic64_read(&event->child_total_time_running);
65519 + atomic64_read_unchecked(&event->child_total_time_running);
65520
65521 barrier();
65522 ++userpg->lock;
65523 @@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65524 values[n++] = perf_event_count(event);
65525 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65526 values[n++] = enabled +
65527 - atomic64_read(&event->child_total_time_enabled);
65528 + atomic64_read_unchecked(&event->child_total_time_enabled);
65529 }
65530 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65531 values[n++] = running +
65532 - atomic64_read(&event->child_total_time_running);
65533 + atomic64_read_unchecked(&event->child_total_time_running);
65534 }
65535 if (read_format & PERF_FORMAT_ID)
65536 values[n++] = primary_event_id(event);
65537 @@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65538 * need to add enough zero bytes after the string to handle
65539 * the 64bit alignment we do later.
65540 */
65541 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65542 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65543 if (!buf) {
65544 name = strncpy(tmp, "//enomem", sizeof(tmp));
65545 goto got_name;
65546 }
65547 - name = d_path(&file->f_path, buf, PATH_MAX);
65548 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65549 if (IS_ERR(name)) {
65550 name = strncpy(tmp, "//toolong", sizeof(tmp));
65551 goto got_name;
65552 @@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65553 event->parent = parent_event;
65554
65555 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65556 - event->id = atomic64_inc_return(&perf_event_id);
65557 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65558
65559 event->state = PERF_EVENT_STATE_INACTIVE;
65560
65561 @@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf_event *child_event,
65562 /*
65563 * Add back the child's count to the parent's count:
65564 */
65565 - atomic64_add(child_val, &parent_event->child_count);
65566 - atomic64_add(child_event->total_time_enabled,
65567 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65568 + atomic64_add_unchecked(child_event->total_time_enabled,
65569 &parent_event->child_total_time_enabled);
65570 - atomic64_add(child_event->total_time_running,
65571 + atomic64_add_unchecked(child_event->total_time_running,
65572 &parent_event->child_total_time_running);
65573
65574 /*
65575 diff --git a/kernel/exit.c b/kernel/exit.c
65576 index 2913b35..4465c81 100644
65577 --- a/kernel/exit.c
65578 +++ b/kernel/exit.c
65579 @@ -57,6 +57,10 @@
65580 #include <asm/pgtable.h>
65581 #include <asm/mmu_context.h>
65582
65583 +#ifdef CONFIG_GRKERNSEC
65584 +extern rwlock_t grsec_exec_file_lock;
65585 +#endif
65586 +
65587 static void exit_mm(struct task_struct * tsk);
65588
65589 static void __unhash_process(struct task_struct *p, bool group_dead)
65590 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
65591 struct task_struct *leader;
65592 int zap_leader;
65593 repeat:
65594 +#ifdef CONFIG_NET
65595 + gr_del_task_from_ip_table(p);
65596 +#endif
65597 +
65598 /* don't need to get the RCU readlock here - the process is dead and
65599 * can't be modifying its own credentials. But shut RCU-lockdep up */
65600 rcu_read_lock();
65601 @@ -380,7 +388,7 @@ int allow_signal(int sig)
65602 * know it'll be handled, so that they don't get converted to
65603 * SIGKILL or just silently dropped.
65604 */
65605 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65606 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65607 recalc_sigpending();
65608 spin_unlock_irq(&current->sighand->siglock);
65609 return 0;
65610 @@ -416,6 +424,17 @@ void daemonize(const char *name, ...)
65611 vsnprintf(current->comm, sizeof(current->comm), name, args);
65612 va_end(args);
65613
65614 +#ifdef CONFIG_GRKERNSEC
65615 + write_lock(&grsec_exec_file_lock);
65616 + if (current->exec_file) {
65617 + fput(current->exec_file);
65618 + current->exec_file = NULL;
65619 + }
65620 + write_unlock(&grsec_exec_file_lock);
65621 +#endif
65622 +
65623 + gr_set_kernel_label(current);
65624 +
65625 /*
65626 * If we were started as result of loading a module, close all of the
65627 * user space pages. We don't need them, and if we didn't close them
65628 @@ -895,6 +914,8 @@ NORET_TYPE void do_exit(long code)
65629 struct task_struct *tsk = current;
65630 int group_dead;
65631
65632 + set_fs(USER_DS);
65633 +
65634 profile_task_exit(tsk);
65635
65636 WARN_ON(blk_needs_flush_plug(tsk));
65637 @@ -911,7 +932,6 @@ NORET_TYPE void do_exit(long code)
65638 * mm_release()->clear_child_tid() from writing to a user-controlled
65639 * kernel address.
65640 */
65641 - set_fs(USER_DS);
65642
65643 ptrace_event(PTRACE_EVENT_EXIT, code);
65644
65645 @@ -973,6 +993,9 @@ NORET_TYPE void do_exit(long code)
65646 tsk->exit_code = code;
65647 taskstats_exit(tsk, group_dead);
65648
65649 + gr_acl_handle_psacct(tsk, code);
65650 + gr_acl_handle_exit();
65651 +
65652 exit_mm(tsk);
65653
65654 if (group_dead)
65655 diff --git a/kernel/fork.c b/kernel/fork.c
65656 index 8e6b6f4..9dccf00 100644
65657 --- a/kernel/fork.c
65658 +++ b/kernel/fork.c
65659 @@ -285,7 +285,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65660 *stackend = STACK_END_MAGIC; /* for overflow detection */
65661
65662 #ifdef CONFIG_CC_STACKPROTECTOR
65663 - tsk->stack_canary = get_random_int();
65664 + tsk->stack_canary = pax_get_random_long();
65665 #endif
65666
65667 /*
65668 @@ -309,13 +309,77 @@ out:
65669 }
65670
65671 #ifdef CONFIG_MMU
65672 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
65673 +{
65674 + struct vm_area_struct *tmp;
65675 + unsigned long charge;
65676 + struct mempolicy *pol;
65677 + struct file *file;
65678 +
65679 + charge = 0;
65680 + if (mpnt->vm_flags & VM_ACCOUNT) {
65681 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65682 + if (security_vm_enough_memory(len))
65683 + goto fail_nomem;
65684 + charge = len;
65685 + }
65686 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65687 + if (!tmp)
65688 + goto fail_nomem;
65689 + *tmp = *mpnt;
65690 + tmp->vm_mm = mm;
65691 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65692 + pol = mpol_dup(vma_policy(mpnt));
65693 + if (IS_ERR(pol))
65694 + goto fail_nomem_policy;
65695 + vma_set_policy(tmp, pol);
65696 + if (anon_vma_fork(tmp, mpnt))
65697 + goto fail_nomem_anon_vma_fork;
65698 + tmp->vm_flags &= ~VM_LOCKED;
65699 + tmp->vm_next = tmp->vm_prev = NULL;
65700 + tmp->vm_mirror = NULL;
65701 + file = tmp->vm_file;
65702 + if (file) {
65703 + struct inode *inode = file->f_path.dentry->d_inode;
65704 + struct address_space *mapping = file->f_mapping;
65705 +
65706 + get_file(file);
65707 + if (tmp->vm_flags & VM_DENYWRITE)
65708 + atomic_dec(&inode->i_writecount);
65709 + mutex_lock(&mapping->i_mmap_mutex);
65710 + if (tmp->vm_flags & VM_SHARED)
65711 + mapping->i_mmap_writable++;
65712 + flush_dcache_mmap_lock(mapping);
65713 + /* insert tmp into the share list, just after mpnt */
65714 + vma_prio_tree_add(tmp, mpnt);
65715 + flush_dcache_mmap_unlock(mapping);
65716 + mutex_unlock(&mapping->i_mmap_mutex);
65717 + }
65718 +
65719 + /*
65720 + * Clear hugetlb-related page reserves for children. This only
65721 + * affects MAP_PRIVATE mappings. Faults generated by the child
65722 + * are not guaranteed to succeed, even if read-only
65723 + */
65724 + if (is_vm_hugetlb_page(tmp))
65725 + reset_vma_resv_huge_pages(tmp);
65726 +
65727 + return tmp;
65728 +
65729 +fail_nomem_anon_vma_fork:
65730 + mpol_put(pol);
65731 +fail_nomem_policy:
65732 + kmem_cache_free(vm_area_cachep, tmp);
65733 +fail_nomem:
65734 + vm_unacct_memory(charge);
65735 + return NULL;
65736 +}
65737 +
65738 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65739 {
65740 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65741 struct rb_node **rb_link, *rb_parent;
65742 int retval;
65743 - unsigned long charge;
65744 - struct mempolicy *pol;
65745
65746 down_write(&oldmm->mmap_sem);
65747 flush_cache_dup_mm(oldmm);
65748 @@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65749 mm->locked_vm = 0;
65750 mm->mmap = NULL;
65751 mm->mmap_cache = NULL;
65752 - mm->free_area_cache = oldmm->mmap_base;
65753 - mm->cached_hole_size = ~0UL;
65754 + mm->free_area_cache = oldmm->free_area_cache;
65755 + mm->cached_hole_size = oldmm->cached_hole_size;
65756 mm->map_count = 0;
65757 cpumask_clear(mm_cpumask(mm));
65758 mm->mm_rb = RB_ROOT;
65759 @@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65760
65761 prev = NULL;
65762 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65763 - struct file *file;
65764 -
65765 if (mpnt->vm_flags & VM_DONTCOPY) {
65766 long pages = vma_pages(mpnt);
65767 mm->total_vm -= pages;
65768 @@ -353,55 +415,13 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65769 -pages);
65770 continue;
65771 }
65772 - charge = 0;
65773 - if (mpnt->vm_flags & VM_ACCOUNT) {
65774 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65775 - if (security_vm_enough_memory(len))
65776 - goto fail_nomem;
65777 - charge = len;
65778 - }
65779 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65780 - if (!tmp)
65781 - goto fail_nomem;
65782 - *tmp = *mpnt;
65783 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65784 - pol = mpol_dup(vma_policy(mpnt));
65785 - retval = PTR_ERR(pol);
65786 - if (IS_ERR(pol))
65787 - goto fail_nomem_policy;
65788 - vma_set_policy(tmp, pol);
65789 - tmp->vm_mm = mm;
65790 - if (anon_vma_fork(tmp, mpnt))
65791 - goto fail_nomem_anon_vma_fork;
65792 - tmp->vm_flags &= ~VM_LOCKED;
65793 - tmp->vm_next = tmp->vm_prev = NULL;
65794 - file = tmp->vm_file;
65795 - if (file) {
65796 - struct inode *inode = file->f_path.dentry->d_inode;
65797 - struct address_space *mapping = file->f_mapping;
65798 -
65799 - get_file(file);
65800 - if (tmp->vm_flags & VM_DENYWRITE)
65801 - atomic_dec(&inode->i_writecount);
65802 - mutex_lock(&mapping->i_mmap_mutex);
65803 - if (tmp->vm_flags & VM_SHARED)
65804 - mapping->i_mmap_writable++;
65805 - flush_dcache_mmap_lock(mapping);
65806 - /* insert tmp into the share list, just after mpnt */
65807 - vma_prio_tree_add(tmp, mpnt);
65808 - flush_dcache_mmap_unlock(mapping);
65809 - mutex_unlock(&mapping->i_mmap_mutex);
65810 + tmp = dup_vma(mm, mpnt);
65811 + if (!tmp) {
65812 + retval = -ENOMEM;
65813 + goto out;
65814 }
65815
65816 /*
65817 - * Clear hugetlb-related page reserves for children. This only
65818 - * affects MAP_PRIVATE mappings. Faults generated by the child
65819 - * are not guaranteed to succeed, even if read-only
65820 - */
65821 - if (is_vm_hugetlb_page(tmp))
65822 - reset_vma_resv_huge_pages(tmp);
65823 -
65824 - /*
65825 * Link in the new vma and copy the page table entries.
65826 */
65827 *pprev = tmp;
65828 @@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65829 if (retval)
65830 goto out;
65831 }
65832 +
65833 +#ifdef CONFIG_PAX_SEGMEXEC
65834 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65835 + struct vm_area_struct *mpnt_m;
65836 +
65837 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65838 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65839 +
65840 + if (!mpnt->vm_mirror)
65841 + continue;
65842 +
65843 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65844 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65845 + mpnt->vm_mirror = mpnt_m;
65846 + } else {
65847 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65848 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65849 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65850 + mpnt->vm_mirror->vm_mirror = mpnt;
65851 + }
65852 + }
65853 + BUG_ON(mpnt_m);
65854 + }
65855 +#endif
65856 +
65857 /* a new mm has just been created */
65858 arch_dup_mmap(oldmm, mm);
65859 retval = 0;
65860 @@ -430,14 +475,6 @@ out:
65861 flush_tlb_mm(oldmm);
65862 up_write(&oldmm->mmap_sem);
65863 return retval;
65864 -fail_nomem_anon_vma_fork:
65865 - mpol_put(pol);
65866 -fail_nomem_policy:
65867 - kmem_cache_free(vm_area_cachep, tmp);
65868 -fail_nomem:
65869 - retval = -ENOMEM;
65870 - vm_unacct_memory(charge);
65871 - goto out;
65872 }
65873
65874 static inline int mm_alloc_pgd(struct mm_struct *mm)
65875 @@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65876 spin_unlock(&fs->lock);
65877 return -EAGAIN;
65878 }
65879 - fs->users++;
65880 + atomic_inc(&fs->users);
65881 spin_unlock(&fs->lock);
65882 return 0;
65883 }
65884 tsk->fs = copy_fs_struct(fs);
65885 if (!tsk->fs)
65886 return -ENOMEM;
65887 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65888 return 0;
65889 }
65890
65891 @@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65892 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65893 #endif
65894 retval = -EAGAIN;
65895 +
65896 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65897 +
65898 if (atomic_read(&p->real_cred->user->processes) >=
65899 task_rlimit(p, RLIMIT_NPROC)) {
65900 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65901 @@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65902 if (clone_flags & CLONE_THREAD)
65903 p->tgid = current->tgid;
65904
65905 + gr_copy_label(p);
65906 +
65907 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65908 /*
65909 * Clear TID on mm_release()?
65910 @@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
65911 bad_fork_free:
65912 free_task(p);
65913 fork_out:
65914 + gr_log_forkfail(retval);
65915 +
65916 return ERR_PTR(retval);
65917 }
65918
65919 @@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
65920 if (clone_flags & CLONE_PARENT_SETTID)
65921 put_user(nr, parent_tidptr);
65922
65923 + gr_handle_brute_check();
65924 +
65925 if (clone_flags & CLONE_VFORK) {
65926 p->vfork_done = &vfork;
65927 init_completion(&vfork);
65928 @@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65929 return 0;
65930
65931 /* don't need lock here; in the worst case we'll do useless copy */
65932 - if (fs->users == 1)
65933 + if (atomic_read(&fs->users) == 1)
65934 return 0;
65935
65936 *new_fsp = copy_fs_struct(fs);
65937 @@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65938 fs = current->fs;
65939 spin_lock(&fs->lock);
65940 current->fs = new_fs;
65941 - if (--fs->users)
65942 + gr_set_chroot_entries(current, &current->fs->root);
65943 + if (atomic_dec_return(&fs->users))
65944 new_fs = NULL;
65945 else
65946 new_fs = fs;
65947 diff --git a/kernel/futex.c b/kernel/futex.c
65948 index 11cbe05..9ff191b 100644
65949 --- a/kernel/futex.c
65950 +++ b/kernel/futex.c
65951 @@ -54,6 +54,7 @@
65952 #include <linux/mount.h>
65953 #include <linux/pagemap.h>
65954 #include <linux/syscalls.h>
65955 +#include <linux/ptrace.h>
65956 #include <linux/signal.h>
65957 #include <linux/module.h>
65958 #include <linux/magic.h>
65959 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65960 struct page *page, *page_head;
65961 int err, ro = 0;
65962
65963 +#ifdef CONFIG_PAX_SEGMEXEC
65964 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65965 + return -EFAULT;
65966 +#endif
65967 +
65968 /*
65969 * The futex address must be "naturally" aligned.
65970 */
65971 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
65972 struct futex_q q = futex_q_init;
65973 int ret;
65974
65975 + pax_track_stack();
65976 +
65977 if (!bitset)
65978 return -EINVAL;
65979 q.bitset = bitset;
65980 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
65981 struct futex_q q = futex_q_init;
65982 int res, ret;
65983
65984 + pax_track_stack();
65985 +
65986 if (!bitset)
65987 return -EINVAL;
65988
65989 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65990 {
65991 struct robust_list_head __user *head;
65992 unsigned long ret;
65993 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
65994 const struct cred *cred = current_cred(), *pcred;
65995 +#endif
65996
65997 if (!futex_cmpxchg_enabled)
65998 return -ENOSYS;
65999 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
66000 if (!p)
66001 goto err_unlock;
66002 ret = -EPERM;
66003 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66004 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
66005 + goto err_unlock;
66006 +#else
66007 pcred = __task_cred(p);
66008 /* If victim is in different user_ns, then uids are not
66009 comparable, so we must have CAP_SYS_PTRACE */
66010 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
66011 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
66012 goto err_unlock;
66013 ok:
66014 +#endif
66015 head = p->robust_list;
66016 rcu_read_unlock();
66017 }
66018 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
66019 {
66020 u32 curval;
66021 int i;
66022 + mm_segment_t oldfs;
66023
66024 /*
66025 * This will fail and we want it. Some arch implementations do
66026 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
66027 * implementation, the non-functional ones will return
66028 * -ENOSYS.
66029 */
66030 + oldfs = get_fs();
66031 + set_fs(USER_DS);
66032 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66033 futex_cmpxchg_enabled = 1;
66034 + set_fs(oldfs);
66035
66036 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66037 plist_head_init(&futex_queues[i].chain);
66038 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
66039 index 5f9e689..03afa21 100644
66040 --- a/kernel/futex_compat.c
66041 +++ b/kernel/futex_compat.c
66042 @@ -10,6 +10,7 @@
66043 #include <linux/compat.h>
66044 #include <linux/nsproxy.h>
66045 #include <linux/futex.h>
66046 +#include <linux/ptrace.h>
66047
66048 #include <asm/uaccess.h>
66049
66050 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66051 {
66052 struct compat_robust_list_head __user *head;
66053 unsigned long ret;
66054 - const struct cred *cred = current_cred(), *pcred;
66055 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
66056 + const struct cred *cred = current_cred();
66057 + const struct cred *pcred;
66058 +#endif
66059
66060 if (!futex_cmpxchg_enabled)
66061 return -ENOSYS;
66062 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66063 if (!p)
66064 goto err_unlock;
66065 ret = -EPERM;
66066 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66067 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
66068 + goto err_unlock;
66069 +#else
66070 pcred = __task_cred(p);
66071 /* If victim is in different user_ns, then uids are not
66072 comparable, so we must have CAP_SYS_PTRACE */
66073 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66074 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
66075 goto err_unlock;
66076 ok:
66077 +#endif
66078 head = p->compat_robust_list;
66079 rcu_read_unlock();
66080 }
66081 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66082 index 9b22d03..6295b62 100644
66083 --- a/kernel/gcov/base.c
66084 +++ b/kernel/gcov/base.c
66085 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
66086 }
66087
66088 #ifdef CONFIG_MODULES
66089 -static inline int within(void *addr, void *start, unsigned long size)
66090 -{
66091 - return ((addr >= start) && (addr < start + size));
66092 -}
66093 -
66094 /* Update list and generate events when modules are unloaded. */
66095 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66096 void *data)
66097 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66098 prev = NULL;
66099 /* Remove entries located in module from linked list. */
66100 for (info = gcov_info_head; info; info = info->next) {
66101 - if (within(info, mod->module_core, mod->core_size)) {
66102 + if (within_module_core_rw((unsigned long)info, mod)) {
66103 if (prev)
66104 prev->next = info->next;
66105 else
66106 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66107 index a9205e3..1c6f5c0 100644
66108 --- a/kernel/hrtimer.c
66109 +++ b/kernel/hrtimer.c
66110 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
66111 local_irq_restore(flags);
66112 }
66113
66114 -static void run_hrtimer_softirq(struct softirq_action *h)
66115 +static void run_hrtimer_softirq(void)
66116 {
66117 hrtimer_peek_ahead_timers();
66118 }
66119 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66120 index a8ce450..5519bce 100644
66121 --- a/kernel/jump_label.c
66122 +++ b/kernel/jump_label.c
66123 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66124
66125 size = (((unsigned long)stop - (unsigned long)start)
66126 / sizeof(struct jump_entry));
66127 + pax_open_kernel();
66128 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66129 + pax_close_kernel();
66130 }
66131
66132 static void jump_label_update(struct jump_label_key *key, int enable);
66133 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66134 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66135 struct jump_entry *iter;
66136
66137 + pax_open_kernel();
66138 for (iter = iter_start; iter < iter_stop; iter++) {
66139 if (within_module_init(iter->code, mod))
66140 iter->code = 0;
66141 }
66142 + pax_close_kernel();
66143 }
66144
66145 static int
66146 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66147 index 079f1d3..a407562 100644
66148 --- a/kernel/kallsyms.c
66149 +++ b/kernel/kallsyms.c
66150 @@ -11,6 +11,9 @@
66151 * Changed the compression method from stem compression to "table lookup"
66152 * compression (see scripts/kallsyms.c for a more complete description)
66153 */
66154 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66155 +#define __INCLUDED_BY_HIDESYM 1
66156 +#endif
66157 #include <linux/kallsyms.h>
66158 #include <linux/module.h>
66159 #include <linux/init.h>
66160 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66161
66162 static inline int is_kernel_inittext(unsigned long addr)
66163 {
66164 + if (system_state != SYSTEM_BOOTING)
66165 + return 0;
66166 +
66167 if (addr >= (unsigned long)_sinittext
66168 && addr <= (unsigned long)_einittext)
66169 return 1;
66170 return 0;
66171 }
66172
66173 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66174 +#ifdef CONFIG_MODULES
66175 +static inline int is_module_text(unsigned long addr)
66176 +{
66177 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66178 + return 1;
66179 +
66180 + addr = ktla_ktva(addr);
66181 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66182 +}
66183 +#else
66184 +static inline int is_module_text(unsigned long addr)
66185 +{
66186 + return 0;
66187 +}
66188 +#endif
66189 +#endif
66190 +
66191 static inline int is_kernel_text(unsigned long addr)
66192 {
66193 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66194 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66195
66196 static inline int is_kernel(unsigned long addr)
66197 {
66198 +
66199 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66200 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
66201 + return 1;
66202 +
66203 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66204 +#else
66205 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66206 +#endif
66207 +
66208 return 1;
66209 return in_gate_area_no_mm(addr);
66210 }
66211
66212 static int is_ksym_addr(unsigned long addr)
66213 {
66214 +
66215 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66216 + if (is_module_text(addr))
66217 + return 0;
66218 +#endif
66219 +
66220 if (all_var)
66221 return is_kernel(addr);
66222
66223 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66224
66225 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66226 {
66227 - iter->name[0] = '\0';
66228 iter->nameoff = get_symbol_offset(new_pos);
66229 iter->pos = new_pos;
66230 }
66231 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66232 {
66233 struct kallsym_iter *iter = m->private;
66234
66235 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66236 + if (current_uid())
66237 + return 0;
66238 +#endif
66239 +
66240 /* Some debugging symbols have no name. Ignore them. */
66241 if (!iter->name[0])
66242 return 0;
66243 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66244 struct kallsym_iter *iter;
66245 int ret;
66246
66247 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66248 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66249 if (!iter)
66250 return -ENOMEM;
66251 reset_iter(iter, 0);
66252 diff --git a/kernel/kexec.c b/kernel/kexec.c
66253 index 296fbc8..84cb857 100644
66254 --- a/kernel/kexec.c
66255 +++ b/kernel/kexec.c
66256 @@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66257 unsigned long flags)
66258 {
66259 struct compat_kexec_segment in;
66260 - struct kexec_segment out, __user *ksegments;
66261 + struct kexec_segment out;
66262 + struct kexec_segment __user *ksegments;
66263 unsigned long i, result;
66264
66265 /* Don't allow clients that don't understand the native
66266 diff --git a/kernel/kmod.c b/kernel/kmod.c
66267 index a4bea97..7a1ae9a 100644
66268 --- a/kernel/kmod.c
66269 +++ b/kernel/kmod.c
66270 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
66271 * If module auto-loading support is disabled then this function
66272 * becomes a no-operation.
66273 */
66274 -int __request_module(bool wait, const char *fmt, ...)
66275 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66276 {
66277 - va_list args;
66278 char module_name[MODULE_NAME_LEN];
66279 unsigned int max_modprobes;
66280 int ret;
66281 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
66282 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
66283 static char *envp[] = { "HOME=/",
66284 "TERM=linux",
66285 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
66286 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
66287 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66288 static int kmod_loop_msg;
66289
66290 - va_start(args, fmt);
66291 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66292 - va_end(args);
66293 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66294 if (ret >= MODULE_NAME_LEN)
66295 return -ENAMETOOLONG;
66296
66297 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
66298 if (ret)
66299 return ret;
66300
66301 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66302 + if (!current_uid()) {
66303 + /* hack to workaround consolekit/udisks stupidity */
66304 + read_lock(&tasklist_lock);
66305 + if (!strcmp(current->comm, "mount") &&
66306 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66307 + read_unlock(&tasklist_lock);
66308 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66309 + return -EPERM;
66310 + }
66311 + read_unlock(&tasklist_lock);
66312 + }
66313 +#endif
66314 +
66315 /* If modprobe needs a service that is in a module, we get a recursive
66316 * loop. Limit the number of running kmod threads to max_threads/2 or
66317 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66318 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
66319 atomic_dec(&kmod_concurrent);
66320 return ret;
66321 }
66322 +
66323 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66324 +{
66325 + va_list args;
66326 + int ret;
66327 +
66328 + va_start(args, fmt);
66329 + ret = ____request_module(wait, module_param, fmt, args);
66330 + va_end(args);
66331 +
66332 + return ret;
66333 +}
66334 +
66335 +int __request_module(bool wait, const char *fmt, ...)
66336 +{
66337 + va_list args;
66338 + int ret;
66339 +
66340 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66341 + if (current_uid()) {
66342 + char module_param[MODULE_NAME_LEN];
66343 +
66344 + memset(module_param, 0, sizeof(module_param));
66345 +
66346 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66347 +
66348 + va_start(args, fmt);
66349 + ret = ____request_module(wait, module_param, fmt, args);
66350 + va_end(args);
66351 +
66352 + return ret;
66353 + }
66354 +#endif
66355 +
66356 + va_start(args, fmt);
66357 + ret = ____request_module(wait, NULL, fmt, args);
66358 + va_end(args);
66359 +
66360 + return ret;
66361 +}
66362 +
66363 EXPORT_SYMBOL(__request_module);
66364 #endif /* CONFIG_MODULES */
66365
66366 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
66367 *
66368 * Thus the __user pointer cast is valid here.
66369 */
66370 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
66371 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66372
66373 /*
66374 * If ret is 0, either ____call_usermodehelper failed and the
66375 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66376 index b30fd54..11821ec 100644
66377 --- a/kernel/kprobes.c
66378 +++ b/kernel/kprobes.c
66379 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66380 * kernel image and loaded module images reside. This is required
66381 * so x86_64 can correctly handle the %rip-relative fixups.
66382 */
66383 - kip->insns = module_alloc(PAGE_SIZE);
66384 + kip->insns = module_alloc_exec(PAGE_SIZE);
66385 if (!kip->insns) {
66386 kfree(kip);
66387 return NULL;
66388 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66389 */
66390 if (!list_is_singular(&kip->list)) {
66391 list_del(&kip->list);
66392 - module_free(NULL, kip->insns);
66393 + module_free_exec(NULL, kip->insns);
66394 kfree(kip);
66395 }
66396 return 1;
66397 @@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
66398 {
66399 int i, err = 0;
66400 unsigned long offset = 0, size = 0;
66401 - char *modname, namebuf[128];
66402 + char *modname, namebuf[KSYM_NAME_LEN];
66403 const char *symbol_name;
66404 void *addr;
66405 struct kprobe_blackpoint *kb;
66406 @@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66407 const char *sym = NULL;
66408 unsigned int i = *(loff_t *) v;
66409 unsigned long offset = 0;
66410 - char *modname, namebuf[128];
66411 + char *modname, namebuf[KSYM_NAME_LEN];
66412
66413 head = &kprobe_table[i];
66414 preempt_disable();
66415 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66416 index 91d67ce..ac259df 100644
66417 --- a/kernel/lockdep.c
66418 +++ b/kernel/lockdep.c
66419 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
66420 end = (unsigned long) &_end,
66421 addr = (unsigned long) obj;
66422
66423 +#ifdef CONFIG_PAX_KERNEXEC
66424 + start = ktla_ktva(start);
66425 +#endif
66426 +
66427 /*
66428 * static variable?
66429 */
66430 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66431 if (!static_obj(lock->key)) {
66432 debug_locks_off();
66433 printk("INFO: trying to register non-static key.\n");
66434 + printk("lock:%pS key:%pS.\n", lock, lock->key);
66435 printk("the code is fine but needs lockdep annotation.\n");
66436 printk("turning off the locking correctness validator.\n");
66437 dump_stack();
66438 @@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66439 if (!class)
66440 return 0;
66441 }
66442 - atomic_inc((atomic_t *)&class->ops);
66443 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66444 if (very_verbose(class)) {
66445 printk("\nacquire class [%p] %s", class->key, class->name);
66446 if (class->name_version > 1)
66447 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66448 index 71edd2f..e0542a5 100644
66449 --- a/kernel/lockdep_proc.c
66450 +++ b/kernel/lockdep_proc.c
66451 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66452
66453 static void print_name(struct seq_file *m, struct lock_class *class)
66454 {
66455 - char str[128];
66456 + char str[KSYM_NAME_LEN];
66457 const char *name = class->name;
66458
66459 if (!name) {
66460 diff --git a/kernel/module.c b/kernel/module.c
66461 index 04379f92..fba2faf 100644
66462 --- a/kernel/module.c
66463 +++ b/kernel/module.c
66464 @@ -58,6 +58,7 @@
66465 #include <linux/jump_label.h>
66466 #include <linux/pfn.h>
66467 #include <linux/bsearch.h>
66468 +#include <linux/grsecurity.h>
66469
66470 #define CREATE_TRACE_POINTS
66471 #include <trace/events/module.h>
66472 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66473
66474 /* Bounds of module allocation, for speeding __module_address.
66475 * Protected by module_mutex. */
66476 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66477 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66478 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66479
66480 int register_module_notifier(struct notifier_block * nb)
66481 {
66482 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66483 return true;
66484
66485 list_for_each_entry_rcu(mod, &modules, list) {
66486 - struct symsearch arr[] = {
66487 + struct symsearch modarr[] = {
66488 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66489 NOT_GPL_ONLY, false },
66490 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66491 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66492 #endif
66493 };
66494
66495 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66496 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66497 return true;
66498 }
66499 return false;
66500 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66501 static int percpu_modalloc(struct module *mod,
66502 unsigned long size, unsigned long align)
66503 {
66504 - if (align > PAGE_SIZE) {
66505 + if (align-1 >= PAGE_SIZE) {
66506 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66507 mod->name, align, PAGE_SIZE);
66508 align = PAGE_SIZE;
66509 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
66510 */
66511 #ifdef CONFIG_SYSFS
66512
66513 -#ifdef CONFIG_KALLSYMS
66514 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66515 static inline bool sect_empty(const Elf_Shdr *sect)
66516 {
66517 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66518 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
66519
66520 static void unset_module_core_ro_nx(struct module *mod)
66521 {
66522 - set_page_attributes(mod->module_core + mod->core_text_size,
66523 - mod->module_core + mod->core_size,
66524 + set_page_attributes(mod->module_core_rw,
66525 + mod->module_core_rw + mod->core_size_rw,
66526 set_memory_x);
66527 - set_page_attributes(mod->module_core,
66528 - mod->module_core + mod->core_ro_size,
66529 + set_page_attributes(mod->module_core_rx,
66530 + mod->module_core_rx + mod->core_size_rx,
66531 set_memory_rw);
66532 }
66533
66534 static void unset_module_init_ro_nx(struct module *mod)
66535 {
66536 - set_page_attributes(mod->module_init + mod->init_text_size,
66537 - mod->module_init + mod->init_size,
66538 + set_page_attributes(mod->module_init_rw,
66539 + mod->module_init_rw + mod->init_size_rw,
66540 set_memory_x);
66541 - set_page_attributes(mod->module_init,
66542 - mod->module_init + mod->init_ro_size,
66543 + set_page_attributes(mod->module_init_rx,
66544 + mod->module_init_rx + mod->init_size_rx,
66545 set_memory_rw);
66546 }
66547
66548 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
66549
66550 mutex_lock(&module_mutex);
66551 list_for_each_entry_rcu(mod, &modules, list) {
66552 - if ((mod->module_core) && (mod->core_text_size)) {
66553 - set_page_attributes(mod->module_core,
66554 - mod->module_core + mod->core_text_size,
66555 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66556 + set_page_attributes(mod->module_core_rx,
66557 + mod->module_core_rx + mod->core_size_rx,
66558 set_memory_rw);
66559 }
66560 - if ((mod->module_init) && (mod->init_text_size)) {
66561 - set_page_attributes(mod->module_init,
66562 - mod->module_init + mod->init_text_size,
66563 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66564 + set_page_attributes(mod->module_init_rx,
66565 + mod->module_init_rx + mod->init_size_rx,
66566 set_memory_rw);
66567 }
66568 }
66569 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
66570
66571 mutex_lock(&module_mutex);
66572 list_for_each_entry_rcu(mod, &modules, list) {
66573 - if ((mod->module_core) && (mod->core_text_size)) {
66574 - set_page_attributes(mod->module_core,
66575 - mod->module_core + mod->core_text_size,
66576 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66577 + set_page_attributes(mod->module_core_rx,
66578 + mod->module_core_rx + mod->core_size_rx,
66579 set_memory_ro);
66580 }
66581 - if ((mod->module_init) && (mod->init_text_size)) {
66582 - set_page_attributes(mod->module_init,
66583 - mod->module_init + mod->init_text_size,
66584 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66585 + set_page_attributes(mod->module_init_rx,
66586 + mod->module_init_rx + mod->init_size_rx,
66587 set_memory_ro);
66588 }
66589 }
66590 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
66591
66592 /* This may be NULL, but that's OK */
66593 unset_module_init_ro_nx(mod);
66594 - module_free(mod, mod->module_init);
66595 + module_free(mod, mod->module_init_rw);
66596 + module_free_exec(mod, mod->module_init_rx);
66597 kfree(mod->args);
66598 percpu_modfree(mod);
66599
66600 /* Free lock-classes: */
66601 - lockdep_free_key_range(mod->module_core, mod->core_size);
66602 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66603 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66604
66605 /* Finally, free the core (containing the module structure) */
66606 unset_module_core_ro_nx(mod);
66607 - module_free(mod, mod->module_core);
66608 + module_free_exec(mod, mod->module_core_rx);
66609 + module_free(mod, mod->module_core_rw);
66610
66611 #ifdef CONFIG_MPU
66612 update_protections(current->mm);
66613 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66614 unsigned int i;
66615 int ret = 0;
66616 const struct kernel_symbol *ksym;
66617 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66618 + int is_fs_load = 0;
66619 + int register_filesystem_found = 0;
66620 + char *p;
66621 +
66622 + p = strstr(mod->args, "grsec_modharden_fs");
66623 + if (p) {
66624 + char *endptr = p + strlen("grsec_modharden_fs");
66625 + /* copy \0 as well */
66626 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66627 + is_fs_load = 1;
66628 + }
66629 +#endif
66630
66631 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66632 const char *name = info->strtab + sym[i].st_name;
66633
66634 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66635 + /* it's a real shame this will never get ripped and copied
66636 + upstream! ;(
66637 + */
66638 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66639 + register_filesystem_found = 1;
66640 +#endif
66641 +
66642 switch (sym[i].st_shndx) {
66643 case SHN_COMMON:
66644 /* We compiled with -fno-common. These are not
66645 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66646 ksym = resolve_symbol_wait(mod, info, name);
66647 /* Ok if resolved. */
66648 if (ksym && !IS_ERR(ksym)) {
66649 + pax_open_kernel();
66650 sym[i].st_value = ksym->value;
66651 + pax_close_kernel();
66652 break;
66653 }
66654
66655 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66656 secbase = (unsigned long)mod_percpu(mod);
66657 else
66658 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66659 + pax_open_kernel();
66660 sym[i].st_value += secbase;
66661 + pax_close_kernel();
66662 break;
66663 }
66664 }
66665
66666 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66667 + if (is_fs_load && !register_filesystem_found) {
66668 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66669 + ret = -EPERM;
66670 + }
66671 +#endif
66672 +
66673 return ret;
66674 }
66675
66676 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66677 || s->sh_entsize != ~0UL
66678 || strstarts(sname, ".init"))
66679 continue;
66680 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66681 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66682 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66683 + else
66684 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66685 DEBUGP("\t%s\n", name);
66686 }
66687 - switch (m) {
66688 - case 0: /* executable */
66689 - mod->core_size = debug_align(mod->core_size);
66690 - mod->core_text_size = mod->core_size;
66691 - break;
66692 - case 1: /* RO: text and ro-data */
66693 - mod->core_size = debug_align(mod->core_size);
66694 - mod->core_ro_size = mod->core_size;
66695 - break;
66696 - case 3: /* whole core */
66697 - mod->core_size = debug_align(mod->core_size);
66698 - break;
66699 - }
66700 }
66701
66702 DEBUGP("Init section allocation order:\n");
66703 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66704 || s->sh_entsize != ~0UL
66705 || !strstarts(sname, ".init"))
66706 continue;
66707 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66708 - | INIT_OFFSET_MASK);
66709 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66710 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66711 + else
66712 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66713 + s->sh_entsize |= INIT_OFFSET_MASK;
66714 DEBUGP("\t%s\n", sname);
66715 }
66716 - switch (m) {
66717 - case 0: /* executable */
66718 - mod->init_size = debug_align(mod->init_size);
66719 - mod->init_text_size = mod->init_size;
66720 - break;
66721 - case 1: /* RO: text and ro-data */
66722 - mod->init_size = debug_align(mod->init_size);
66723 - mod->init_ro_size = mod->init_size;
66724 - break;
66725 - case 3: /* whole init */
66726 - mod->init_size = debug_align(mod->init_size);
66727 - break;
66728 - }
66729 }
66730 }
66731
66732 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66733
66734 /* Put symbol section at end of init part of module. */
66735 symsect->sh_flags |= SHF_ALLOC;
66736 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66737 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66738 info->index.sym) | INIT_OFFSET_MASK;
66739 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
66740
66741 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66742 }
66743
66744 /* Append room for core symbols at end of core part. */
66745 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66746 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66747 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66748 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66749
66750 /* Put string table section at end of init part of module. */
66751 strsect->sh_flags |= SHF_ALLOC;
66752 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66753 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66754 info->index.str) | INIT_OFFSET_MASK;
66755 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
66756
66757 /* Append room for core symbols' strings at end of core part. */
66758 - info->stroffs = mod->core_size;
66759 + info->stroffs = mod->core_size_rx;
66760 __set_bit(0, info->strmap);
66761 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
66762 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
66763 }
66764
66765 static void add_kallsyms(struct module *mod, const struct load_info *info)
66766 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66767 /* Make sure we get permanent strtab: don't use info->strtab. */
66768 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66769
66770 + pax_open_kernel();
66771 +
66772 /* Set types up while we still have access to sections. */
66773 for (i = 0; i < mod->num_symtab; i++)
66774 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66775
66776 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66777 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66778 src = mod->symtab;
66779 *dst = *src;
66780 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
66781 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66782 }
66783 mod->core_num_syms = ndst;
66784
66785 - mod->core_strtab = s = mod->module_core + info->stroffs;
66786 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66787 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
66788 if (test_bit(i, info->strmap))
66789 *++s = mod->strtab[i];
66790 +
66791 + pax_close_kernel();
66792 }
66793 #else
66794 static inline void layout_symtab(struct module *mod, struct load_info *info)
66795 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
66796 return size == 0 ? NULL : vmalloc_exec(size);
66797 }
66798
66799 -static void *module_alloc_update_bounds(unsigned long size)
66800 +static void *module_alloc_update_bounds_rw(unsigned long size)
66801 {
66802 void *ret = module_alloc(size);
66803
66804 if (ret) {
66805 mutex_lock(&module_mutex);
66806 /* Update module bounds. */
66807 - if ((unsigned long)ret < module_addr_min)
66808 - module_addr_min = (unsigned long)ret;
66809 - if ((unsigned long)ret + size > module_addr_max)
66810 - module_addr_max = (unsigned long)ret + size;
66811 + if ((unsigned long)ret < module_addr_min_rw)
66812 + module_addr_min_rw = (unsigned long)ret;
66813 + if ((unsigned long)ret + size > module_addr_max_rw)
66814 + module_addr_max_rw = (unsigned long)ret + size;
66815 + mutex_unlock(&module_mutex);
66816 + }
66817 + return ret;
66818 +}
66819 +
66820 +static void *module_alloc_update_bounds_rx(unsigned long size)
66821 +{
66822 + void *ret = module_alloc_exec(size);
66823 +
66824 + if (ret) {
66825 + mutex_lock(&module_mutex);
66826 + /* Update module bounds. */
66827 + if ((unsigned long)ret < module_addr_min_rx)
66828 + module_addr_min_rx = (unsigned long)ret;
66829 + if ((unsigned long)ret + size > module_addr_max_rx)
66830 + module_addr_max_rx = (unsigned long)ret + size;
66831 mutex_unlock(&module_mutex);
66832 }
66833 return ret;
66834 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
66835 static int check_modinfo(struct module *mod, struct load_info *info)
66836 {
66837 const char *modmagic = get_modinfo(info, "vermagic");
66838 + const char *license = get_modinfo(info, "license");
66839 int err;
66840
66841 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66842 + if (!license || !license_is_gpl_compatible(license))
66843 + return -ENOEXEC;
66844 +#endif
66845 +
66846 /* This is allowed: modprobe --force will invalidate it. */
66847 if (!modmagic) {
66848 err = try_to_force_load(mod, "bad vermagic");
66849 @@ -2495,7 +2538,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66850 }
66851
66852 /* Set up license info based on the info section */
66853 - set_license(mod, get_modinfo(info, "license"));
66854 + set_license(mod, license);
66855
66856 return 0;
66857 }
66858 @@ -2589,7 +2632,7 @@ static int move_module(struct module *mod, struct load_info *info)
66859 void *ptr;
66860
66861 /* Do the allocs. */
66862 - ptr = module_alloc_update_bounds(mod->core_size);
66863 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66864 /*
66865 * The pointer to this block is stored in the module structure
66866 * which is inside the block. Just mark it as not being a
66867 @@ -2599,23 +2642,50 @@ static int move_module(struct module *mod, struct load_info *info)
66868 if (!ptr)
66869 return -ENOMEM;
66870
66871 - memset(ptr, 0, mod->core_size);
66872 - mod->module_core = ptr;
66873 + memset(ptr, 0, mod->core_size_rw);
66874 + mod->module_core_rw = ptr;
66875
66876 - ptr = module_alloc_update_bounds(mod->init_size);
66877 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66878 /*
66879 * The pointer to this block is stored in the module structure
66880 * which is inside the block. This block doesn't need to be
66881 * scanned as it contains data and code that will be freed
66882 * after the module is initialized.
66883 */
66884 - kmemleak_ignore(ptr);
66885 - if (!ptr && mod->init_size) {
66886 - module_free(mod, mod->module_core);
66887 + kmemleak_not_leak(ptr);
66888 + if (!ptr && mod->init_size_rw) {
66889 + module_free(mod, mod->module_core_rw);
66890 + return -ENOMEM;
66891 + }
66892 + memset(ptr, 0, mod->init_size_rw);
66893 + mod->module_init_rw = ptr;
66894 +
66895 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66896 + kmemleak_not_leak(ptr);
66897 + if (!ptr) {
66898 + module_free(mod, mod->module_init_rw);
66899 + module_free(mod, mod->module_core_rw);
66900 return -ENOMEM;
66901 }
66902 - memset(ptr, 0, mod->init_size);
66903 - mod->module_init = ptr;
66904 +
66905 + pax_open_kernel();
66906 + memset(ptr, 0, mod->core_size_rx);
66907 + pax_close_kernel();
66908 + mod->module_core_rx = ptr;
66909 +
66910 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66911 + kmemleak_not_leak(ptr);
66912 + if (!ptr && mod->init_size_rx) {
66913 + module_free_exec(mod, mod->module_core_rx);
66914 + module_free(mod, mod->module_init_rw);
66915 + module_free(mod, mod->module_core_rw);
66916 + return -ENOMEM;
66917 + }
66918 +
66919 + pax_open_kernel();
66920 + memset(ptr, 0, mod->init_size_rx);
66921 + pax_close_kernel();
66922 + mod->module_init_rx = ptr;
66923
66924 /* Transfer each section which specifies SHF_ALLOC */
66925 DEBUGP("final section addresses:\n");
66926 @@ -2626,16 +2696,45 @@ static int move_module(struct module *mod, struct load_info *info)
66927 if (!(shdr->sh_flags & SHF_ALLOC))
66928 continue;
66929
66930 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66931 - dest = mod->module_init
66932 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66933 - else
66934 - dest = mod->module_core + shdr->sh_entsize;
66935 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66936 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66937 + dest = mod->module_init_rw
66938 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66939 + else
66940 + dest = mod->module_init_rx
66941 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66942 + } else {
66943 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66944 + dest = mod->module_core_rw + shdr->sh_entsize;
66945 + else
66946 + dest = mod->module_core_rx + shdr->sh_entsize;
66947 + }
66948 +
66949 + if (shdr->sh_type != SHT_NOBITS) {
66950 +
66951 +#ifdef CONFIG_PAX_KERNEXEC
66952 +#ifdef CONFIG_X86_64
66953 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66954 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66955 +#endif
66956 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66957 + pax_open_kernel();
66958 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66959 + pax_close_kernel();
66960 + } else
66961 +#endif
66962
66963 - if (shdr->sh_type != SHT_NOBITS)
66964 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66965 + }
66966 /* Update sh_addr to point to copy in image. */
66967 - shdr->sh_addr = (unsigned long)dest;
66968 +
66969 +#ifdef CONFIG_PAX_KERNEXEC
66970 + if (shdr->sh_flags & SHF_EXECINSTR)
66971 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66972 + else
66973 +#endif
66974 +
66975 + shdr->sh_addr = (unsigned long)dest;
66976 DEBUGP("\t0x%lx %s\n",
66977 shdr->sh_addr, info->secstrings + shdr->sh_name);
66978 }
66979 @@ -2686,12 +2785,12 @@ static void flush_module_icache(const struct module *mod)
66980 * Do it before processing of module parameters, so the module
66981 * can provide parameter accessor functions of its own.
66982 */
66983 - if (mod->module_init)
66984 - flush_icache_range((unsigned long)mod->module_init,
66985 - (unsigned long)mod->module_init
66986 - + mod->init_size);
66987 - flush_icache_range((unsigned long)mod->module_core,
66988 - (unsigned long)mod->module_core + mod->core_size);
66989 + if (mod->module_init_rx)
66990 + flush_icache_range((unsigned long)mod->module_init_rx,
66991 + (unsigned long)mod->module_init_rx
66992 + + mod->init_size_rx);
66993 + flush_icache_range((unsigned long)mod->module_core_rx,
66994 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66995
66996 set_fs(old_fs);
66997 }
66998 @@ -2771,8 +2870,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
66999 {
67000 kfree(info->strmap);
67001 percpu_modfree(mod);
67002 - module_free(mod, mod->module_init);
67003 - module_free(mod, mod->module_core);
67004 + module_free_exec(mod, mod->module_init_rx);
67005 + module_free_exec(mod, mod->module_core_rx);
67006 + module_free(mod, mod->module_init_rw);
67007 + module_free(mod, mod->module_core_rw);
67008 }
67009
67010 int __weak module_finalize(const Elf_Ehdr *hdr,
67011 @@ -2836,9 +2937,38 @@ static struct module *load_module(void __user *umod,
67012 if (err)
67013 goto free_unload;
67014
67015 + /* Now copy in args */
67016 + mod->args = strndup_user(uargs, ~0UL >> 1);
67017 + if (IS_ERR(mod->args)) {
67018 + err = PTR_ERR(mod->args);
67019 + goto free_unload;
67020 + }
67021 +
67022 /* Set up MODINFO_ATTR fields */
67023 setup_modinfo(mod, &info);
67024
67025 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67026 + {
67027 + char *p, *p2;
67028 +
67029 + if (strstr(mod->args, "grsec_modharden_netdev")) {
67030 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67031 + err = -EPERM;
67032 + goto free_modinfo;
67033 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67034 + p += strlen("grsec_modharden_normal");
67035 + p2 = strstr(p, "_");
67036 + if (p2) {
67037 + *p2 = '\0';
67038 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67039 + *p2 = '_';
67040 + }
67041 + err = -EPERM;
67042 + goto free_modinfo;
67043 + }
67044 + }
67045 +#endif
67046 +
67047 /* Fix up syms, so that st_value is a pointer to location. */
67048 err = simplify_symbols(mod, &info);
67049 if (err < 0)
67050 @@ -2854,13 +2984,6 @@ static struct module *load_module(void __user *umod,
67051
67052 flush_module_icache(mod);
67053
67054 - /* Now copy in args */
67055 - mod->args = strndup_user(uargs, ~0UL >> 1);
67056 - if (IS_ERR(mod->args)) {
67057 - err = PTR_ERR(mod->args);
67058 - goto free_arch_cleanup;
67059 - }
67060 -
67061 /* Mark state as coming so strong_try_module_get() ignores us. */
67062 mod->state = MODULE_STATE_COMING;
67063
67064 @@ -2920,11 +3043,10 @@ static struct module *load_module(void __user *umod,
67065 unlock:
67066 mutex_unlock(&module_mutex);
67067 synchronize_sched();
67068 - kfree(mod->args);
67069 - free_arch_cleanup:
67070 module_arch_cleanup(mod);
67071 free_modinfo:
67072 free_modinfo(mod);
67073 + kfree(mod->args);
67074 free_unload:
67075 module_unload_free(mod);
67076 free_module:
67077 @@ -2965,16 +3087,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67078 MODULE_STATE_COMING, mod);
67079
67080 /* Set RO and NX regions for core */
67081 - set_section_ro_nx(mod->module_core,
67082 - mod->core_text_size,
67083 - mod->core_ro_size,
67084 - mod->core_size);
67085 + set_section_ro_nx(mod->module_core_rx,
67086 + mod->core_size_rx,
67087 + mod->core_size_rx,
67088 + mod->core_size_rx);
67089
67090 /* Set RO and NX regions for init */
67091 - set_section_ro_nx(mod->module_init,
67092 - mod->init_text_size,
67093 - mod->init_ro_size,
67094 - mod->init_size);
67095 + set_section_ro_nx(mod->module_init_rx,
67096 + mod->init_size_rx,
67097 + mod->init_size_rx,
67098 + mod->init_size_rx);
67099
67100 do_mod_ctors(mod);
67101 /* Start the module */
67102 @@ -3020,11 +3142,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67103 mod->strtab = mod->core_strtab;
67104 #endif
67105 unset_module_init_ro_nx(mod);
67106 - module_free(mod, mod->module_init);
67107 - mod->module_init = NULL;
67108 - mod->init_size = 0;
67109 - mod->init_ro_size = 0;
67110 - mod->init_text_size = 0;
67111 + module_free(mod, mod->module_init_rw);
67112 + module_free_exec(mod, mod->module_init_rx);
67113 + mod->module_init_rw = NULL;
67114 + mod->module_init_rx = NULL;
67115 + mod->init_size_rw = 0;
67116 + mod->init_size_rx = 0;
67117 mutex_unlock(&module_mutex);
67118
67119 return 0;
67120 @@ -3055,10 +3178,16 @@ static const char *get_ksymbol(struct module *mod,
67121 unsigned long nextval;
67122
67123 /* At worse, next value is at end of module */
67124 - if (within_module_init(addr, mod))
67125 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
67126 + if (within_module_init_rx(addr, mod))
67127 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67128 + else if (within_module_init_rw(addr, mod))
67129 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67130 + else if (within_module_core_rx(addr, mod))
67131 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67132 + else if (within_module_core_rw(addr, mod))
67133 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67134 else
67135 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
67136 + return NULL;
67137
67138 /* Scan for closest preceding symbol, and next symbol. (ELF
67139 starts real symbols at 1). */
67140 @@ -3304,7 +3433,7 @@ static int m_show(struct seq_file *m, void *p)
67141 char buf[8];
67142
67143 seq_printf(m, "%s %u",
67144 - mod->name, mod->init_size + mod->core_size);
67145 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67146 print_unload_info(m, mod);
67147
67148 /* Informative for users. */
67149 @@ -3313,7 +3442,7 @@ static int m_show(struct seq_file *m, void *p)
67150 mod->state == MODULE_STATE_COMING ? "Loading":
67151 "Live");
67152 /* Used by oprofile and other similar tools. */
67153 - seq_printf(m, " 0x%pK", mod->module_core);
67154 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67155
67156 /* Taints info */
67157 if (mod->taints)
67158 @@ -3349,7 +3478,17 @@ static const struct file_operations proc_modules_operations = {
67159
67160 static int __init proc_modules_init(void)
67161 {
67162 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67163 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67164 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67165 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67166 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67167 +#else
67168 proc_create("modules", 0, NULL, &proc_modules_operations);
67169 +#endif
67170 +#else
67171 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67172 +#endif
67173 return 0;
67174 }
67175 module_init(proc_modules_init);
67176 @@ -3408,12 +3547,12 @@ struct module *__module_address(unsigned long addr)
67177 {
67178 struct module *mod;
67179
67180 - if (addr < module_addr_min || addr > module_addr_max)
67181 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67182 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
67183 return NULL;
67184
67185 list_for_each_entry_rcu(mod, &modules, list)
67186 - if (within_module_core(addr, mod)
67187 - || within_module_init(addr, mod))
67188 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
67189 return mod;
67190 return NULL;
67191 }
67192 @@ -3447,11 +3586,20 @@ bool is_module_text_address(unsigned long addr)
67193 */
67194 struct module *__module_text_address(unsigned long addr)
67195 {
67196 - struct module *mod = __module_address(addr);
67197 + struct module *mod;
67198 +
67199 +#ifdef CONFIG_X86_32
67200 + addr = ktla_ktva(addr);
67201 +#endif
67202 +
67203 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67204 + return NULL;
67205 +
67206 + mod = __module_address(addr);
67207 +
67208 if (mod) {
67209 /* Make sure it's within the text section. */
67210 - if (!within(addr, mod->module_init, mod->init_text_size)
67211 - && !within(addr, mod->module_core, mod->core_text_size))
67212 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67213 mod = NULL;
67214 }
67215 return mod;
67216 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67217 index 73da83a..fe46e99 100644
67218 --- a/kernel/mutex-debug.c
67219 +++ b/kernel/mutex-debug.c
67220 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67221 }
67222
67223 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67224 - struct thread_info *ti)
67225 + struct task_struct *task)
67226 {
67227 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67228
67229 /* Mark the current thread as blocked on the lock: */
67230 - ti->task->blocked_on = waiter;
67231 + task->blocked_on = waiter;
67232 }
67233
67234 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67235 - struct thread_info *ti)
67236 + struct task_struct *task)
67237 {
67238 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67239 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67240 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67241 - ti->task->blocked_on = NULL;
67242 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
67243 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67244 + task->blocked_on = NULL;
67245
67246 list_del_init(&waiter->list);
67247 waiter->task = NULL;
67248 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67249 index 0799fd3..d06ae3b 100644
67250 --- a/kernel/mutex-debug.h
67251 +++ b/kernel/mutex-debug.h
67252 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67253 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67254 extern void debug_mutex_add_waiter(struct mutex *lock,
67255 struct mutex_waiter *waiter,
67256 - struct thread_info *ti);
67257 + struct task_struct *task);
67258 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67259 - struct thread_info *ti);
67260 + struct task_struct *task);
67261 extern void debug_mutex_unlock(struct mutex *lock);
67262 extern void debug_mutex_init(struct mutex *lock, const char *name,
67263 struct lock_class_key *key);
67264 diff --git a/kernel/mutex.c b/kernel/mutex.c
67265 index d607ed5..58d0a52 100644
67266 --- a/kernel/mutex.c
67267 +++ b/kernel/mutex.c
67268 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67269 spin_lock_mutex(&lock->wait_lock, flags);
67270
67271 debug_mutex_lock_common(lock, &waiter);
67272 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67273 + debug_mutex_add_waiter(lock, &waiter, task);
67274
67275 /* add waiting tasks to the end of the waitqueue (FIFO): */
67276 list_add_tail(&waiter.list, &lock->wait_list);
67277 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67278 * TASK_UNINTERRUPTIBLE case.)
67279 */
67280 if (unlikely(signal_pending_state(state, task))) {
67281 - mutex_remove_waiter(lock, &waiter,
67282 - task_thread_info(task));
67283 + mutex_remove_waiter(lock, &waiter, task);
67284 mutex_release(&lock->dep_map, 1, ip);
67285 spin_unlock_mutex(&lock->wait_lock, flags);
67286
67287 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67288 done:
67289 lock_acquired(&lock->dep_map, ip);
67290 /* got the lock - rejoice! */
67291 - mutex_remove_waiter(lock, &waiter, current_thread_info());
67292 + mutex_remove_waiter(lock, &waiter, task);
67293 mutex_set_owner(lock);
67294
67295 /* set it to 0 if there are no waiters left: */
67296 diff --git a/kernel/padata.c b/kernel/padata.c
67297 index b91941d..0871d60 100644
67298 --- a/kernel/padata.c
67299 +++ b/kernel/padata.c
67300 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
67301 padata->pd = pd;
67302 padata->cb_cpu = cb_cpu;
67303
67304 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
67305 - atomic_set(&pd->seq_nr, -1);
67306 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
67307 + atomic_set_unchecked(&pd->seq_nr, -1);
67308
67309 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
67310 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
67311
67312 target_cpu = padata_cpu_hash(padata);
67313 queue = per_cpu_ptr(pd->pqueue, target_cpu);
67314 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
67315 padata_init_pqueues(pd);
67316 padata_init_squeues(pd);
67317 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
67318 - atomic_set(&pd->seq_nr, -1);
67319 + atomic_set_unchecked(&pd->seq_nr, -1);
67320 atomic_set(&pd->reorder_objects, 0);
67321 atomic_set(&pd->refcnt, 0);
67322 pd->pinst = pinst;
67323 diff --git a/kernel/panic.c b/kernel/panic.c
67324 index d7bb697..9ef9f19 100644
67325 --- a/kernel/panic.c
67326 +++ b/kernel/panic.c
67327 @@ -371,7 +371,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67328 const char *board;
67329
67330 printk(KERN_WARNING "------------[ cut here ]------------\n");
67331 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67332 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67333 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67334 if (board)
67335 printk(KERN_WARNING "Hardware name: %s\n", board);
67336 @@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67337 */
67338 void __stack_chk_fail(void)
67339 {
67340 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
67341 + dump_stack();
67342 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67343 __builtin_return_address(0));
67344 }
67345 EXPORT_SYMBOL(__stack_chk_fail);
67346 diff --git a/kernel/pid.c b/kernel/pid.c
67347 index e432057..a2b2ac5 100644
67348 --- a/kernel/pid.c
67349 +++ b/kernel/pid.c
67350 @@ -33,6 +33,7 @@
67351 #include <linux/rculist.h>
67352 #include <linux/bootmem.h>
67353 #include <linux/hash.h>
67354 +#include <linux/security.h>
67355 #include <linux/pid_namespace.h>
67356 #include <linux/init_task.h>
67357 #include <linux/syscalls.h>
67358 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67359
67360 int pid_max = PID_MAX_DEFAULT;
67361
67362 -#define RESERVED_PIDS 300
67363 +#define RESERVED_PIDS 500
67364
67365 int pid_max_min = RESERVED_PIDS + 1;
67366 int pid_max_max = PID_MAX_LIMIT;
67367 @@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
67368 */
67369 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67370 {
67371 + struct task_struct *task;
67372 +
67373 rcu_lockdep_assert(rcu_read_lock_held());
67374 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67375 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67376 +
67377 + if (gr_pid_is_chrooted(task))
67378 + return NULL;
67379 +
67380 + return task;
67381 }
67382
67383 struct task_struct *find_task_by_vpid(pid_t vnr)
67384 @@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67385 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67386 }
67387
67388 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67389 +{
67390 + rcu_lockdep_assert(rcu_read_lock_held());
67391 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67392 +}
67393 +
67394 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67395 {
67396 struct pid *pid;
67397 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67398 index 640ded8..3dafb85 100644
67399 --- a/kernel/posix-cpu-timers.c
67400 +++ b/kernel/posix-cpu-timers.c
67401 @@ -6,6 +6,7 @@
67402 #include <linux/posix-timers.h>
67403 #include <linux/errno.h>
67404 #include <linux/math64.h>
67405 +#include <linux/security.h>
67406 #include <asm/uaccess.h>
67407 #include <linux/kernel_stat.h>
67408 #include <trace/events/timer.h>
67409 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
67410
67411 static __init int init_posix_cpu_timers(void)
67412 {
67413 - struct k_clock process = {
67414 + static struct k_clock process = {
67415 .clock_getres = process_cpu_clock_getres,
67416 .clock_get = process_cpu_clock_get,
67417 .timer_create = process_cpu_timer_create,
67418 .nsleep = process_cpu_nsleep,
67419 .nsleep_restart = process_cpu_nsleep_restart,
67420 };
67421 - struct k_clock thread = {
67422 + static struct k_clock thread = {
67423 .clock_getres = thread_cpu_clock_getres,
67424 .clock_get = thread_cpu_clock_get,
67425 .timer_create = thread_cpu_timer_create,
67426 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67427 index 4556182..9335419 100644
67428 --- a/kernel/posix-timers.c
67429 +++ b/kernel/posix-timers.c
67430 @@ -43,6 +43,7 @@
67431 #include <linux/idr.h>
67432 #include <linux/posix-clock.h>
67433 #include <linux/posix-timers.h>
67434 +#include <linux/grsecurity.h>
67435 #include <linux/syscalls.h>
67436 #include <linux/wait.h>
67437 #include <linux/workqueue.h>
67438 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67439 * which we beg off on and pass to do_sys_settimeofday().
67440 */
67441
67442 -static struct k_clock posix_clocks[MAX_CLOCKS];
67443 +static struct k_clock *posix_clocks[MAX_CLOCKS];
67444
67445 /*
67446 * These ones are defined below.
67447 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67448 */
67449 static __init int init_posix_timers(void)
67450 {
67451 - struct k_clock clock_realtime = {
67452 + static struct k_clock clock_realtime = {
67453 .clock_getres = hrtimer_get_res,
67454 .clock_get = posix_clock_realtime_get,
67455 .clock_set = posix_clock_realtime_set,
67456 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67457 .timer_get = common_timer_get,
67458 .timer_del = common_timer_del,
67459 };
67460 - struct k_clock clock_monotonic = {
67461 + static struct k_clock clock_monotonic = {
67462 .clock_getres = hrtimer_get_res,
67463 .clock_get = posix_ktime_get_ts,
67464 .nsleep = common_nsleep,
67465 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67466 .timer_get = common_timer_get,
67467 .timer_del = common_timer_del,
67468 };
67469 - struct k_clock clock_monotonic_raw = {
67470 + static struct k_clock clock_monotonic_raw = {
67471 .clock_getres = hrtimer_get_res,
67472 .clock_get = posix_get_monotonic_raw,
67473 };
67474 - struct k_clock clock_realtime_coarse = {
67475 + static struct k_clock clock_realtime_coarse = {
67476 .clock_getres = posix_get_coarse_res,
67477 .clock_get = posix_get_realtime_coarse,
67478 };
67479 - struct k_clock clock_monotonic_coarse = {
67480 + static struct k_clock clock_monotonic_coarse = {
67481 .clock_getres = posix_get_coarse_res,
67482 .clock_get = posix_get_monotonic_coarse,
67483 };
67484 - struct k_clock clock_boottime = {
67485 + static struct k_clock clock_boottime = {
67486 .clock_getres = hrtimer_get_res,
67487 .clock_get = posix_get_boottime,
67488 .nsleep = common_nsleep,
67489 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void)
67490 .timer_del = common_timer_del,
67491 };
67492
67493 + pax_track_stack();
67494 +
67495 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
67496 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
67497 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
67498 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67499 return;
67500 }
67501
67502 - posix_clocks[clock_id] = *new_clock;
67503 + posix_clocks[clock_id] = new_clock;
67504 }
67505 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67506
67507 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67508 return (id & CLOCKFD_MASK) == CLOCKFD ?
67509 &clock_posix_dynamic : &clock_posix_cpu;
67510
67511 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67512 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67513 return NULL;
67514 - return &posix_clocks[id];
67515 + return posix_clocks[id];
67516 }
67517
67518 static int common_timer_create(struct k_itimer *new_timer)
67519 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67520 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67521 return -EFAULT;
67522
67523 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67524 + have their clock_set fptr set to a nosettime dummy function
67525 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67526 + call common_clock_set, which calls do_sys_settimeofday, which
67527 + we hook
67528 + */
67529 +
67530 return kc->clock_set(which_clock, &new_tp);
67531 }
67532
67533 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67534 index d523593..68197a4 100644
67535 --- a/kernel/power/poweroff.c
67536 +++ b/kernel/power/poweroff.c
67537 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67538 .enable_mask = SYSRQ_ENABLE_BOOT,
67539 };
67540
67541 -static int pm_sysrq_init(void)
67542 +static int __init pm_sysrq_init(void)
67543 {
67544 register_sysrq_key('o', &sysrq_poweroff_op);
67545 return 0;
67546 diff --git a/kernel/power/process.c b/kernel/power/process.c
67547 index 0cf3a27..5481be4 100644
67548 --- a/kernel/power/process.c
67549 +++ b/kernel/power/process.c
67550 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
67551 u64 elapsed_csecs64;
67552 unsigned int elapsed_csecs;
67553 bool wakeup = false;
67554 + bool timedout = false;
67555
67556 do_gettimeofday(&start);
67557
67558 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
67559
67560 while (true) {
67561 todo = 0;
67562 + if (time_after(jiffies, end_time))
67563 + timedout = true;
67564 read_lock(&tasklist_lock);
67565 do_each_thread(g, p) {
67566 if (frozen(p) || !freezable(p))
67567 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
67568 * try_to_stop() after schedule() in ptrace/signal
67569 * stop sees TIF_FREEZE.
67570 */
67571 - if (!task_is_stopped_or_traced(p) &&
67572 - !freezer_should_skip(p))
67573 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67574 todo++;
67575 + if (timedout) {
67576 + printk(KERN_ERR "Task refusing to freeze:\n");
67577 + sched_show_task(p);
67578 + }
67579 + }
67580 } while_each_thread(g, p);
67581 read_unlock(&tasklist_lock);
67582
67583 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
67584 todo += wq_busy;
67585 }
67586
67587 - if (!todo || time_after(jiffies, end_time))
67588 + if (!todo || timedout)
67589 break;
67590
67591 if (pm_wakeup_pending()) {
67592 diff --git a/kernel/printk.c b/kernel/printk.c
67593 index 28a40d8..2411bec 100644
67594 --- a/kernel/printk.c
67595 +++ b/kernel/printk.c
67596 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
67597 if (from_file && type != SYSLOG_ACTION_OPEN)
67598 return 0;
67599
67600 +#ifdef CONFIG_GRKERNSEC_DMESG
67601 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67602 + return -EPERM;
67603 +#endif
67604 +
67605 if (syslog_action_restricted(type)) {
67606 if (capable(CAP_SYSLOG))
67607 return 0;
67608 diff --git a/kernel/profile.c b/kernel/profile.c
67609 index 961b389..c451353 100644
67610 --- a/kernel/profile.c
67611 +++ b/kernel/profile.c
67612 @@ -39,7 +39,7 @@ struct profile_hit {
67613 /* Oprofile timer tick hook */
67614 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67615
67616 -static atomic_t *prof_buffer;
67617 +static atomic_unchecked_t *prof_buffer;
67618 static unsigned long prof_len, prof_shift;
67619
67620 int prof_on __read_mostly;
67621 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67622 hits[i].pc = 0;
67623 continue;
67624 }
67625 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67626 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67627 hits[i].hits = hits[i].pc = 0;
67628 }
67629 }
67630 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67631 * Add the current hit(s) and flush the write-queue out
67632 * to the global buffer:
67633 */
67634 - atomic_add(nr_hits, &prof_buffer[pc]);
67635 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67636 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67637 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67638 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67639 hits[i].pc = hits[i].hits = 0;
67640 }
67641 out:
67642 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67643 {
67644 unsigned long pc;
67645 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67646 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67647 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67648 }
67649 #endif /* !CONFIG_SMP */
67650
67651 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67652 return -EFAULT;
67653 buf++; p++; count--; read++;
67654 }
67655 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67656 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67657 if (copy_to_user(buf, (void *)pnt, count))
67658 return -EFAULT;
67659 read += count;
67660 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67661 }
67662 #endif
67663 profile_discard_flip_buffers();
67664 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67665 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67666 return count;
67667 }
67668
67669 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67670 index a70d2a5..cbd4b4f 100644
67671 --- a/kernel/ptrace.c
67672 +++ b/kernel/ptrace.c
67673 @@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
67674 return ret;
67675 }
67676
67677 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67678 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
67679 + unsigned int log)
67680 {
67681 const struct cred *cred = current_cred(), *tcred;
67682
67683 @@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67684 cred->gid == tcred->sgid &&
67685 cred->gid == tcred->gid))
67686 goto ok;
67687 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
67688 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
67689 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
67690 goto ok;
67691 rcu_read_unlock();
67692 return -EPERM;
67693 @@ -196,7 +198,9 @@ ok:
67694 smp_rmb();
67695 if (task->mm)
67696 dumpable = get_dumpable(task->mm);
67697 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
67698 + if (!dumpable &&
67699 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
67700 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
67701 return -EPERM;
67702
67703 return security_ptrace_access_check(task, mode);
67704 @@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
67705 {
67706 int err;
67707 task_lock(task);
67708 - err = __ptrace_may_access(task, mode);
67709 + err = __ptrace_may_access(task, mode, 0);
67710 + task_unlock(task);
67711 + return !err;
67712 +}
67713 +
67714 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
67715 +{
67716 + int err;
67717 + task_lock(task);
67718 + err = __ptrace_may_access(task, mode, 1);
67719 task_unlock(task);
67720 return !err;
67721 }
67722 @@ -251,7 +264,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67723 goto out;
67724
67725 task_lock(task);
67726 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
67727 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
67728 task_unlock(task);
67729 if (retval)
67730 goto unlock_creds;
67731 @@ -266,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67732 task->ptrace = PT_PTRACED;
67733 if (seize)
67734 task->ptrace |= PT_SEIZED;
67735 - if (task_ns_capable(task, CAP_SYS_PTRACE))
67736 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
67737 task->ptrace |= PT_PTRACE_CAP;
67738
67739 __ptrace_link(task, current);
67740 @@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67741 {
67742 int copied = 0;
67743
67744 + pax_track_stack();
67745 +
67746 while (len > 0) {
67747 char buf[128];
67748 int this_len, retval;
67749 @@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67750 break;
67751 return -EIO;
67752 }
67753 - if (copy_to_user(dst, buf, retval))
67754 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67755 return -EFAULT;
67756 copied += retval;
67757 src += retval;
67758 @@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
67759 {
67760 int copied = 0;
67761
67762 + pax_track_stack();
67763 +
67764 while (len > 0) {
67765 char buf[128];
67766 int this_len, retval;
67767 @@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *child, long request,
67768 bool seized = child->ptrace & PT_SEIZED;
67769 int ret = -EIO;
67770 siginfo_t siginfo, *si;
67771 - void __user *datavp = (void __user *) data;
67772 + void __user *datavp = (__force void __user *) data;
67773 unsigned long __user *datalp = datavp;
67774 unsigned long flags;
67775
67776 + pax_track_stack();
67777 +
67778 switch (request) {
67779 case PTRACE_PEEKTEXT:
67780 case PTRACE_PEEKDATA:
67781 @@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67782 goto out;
67783 }
67784
67785 + if (gr_handle_ptrace(child, request)) {
67786 + ret = -EPERM;
67787 + goto out_put_task_struct;
67788 + }
67789 +
67790 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67791 ret = ptrace_attach(child, request, data);
67792 /*
67793 * Some architectures need to do book-keeping after
67794 * a ptrace attach.
67795 */
67796 - if (!ret)
67797 + if (!ret) {
67798 arch_ptrace_attach(child);
67799 + gr_audit_ptrace(child);
67800 + }
67801 goto out_put_task_struct;
67802 }
67803
67804 @@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67805 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67806 if (copied != sizeof(tmp))
67807 return -EIO;
67808 - return put_user(tmp, (unsigned long __user *)data);
67809 + return put_user(tmp, (__force unsigned long __user *)data);
67810 }
67811
67812 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67813 @@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
67814 siginfo_t siginfo;
67815 int ret;
67816
67817 + pax_track_stack();
67818 +
67819 switch (request) {
67820 case PTRACE_PEEKTEXT:
67821 case PTRACE_PEEKDATA:
67822 @@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67823 goto out;
67824 }
67825
67826 + if (gr_handle_ptrace(child, request)) {
67827 + ret = -EPERM;
67828 + goto out_put_task_struct;
67829 + }
67830 +
67831 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67832 ret = ptrace_attach(child, request, data);
67833 /*
67834 * Some architectures need to do book-keeping after
67835 * a ptrace attach.
67836 */
67837 - if (!ret)
67838 + if (!ret) {
67839 arch_ptrace_attach(child);
67840 + gr_audit_ptrace(child);
67841 + }
67842 goto out_put_task_struct;
67843 }
67844
67845 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67846 index 98f51b1..30b950c 100644
67847 --- a/kernel/rcutorture.c
67848 +++ b/kernel/rcutorture.c
67849 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67850 { 0 };
67851 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67852 { 0 };
67853 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67854 -static atomic_t n_rcu_torture_alloc;
67855 -static atomic_t n_rcu_torture_alloc_fail;
67856 -static atomic_t n_rcu_torture_free;
67857 -static atomic_t n_rcu_torture_mberror;
67858 -static atomic_t n_rcu_torture_error;
67859 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67860 +static atomic_unchecked_t n_rcu_torture_alloc;
67861 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67862 +static atomic_unchecked_t n_rcu_torture_free;
67863 +static atomic_unchecked_t n_rcu_torture_mberror;
67864 +static atomic_unchecked_t n_rcu_torture_error;
67865 static long n_rcu_torture_boost_ktrerror;
67866 static long n_rcu_torture_boost_rterror;
67867 static long n_rcu_torture_boost_failure;
67868 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
67869
67870 spin_lock_bh(&rcu_torture_lock);
67871 if (list_empty(&rcu_torture_freelist)) {
67872 - atomic_inc(&n_rcu_torture_alloc_fail);
67873 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67874 spin_unlock_bh(&rcu_torture_lock);
67875 return NULL;
67876 }
67877 - atomic_inc(&n_rcu_torture_alloc);
67878 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67879 p = rcu_torture_freelist.next;
67880 list_del_init(p);
67881 spin_unlock_bh(&rcu_torture_lock);
67882 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
67883 static void
67884 rcu_torture_free(struct rcu_torture *p)
67885 {
67886 - atomic_inc(&n_rcu_torture_free);
67887 + atomic_inc_unchecked(&n_rcu_torture_free);
67888 spin_lock_bh(&rcu_torture_lock);
67889 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67890 spin_unlock_bh(&rcu_torture_lock);
67891 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
67892 i = rp->rtort_pipe_count;
67893 if (i > RCU_TORTURE_PIPE_LEN)
67894 i = RCU_TORTURE_PIPE_LEN;
67895 - atomic_inc(&rcu_torture_wcount[i]);
67896 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67897 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67898 rp->rtort_mbtest = 0;
67899 rcu_torture_free(rp);
67900 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67901 i = rp->rtort_pipe_count;
67902 if (i > RCU_TORTURE_PIPE_LEN)
67903 i = RCU_TORTURE_PIPE_LEN;
67904 - atomic_inc(&rcu_torture_wcount[i]);
67905 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67906 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67907 rp->rtort_mbtest = 0;
67908 list_del(&rp->rtort_free);
67909 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
67910 i = old_rp->rtort_pipe_count;
67911 if (i > RCU_TORTURE_PIPE_LEN)
67912 i = RCU_TORTURE_PIPE_LEN;
67913 - atomic_inc(&rcu_torture_wcount[i]);
67914 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67915 old_rp->rtort_pipe_count++;
67916 cur_ops->deferred_free(old_rp);
67917 }
67918 @@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned long unused)
67919 return;
67920 }
67921 if (p->rtort_mbtest == 0)
67922 - atomic_inc(&n_rcu_torture_mberror);
67923 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67924 spin_lock(&rand_lock);
67925 cur_ops->read_delay(&rand);
67926 n_rcu_torture_timers++;
67927 @@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
67928 continue;
67929 }
67930 if (p->rtort_mbtest == 0)
67931 - atomic_inc(&n_rcu_torture_mberror);
67932 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67933 cur_ops->read_delay(&rand);
67934 preempt_disable();
67935 pipe_count = p->rtort_pipe_count;
67936 @@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
67937 rcu_torture_current,
67938 rcu_torture_current_version,
67939 list_empty(&rcu_torture_freelist),
67940 - atomic_read(&n_rcu_torture_alloc),
67941 - atomic_read(&n_rcu_torture_alloc_fail),
67942 - atomic_read(&n_rcu_torture_free),
67943 - atomic_read(&n_rcu_torture_mberror),
67944 + atomic_read_unchecked(&n_rcu_torture_alloc),
67945 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67946 + atomic_read_unchecked(&n_rcu_torture_free),
67947 + atomic_read_unchecked(&n_rcu_torture_mberror),
67948 n_rcu_torture_boost_ktrerror,
67949 n_rcu_torture_boost_rterror,
67950 n_rcu_torture_boost_failure,
67951 n_rcu_torture_boosts,
67952 n_rcu_torture_timers);
67953 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67954 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67955 n_rcu_torture_boost_ktrerror != 0 ||
67956 n_rcu_torture_boost_rterror != 0 ||
67957 n_rcu_torture_boost_failure != 0)
67958 @@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
67959 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67960 if (i > 1) {
67961 cnt += sprintf(&page[cnt], "!!! ");
67962 - atomic_inc(&n_rcu_torture_error);
67963 + atomic_inc_unchecked(&n_rcu_torture_error);
67964 WARN_ON_ONCE(1);
67965 }
67966 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67967 @@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
67968 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67969 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67970 cnt += sprintf(&page[cnt], " %d",
67971 - atomic_read(&rcu_torture_wcount[i]));
67972 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67973 }
67974 cnt += sprintf(&page[cnt], "\n");
67975 if (cur_ops->stats)
67976 @@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
67977
67978 if (cur_ops->cleanup)
67979 cur_ops->cleanup();
67980 - if (atomic_read(&n_rcu_torture_error))
67981 + if (atomic_read_unchecked(&n_rcu_torture_error))
67982 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67983 else
67984 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
67985 @@ -1474,17 +1474,17 @@ rcu_torture_init(void)
67986
67987 rcu_torture_current = NULL;
67988 rcu_torture_current_version = 0;
67989 - atomic_set(&n_rcu_torture_alloc, 0);
67990 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67991 - atomic_set(&n_rcu_torture_free, 0);
67992 - atomic_set(&n_rcu_torture_mberror, 0);
67993 - atomic_set(&n_rcu_torture_error, 0);
67994 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67995 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67996 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67997 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67998 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67999 n_rcu_torture_boost_ktrerror = 0;
68000 n_rcu_torture_boost_rterror = 0;
68001 n_rcu_torture_boost_failure = 0;
68002 n_rcu_torture_boosts = 0;
68003 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68004 - atomic_set(&rcu_torture_wcount[i], 0);
68005 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68006 for_each_possible_cpu(cpu) {
68007 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68008 per_cpu(rcu_torture_count, cpu)[i] = 0;
68009 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68010 index ba06207..85d8ba8 100644
68011 --- a/kernel/rcutree.c
68012 +++ b/kernel/rcutree.c
68013 @@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
68014 }
68015 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68016 smp_mb__before_atomic_inc(); /* See above. */
68017 - atomic_inc(&rdtp->dynticks);
68018 + atomic_inc_unchecked(&rdtp->dynticks);
68019 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68020 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68021 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68022 local_irq_restore(flags);
68023
68024 /* If the interrupt queued a callback, get out of dyntick mode. */
68025 @@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
68026 return;
68027 }
68028 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68029 - atomic_inc(&rdtp->dynticks);
68030 + atomic_inc_unchecked(&rdtp->dynticks);
68031 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68032 smp_mb__after_atomic_inc(); /* See above. */
68033 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68034 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68035 local_irq_restore(flags);
68036 }
68037
68038 @@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
68039 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68040
68041 if (rdtp->dynticks_nmi_nesting == 0 &&
68042 - (atomic_read(&rdtp->dynticks) & 0x1))
68043 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68044 return;
68045 rdtp->dynticks_nmi_nesting++;
68046 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68047 - atomic_inc(&rdtp->dynticks);
68048 + atomic_inc_unchecked(&rdtp->dynticks);
68049 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68050 smp_mb__after_atomic_inc(); /* See above. */
68051 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68052 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68053 }
68054
68055 /**
68056 @@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
68057 return;
68058 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68059 smp_mb__before_atomic_inc(); /* See above. */
68060 - atomic_inc(&rdtp->dynticks);
68061 + atomic_inc_unchecked(&rdtp->dynticks);
68062 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68063 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68064 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68065 }
68066
68067 /**
68068 @@ -469,7 +469,7 @@ void rcu_irq_exit(void)
68069 */
68070 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68071 {
68072 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68073 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68074 return 0;
68075 }
68076
68077 @@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68078 unsigned long curr;
68079 unsigned long snap;
68080
68081 - curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
68082 + curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68083 snap = (unsigned long)rdp->dynticks_snap;
68084
68085 /*
68086 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68087 /*
68088 * Do softirq processing for the current CPU.
68089 */
68090 -static void rcu_process_callbacks(struct softirq_action *unused)
68091 +static void rcu_process_callbacks(void)
68092 {
68093 __rcu_process_callbacks(&rcu_sched_state,
68094 &__get_cpu_var(rcu_sched_data));
68095 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68096 index 01b2ccd..4f5d80a 100644
68097 --- a/kernel/rcutree.h
68098 +++ b/kernel/rcutree.h
68099 @@ -86,7 +86,7 @@
68100 struct rcu_dynticks {
68101 int dynticks_nesting; /* Track irq/process nesting level. */
68102 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68103 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
68104 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
68105 };
68106
68107 /* RCU's kthread states for tracing. */
68108 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68109 index 8aafbb8..2fca109 100644
68110 --- a/kernel/rcutree_plugin.h
68111 +++ b/kernel/rcutree_plugin.h
68112 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
68113
68114 /* Clean up and exit. */
68115 smp_mb(); /* ensure expedited GP seen before counter increment. */
68116 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68117 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68118 unlock_mb_ret:
68119 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68120 mb_ret:
68121 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
68122
68123 #else /* #ifndef CONFIG_SMP */
68124
68125 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68126 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68127 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68128 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68129
68130 static int synchronize_sched_expedited_cpu_stop(void *data)
68131 {
68132 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
68133 int firstsnap, s, snap, trycount = 0;
68134
68135 /* Note that atomic_inc_return() implies full memory barrier. */
68136 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68137 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68138 get_online_cpus();
68139
68140 /*
68141 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
68142 }
68143
68144 /* Check to see if someone else did our work for us. */
68145 - s = atomic_read(&sync_sched_expedited_done);
68146 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68147 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68148 smp_mb(); /* ensure test happens before caller kfree */
68149 return;
68150 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
68151 * grace period works for us.
68152 */
68153 get_online_cpus();
68154 - snap = atomic_read(&sync_sched_expedited_started) - 1;
68155 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
68156 smp_mb(); /* ensure read is before try_stop_cpus(). */
68157 }
68158
68159 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
68160 * than we did beat us to the punch.
68161 */
68162 do {
68163 - s = atomic_read(&sync_sched_expedited_done);
68164 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68165 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68166 smp_mb(); /* ensure test happens before caller kfree */
68167 break;
68168 }
68169 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68170 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68171
68172 put_online_cpus();
68173 }
68174 @@ -1953,7 +1953,7 @@ int rcu_needs_cpu(int cpu)
68175 for_each_online_cpu(thatcpu) {
68176 if (thatcpu == cpu)
68177 continue;
68178 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
68179 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
68180 thatcpu).dynticks);
68181 smp_mb(); /* Order sampling of snap with end of grace period. */
68182 if ((snap & 0x1) != 0) {
68183 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68184 index 3b0c098..43ba2d8 100644
68185 --- a/kernel/rcutree_trace.c
68186 +++ b/kernel/rcutree_trace.c
68187 @@ -74,7 +74,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68188 rdp->qs_pending);
68189 #ifdef CONFIG_NO_HZ
68190 seq_printf(m, " dt=%d/%d/%d df=%lu",
68191 - atomic_read(&rdp->dynticks->dynticks),
68192 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68193 rdp->dynticks->dynticks_nesting,
68194 rdp->dynticks->dynticks_nmi_nesting,
68195 rdp->dynticks_fqs);
68196 @@ -148,7 +148,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68197 rdp->qs_pending);
68198 #ifdef CONFIG_NO_HZ
68199 seq_printf(m, ",%d,%d,%d,%lu",
68200 - atomic_read(&rdp->dynticks->dynticks),
68201 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68202 rdp->dynticks->dynticks_nesting,
68203 rdp->dynticks->dynticks_nmi_nesting,
68204 rdp->dynticks_fqs);
68205 diff --git a/kernel/relay.c b/kernel/relay.c
68206 index 859ea5a..096e2fe 100644
68207 --- a/kernel/relay.c
68208 +++ b/kernel/relay.c
68209 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
68210 };
68211 ssize_t ret;
68212
68213 + pax_track_stack();
68214 +
68215 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
68216 return 0;
68217 if (splice_grow_spd(pipe, &spd))
68218 diff --git a/kernel/resource.c b/kernel/resource.c
68219 index c8dc249..f1e2359 100644
68220 --- a/kernel/resource.c
68221 +++ b/kernel/resource.c
68222 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68223
68224 static int __init ioresources_init(void)
68225 {
68226 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68227 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68228 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68229 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68230 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68231 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68232 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68233 +#endif
68234 +#else
68235 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68236 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68237 +#endif
68238 return 0;
68239 }
68240 __initcall(ioresources_init);
68241 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68242 index 5c9ccd3..a35e22b 100644
68243 --- a/kernel/rtmutex-tester.c
68244 +++ b/kernel/rtmutex-tester.c
68245 @@ -20,7 +20,7 @@
68246 #define MAX_RT_TEST_MUTEXES 8
68247
68248 static spinlock_t rttest_lock;
68249 -static atomic_t rttest_event;
68250 +static atomic_unchecked_t rttest_event;
68251
68252 struct test_thread_data {
68253 int opcode;
68254 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68255
68256 case RTTEST_LOCKCONT:
68257 td->mutexes[td->opdata] = 1;
68258 - td->event = atomic_add_return(1, &rttest_event);
68259 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68260 return 0;
68261
68262 case RTTEST_RESET:
68263 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68264 return 0;
68265
68266 case RTTEST_RESETEVENT:
68267 - atomic_set(&rttest_event, 0);
68268 + atomic_set_unchecked(&rttest_event, 0);
68269 return 0;
68270
68271 default:
68272 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68273 return ret;
68274
68275 td->mutexes[id] = 1;
68276 - td->event = atomic_add_return(1, &rttest_event);
68277 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68278 rt_mutex_lock(&mutexes[id]);
68279 - td->event = atomic_add_return(1, &rttest_event);
68280 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68281 td->mutexes[id] = 4;
68282 return 0;
68283
68284 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68285 return ret;
68286
68287 td->mutexes[id] = 1;
68288 - td->event = atomic_add_return(1, &rttest_event);
68289 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68290 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68291 - td->event = atomic_add_return(1, &rttest_event);
68292 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68293 td->mutexes[id] = ret ? 0 : 4;
68294 return ret ? -EINTR : 0;
68295
68296 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68297 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68298 return ret;
68299
68300 - td->event = atomic_add_return(1, &rttest_event);
68301 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68302 rt_mutex_unlock(&mutexes[id]);
68303 - td->event = atomic_add_return(1, &rttest_event);
68304 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68305 td->mutexes[id] = 0;
68306 return 0;
68307
68308 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68309 break;
68310
68311 td->mutexes[dat] = 2;
68312 - td->event = atomic_add_return(1, &rttest_event);
68313 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68314 break;
68315
68316 default:
68317 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68318 return;
68319
68320 td->mutexes[dat] = 3;
68321 - td->event = atomic_add_return(1, &rttest_event);
68322 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68323 break;
68324
68325 case RTTEST_LOCKNOWAIT:
68326 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68327 return;
68328
68329 td->mutexes[dat] = 1;
68330 - td->event = atomic_add_return(1, &rttest_event);
68331 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68332 return;
68333
68334 default:
68335 diff --git a/kernel/sched.c b/kernel/sched.c
68336 index b50b0f0..1c6c591 100644
68337 --- a/kernel/sched.c
68338 +++ b/kernel/sched.c
68339 @@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
68340 struct rq *rq;
68341 int cpu;
68342
68343 + pax_track_stack();
68344 +
68345 need_resched:
68346 preempt_disable();
68347 cpu = smp_processor_id();
68348 @@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p, const int nice)
68349 /* convert nice value [19,-20] to rlimit style value [1,40] */
68350 int nice_rlim = 20 - nice;
68351
68352 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68353 +
68354 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68355 capable(CAP_SYS_NICE));
68356 }
68357 @@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68358 if (nice > 19)
68359 nice = 19;
68360
68361 - if (increment < 0 && !can_nice(current, nice))
68362 + if (increment < 0 && (!can_nice(current, nice) ||
68363 + gr_handle_chroot_nice()))
68364 return -EPERM;
68365
68366 retval = security_task_setnice(current, nice);
68367 @@ -5127,6 +5132,7 @@ recheck:
68368 unsigned long rlim_rtprio =
68369 task_rlimit(p, RLIMIT_RTPRIO);
68370
68371 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68372 /* can't set/change the rt policy */
68373 if (policy != p->policy && !rlim_rtprio)
68374 return -EPERM;
68375 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
68376 index 429242f..d7cca82 100644
68377 --- a/kernel/sched_autogroup.c
68378 +++ b/kernel/sched_autogroup.c
68379 @@ -7,7 +7,7 @@
68380
68381 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68382 static struct autogroup autogroup_default;
68383 -static atomic_t autogroup_seq_nr;
68384 +static atomic_unchecked_t autogroup_seq_nr;
68385
68386 static void __init autogroup_init(struct task_struct *init_task)
68387 {
68388 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68389
68390 kref_init(&ag->kref);
68391 init_rwsem(&ag->lock);
68392 - ag->id = atomic_inc_return(&autogroup_seq_nr);
68393 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68394 ag->tg = tg;
68395 #ifdef CONFIG_RT_GROUP_SCHED
68396 /*
68397 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
68398 index bc8ee99..b6f6492 100644
68399 --- a/kernel/sched_fair.c
68400 +++ b/kernel/sched_fair.c
68401 @@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68402 * run_rebalance_domains is triggered when needed from the scheduler tick.
68403 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68404 */
68405 -static void run_rebalance_domains(struct softirq_action *h)
68406 +static void run_rebalance_domains(void)
68407 {
68408 int this_cpu = smp_processor_id();
68409 struct rq *this_rq = cpu_rq(this_cpu);
68410 diff --git a/kernel/signal.c b/kernel/signal.c
68411 index 291c970..304bd03 100644
68412 --- a/kernel/signal.c
68413 +++ b/kernel/signal.c
68414 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
68415
68416 int print_fatal_signals __read_mostly;
68417
68418 -static void __user *sig_handler(struct task_struct *t, int sig)
68419 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
68420 {
68421 return t->sighand->action[sig - 1].sa.sa_handler;
68422 }
68423
68424 -static int sig_handler_ignored(void __user *handler, int sig)
68425 +static int sig_handler_ignored(__sighandler_t handler, int sig)
68426 {
68427 /* Is it explicitly or implicitly ignored? */
68428 return handler == SIG_IGN ||
68429 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68430 static int sig_task_ignored(struct task_struct *t, int sig,
68431 int from_ancestor_ns)
68432 {
68433 - void __user *handler;
68434 + __sighandler_t handler;
68435
68436 handler = sig_handler(t, sig);
68437
68438 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68439 atomic_inc(&user->sigpending);
68440 rcu_read_unlock();
68441
68442 + if (!override_rlimit)
68443 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68444 +
68445 if (override_rlimit ||
68446 atomic_read(&user->sigpending) <=
68447 task_rlimit(t, RLIMIT_SIGPENDING)) {
68448 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68449
68450 int unhandled_signal(struct task_struct *tsk, int sig)
68451 {
68452 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68453 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68454 if (is_global_init(tsk))
68455 return 1;
68456 if (handler != SIG_IGN && handler != SIG_DFL)
68457 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68458 }
68459 }
68460
68461 + /* allow glibc communication via tgkill to other threads in our
68462 + thread group */
68463 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68464 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68465 + && gr_handle_signal(t, sig))
68466 + return -EPERM;
68467 +
68468 return security_task_kill(t, info, sig, 0);
68469 }
68470
68471 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68472 return send_signal(sig, info, p, 1);
68473 }
68474
68475 -static int
68476 +int
68477 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68478 {
68479 return send_signal(sig, info, t, 0);
68480 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68481 unsigned long int flags;
68482 int ret, blocked, ignored;
68483 struct k_sigaction *action;
68484 + int is_unhandled = 0;
68485
68486 spin_lock_irqsave(&t->sighand->siglock, flags);
68487 action = &t->sighand->action[sig-1];
68488 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68489 }
68490 if (action->sa.sa_handler == SIG_DFL)
68491 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68492 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68493 + is_unhandled = 1;
68494 ret = specific_send_sig_info(sig, info, t);
68495 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68496
68497 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
68498 + normal operation */
68499 + if (is_unhandled) {
68500 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68501 + gr_handle_crash(t, sig);
68502 + }
68503 +
68504 return ret;
68505 }
68506
68507 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68508 ret = check_kill_permission(sig, info, p);
68509 rcu_read_unlock();
68510
68511 - if (!ret && sig)
68512 + if (!ret && sig) {
68513 ret = do_send_sig_info(sig, info, p, true);
68514 + if (!ret)
68515 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68516 + }
68517
68518 return ret;
68519 }
68520 @@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
68521 {
68522 siginfo_t info;
68523
68524 + pax_track_stack();
68525 +
68526 memset(&info, 0, sizeof info);
68527 info.si_signo = signr;
68528 info.si_code = exit_code;
68529 @@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68530 int error = -ESRCH;
68531
68532 rcu_read_lock();
68533 - p = find_task_by_vpid(pid);
68534 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68535 + /* allow glibc communication via tgkill to other threads in our
68536 + thread group */
68537 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68538 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68539 + p = find_task_by_vpid_unrestricted(pid);
68540 + else
68541 +#endif
68542 + p = find_task_by_vpid(pid);
68543 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68544 error = check_kill_permission(sig, info, p);
68545 /*
68546 diff --git a/kernel/smp.c b/kernel/smp.c
68547 index fb67dfa..f819e2e 100644
68548 --- a/kernel/smp.c
68549 +++ b/kernel/smp.c
68550 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68551 }
68552 EXPORT_SYMBOL(smp_call_function);
68553
68554 -void ipi_call_lock(void)
68555 +void ipi_call_lock(void) __acquires(call_function.lock)
68556 {
68557 raw_spin_lock(&call_function.lock);
68558 }
68559
68560 -void ipi_call_unlock(void)
68561 +void ipi_call_unlock(void) __releases(call_function.lock)
68562 {
68563 raw_spin_unlock(&call_function.lock);
68564 }
68565
68566 -void ipi_call_lock_irq(void)
68567 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68568 {
68569 raw_spin_lock_irq(&call_function.lock);
68570 }
68571
68572 -void ipi_call_unlock_irq(void)
68573 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68574 {
68575 raw_spin_unlock_irq(&call_function.lock);
68576 }
68577 diff --git a/kernel/softirq.c b/kernel/softirq.c
68578 index fca82c3..1db9690 100644
68579 --- a/kernel/softirq.c
68580 +++ b/kernel/softirq.c
68581 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68582
68583 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68584
68585 -char *softirq_to_name[NR_SOFTIRQS] = {
68586 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68587 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68588 "TASKLET", "SCHED", "HRTIMER", "RCU"
68589 };
68590 @@ -235,7 +235,7 @@ restart:
68591 kstat_incr_softirqs_this_cpu(vec_nr);
68592
68593 trace_softirq_entry(vec_nr);
68594 - h->action(h);
68595 + h->action();
68596 trace_softirq_exit(vec_nr);
68597 if (unlikely(prev_count != preempt_count())) {
68598 printk(KERN_ERR "huh, entered softirq %u %s %p"
68599 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
68600 local_irq_restore(flags);
68601 }
68602
68603 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68604 +void open_softirq(int nr, void (*action)(void))
68605 {
68606 - softirq_vec[nr].action = action;
68607 + pax_open_kernel();
68608 + *(void **)&softirq_vec[nr].action = action;
68609 + pax_close_kernel();
68610 }
68611
68612 /*
68613 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68614
68615 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68616
68617 -static void tasklet_action(struct softirq_action *a)
68618 +static void tasklet_action(void)
68619 {
68620 struct tasklet_struct *list;
68621
68622 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
68623 }
68624 }
68625
68626 -static void tasklet_hi_action(struct softirq_action *a)
68627 +static void tasklet_hi_action(void)
68628 {
68629 struct tasklet_struct *list;
68630
68631 diff --git a/kernel/sys.c b/kernel/sys.c
68632 index 1dbbe69..e96e1dd 100644
68633 --- a/kernel/sys.c
68634 +++ b/kernel/sys.c
68635 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68636 error = -EACCES;
68637 goto out;
68638 }
68639 +
68640 + if (gr_handle_chroot_setpriority(p, niceval)) {
68641 + error = -EACCES;
68642 + goto out;
68643 + }
68644 +
68645 no_nice = security_task_setnice(p, niceval);
68646 if (no_nice) {
68647 error = no_nice;
68648 @@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68649 goto error;
68650 }
68651
68652 + if (gr_check_group_change(new->gid, new->egid, -1))
68653 + goto error;
68654 +
68655 if (rgid != (gid_t) -1 ||
68656 (egid != (gid_t) -1 && egid != old->gid))
68657 new->sgid = new->egid;
68658 @@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68659 old = current_cred();
68660
68661 retval = -EPERM;
68662 +
68663 + if (gr_check_group_change(gid, gid, gid))
68664 + goto error;
68665 +
68666 if (nsown_capable(CAP_SETGID))
68667 new->gid = new->egid = new->sgid = new->fsgid = gid;
68668 else if (gid == old->gid || gid == old->sgid)
68669 @@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68670 goto error;
68671 }
68672
68673 + if (gr_check_user_change(new->uid, new->euid, -1))
68674 + goto error;
68675 +
68676 if (new->uid != old->uid) {
68677 retval = set_user(new);
68678 if (retval < 0)
68679 @@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68680 old = current_cred();
68681
68682 retval = -EPERM;
68683 +
68684 + if (gr_check_crash_uid(uid))
68685 + goto error;
68686 + if (gr_check_user_change(uid, uid, uid))
68687 + goto error;
68688 +
68689 if (nsown_capable(CAP_SETUID)) {
68690 new->suid = new->uid = uid;
68691 if (uid != old->uid) {
68692 @@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68693 goto error;
68694 }
68695
68696 + if (gr_check_user_change(ruid, euid, -1))
68697 + goto error;
68698 +
68699 if (ruid != (uid_t) -1) {
68700 new->uid = ruid;
68701 if (ruid != old->uid) {
68702 @@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68703 goto error;
68704 }
68705
68706 + if (gr_check_group_change(rgid, egid, -1))
68707 + goto error;
68708 +
68709 if (rgid != (gid_t) -1)
68710 new->gid = rgid;
68711 if (egid != (gid_t) -1)
68712 @@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68713 old = current_cred();
68714 old_fsuid = old->fsuid;
68715
68716 + if (gr_check_user_change(-1, -1, uid))
68717 + goto error;
68718 +
68719 if (uid == old->uid || uid == old->euid ||
68720 uid == old->suid || uid == old->fsuid ||
68721 nsown_capable(CAP_SETUID)) {
68722 @@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68723 }
68724 }
68725
68726 +error:
68727 abort_creds(new);
68728 return old_fsuid;
68729
68730 @@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68731 if (gid == old->gid || gid == old->egid ||
68732 gid == old->sgid || gid == old->fsgid ||
68733 nsown_capable(CAP_SETGID)) {
68734 + if (gr_check_group_change(-1, -1, gid))
68735 + goto error;
68736 +
68737 if (gid != old_fsgid) {
68738 new->fsgid = gid;
68739 goto change_okay;
68740 }
68741 }
68742
68743 +error:
68744 abort_creds(new);
68745 return old_fsgid;
68746
68747 @@ -1188,7 +1224,10 @@ static int override_release(char __user *release, int len)
68748 }
68749 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68750 snprintf(buf, len, "2.6.%u%s", v, rest);
68751 - ret = copy_to_user(release, buf, len);
68752 + if (len > sizeof(buf))
68753 + ret = -EFAULT;
68754 + else
68755 + ret = copy_to_user(release, buf, len);
68756 }
68757 return ret;
68758 }
68759 @@ -1242,19 +1281,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68760 return -EFAULT;
68761
68762 down_read(&uts_sem);
68763 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68764 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68765 __OLD_UTS_LEN);
68766 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68767 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68768 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68769 __OLD_UTS_LEN);
68770 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68771 - error |= __copy_to_user(&name->release, &utsname()->release,
68772 + error |= __copy_to_user(name->release, &utsname()->release,
68773 __OLD_UTS_LEN);
68774 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68775 - error |= __copy_to_user(&name->version, &utsname()->version,
68776 + error |= __copy_to_user(name->version, &utsname()->version,
68777 __OLD_UTS_LEN);
68778 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68779 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68780 + error |= __copy_to_user(name->machine, &utsname()->machine,
68781 __OLD_UTS_LEN);
68782 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68783 up_read(&uts_sem);
68784 @@ -1717,7 +1756,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68785 error = get_dumpable(me->mm);
68786 break;
68787 case PR_SET_DUMPABLE:
68788 - if (arg2 < 0 || arg2 > 1) {
68789 + if (arg2 > 1) {
68790 error = -EINVAL;
68791 break;
68792 }
68793 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68794 index 11d65b5..6957b37 100644
68795 --- a/kernel/sysctl.c
68796 +++ b/kernel/sysctl.c
68797 @@ -85,6 +85,13 @@
68798
68799
68800 #if defined(CONFIG_SYSCTL)
68801 +#include <linux/grsecurity.h>
68802 +#include <linux/grinternal.h>
68803 +
68804 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
68805 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68806 + const int op);
68807 +extern int gr_handle_chroot_sysctl(const int op);
68808
68809 /* External variables not in a header file. */
68810 extern int sysctl_overcommit_memory;
68811 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68812 }
68813
68814 #endif
68815 +extern struct ctl_table grsecurity_table[];
68816
68817 static struct ctl_table root_table[];
68818 static struct ctl_table_root sysctl_table_root;
68819 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
68820 int sysctl_legacy_va_layout;
68821 #endif
68822
68823 +#ifdef CONFIG_PAX_SOFTMODE
68824 +static ctl_table pax_table[] = {
68825 + {
68826 + .procname = "softmode",
68827 + .data = &pax_softmode,
68828 + .maxlen = sizeof(unsigned int),
68829 + .mode = 0600,
68830 + .proc_handler = &proc_dointvec,
68831 + },
68832 +
68833 + { }
68834 +};
68835 +#endif
68836 +
68837 /* The default sysctl tables: */
68838
68839 static struct ctl_table root_table[] = {
68840 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
68841 #endif
68842
68843 static struct ctl_table kern_table[] = {
68844 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68845 + {
68846 + .procname = "grsecurity",
68847 + .mode = 0500,
68848 + .child = grsecurity_table,
68849 + },
68850 +#endif
68851 +
68852 +#ifdef CONFIG_PAX_SOFTMODE
68853 + {
68854 + .procname = "pax",
68855 + .mode = 0500,
68856 + .child = pax_table,
68857 + },
68858 +#endif
68859 +
68860 {
68861 .procname = "sched_child_runs_first",
68862 .data = &sysctl_sched_child_runs_first,
68863 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
68864 .data = &modprobe_path,
68865 .maxlen = KMOD_PATH_LEN,
68866 .mode = 0644,
68867 - .proc_handler = proc_dostring,
68868 + .proc_handler = proc_dostring_modpriv,
68869 },
68870 {
68871 .procname = "modules_disabled",
68872 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
68873 .extra1 = &zero,
68874 .extra2 = &one,
68875 },
68876 +#endif
68877 {
68878 .procname = "kptr_restrict",
68879 .data = &kptr_restrict,
68880 .maxlen = sizeof(int),
68881 .mode = 0644,
68882 .proc_handler = proc_dmesg_restrict,
68883 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68884 + .extra1 = &two,
68885 +#else
68886 .extra1 = &zero,
68887 +#endif
68888 .extra2 = &two,
68889 },
68890 -#endif
68891 {
68892 .procname = "ngroups_max",
68893 .data = &ngroups_max,
68894 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
68895 .proc_handler = proc_dointvec_minmax,
68896 .extra1 = &zero,
68897 },
68898 + {
68899 + .procname = "heap_stack_gap",
68900 + .data = &sysctl_heap_stack_gap,
68901 + .maxlen = sizeof(sysctl_heap_stack_gap),
68902 + .mode = 0644,
68903 + .proc_handler = proc_doulongvec_minmax,
68904 + },
68905 #else
68906 {
68907 .procname = "nr_trim_pages",
68908 @@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
68909 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
68910 {
68911 int mode;
68912 + int error;
68913 +
68914 + if (table->parent != NULL && table->parent->procname != NULL &&
68915 + table->procname != NULL &&
68916 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
68917 + return -EACCES;
68918 + if (gr_handle_chroot_sysctl(op))
68919 + return -EACCES;
68920 + error = gr_handle_sysctl(table, op);
68921 + if (error)
68922 + return error;
68923
68924 if (root->permissions)
68925 mode = root->permissions(root, current->nsproxy, table);
68926 @@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *table, int write,
68927 buffer, lenp, ppos);
68928 }
68929
68930 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68931 + void __user *buffer, size_t *lenp, loff_t *ppos)
68932 +{
68933 + if (write && !capable(CAP_SYS_MODULE))
68934 + return -EPERM;
68935 +
68936 + return _proc_do_string(table->data, table->maxlen, write,
68937 + buffer, lenp, ppos);
68938 +}
68939 +
68940 static size_t proc_skip_spaces(char **buf)
68941 {
68942 size_t ret;
68943 @@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68944 len = strlen(tmp);
68945 if (len > *size)
68946 len = *size;
68947 + if (len > sizeof(tmp))
68948 + len = sizeof(tmp);
68949 if (copy_to_user(*buf, tmp, len))
68950 return -EFAULT;
68951 *size -= len;
68952 @@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68953 *i = val;
68954 } else {
68955 val = convdiv * (*i) / convmul;
68956 - if (!first)
68957 + if (!first) {
68958 err = proc_put_char(&buffer, &left, '\t');
68959 + if (err)
68960 + break;
68961 + }
68962 err = proc_put_long(&buffer, &left, val, false);
68963 if (err)
68964 break;
68965 @@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *table, int write,
68966 return -ENOSYS;
68967 }
68968
68969 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68970 + void __user *buffer, size_t *lenp, loff_t *ppos)
68971 +{
68972 + return -ENOSYS;
68973 +}
68974 +
68975 int proc_dointvec(struct ctl_table *table, int write,
68976 void __user *buffer, size_t *lenp, loff_t *ppos)
68977 {
68978 @@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68979 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68980 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68981 EXPORT_SYMBOL(proc_dostring);
68982 +EXPORT_SYMBOL(proc_dostring_modpriv);
68983 EXPORT_SYMBOL(proc_doulongvec_minmax);
68984 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68985 EXPORT_SYMBOL(register_sysctl_table);
68986 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68987 index e8bffbe..2344401 100644
68988 --- a/kernel/sysctl_binary.c
68989 +++ b/kernel/sysctl_binary.c
68990 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68991 int i;
68992
68993 set_fs(KERNEL_DS);
68994 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68995 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68996 set_fs(old_fs);
68997 if (result < 0)
68998 goto out_kfree;
68999 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69000 }
69001
69002 set_fs(KERNEL_DS);
69003 - result = vfs_write(file, buffer, str - buffer, &pos);
69004 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69005 set_fs(old_fs);
69006 if (result < 0)
69007 goto out_kfree;
69008 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69009 int i;
69010
69011 set_fs(KERNEL_DS);
69012 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69013 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69014 set_fs(old_fs);
69015 if (result < 0)
69016 goto out_kfree;
69017 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69018 }
69019
69020 set_fs(KERNEL_DS);
69021 - result = vfs_write(file, buffer, str - buffer, &pos);
69022 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69023 set_fs(old_fs);
69024 if (result < 0)
69025 goto out_kfree;
69026 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69027 int i;
69028
69029 set_fs(KERNEL_DS);
69030 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69031 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69032 set_fs(old_fs);
69033 if (result < 0)
69034 goto out;
69035 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69036 __le16 dnaddr;
69037
69038 set_fs(KERNEL_DS);
69039 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69040 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69041 set_fs(old_fs);
69042 if (result < 0)
69043 goto out;
69044 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69045 le16_to_cpu(dnaddr) & 0x3ff);
69046
69047 set_fs(KERNEL_DS);
69048 - result = vfs_write(file, buf, len, &pos);
69049 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69050 set_fs(old_fs);
69051 if (result < 0)
69052 goto out;
69053 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
69054 index 362da65..ab8ef8c 100644
69055 --- a/kernel/sysctl_check.c
69056 +++ b/kernel/sysctl_check.c
69057 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
69058 set_fail(&fail, table, "Directory with extra2");
69059 } else {
69060 if ((table->proc_handler == proc_dostring) ||
69061 + (table->proc_handler == proc_dostring_modpriv) ||
69062 (table->proc_handler == proc_dointvec) ||
69063 (table->proc_handler == proc_dointvec_minmax) ||
69064 (table->proc_handler == proc_dointvec_jiffies) ||
69065 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69066 index e660464..c8b9e67 100644
69067 --- a/kernel/taskstats.c
69068 +++ b/kernel/taskstats.c
69069 @@ -27,9 +27,12 @@
69070 #include <linux/cgroup.h>
69071 #include <linux/fs.h>
69072 #include <linux/file.h>
69073 +#include <linux/grsecurity.h>
69074 #include <net/genetlink.h>
69075 #include <linux/atomic.h>
69076
69077 +extern int gr_is_taskstats_denied(int pid);
69078 +
69079 /*
69080 * Maximum length of a cpumask that can be specified in
69081 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69082 @@ -556,6 +559,9 @@ err:
69083
69084 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69085 {
69086 + if (gr_is_taskstats_denied(current->pid))
69087 + return -EACCES;
69088 +
69089 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69090 return cmd_attr_register_cpumask(info);
69091 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69092 diff --git a/kernel/time.c b/kernel/time.c
69093 index d776062..fa8d186 100644
69094 --- a/kernel/time.c
69095 +++ b/kernel/time.c
69096 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69097 return error;
69098
69099 if (tz) {
69100 + /* we log in do_settimeofday called below, so don't log twice
69101 + */
69102 + if (!tv)
69103 + gr_log_timechange();
69104 +
69105 /* SMP safe, global irq locking makes it work. */
69106 sys_tz = *tz;
69107 update_vsyscall_tz();
69108 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69109 index ea5e1a9..8b8df07 100644
69110 --- a/kernel/time/alarmtimer.c
69111 +++ b/kernel/time/alarmtimer.c
69112 @@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
69113 {
69114 int error = 0;
69115 int i;
69116 - struct k_clock alarm_clock = {
69117 + static struct k_clock alarm_clock = {
69118 .clock_getres = alarm_clock_getres,
69119 .clock_get = alarm_clock_get,
69120 .timer_create = alarm_timer_create,
69121 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69122 index c7218d1..5f4ecc6 100644
69123 --- a/kernel/time/tick-broadcast.c
69124 +++ b/kernel/time/tick-broadcast.c
69125 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69126 * then clear the broadcast bit.
69127 */
69128 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69129 - int cpu = smp_processor_id();
69130 + cpu = smp_processor_id();
69131
69132 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69133 tick_broadcast_clear_oneshot(cpu);
69134 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69135 index 2b021b0e..b673a32 100644
69136 --- a/kernel/time/timekeeping.c
69137 +++ b/kernel/time/timekeeping.c
69138 @@ -14,6 +14,7 @@
69139 #include <linux/init.h>
69140 #include <linux/mm.h>
69141 #include <linux/sched.h>
69142 +#include <linux/grsecurity.h>
69143 #include <linux/syscore_ops.h>
69144 #include <linux/clocksource.h>
69145 #include <linux/jiffies.h>
69146 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespec *tv)
69147 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69148 return -EINVAL;
69149
69150 + gr_log_timechange();
69151 +
69152 write_seqlock_irqsave(&xtime_lock, flags);
69153
69154 timekeeping_forward_now();
69155 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69156 index 3258455..f35227d 100644
69157 --- a/kernel/time/timer_list.c
69158 +++ b/kernel/time/timer_list.c
69159 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69160
69161 static void print_name_offset(struct seq_file *m, void *sym)
69162 {
69163 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69164 + SEQ_printf(m, "<%p>", NULL);
69165 +#else
69166 char symname[KSYM_NAME_LEN];
69167
69168 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69169 SEQ_printf(m, "<%pK>", sym);
69170 else
69171 SEQ_printf(m, "%s", symname);
69172 +#endif
69173 }
69174
69175 static void
69176 @@ -112,7 +116,11 @@ next_one:
69177 static void
69178 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69179 {
69180 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69181 + SEQ_printf(m, " .base: %p\n", NULL);
69182 +#else
69183 SEQ_printf(m, " .base: %pK\n", base);
69184 +#endif
69185 SEQ_printf(m, " .index: %d\n",
69186 base->index);
69187 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69188 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69189 {
69190 struct proc_dir_entry *pe;
69191
69192 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69193 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69194 +#else
69195 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69196 +#endif
69197 if (!pe)
69198 return -ENOMEM;
69199 return 0;
69200 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69201 index a5d0a3a..60c7948 100644
69202 --- a/kernel/time/timer_stats.c
69203 +++ b/kernel/time/timer_stats.c
69204 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69205 static unsigned long nr_entries;
69206 static struct entry entries[MAX_ENTRIES];
69207
69208 -static atomic_t overflow_count;
69209 +static atomic_unchecked_t overflow_count;
69210
69211 /*
69212 * The entries are in a hash-table, for fast lookup:
69213 @@ -140,7 +140,7 @@ static void reset_entries(void)
69214 nr_entries = 0;
69215 memset(entries, 0, sizeof(entries));
69216 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69217 - atomic_set(&overflow_count, 0);
69218 + atomic_set_unchecked(&overflow_count, 0);
69219 }
69220
69221 static struct entry *alloc_entry(void)
69222 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69223 if (likely(entry))
69224 entry->count++;
69225 else
69226 - atomic_inc(&overflow_count);
69227 + atomic_inc_unchecked(&overflow_count);
69228
69229 out_unlock:
69230 raw_spin_unlock_irqrestore(lock, flags);
69231 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69232
69233 static void print_name_offset(struct seq_file *m, unsigned long addr)
69234 {
69235 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69236 + seq_printf(m, "<%p>", NULL);
69237 +#else
69238 char symname[KSYM_NAME_LEN];
69239
69240 if (lookup_symbol_name(addr, symname) < 0)
69241 seq_printf(m, "<%p>", (void *)addr);
69242 else
69243 seq_printf(m, "%s", symname);
69244 +#endif
69245 }
69246
69247 static int tstats_show(struct seq_file *m, void *v)
69248 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69249
69250 seq_puts(m, "Timer Stats Version: v0.2\n");
69251 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69252 - if (atomic_read(&overflow_count))
69253 + if (atomic_read_unchecked(&overflow_count))
69254 seq_printf(m, "Overflow: %d entries\n",
69255 - atomic_read(&overflow_count));
69256 + atomic_read_unchecked(&overflow_count));
69257
69258 for (i = 0; i < nr_entries; i++) {
69259 entry = entries + i;
69260 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69261 {
69262 struct proc_dir_entry *pe;
69263
69264 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69265 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69266 +#else
69267 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69268 +#endif
69269 if (!pe)
69270 return -ENOMEM;
69271 return 0;
69272 diff --git a/kernel/timer.c b/kernel/timer.c
69273 index 8cff361..0fb5cd8 100644
69274 --- a/kernel/timer.c
69275 +++ b/kernel/timer.c
69276 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
69277 /*
69278 * This function runs timers and the timer-tq in bottom half context.
69279 */
69280 -static void run_timer_softirq(struct softirq_action *h)
69281 +static void run_timer_softirq(void)
69282 {
69283 struct tvec_base *base = __this_cpu_read(tvec_bases);
69284
69285 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69286 index 7c910a5..8b72104 100644
69287 --- a/kernel/trace/blktrace.c
69288 +++ b/kernel/trace/blktrace.c
69289 @@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69290 struct blk_trace *bt = filp->private_data;
69291 char buf[16];
69292
69293 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69294 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69295
69296 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69297 }
69298 @@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69299 return 1;
69300
69301 bt = buf->chan->private_data;
69302 - atomic_inc(&bt->dropped);
69303 + atomic_inc_unchecked(&bt->dropped);
69304 return 0;
69305 }
69306
69307 @@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69308
69309 bt->dir = dir;
69310 bt->dev = dev;
69311 - atomic_set(&bt->dropped, 0);
69312 + atomic_set_unchecked(&bt->dropped, 0);
69313
69314 ret = -EIO;
69315 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69316 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69317 index c3e4575..cd9c767 100644
69318 --- a/kernel/trace/ftrace.c
69319 +++ b/kernel/trace/ftrace.c
69320 @@ -1585,12 +1585,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69321 if (unlikely(ftrace_disabled))
69322 return 0;
69323
69324 + ret = ftrace_arch_code_modify_prepare();
69325 + FTRACE_WARN_ON(ret);
69326 + if (ret)
69327 + return 0;
69328 +
69329 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69330 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69331 if (ret) {
69332 ftrace_bug(ret, ip);
69333 - return 0;
69334 }
69335 - return 1;
69336 + return ret ? 0 : 1;
69337 }
69338
69339 /*
69340 @@ -2607,7 +2612,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69341
69342 int
69343 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69344 - void *data)
69345 + void *data)
69346 {
69347 struct ftrace_func_probe *entry;
69348 struct ftrace_page *pg;
69349 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69350 index 17a2d44..85907e2 100644
69351 --- a/kernel/trace/trace.c
69352 +++ b/kernel/trace/trace.c
69353 @@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
69354 size_t rem;
69355 unsigned int i;
69356
69357 + pax_track_stack();
69358 +
69359 if (splice_grow_spd(pipe, &spd))
69360 return -ENOMEM;
69361
69362 @@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
69363 int entries, size, i;
69364 size_t ret;
69365
69366 + pax_track_stack();
69367 +
69368 if (splice_grow_spd(pipe, &spd))
69369 return -ENOMEM;
69370
69371 @@ -4093,10 +4097,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69372 };
69373 #endif
69374
69375 -static struct dentry *d_tracer;
69376 -
69377 struct dentry *tracing_init_dentry(void)
69378 {
69379 + static struct dentry *d_tracer;
69380 static int once;
69381
69382 if (d_tracer)
69383 @@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
69384 return d_tracer;
69385 }
69386
69387 -static struct dentry *d_percpu;
69388 -
69389 struct dentry *tracing_dentry_percpu(void)
69390 {
69391 + static struct dentry *d_percpu;
69392 static int once;
69393 struct dentry *d_tracer;
69394
69395 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69396 index 581876f..a91e569 100644
69397 --- a/kernel/trace/trace_events.c
69398 +++ b/kernel/trace/trace_events.c
69399 @@ -1300,10 +1300,6 @@ static LIST_HEAD(ftrace_module_file_list);
69400 struct ftrace_module_file_ops {
69401 struct list_head list;
69402 struct module *mod;
69403 - struct file_operations id;
69404 - struct file_operations enable;
69405 - struct file_operations format;
69406 - struct file_operations filter;
69407 };
69408
69409 static struct ftrace_module_file_ops *
69410 @@ -1324,17 +1320,12 @@ trace_create_file_ops(struct module *mod)
69411
69412 file_ops->mod = mod;
69413
69414 - file_ops->id = ftrace_event_id_fops;
69415 - file_ops->id.owner = mod;
69416 -
69417 - file_ops->enable = ftrace_enable_fops;
69418 - file_ops->enable.owner = mod;
69419 -
69420 - file_ops->filter = ftrace_event_filter_fops;
69421 - file_ops->filter.owner = mod;
69422 -
69423 - file_ops->format = ftrace_event_format_fops;
69424 - file_ops->format.owner = mod;
69425 + pax_open_kernel();
69426 + *(void **)&mod->trace_id.owner = mod;
69427 + *(void **)&mod->trace_enable.owner = mod;
69428 + *(void **)&mod->trace_filter.owner = mod;
69429 + *(void **)&mod->trace_format.owner = mod;
69430 + pax_close_kernel();
69431
69432 list_add(&file_ops->list, &ftrace_module_file_list);
69433
69434 @@ -1358,8 +1349,8 @@ static void trace_module_add_events(struct module *mod)
69435
69436 for_each_event(call, start, end) {
69437 __trace_add_event_call(*call, mod,
69438 - &file_ops->id, &file_ops->enable,
69439 - &file_ops->filter, &file_ops->format);
69440 + &mod->trace_id, &mod->trace_enable,
69441 + &mod->trace_filter, &mod->trace_format);
69442 }
69443 }
69444
69445 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69446 index 00d527c..7c5b1a3 100644
69447 --- a/kernel/trace/trace_kprobe.c
69448 +++ b/kernel/trace/trace_kprobe.c
69449 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69450 long ret;
69451 int maxlen = get_rloc_len(*(u32 *)dest);
69452 u8 *dst = get_rloc_data(dest);
69453 - u8 *src = addr;
69454 + const u8 __user *src = (const u8 __force_user *)addr;
69455 mm_segment_t old_fs = get_fs();
69456 if (!maxlen)
69457 return;
69458 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69459 pagefault_disable();
69460 do
69461 ret = __copy_from_user_inatomic(dst++, src++, 1);
69462 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69463 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69464 dst[-1] = '\0';
69465 pagefault_enable();
69466 set_fs(old_fs);
69467 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69468 ((u8 *)get_rloc_data(dest))[0] = '\0';
69469 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69470 } else
69471 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69472 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69473 get_rloc_offs(*(u32 *)dest));
69474 }
69475 /* Return the length of string -- including null terminal byte */
69476 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69477 set_fs(KERNEL_DS);
69478 pagefault_disable();
69479 do {
69480 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69481 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69482 len++;
69483 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69484 pagefault_enable();
69485 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69486 index fd3c8aa..5f324a6 100644
69487 --- a/kernel/trace/trace_mmiotrace.c
69488 +++ b/kernel/trace/trace_mmiotrace.c
69489 @@ -24,7 +24,7 @@ struct header_iter {
69490 static struct trace_array *mmio_trace_array;
69491 static bool overrun_detected;
69492 static unsigned long prev_overruns;
69493 -static atomic_t dropped_count;
69494 +static atomic_unchecked_t dropped_count;
69495
69496 static void mmio_reset_data(struct trace_array *tr)
69497 {
69498 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69499
69500 static unsigned long count_overruns(struct trace_iterator *iter)
69501 {
69502 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
69503 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69504 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69505
69506 if (over > prev_overruns)
69507 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69508 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69509 sizeof(*entry), 0, pc);
69510 if (!event) {
69511 - atomic_inc(&dropped_count);
69512 + atomic_inc_unchecked(&dropped_count);
69513 return;
69514 }
69515 entry = ring_buffer_event_data(event);
69516 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69517 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69518 sizeof(*entry), 0, pc);
69519 if (!event) {
69520 - atomic_inc(&dropped_count);
69521 + atomic_inc_unchecked(&dropped_count);
69522 return;
69523 }
69524 entry = ring_buffer_event_data(event);
69525 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69526 index 5199930..26c73a0 100644
69527 --- a/kernel/trace/trace_output.c
69528 +++ b/kernel/trace/trace_output.c
69529 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
69530
69531 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69532 if (!IS_ERR(p)) {
69533 - p = mangle_path(s->buffer + s->len, p, "\n");
69534 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69535 if (p) {
69536 s->len = p - s->buffer;
69537 return 1;
69538 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69539 index 77575b3..6e623d1 100644
69540 --- a/kernel/trace/trace_stack.c
69541 +++ b/kernel/trace/trace_stack.c
69542 @@ -50,7 +50,7 @@ static inline void check_stack(void)
69543 return;
69544
69545 /* we do not handle interrupt stacks yet */
69546 - if (!object_is_on_stack(&this_size))
69547 + if (!object_starts_on_stack(&this_size))
69548 return;
69549
69550 local_irq_save(flags);
69551 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69552 index 209b379..7f76423 100644
69553 --- a/kernel/trace/trace_workqueue.c
69554 +++ b/kernel/trace/trace_workqueue.c
69555 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69556 int cpu;
69557 pid_t pid;
69558 /* Can be inserted from interrupt or user context, need to be atomic */
69559 - atomic_t inserted;
69560 + atomic_unchecked_t inserted;
69561 /*
69562 * Don't need to be atomic, works are serialized in a single workqueue thread
69563 * on a single CPU.
69564 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69565 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69566 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69567 if (node->pid == wq_thread->pid) {
69568 - atomic_inc(&node->inserted);
69569 + atomic_inc_unchecked(&node->inserted);
69570 goto found;
69571 }
69572 }
69573 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69574 tsk = get_pid_task(pid, PIDTYPE_PID);
69575 if (tsk) {
69576 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69577 - atomic_read(&cws->inserted), cws->executed,
69578 + atomic_read_unchecked(&cws->inserted), cws->executed,
69579 tsk->comm);
69580 put_task_struct(tsk);
69581 }
69582 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69583 index c0cb9c4..f33aa89 100644
69584 --- a/lib/Kconfig.debug
69585 +++ b/lib/Kconfig.debug
69586 @@ -1091,6 +1091,7 @@ config LATENCYTOP
69587 depends on DEBUG_KERNEL
69588 depends on STACKTRACE_SUPPORT
69589 depends on PROC_FS
69590 + depends on !GRKERNSEC_HIDESYM
69591 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
69592 select KALLSYMS
69593 select KALLSYMS_ALL
69594 diff --git a/lib/bitmap.c b/lib/bitmap.c
69595 index 2f4412e..a557e27 100644
69596 --- a/lib/bitmap.c
69597 +++ b/lib/bitmap.c
69598 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69599 {
69600 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69601 u32 chunk;
69602 - const char __user *ubuf = buf;
69603 + const char __user *ubuf = (const char __force_user *)buf;
69604
69605 bitmap_zero(maskp, nmaskbits);
69606
69607 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
69608 {
69609 if (!access_ok(VERIFY_READ, ubuf, ulen))
69610 return -EFAULT;
69611 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
69612 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
69613 }
69614 EXPORT_SYMBOL(bitmap_parse_user);
69615
69616 @@ -594,7 +594,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69617 {
69618 unsigned a, b;
69619 int c, old_c, totaldigits;
69620 - const char __user *ubuf = buf;
69621 + const char __user *ubuf = (const char __force_user *)buf;
69622 int exp_digit, in_range;
69623
69624 totaldigits = c = 0;
69625 @@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69626 {
69627 if (!access_ok(VERIFY_READ, ubuf, ulen))
69628 return -EFAULT;
69629 - return __bitmap_parselist((const char *)ubuf,
69630 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69631 ulen, 1, maskp, nmaskbits);
69632 }
69633 EXPORT_SYMBOL(bitmap_parselist_user);
69634 diff --git a/lib/bug.c b/lib/bug.c
69635 index 1955209..cbbb2ad 100644
69636 --- a/lib/bug.c
69637 +++ b/lib/bug.c
69638 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69639 return BUG_TRAP_TYPE_NONE;
69640
69641 bug = find_bug(bugaddr);
69642 + if (!bug)
69643 + return BUG_TRAP_TYPE_NONE;
69644
69645 file = NULL;
69646 line = 0;
69647 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69648 index a78b7c6..2c73084 100644
69649 --- a/lib/debugobjects.c
69650 +++ b/lib/debugobjects.c
69651 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69652 if (limit > 4)
69653 return;
69654
69655 - is_on_stack = object_is_on_stack(addr);
69656 + is_on_stack = object_starts_on_stack(addr);
69657 if (is_on_stack == onstack)
69658 return;
69659
69660 diff --git a/lib/devres.c b/lib/devres.c
69661 index 7c0e953..f642b5c 100644
69662 --- a/lib/devres.c
69663 +++ b/lib/devres.c
69664 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69665 void devm_iounmap(struct device *dev, void __iomem *addr)
69666 {
69667 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69668 - (void *)addr));
69669 + (void __force *)addr));
69670 iounmap(addr);
69671 }
69672 EXPORT_SYMBOL(devm_iounmap);
69673 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69674 {
69675 ioport_unmap(addr);
69676 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69677 - devm_ioport_map_match, (void *)addr));
69678 + devm_ioport_map_match, (void __force *)addr));
69679 }
69680 EXPORT_SYMBOL(devm_ioport_unmap);
69681
69682 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69683 index db07bfd..719b5ab 100644
69684 --- a/lib/dma-debug.c
69685 +++ b/lib/dma-debug.c
69686 @@ -870,7 +870,7 @@ out:
69687
69688 static void check_for_stack(struct device *dev, void *addr)
69689 {
69690 - if (object_is_on_stack(addr))
69691 + if (object_starts_on_stack(addr))
69692 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69693 "stack [addr=%p]\n", addr);
69694 }
69695 diff --git a/lib/extable.c b/lib/extable.c
69696 index 4cac81e..63e9b8f 100644
69697 --- a/lib/extable.c
69698 +++ b/lib/extable.c
69699 @@ -13,6 +13,7 @@
69700 #include <linux/init.h>
69701 #include <linux/sort.h>
69702 #include <asm/uaccess.h>
69703 +#include <asm/pgtable.h>
69704
69705 #ifndef ARCH_HAS_SORT_EXTABLE
69706 /*
69707 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69708 void sort_extable(struct exception_table_entry *start,
69709 struct exception_table_entry *finish)
69710 {
69711 + pax_open_kernel();
69712 sort(start, finish - start, sizeof(struct exception_table_entry),
69713 cmp_ex, NULL);
69714 + pax_close_kernel();
69715 }
69716
69717 #ifdef CONFIG_MODULES
69718 diff --git a/lib/inflate.c b/lib/inflate.c
69719 index 013a761..c28f3fc 100644
69720 --- a/lib/inflate.c
69721 +++ b/lib/inflate.c
69722 @@ -269,7 +269,7 @@ static void free(void *where)
69723 malloc_ptr = free_mem_ptr;
69724 }
69725 #else
69726 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69727 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69728 #define free(a) kfree(a)
69729 #endif
69730
69731 diff --git a/lib/kref.c b/lib/kref.c
69732 index 3efb882..8492f4c 100644
69733 --- a/lib/kref.c
69734 +++ b/lib/kref.c
69735 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
69736 */
69737 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
69738 {
69739 - WARN_ON(release == NULL);
69740 + BUG_ON(release == NULL);
69741 WARN_ON(release == (void (*)(struct kref *))kfree);
69742
69743 if (atomic_dec_and_test(&kref->refcount)) {
69744 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69745 index a2f9da5..3bcadb6 100644
69746 --- a/lib/radix-tree.c
69747 +++ b/lib/radix-tree.c
69748 @@ -80,7 +80,7 @@ struct radix_tree_preload {
69749 int nr;
69750 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69751 };
69752 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69753 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69754
69755 static inline void *ptr_to_indirect(void *ptr)
69756 {
69757 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69758 index d7222a9..2172edc 100644
69759 --- a/lib/vsprintf.c
69760 +++ b/lib/vsprintf.c
69761 @@ -16,6 +16,9 @@
69762 * - scnprintf and vscnprintf
69763 */
69764
69765 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69766 +#define __INCLUDED_BY_HIDESYM 1
69767 +#endif
69768 #include <stdarg.h>
69769 #include <linux/module.h>
69770 #include <linux/types.h>
69771 @@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69772 char sym[KSYM_SYMBOL_LEN];
69773 if (ext == 'B')
69774 sprint_backtrace(sym, value);
69775 - else if (ext != 'f' && ext != 's')
69776 + else if (ext != 'f' && ext != 's' && ext != 'a')
69777 sprint_symbol(sym, value);
69778 else
69779 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69780 @@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
69781 return string(buf, end, uuid, spec);
69782 }
69783
69784 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69785 +int kptr_restrict __read_mostly = 2;
69786 +#else
69787 int kptr_restrict __read_mostly;
69788 +#endif
69789
69790 /*
69791 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69792 @@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
69793 * - 'S' For symbolic direct pointers with offset
69794 * - 's' For symbolic direct pointers without offset
69795 * - 'B' For backtraced symbolic direct pointers with offset
69796 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69797 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69798 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69799 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69800 * - 'M' For a 6-byte MAC address, it prints the address in the
69801 @@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69802 {
69803 if (!ptr && *fmt != 'K') {
69804 /*
69805 - * Print (null) with the same width as a pointer so it makes
69806 + * Print (nil) with the same width as a pointer so it makes
69807 * tabular output look nice.
69808 */
69809 if (spec.field_width == -1)
69810 spec.field_width = 2 * sizeof(void *);
69811 - return string(buf, end, "(null)", spec);
69812 + return string(buf, end, "(nil)", spec);
69813 }
69814
69815 switch (*fmt) {
69816 @@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69817 /* Fallthrough */
69818 case 'S':
69819 case 's':
69820 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69821 + break;
69822 +#else
69823 + return symbol_string(buf, end, ptr, spec, *fmt);
69824 +#endif
69825 + case 'A':
69826 + case 'a':
69827 case 'B':
69828 return symbol_string(buf, end, ptr, spec, *fmt);
69829 case 'R':
69830 @@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69831 typeof(type) value; \
69832 if (sizeof(type) == 8) { \
69833 args = PTR_ALIGN(args, sizeof(u32)); \
69834 - *(u32 *)&value = *(u32 *)args; \
69835 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69836 + *(u32 *)&value = *(const u32 *)args; \
69837 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69838 } else { \
69839 args = PTR_ALIGN(args, sizeof(type)); \
69840 - value = *(typeof(type) *)args; \
69841 + value = *(const typeof(type) *)args; \
69842 } \
69843 args += sizeof(type); \
69844 value; \
69845 @@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69846 case FORMAT_TYPE_STR: {
69847 const char *str_arg = args;
69848 args += strlen(str_arg) + 1;
69849 - str = string(str, end, (char *)str_arg, spec);
69850 + str = string(str, end, str_arg, spec);
69851 break;
69852 }
69853
69854 diff --git a/localversion-grsec b/localversion-grsec
69855 new file mode 100644
69856 index 0000000..7cd6065
69857 --- /dev/null
69858 +++ b/localversion-grsec
69859 @@ -0,0 +1 @@
69860 +-grsec
69861 diff --git a/mm/Kconfig b/mm/Kconfig
69862 index f2f1ca1..0645f06 100644
69863 --- a/mm/Kconfig
69864 +++ b/mm/Kconfig
69865 @@ -238,10 +238,10 @@ config KSM
69866 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69867
69868 config DEFAULT_MMAP_MIN_ADDR
69869 - int "Low address space to protect from user allocation"
69870 + int "Low address space to protect from user allocation"
69871 depends on MMU
69872 - default 4096
69873 - help
69874 + default 65536
69875 + help
69876 This is the portion of low virtual memory which should be protected
69877 from userspace allocation. Keeping a user from writing to low pages
69878 can help reduce the impact of kernel NULL pointer bugs.
69879 diff --git a/mm/filemap.c b/mm/filemap.c
69880 index 7771871..91bcdb4 100644
69881 --- a/mm/filemap.c
69882 +++ b/mm/filemap.c
69883 @@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69884 struct address_space *mapping = file->f_mapping;
69885
69886 if (!mapping->a_ops->readpage)
69887 - return -ENOEXEC;
69888 + return -ENODEV;
69889 file_accessed(file);
69890 vma->vm_ops = &generic_file_vm_ops;
69891 vma->vm_flags |= VM_CAN_NONLINEAR;
69892 @@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69893 *pos = i_size_read(inode);
69894
69895 if (limit != RLIM_INFINITY) {
69896 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69897 if (*pos >= limit) {
69898 send_sig(SIGXFSZ, current, 0);
69899 return -EFBIG;
69900 diff --git a/mm/fremap.c b/mm/fremap.c
69901 index b8e0e2d..076e171 100644
69902 --- a/mm/fremap.c
69903 +++ b/mm/fremap.c
69904 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69905 retry:
69906 vma = find_vma(mm, start);
69907
69908 +#ifdef CONFIG_PAX_SEGMEXEC
69909 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69910 + goto out;
69911 +#endif
69912 +
69913 /*
69914 * Make sure the vma is shared, that it supports prefaulting,
69915 * and that the remapped range is valid and fully within
69916 diff --git a/mm/highmem.c b/mm/highmem.c
69917 index 5ef672c..d7660f4 100644
69918 --- a/mm/highmem.c
69919 +++ b/mm/highmem.c
69920 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69921 * So no dangers, even with speculative execution.
69922 */
69923 page = pte_page(pkmap_page_table[i]);
69924 + pax_open_kernel();
69925 pte_clear(&init_mm, (unsigned long)page_address(page),
69926 &pkmap_page_table[i]);
69927 -
69928 + pax_close_kernel();
69929 set_page_address(page, NULL);
69930 need_flush = 1;
69931 }
69932 @@ -186,9 +187,11 @@ start:
69933 }
69934 }
69935 vaddr = PKMAP_ADDR(last_pkmap_nr);
69936 +
69937 + pax_open_kernel();
69938 set_pte_at(&init_mm, vaddr,
69939 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69940 -
69941 + pax_close_kernel();
69942 pkmap_count[last_pkmap_nr] = 1;
69943 set_page_address(page, (void *)vaddr);
69944
69945 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69946 index d819d93..468e18f 100644
69947 --- a/mm/huge_memory.c
69948 +++ b/mm/huge_memory.c
69949 @@ -702,7 +702,7 @@ out:
69950 * run pte_offset_map on the pmd, if an huge pmd could
69951 * materialize from under us from a different thread.
69952 */
69953 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69954 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69955 return VM_FAULT_OOM;
69956 /* if an huge pmd materialized from under us just retry later */
69957 if (unlikely(pmd_trans_huge(*pmd)))
69958 @@ -829,7 +829,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
69959
69960 for (i = 0; i < HPAGE_PMD_NR; i++) {
69961 copy_user_highpage(pages[i], page + i,
69962 - haddr + PAGE_SHIFT*i, vma);
69963 + haddr + PAGE_SIZE*i, vma);
69964 __SetPageUptodate(pages[i]);
69965 cond_resched();
69966 }
69967 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69968 index dae27ba..e8d42be 100644
69969 --- a/mm/hugetlb.c
69970 +++ b/mm/hugetlb.c
69971 @@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69972 return 1;
69973 }
69974
69975 +#ifdef CONFIG_PAX_SEGMEXEC
69976 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69977 +{
69978 + struct mm_struct *mm = vma->vm_mm;
69979 + struct vm_area_struct *vma_m;
69980 + unsigned long address_m;
69981 + pte_t *ptep_m;
69982 +
69983 + vma_m = pax_find_mirror_vma(vma);
69984 + if (!vma_m)
69985 + return;
69986 +
69987 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69988 + address_m = address + SEGMEXEC_TASK_SIZE;
69989 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69990 + get_page(page_m);
69991 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69992 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69993 +}
69994 +#endif
69995 +
69996 /*
69997 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69998 */
69999 @@ -2447,6 +2468,11 @@ retry_avoidcopy:
70000 make_huge_pte(vma, new_page, 1));
70001 page_remove_rmap(old_page);
70002 hugepage_add_new_anon_rmap(new_page, vma, address);
70003 +
70004 +#ifdef CONFIG_PAX_SEGMEXEC
70005 + pax_mirror_huge_pte(vma, address, new_page);
70006 +#endif
70007 +
70008 /* Make the old page be freed below */
70009 new_page = old_page;
70010 mmu_notifier_invalidate_range_end(mm,
70011 @@ -2598,6 +2624,10 @@ retry:
70012 && (vma->vm_flags & VM_SHARED)));
70013 set_huge_pte_at(mm, address, ptep, new_pte);
70014
70015 +#ifdef CONFIG_PAX_SEGMEXEC
70016 + pax_mirror_huge_pte(vma, address, page);
70017 +#endif
70018 +
70019 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70020 /* Optimization, do the COW without a second fault */
70021 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70022 @@ -2627,6 +2657,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70023 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70024 struct hstate *h = hstate_vma(vma);
70025
70026 +#ifdef CONFIG_PAX_SEGMEXEC
70027 + struct vm_area_struct *vma_m;
70028 +#endif
70029 +
70030 ptep = huge_pte_offset(mm, address);
70031 if (ptep) {
70032 entry = huge_ptep_get(ptep);
70033 @@ -2638,6 +2672,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70034 VM_FAULT_SET_HINDEX(h - hstates);
70035 }
70036
70037 +#ifdef CONFIG_PAX_SEGMEXEC
70038 + vma_m = pax_find_mirror_vma(vma);
70039 + if (vma_m) {
70040 + unsigned long address_m;
70041 +
70042 + if (vma->vm_start > vma_m->vm_start) {
70043 + address_m = address;
70044 + address -= SEGMEXEC_TASK_SIZE;
70045 + vma = vma_m;
70046 + h = hstate_vma(vma);
70047 + } else
70048 + address_m = address + SEGMEXEC_TASK_SIZE;
70049 +
70050 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70051 + return VM_FAULT_OOM;
70052 + address_m &= HPAGE_MASK;
70053 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70054 + }
70055 +#endif
70056 +
70057 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70058 if (!ptep)
70059 return VM_FAULT_OOM;
70060 diff --git a/mm/internal.h b/mm/internal.h
70061 index 2189af4..f2ca332 100644
70062 --- a/mm/internal.h
70063 +++ b/mm/internal.h
70064 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70065 * in mm/page_alloc.c
70066 */
70067 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70068 +extern void free_compound_page(struct page *page);
70069 extern void prep_compound_page(struct page *page, unsigned long order);
70070 #ifdef CONFIG_MEMORY_FAILURE
70071 extern bool is_free_buddy_page(struct page *page);
70072 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70073 index d6880f5..ed77913 100644
70074 --- a/mm/kmemleak.c
70075 +++ b/mm/kmemleak.c
70076 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
70077
70078 for (i = 0; i < object->trace_len; i++) {
70079 void *ptr = (void *)object->trace[i];
70080 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70081 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70082 }
70083 }
70084
70085 diff --git a/mm/maccess.c b/mm/maccess.c
70086 index 4cee182..e00511d 100644
70087 --- a/mm/maccess.c
70088 +++ b/mm/maccess.c
70089 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70090 set_fs(KERNEL_DS);
70091 pagefault_disable();
70092 ret = __copy_from_user_inatomic(dst,
70093 - (__force const void __user *)src, size);
70094 + (const void __force_user *)src, size);
70095 pagefault_enable();
70096 set_fs(old_fs);
70097
70098 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70099
70100 set_fs(KERNEL_DS);
70101 pagefault_disable();
70102 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70103 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70104 pagefault_enable();
70105 set_fs(old_fs);
70106
70107 diff --git a/mm/madvise.c b/mm/madvise.c
70108 index 74bf193..feb6fd3 100644
70109 --- a/mm/madvise.c
70110 +++ b/mm/madvise.c
70111 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70112 pgoff_t pgoff;
70113 unsigned long new_flags = vma->vm_flags;
70114
70115 +#ifdef CONFIG_PAX_SEGMEXEC
70116 + struct vm_area_struct *vma_m;
70117 +#endif
70118 +
70119 switch (behavior) {
70120 case MADV_NORMAL:
70121 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70122 @@ -110,6 +114,13 @@ success:
70123 /*
70124 * vm_flags is protected by the mmap_sem held in write mode.
70125 */
70126 +
70127 +#ifdef CONFIG_PAX_SEGMEXEC
70128 + vma_m = pax_find_mirror_vma(vma);
70129 + if (vma_m)
70130 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70131 +#endif
70132 +
70133 vma->vm_flags = new_flags;
70134
70135 out:
70136 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70137 struct vm_area_struct ** prev,
70138 unsigned long start, unsigned long end)
70139 {
70140 +
70141 +#ifdef CONFIG_PAX_SEGMEXEC
70142 + struct vm_area_struct *vma_m;
70143 +#endif
70144 +
70145 *prev = vma;
70146 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70147 return -EINVAL;
70148 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70149 zap_page_range(vma, start, end - start, &details);
70150 } else
70151 zap_page_range(vma, start, end - start, NULL);
70152 +
70153 +#ifdef CONFIG_PAX_SEGMEXEC
70154 + vma_m = pax_find_mirror_vma(vma);
70155 + if (vma_m) {
70156 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70157 + struct zap_details details = {
70158 + .nonlinear_vma = vma_m,
70159 + .last_index = ULONG_MAX,
70160 + };
70161 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70162 + } else
70163 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70164 + }
70165 +#endif
70166 +
70167 return 0;
70168 }
70169
70170 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70171 if (end < start)
70172 goto out;
70173
70174 +#ifdef CONFIG_PAX_SEGMEXEC
70175 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70176 + if (end > SEGMEXEC_TASK_SIZE)
70177 + goto out;
70178 + } else
70179 +#endif
70180 +
70181 + if (end > TASK_SIZE)
70182 + goto out;
70183 +
70184 error = 0;
70185 if (end == start)
70186 goto out;
70187 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70188 index 2b43ba0..fc09657 100644
70189 --- a/mm/memory-failure.c
70190 +++ b/mm/memory-failure.c
70191 @@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70192
70193 int sysctl_memory_failure_recovery __read_mostly = 1;
70194
70195 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70196 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70197
70198 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70199
70200 @@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70201 si.si_signo = SIGBUS;
70202 si.si_errno = 0;
70203 si.si_code = BUS_MCEERR_AO;
70204 - si.si_addr = (void *)addr;
70205 + si.si_addr = (void __user *)addr;
70206 #ifdef __ARCH_SI_TRAPNO
70207 si.si_trapno = trapno;
70208 #endif
70209 @@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70210 }
70211
70212 nr_pages = 1 << compound_trans_order(hpage);
70213 - atomic_long_add(nr_pages, &mce_bad_pages);
70214 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70215
70216 /*
70217 * We need/can do nothing about count=0 pages.
70218 @@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70219 if (!PageHWPoison(hpage)
70220 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70221 || (p != hpage && TestSetPageHWPoison(hpage))) {
70222 - atomic_long_sub(nr_pages, &mce_bad_pages);
70223 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70224 return 0;
70225 }
70226 set_page_hwpoison_huge_page(hpage);
70227 @@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70228 }
70229 if (hwpoison_filter(p)) {
70230 if (TestClearPageHWPoison(p))
70231 - atomic_long_sub(nr_pages, &mce_bad_pages);
70232 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70233 unlock_page(hpage);
70234 put_page(hpage);
70235 return 0;
70236 @@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
70237 return 0;
70238 }
70239 if (TestClearPageHWPoison(p))
70240 - atomic_long_sub(nr_pages, &mce_bad_pages);
70241 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70242 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70243 return 0;
70244 }
70245 @@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
70246 */
70247 if (TestClearPageHWPoison(page)) {
70248 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70249 - atomic_long_sub(nr_pages, &mce_bad_pages);
70250 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70251 freeit = 1;
70252 if (PageHuge(page))
70253 clear_page_hwpoison_huge_page(page);
70254 @@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70255 }
70256 done:
70257 if (!PageHWPoison(hpage))
70258 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70259 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70260 set_page_hwpoison_huge_page(hpage);
70261 dequeue_hwpoisoned_huge_page(hpage);
70262 /* keep elevated page count for bad page */
70263 @@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page, int flags)
70264 return ret;
70265
70266 done:
70267 - atomic_long_add(1, &mce_bad_pages);
70268 + atomic_long_add_unchecked(1, &mce_bad_pages);
70269 SetPageHWPoison(page);
70270 /* keep elevated page count for bad page */
70271 return ret;
70272 diff --git a/mm/memory.c b/mm/memory.c
70273 index b2b8731..6080174 100644
70274 --- a/mm/memory.c
70275 +++ b/mm/memory.c
70276 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70277 return;
70278
70279 pmd = pmd_offset(pud, start);
70280 +
70281 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70282 pud_clear(pud);
70283 pmd_free_tlb(tlb, pmd, start);
70284 +#endif
70285 +
70286 }
70287
70288 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70289 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70290 if (end - 1 > ceiling - 1)
70291 return;
70292
70293 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70294 pud = pud_offset(pgd, start);
70295 pgd_clear(pgd);
70296 pud_free_tlb(tlb, pud, start);
70297 +#endif
70298 +
70299 }
70300
70301 /*
70302 @@ -1566,12 +1573,6 @@ no_page_table:
70303 return page;
70304 }
70305
70306 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70307 -{
70308 - return stack_guard_page_start(vma, addr) ||
70309 - stack_guard_page_end(vma, addr+PAGE_SIZE);
70310 -}
70311 -
70312 /**
70313 * __get_user_pages() - pin user pages in memory
70314 * @tsk: task_struct of target task
70315 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70316 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70317 i = 0;
70318
70319 - do {
70320 + while (nr_pages) {
70321 struct vm_area_struct *vma;
70322
70323 - vma = find_extend_vma(mm, start);
70324 + vma = find_vma(mm, start);
70325 if (!vma && in_gate_area(mm, start)) {
70326 unsigned long pg = start & PAGE_MASK;
70327 pgd_t *pgd;
70328 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70329 goto next_page;
70330 }
70331
70332 - if (!vma ||
70333 + if (!vma || start < vma->vm_start ||
70334 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70335 !(vm_flags & vma->vm_flags))
70336 return i ? : -EFAULT;
70337 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70338 int ret;
70339 unsigned int fault_flags = 0;
70340
70341 - /* For mlock, just skip the stack guard page. */
70342 - if (foll_flags & FOLL_MLOCK) {
70343 - if (stack_guard_page(vma, start))
70344 - goto next_page;
70345 - }
70346 if (foll_flags & FOLL_WRITE)
70347 fault_flags |= FAULT_FLAG_WRITE;
70348 if (nonblocking)
70349 @@ -1800,7 +1796,7 @@ next_page:
70350 start += PAGE_SIZE;
70351 nr_pages--;
70352 } while (nr_pages && start < vma->vm_end);
70353 - } while (nr_pages);
70354 + }
70355 return i;
70356 }
70357 EXPORT_SYMBOL(__get_user_pages);
70358 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70359 page_add_file_rmap(page);
70360 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70361
70362 +#ifdef CONFIG_PAX_SEGMEXEC
70363 + pax_mirror_file_pte(vma, addr, page, ptl);
70364 +#endif
70365 +
70366 retval = 0;
70367 pte_unmap_unlock(pte, ptl);
70368 return retval;
70369 @@ -2041,10 +2041,22 @@ out:
70370 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70371 struct page *page)
70372 {
70373 +
70374 +#ifdef CONFIG_PAX_SEGMEXEC
70375 + struct vm_area_struct *vma_m;
70376 +#endif
70377 +
70378 if (addr < vma->vm_start || addr >= vma->vm_end)
70379 return -EFAULT;
70380 if (!page_count(page))
70381 return -EINVAL;
70382 +
70383 +#ifdef CONFIG_PAX_SEGMEXEC
70384 + vma_m = pax_find_mirror_vma(vma);
70385 + if (vma_m)
70386 + vma_m->vm_flags |= VM_INSERTPAGE;
70387 +#endif
70388 +
70389 vma->vm_flags |= VM_INSERTPAGE;
70390 return insert_page(vma, addr, page, vma->vm_page_prot);
70391 }
70392 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70393 unsigned long pfn)
70394 {
70395 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70396 + BUG_ON(vma->vm_mirror);
70397
70398 if (addr < vma->vm_start || addr >= vma->vm_end)
70399 return -EFAULT;
70400 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70401 copy_user_highpage(dst, src, va, vma);
70402 }
70403
70404 +#ifdef CONFIG_PAX_SEGMEXEC
70405 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70406 +{
70407 + struct mm_struct *mm = vma->vm_mm;
70408 + spinlock_t *ptl;
70409 + pte_t *pte, entry;
70410 +
70411 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70412 + entry = *pte;
70413 + if (!pte_present(entry)) {
70414 + if (!pte_none(entry)) {
70415 + BUG_ON(pte_file(entry));
70416 + free_swap_and_cache(pte_to_swp_entry(entry));
70417 + pte_clear_not_present_full(mm, address, pte, 0);
70418 + }
70419 + } else {
70420 + struct page *page;
70421 +
70422 + flush_cache_page(vma, address, pte_pfn(entry));
70423 + entry = ptep_clear_flush(vma, address, pte);
70424 + BUG_ON(pte_dirty(entry));
70425 + page = vm_normal_page(vma, address, entry);
70426 + if (page) {
70427 + update_hiwater_rss(mm);
70428 + if (PageAnon(page))
70429 + dec_mm_counter_fast(mm, MM_ANONPAGES);
70430 + else
70431 + dec_mm_counter_fast(mm, MM_FILEPAGES);
70432 + page_remove_rmap(page);
70433 + page_cache_release(page);
70434 + }
70435 + }
70436 + pte_unmap_unlock(pte, ptl);
70437 +}
70438 +
70439 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
70440 + *
70441 + * the ptl of the lower mapped page is held on entry and is not released on exit
70442 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70443 + */
70444 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70445 +{
70446 + struct mm_struct *mm = vma->vm_mm;
70447 + unsigned long address_m;
70448 + spinlock_t *ptl_m;
70449 + struct vm_area_struct *vma_m;
70450 + pmd_t *pmd_m;
70451 + pte_t *pte_m, entry_m;
70452 +
70453 + BUG_ON(!page_m || !PageAnon(page_m));
70454 +
70455 + vma_m = pax_find_mirror_vma(vma);
70456 + if (!vma_m)
70457 + return;
70458 +
70459 + BUG_ON(!PageLocked(page_m));
70460 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70461 + address_m = address + SEGMEXEC_TASK_SIZE;
70462 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70463 + pte_m = pte_offset_map(pmd_m, address_m);
70464 + ptl_m = pte_lockptr(mm, pmd_m);
70465 + if (ptl != ptl_m) {
70466 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70467 + if (!pte_none(*pte_m))
70468 + goto out;
70469 + }
70470 +
70471 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70472 + page_cache_get(page_m);
70473 + page_add_anon_rmap(page_m, vma_m, address_m);
70474 + inc_mm_counter_fast(mm, MM_ANONPAGES);
70475 + set_pte_at(mm, address_m, pte_m, entry_m);
70476 + update_mmu_cache(vma_m, address_m, entry_m);
70477 +out:
70478 + if (ptl != ptl_m)
70479 + spin_unlock(ptl_m);
70480 + pte_unmap(pte_m);
70481 + unlock_page(page_m);
70482 +}
70483 +
70484 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70485 +{
70486 + struct mm_struct *mm = vma->vm_mm;
70487 + unsigned long address_m;
70488 + spinlock_t *ptl_m;
70489 + struct vm_area_struct *vma_m;
70490 + pmd_t *pmd_m;
70491 + pte_t *pte_m, entry_m;
70492 +
70493 + BUG_ON(!page_m || PageAnon(page_m));
70494 +
70495 + vma_m = pax_find_mirror_vma(vma);
70496 + if (!vma_m)
70497 + return;
70498 +
70499 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70500 + address_m = address + SEGMEXEC_TASK_SIZE;
70501 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70502 + pte_m = pte_offset_map(pmd_m, address_m);
70503 + ptl_m = pte_lockptr(mm, pmd_m);
70504 + if (ptl != ptl_m) {
70505 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70506 + if (!pte_none(*pte_m))
70507 + goto out;
70508 + }
70509 +
70510 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70511 + page_cache_get(page_m);
70512 + page_add_file_rmap(page_m);
70513 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70514 + set_pte_at(mm, address_m, pte_m, entry_m);
70515 + update_mmu_cache(vma_m, address_m, entry_m);
70516 +out:
70517 + if (ptl != ptl_m)
70518 + spin_unlock(ptl_m);
70519 + pte_unmap(pte_m);
70520 +}
70521 +
70522 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70523 +{
70524 + struct mm_struct *mm = vma->vm_mm;
70525 + unsigned long address_m;
70526 + spinlock_t *ptl_m;
70527 + struct vm_area_struct *vma_m;
70528 + pmd_t *pmd_m;
70529 + pte_t *pte_m, entry_m;
70530 +
70531 + vma_m = pax_find_mirror_vma(vma);
70532 + if (!vma_m)
70533 + return;
70534 +
70535 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70536 + address_m = address + SEGMEXEC_TASK_SIZE;
70537 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70538 + pte_m = pte_offset_map(pmd_m, address_m);
70539 + ptl_m = pte_lockptr(mm, pmd_m);
70540 + if (ptl != ptl_m) {
70541 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70542 + if (!pte_none(*pte_m))
70543 + goto out;
70544 + }
70545 +
70546 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70547 + set_pte_at(mm, address_m, pte_m, entry_m);
70548 +out:
70549 + if (ptl != ptl_m)
70550 + spin_unlock(ptl_m);
70551 + pte_unmap(pte_m);
70552 +}
70553 +
70554 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70555 +{
70556 + struct page *page_m;
70557 + pte_t entry;
70558 +
70559 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70560 + goto out;
70561 +
70562 + entry = *pte;
70563 + page_m = vm_normal_page(vma, address, entry);
70564 + if (!page_m)
70565 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70566 + else if (PageAnon(page_m)) {
70567 + if (pax_find_mirror_vma(vma)) {
70568 + pte_unmap_unlock(pte, ptl);
70569 + lock_page(page_m);
70570 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70571 + if (pte_same(entry, *pte))
70572 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70573 + else
70574 + unlock_page(page_m);
70575 + }
70576 + } else
70577 + pax_mirror_file_pte(vma, address, page_m, ptl);
70578 +
70579 +out:
70580 + pte_unmap_unlock(pte, ptl);
70581 +}
70582 +#endif
70583 +
70584 /*
70585 * This routine handles present pages, when users try to write
70586 * to a shared page. It is done by copying the page to a new address
70587 @@ -2656,6 +2849,12 @@ gotten:
70588 */
70589 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70590 if (likely(pte_same(*page_table, orig_pte))) {
70591 +
70592 +#ifdef CONFIG_PAX_SEGMEXEC
70593 + if (pax_find_mirror_vma(vma))
70594 + BUG_ON(!trylock_page(new_page));
70595 +#endif
70596 +
70597 if (old_page) {
70598 if (!PageAnon(old_page)) {
70599 dec_mm_counter_fast(mm, MM_FILEPAGES);
70600 @@ -2707,6 +2906,10 @@ gotten:
70601 page_remove_rmap(old_page);
70602 }
70603
70604 +#ifdef CONFIG_PAX_SEGMEXEC
70605 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70606 +#endif
70607 +
70608 /* Free the old page.. */
70609 new_page = old_page;
70610 ret |= VM_FAULT_WRITE;
70611 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70612 swap_free(entry);
70613 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70614 try_to_free_swap(page);
70615 +
70616 +#ifdef CONFIG_PAX_SEGMEXEC
70617 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70618 +#endif
70619 +
70620 unlock_page(page);
70621 if (swapcache) {
70622 /*
70623 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70624
70625 /* No need to invalidate - it was non-present before */
70626 update_mmu_cache(vma, address, page_table);
70627 +
70628 +#ifdef CONFIG_PAX_SEGMEXEC
70629 + pax_mirror_anon_pte(vma, address, page, ptl);
70630 +#endif
70631 +
70632 unlock:
70633 pte_unmap_unlock(page_table, ptl);
70634 out:
70635 @@ -3028,40 +3241,6 @@ out_release:
70636 }
70637
70638 /*
70639 - * This is like a special single-page "expand_{down|up}wards()",
70640 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70641 - * doesn't hit another vma.
70642 - */
70643 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70644 -{
70645 - address &= PAGE_MASK;
70646 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70647 - struct vm_area_struct *prev = vma->vm_prev;
70648 -
70649 - /*
70650 - * Is there a mapping abutting this one below?
70651 - *
70652 - * That's only ok if it's the same stack mapping
70653 - * that has gotten split..
70654 - */
70655 - if (prev && prev->vm_end == address)
70656 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70657 -
70658 - expand_downwards(vma, address - PAGE_SIZE);
70659 - }
70660 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70661 - struct vm_area_struct *next = vma->vm_next;
70662 -
70663 - /* As VM_GROWSDOWN but s/below/above/ */
70664 - if (next && next->vm_start == address + PAGE_SIZE)
70665 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70666 -
70667 - expand_upwards(vma, address + PAGE_SIZE);
70668 - }
70669 - return 0;
70670 -}
70671 -
70672 -/*
70673 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70674 * but allow concurrent faults), and pte mapped but not yet locked.
70675 * We return with mmap_sem still held, but pte unmapped and unlocked.
70676 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70677 unsigned long address, pte_t *page_table, pmd_t *pmd,
70678 unsigned int flags)
70679 {
70680 - struct page *page;
70681 + struct page *page = NULL;
70682 spinlock_t *ptl;
70683 pte_t entry;
70684
70685 - pte_unmap(page_table);
70686 -
70687 - /* Check if we need to add a guard page to the stack */
70688 - if (check_stack_guard_page(vma, address) < 0)
70689 - return VM_FAULT_SIGBUS;
70690 -
70691 - /* Use the zero-page for reads */
70692 if (!(flags & FAULT_FLAG_WRITE)) {
70693 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70694 vma->vm_page_prot));
70695 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70696 + ptl = pte_lockptr(mm, pmd);
70697 + spin_lock(ptl);
70698 if (!pte_none(*page_table))
70699 goto unlock;
70700 goto setpte;
70701 }
70702
70703 /* Allocate our own private page. */
70704 + pte_unmap(page_table);
70705 +
70706 if (unlikely(anon_vma_prepare(vma)))
70707 goto oom;
70708 page = alloc_zeroed_user_highpage_movable(vma, address);
70709 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70710 if (!pte_none(*page_table))
70711 goto release;
70712
70713 +#ifdef CONFIG_PAX_SEGMEXEC
70714 + if (pax_find_mirror_vma(vma))
70715 + BUG_ON(!trylock_page(page));
70716 +#endif
70717 +
70718 inc_mm_counter_fast(mm, MM_ANONPAGES);
70719 page_add_new_anon_rmap(page, vma, address);
70720 setpte:
70721 @@ -3116,6 +3296,12 @@ setpte:
70722
70723 /* No need to invalidate - it was non-present before */
70724 update_mmu_cache(vma, address, page_table);
70725 +
70726 +#ifdef CONFIG_PAX_SEGMEXEC
70727 + if (page)
70728 + pax_mirror_anon_pte(vma, address, page, ptl);
70729 +#endif
70730 +
70731 unlock:
70732 pte_unmap_unlock(page_table, ptl);
70733 return 0;
70734 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70735 */
70736 /* Only go through if we didn't race with anybody else... */
70737 if (likely(pte_same(*page_table, orig_pte))) {
70738 +
70739 +#ifdef CONFIG_PAX_SEGMEXEC
70740 + if (anon && pax_find_mirror_vma(vma))
70741 + BUG_ON(!trylock_page(page));
70742 +#endif
70743 +
70744 flush_icache_page(vma, page);
70745 entry = mk_pte(page, vma->vm_page_prot);
70746 if (flags & FAULT_FLAG_WRITE)
70747 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70748
70749 /* no need to invalidate: a not-present page won't be cached */
70750 update_mmu_cache(vma, address, page_table);
70751 +
70752 +#ifdef CONFIG_PAX_SEGMEXEC
70753 + if (anon)
70754 + pax_mirror_anon_pte(vma, address, page, ptl);
70755 + else
70756 + pax_mirror_file_pte(vma, address, page, ptl);
70757 +#endif
70758 +
70759 } else {
70760 if (cow_page)
70761 mem_cgroup_uncharge_page(cow_page);
70762 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
70763 if (flags & FAULT_FLAG_WRITE)
70764 flush_tlb_fix_spurious_fault(vma, address);
70765 }
70766 +
70767 +#ifdef CONFIG_PAX_SEGMEXEC
70768 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70769 + return 0;
70770 +#endif
70771 +
70772 unlock:
70773 pte_unmap_unlock(pte, ptl);
70774 return 0;
70775 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70776 pmd_t *pmd;
70777 pte_t *pte;
70778
70779 +#ifdef CONFIG_PAX_SEGMEXEC
70780 + struct vm_area_struct *vma_m;
70781 +#endif
70782 +
70783 __set_current_state(TASK_RUNNING);
70784
70785 count_vm_event(PGFAULT);
70786 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70787 if (unlikely(is_vm_hugetlb_page(vma)))
70788 return hugetlb_fault(mm, vma, address, flags);
70789
70790 +#ifdef CONFIG_PAX_SEGMEXEC
70791 + vma_m = pax_find_mirror_vma(vma);
70792 + if (vma_m) {
70793 + unsigned long address_m;
70794 + pgd_t *pgd_m;
70795 + pud_t *pud_m;
70796 + pmd_t *pmd_m;
70797 +
70798 + if (vma->vm_start > vma_m->vm_start) {
70799 + address_m = address;
70800 + address -= SEGMEXEC_TASK_SIZE;
70801 + vma = vma_m;
70802 + } else
70803 + address_m = address + SEGMEXEC_TASK_SIZE;
70804 +
70805 + pgd_m = pgd_offset(mm, address_m);
70806 + pud_m = pud_alloc(mm, pgd_m, address_m);
70807 + if (!pud_m)
70808 + return VM_FAULT_OOM;
70809 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70810 + if (!pmd_m)
70811 + return VM_FAULT_OOM;
70812 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70813 + return VM_FAULT_OOM;
70814 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70815 + }
70816 +#endif
70817 +
70818 pgd = pgd_offset(mm, address);
70819 pud = pud_alloc(mm, pgd, address);
70820 if (!pud)
70821 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70822 * run pte_offset_map on the pmd, if an huge pmd could
70823 * materialize from under us from a different thread.
70824 */
70825 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70826 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70827 return VM_FAULT_OOM;
70828 /* if an huge pmd materialized from under us just retry later */
70829 if (unlikely(pmd_trans_huge(*pmd)))
70830 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
70831 gate_vma.vm_start = FIXADDR_USER_START;
70832 gate_vma.vm_end = FIXADDR_USER_END;
70833 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70834 - gate_vma.vm_page_prot = __P101;
70835 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70836 /*
70837 * Make sure the vDSO gets into every core dump.
70838 * Dumping its contents makes post-mortem fully interpretable later
70839 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70840 index 9c51f9f..a9416cf 100644
70841 --- a/mm/mempolicy.c
70842 +++ b/mm/mempolicy.c
70843 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70844 unsigned long vmstart;
70845 unsigned long vmend;
70846
70847 +#ifdef CONFIG_PAX_SEGMEXEC
70848 + struct vm_area_struct *vma_m;
70849 +#endif
70850 +
70851 vma = find_vma_prev(mm, start, &prev);
70852 if (!vma || vma->vm_start > start)
70853 return -EFAULT;
70854 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70855 err = policy_vma(vma, new_pol);
70856 if (err)
70857 goto out;
70858 +
70859 +#ifdef CONFIG_PAX_SEGMEXEC
70860 + vma_m = pax_find_mirror_vma(vma);
70861 + if (vma_m) {
70862 + err = policy_vma(vma_m, new_pol);
70863 + if (err)
70864 + goto out;
70865 + }
70866 +#endif
70867 +
70868 }
70869
70870 out:
70871 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70872
70873 if (end < start)
70874 return -EINVAL;
70875 +
70876 +#ifdef CONFIG_PAX_SEGMEXEC
70877 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70878 + if (end > SEGMEXEC_TASK_SIZE)
70879 + return -EINVAL;
70880 + } else
70881 +#endif
70882 +
70883 + if (end > TASK_SIZE)
70884 + return -EINVAL;
70885 +
70886 if (end == start)
70887 return 0;
70888
70889 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70890 if (!mm)
70891 goto out;
70892
70893 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70894 + if (mm != current->mm &&
70895 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70896 + err = -EPERM;
70897 + goto out;
70898 + }
70899 +#endif
70900 +
70901 /*
70902 * Check if this process has the right to modify the specified
70903 * process. The right exists if the process has administrative
70904 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70905 rcu_read_lock();
70906 tcred = __task_cred(task);
70907 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70908 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70909 - !capable(CAP_SYS_NICE)) {
70910 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70911 rcu_read_unlock();
70912 err = -EPERM;
70913 goto out;
70914 diff --git a/mm/migrate.c b/mm/migrate.c
70915 index 14d0a6a..81ffe69 100644
70916 --- a/mm/migrate.c
70917 +++ b/mm/migrate.c
70918 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
70919 unsigned long chunk_start;
70920 int err;
70921
70922 + pax_track_stack();
70923 +
70924 task_nodes = cpuset_mems_allowed(task);
70925
70926 err = -ENOMEM;
70927 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70928 if (!mm)
70929 return -EINVAL;
70930
70931 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70932 + if (mm != current->mm &&
70933 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70934 + err = -EPERM;
70935 + goto out;
70936 + }
70937 +#endif
70938 +
70939 /*
70940 * Check if this process has the right to modify the specified
70941 * process. The right exists if the process has administrative
70942 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70943 rcu_read_lock();
70944 tcred = __task_cred(task);
70945 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70946 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70947 - !capable(CAP_SYS_NICE)) {
70948 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70949 rcu_read_unlock();
70950 err = -EPERM;
70951 goto out;
70952 diff --git a/mm/mlock.c b/mm/mlock.c
70953 index 048260c..57f4a4e 100644
70954 --- a/mm/mlock.c
70955 +++ b/mm/mlock.c
70956 @@ -13,6 +13,7 @@
70957 #include <linux/pagemap.h>
70958 #include <linux/mempolicy.h>
70959 #include <linux/syscalls.h>
70960 +#include <linux/security.h>
70961 #include <linux/sched.h>
70962 #include <linux/module.h>
70963 #include <linux/rmap.h>
70964 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70965 return -EINVAL;
70966 if (end == start)
70967 return 0;
70968 + if (end > TASK_SIZE)
70969 + return -EINVAL;
70970 +
70971 vma = find_vma_prev(current->mm, start, &prev);
70972 if (!vma || vma->vm_start > start)
70973 return -ENOMEM;
70974 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70975 for (nstart = start ; ; ) {
70976 vm_flags_t newflags;
70977
70978 +#ifdef CONFIG_PAX_SEGMEXEC
70979 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70980 + break;
70981 +#endif
70982 +
70983 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70984
70985 newflags = vma->vm_flags | VM_LOCKED;
70986 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70987 lock_limit >>= PAGE_SHIFT;
70988
70989 /* check against resource limits */
70990 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70991 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70992 error = do_mlock(start, len, 1);
70993 up_write(&current->mm->mmap_sem);
70994 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70995 static int do_mlockall(int flags)
70996 {
70997 struct vm_area_struct * vma, * prev = NULL;
70998 - unsigned int def_flags = 0;
70999
71000 if (flags & MCL_FUTURE)
71001 - def_flags = VM_LOCKED;
71002 - current->mm->def_flags = def_flags;
71003 + current->mm->def_flags |= VM_LOCKED;
71004 + else
71005 + current->mm->def_flags &= ~VM_LOCKED;
71006 if (flags == MCL_FUTURE)
71007 goto out;
71008
71009 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71010 vm_flags_t newflags;
71011
71012 +#ifdef CONFIG_PAX_SEGMEXEC
71013 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71014 + break;
71015 +#endif
71016 +
71017 + BUG_ON(vma->vm_end > TASK_SIZE);
71018 newflags = vma->vm_flags | VM_LOCKED;
71019 if (!(flags & MCL_CURRENT))
71020 newflags &= ~VM_LOCKED;
71021 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71022 lock_limit >>= PAGE_SHIFT;
71023
71024 ret = -ENOMEM;
71025 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71026 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71027 capable(CAP_IPC_LOCK))
71028 ret = do_mlockall(flags);
71029 diff --git a/mm/mmap.c b/mm/mmap.c
71030 index a65efd4..17d61ff 100644
71031 --- a/mm/mmap.c
71032 +++ b/mm/mmap.c
71033 @@ -46,6 +46,16 @@
71034 #define arch_rebalance_pgtables(addr, len) (addr)
71035 #endif
71036
71037 +static inline void verify_mm_writelocked(struct mm_struct *mm)
71038 +{
71039 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71040 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71041 + up_read(&mm->mmap_sem);
71042 + BUG();
71043 + }
71044 +#endif
71045 +}
71046 +
71047 static void unmap_region(struct mm_struct *mm,
71048 struct vm_area_struct *vma, struct vm_area_struct *prev,
71049 unsigned long start, unsigned long end);
71050 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71051 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71052 *
71053 */
71054 -pgprot_t protection_map[16] = {
71055 +pgprot_t protection_map[16] __read_only = {
71056 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71057 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71058 };
71059
71060 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
71061 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71062 {
71063 - return __pgprot(pgprot_val(protection_map[vm_flags &
71064 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71065 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71066 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71067 +
71068 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71069 + if (!(__supported_pte_mask & _PAGE_NX) &&
71070 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71071 + (vm_flags & (VM_READ | VM_WRITE)))
71072 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71073 +#endif
71074 +
71075 + return prot;
71076 }
71077 EXPORT_SYMBOL(vm_get_page_prot);
71078
71079 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71080 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71081 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71082 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71083 /*
71084 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71085 * other variables. It can be updated by several CPUs frequently.
71086 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71087 struct vm_area_struct *next = vma->vm_next;
71088
71089 might_sleep();
71090 + BUG_ON(vma->vm_mirror);
71091 if (vma->vm_ops && vma->vm_ops->close)
71092 vma->vm_ops->close(vma);
71093 if (vma->vm_file) {
71094 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71095 * not page aligned -Ram Gupta
71096 */
71097 rlim = rlimit(RLIMIT_DATA);
71098 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71099 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71100 (mm->end_data - mm->start_data) > rlim)
71101 goto out;
71102 @@ -689,6 +711,12 @@ static int
71103 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71104 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71105 {
71106 +
71107 +#ifdef CONFIG_PAX_SEGMEXEC
71108 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71109 + return 0;
71110 +#endif
71111 +
71112 if (is_mergeable_vma(vma, file, vm_flags) &&
71113 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71114 if (vma->vm_pgoff == vm_pgoff)
71115 @@ -708,6 +736,12 @@ static int
71116 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71117 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71118 {
71119 +
71120 +#ifdef CONFIG_PAX_SEGMEXEC
71121 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71122 + return 0;
71123 +#endif
71124 +
71125 if (is_mergeable_vma(vma, file, vm_flags) &&
71126 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71127 pgoff_t vm_pglen;
71128 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71129 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71130 struct vm_area_struct *prev, unsigned long addr,
71131 unsigned long end, unsigned long vm_flags,
71132 - struct anon_vma *anon_vma, struct file *file,
71133 + struct anon_vma *anon_vma, struct file *file,
71134 pgoff_t pgoff, struct mempolicy *policy)
71135 {
71136 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71137 struct vm_area_struct *area, *next;
71138 int err;
71139
71140 +#ifdef CONFIG_PAX_SEGMEXEC
71141 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71142 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71143 +
71144 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71145 +#endif
71146 +
71147 /*
71148 * We later require that vma->vm_flags == vm_flags,
71149 * so this tests vma->vm_flags & VM_SPECIAL, too.
71150 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71151 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71152 next = next->vm_next;
71153
71154 +#ifdef CONFIG_PAX_SEGMEXEC
71155 + if (prev)
71156 + prev_m = pax_find_mirror_vma(prev);
71157 + if (area)
71158 + area_m = pax_find_mirror_vma(area);
71159 + if (next)
71160 + next_m = pax_find_mirror_vma(next);
71161 +#endif
71162 +
71163 /*
71164 * Can it merge with the predecessor?
71165 */
71166 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71167 /* cases 1, 6 */
71168 err = vma_adjust(prev, prev->vm_start,
71169 next->vm_end, prev->vm_pgoff, NULL);
71170 - } else /* cases 2, 5, 7 */
71171 +
71172 +#ifdef CONFIG_PAX_SEGMEXEC
71173 + if (!err && prev_m)
71174 + err = vma_adjust(prev_m, prev_m->vm_start,
71175 + next_m->vm_end, prev_m->vm_pgoff, NULL);
71176 +#endif
71177 +
71178 + } else { /* cases 2, 5, 7 */
71179 err = vma_adjust(prev, prev->vm_start,
71180 end, prev->vm_pgoff, NULL);
71181 +
71182 +#ifdef CONFIG_PAX_SEGMEXEC
71183 + if (!err && prev_m)
71184 + err = vma_adjust(prev_m, prev_m->vm_start,
71185 + end_m, prev_m->vm_pgoff, NULL);
71186 +#endif
71187 +
71188 + }
71189 if (err)
71190 return NULL;
71191 khugepaged_enter_vma_merge(prev);
71192 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71193 mpol_equal(policy, vma_policy(next)) &&
71194 can_vma_merge_before(next, vm_flags,
71195 anon_vma, file, pgoff+pglen)) {
71196 - if (prev && addr < prev->vm_end) /* case 4 */
71197 + if (prev && addr < prev->vm_end) { /* case 4 */
71198 err = vma_adjust(prev, prev->vm_start,
71199 addr, prev->vm_pgoff, NULL);
71200 - else /* cases 3, 8 */
71201 +
71202 +#ifdef CONFIG_PAX_SEGMEXEC
71203 + if (!err && prev_m)
71204 + err = vma_adjust(prev_m, prev_m->vm_start,
71205 + addr_m, prev_m->vm_pgoff, NULL);
71206 +#endif
71207 +
71208 + } else { /* cases 3, 8 */
71209 err = vma_adjust(area, addr, next->vm_end,
71210 next->vm_pgoff - pglen, NULL);
71211 +
71212 +#ifdef CONFIG_PAX_SEGMEXEC
71213 + if (!err && area_m)
71214 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
71215 + next_m->vm_pgoff - pglen, NULL);
71216 +#endif
71217 +
71218 + }
71219 if (err)
71220 return NULL;
71221 khugepaged_enter_vma_merge(area);
71222 @@ -921,14 +1001,11 @@ none:
71223 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71224 struct file *file, long pages)
71225 {
71226 - const unsigned long stack_flags
71227 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71228 -
71229 if (file) {
71230 mm->shared_vm += pages;
71231 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71232 mm->exec_vm += pages;
71233 - } else if (flags & stack_flags)
71234 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71235 mm->stack_vm += pages;
71236 if (flags & (VM_RESERVED|VM_IO))
71237 mm->reserved_vm += pages;
71238 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71239 * (the exception is when the underlying filesystem is noexec
71240 * mounted, in which case we dont add PROT_EXEC.)
71241 */
71242 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71243 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71244 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71245 prot |= PROT_EXEC;
71246
71247 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71248 /* Obtain the address to map to. we verify (or select) it and ensure
71249 * that it represents a valid section of the address space.
71250 */
71251 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
71252 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71253 if (addr & ~PAGE_MASK)
71254 return addr;
71255
71256 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71257 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71258 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71259
71260 +#ifdef CONFIG_PAX_MPROTECT
71261 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71262 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71263 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71264 + gr_log_rwxmmap(file);
71265 +
71266 +#ifdef CONFIG_PAX_EMUPLT
71267 + vm_flags &= ~VM_EXEC;
71268 +#else
71269 + return -EPERM;
71270 +#endif
71271 +
71272 + }
71273 +
71274 + if (!(vm_flags & VM_EXEC))
71275 + vm_flags &= ~VM_MAYEXEC;
71276 +#else
71277 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71278 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71279 +#endif
71280 + else
71281 + vm_flags &= ~VM_MAYWRITE;
71282 + }
71283 +#endif
71284 +
71285 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71286 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71287 + vm_flags &= ~VM_PAGEEXEC;
71288 +#endif
71289 +
71290 if (flags & MAP_LOCKED)
71291 if (!can_do_mlock())
71292 return -EPERM;
71293 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71294 locked += mm->locked_vm;
71295 lock_limit = rlimit(RLIMIT_MEMLOCK);
71296 lock_limit >>= PAGE_SHIFT;
71297 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71298 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71299 return -EAGAIN;
71300 }
71301 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71302 if (error)
71303 return error;
71304
71305 + if (!gr_acl_handle_mmap(file, prot))
71306 + return -EACCES;
71307 +
71308 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71309 }
71310 EXPORT_SYMBOL(do_mmap_pgoff);
71311 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71312 vm_flags_t vm_flags = vma->vm_flags;
71313
71314 /* If it was private or non-writable, the write bit is already clear */
71315 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71316 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71317 return 0;
71318
71319 /* The backer wishes to know when pages are first written to? */
71320 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71321 unsigned long charged = 0;
71322 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71323
71324 +#ifdef CONFIG_PAX_SEGMEXEC
71325 + struct vm_area_struct *vma_m = NULL;
71326 +#endif
71327 +
71328 + /*
71329 + * mm->mmap_sem is required to protect against another thread
71330 + * changing the mappings in case we sleep.
71331 + */
71332 + verify_mm_writelocked(mm);
71333 +
71334 /* Clear old maps */
71335 error = -ENOMEM;
71336 -munmap_back:
71337 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71338 if (vma && vma->vm_start < addr + len) {
71339 if (do_munmap(mm, addr, len))
71340 return -ENOMEM;
71341 - goto munmap_back;
71342 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71343 + BUG_ON(vma && vma->vm_start < addr + len);
71344 }
71345
71346 /* Check against address space limit. */
71347 @@ -1258,6 +1379,16 @@ munmap_back:
71348 goto unacct_error;
71349 }
71350
71351 +#ifdef CONFIG_PAX_SEGMEXEC
71352 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71353 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71354 + if (!vma_m) {
71355 + error = -ENOMEM;
71356 + goto free_vma;
71357 + }
71358 + }
71359 +#endif
71360 +
71361 vma->vm_mm = mm;
71362 vma->vm_start = addr;
71363 vma->vm_end = addr + len;
71364 @@ -1281,6 +1412,19 @@ munmap_back:
71365 error = file->f_op->mmap(file, vma);
71366 if (error)
71367 goto unmap_and_free_vma;
71368 +
71369 +#ifdef CONFIG_PAX_SEGMEXEC
71370 + if (vma_m && (vm_flags & VM_EXECUTABLE))
71371 + added_exe_file_vma(mm);
71372 +#endif
71373 +
71374 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71375 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71376 + vma->vm_flags |= VM_PAGEEXEC;
71377 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71378 + }
71379 +#endif
71380 +
71381 if (vm_flags & VM_EXECUTABLE)
71382 added_exe_file_vma(mm);
71383
71384 @@ -1316,6 +1460,11 @@ munmap_back:
71385 vma_link(mm, vma, prev, rb_link, rb_parent);
71386 file = vma->vm_file;
71387
71388 +#ifdef CONFIG_PAX_SEGMEXEC
71389 + if (vma_m)
71390 + BUG_ON(pax_mirror_vma(vma_m, vma));
71391 +#endif
71392 +
71393 /* Once vma denies write, undo our temporary denial count */
71394 if (correct_wcount)
71395 atomic_inc(&inode->i_writecount);
71396 @@ -1324,6 +1473,7 @@ out:
71397
71398 mm->total_vm += len >> PAGE_SHIFT;
71399 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71400 + track_exec_limit(mm, addr, addr + len, vm_flags);
71401 if (vm_flags & VM_LOCKED) {
71402 if (!mlock_vma_pages_range(vma, addr, addr + len))
71403 mm->locked_vm += (len >> PAGE_SHIFT);
71404 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
71405 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71406 charged = 0;
71407 free_vma:
71408 +
71409 +#ifdef CONFIG_PAX_SEGMEXEC
71410 + if (vma_m)
71411 + kmem_cache_free(vm_area_cachep, vma_m);
71412 +#endif
71413 +
71414 kmem_cache_free(vm_area_cachep, vma);
71415 unacct_error:
71416 if (charged)
71417 @@ -1348,6 +1504,44 @@ unacct_error:
71418 return error;
71419 }
71420
71421 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71422 +{
71423 + if (!vma) {
71424 +#ifdef CONFIG_STACK_GROWSUP
71425 + if (addr > sysctl_heap_stack_gap)
71426 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71427 + else
71428 + vma = find_vma(current->mm, 0);
71429 + if (vma && (vma->vm_flags & VM_GROWSUP))
71430 + return false;
71431 +#endif
71432 + return true;
71433 + }
71434 +
71435 + if (addr + len > vma->vm_start)
71436 + return false;
71437 +
71438 + if (vma->vm_flags & VM_GROWSDOWN)
71439 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71440 +#ifdef CONFIG_STACK_GROWSUP
71441 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71442 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71443 +#endif
71444 +
71445 + return true;
71446 +}
71447 +
71448 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71449 +{
71450 + if (vma->vm_start < len)
71451 + return -ENOMEM;
71452 + if (!(vma->vm_flags & VM_GROWSDOWN))
71453 + return vma->vm_start - len;
71454 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
71455 + return vma->vm_start - len - sysctl_heap_stack_gap;
71456 + return -ENOMEM;
71457 +}
71458 +
71459 /* Get an address range which is currently unmapped.
71460 * For shmat() with addr=0.
71461 *
71462 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71463 if (flags & MAP_FIXED)
71464 return addr;
71465
71466 +#ifdef CONFIG_PAX_RANDMMAP
71467 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71468 +#endif
71469 +
71470 if (addr) {
71471 addr = PAGE_ALIGN(addr);
71472 - vma = find_vma(mm, addr);
71473 - if (TASK_SIZE - len >= addr &&
71474 - (!vma || addr + len <= vma->vm_start))
71475 - return addr;
71476 + if (TASK_SIZE - len >= addr) {
71477 + vma = find_vma(mm, addr);
71478 + if (check_heap_stack_gap(vma, addr, len))
71479 + return addr;
71480 + }
71481 }
71482 if (len > mm->cached_hole_size) {
71483 - start_addr = addr = mm->free_area_cache;
71484 + start_addr = addr = mm->free_area_cache;
71485 } else {
71486 - start_addr = addr = TASK_UNMAPPED_BASE;
71487 - mm->cached_hole_size = 0;
71488 + start_addr = addr = mm->mmap_base;
71489 + mm->cached_hole_size = 0;
71490 }
71491
71492 full_search:
71493 @@ -1396,34 +1595,40 @@ full_search:
71494 * Start a new search - just in case we missed
71495 * some holes.
71496 */
71497 - if (start_addr != TASK_UNMAPPED_BASE) {
71498 - addr = TASK_UNMAPPED_BASE;
71499 - start_addr = addr;
71500 + if (start_addr != mm->mmap_base) {
71501 + start_addr = addr = mm->mmap_base;
71502 mm->cached_hole_size = 0;
71503 goto full_search;
71504 }
71505 return -ENOMEM;
71506 }
71507 - if (!vma || addr + len <= vma->vm_start) {
71508 - /*
71509 - * Remember the place where we stopped the search:
71510 - */
71511 - mm->free_area_cache = addr + len;
71512 - return addr;
71513 - }
71514 + if (check_heap_stack_gap(vma, addr, len))
71515 + break;
71516 if (addr + mm->cached_hole_size < vma->vm_start)
71517 mm->cached_hole_size = vma->vm_start - addr;
71518 addr = vma->vm_end;
71519 }
71520 +
71521 + /*
71522 + * Remember the place where we stopped the search:
71523 + */
71524 + mm->free_area_cache = addr + len;
71525 + return addr;
71526 }
71527 #endif
71528
71529 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71530 {
71531 +
71532 +#ifdef CONFIG_PAX_SEGMEXEC
71533 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71534 + return;
71535 +#endif
71536 +
71537 /*
71538 * Is this a new hole at the lowest possible address?
71539 */
71540 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71541 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71542 mm->free_area_cache = addr;
71543 mm->cached_hole_size = ~0UL;
71544 }
71545 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71546 {
71547 struct vm_area_struct *vma;
71548 struct mm_struct *mm = current->mm;
71549 - unsigned long addr = addr0;
71550 + unsigned long base = mm->mmap_base, addr = addr0;
71551
71552 /* requested length too big for entire address space */
71553 if (len > TASK_SIZE)
71554 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71555 if (flags & MAP_FIXED)
71556 return addr;
71557
71558 +#ifdef CONFIG_PAX_RANDMMAP
71559 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71560 +#endif
71561 +
71562 /* requesting a specific address */
71563 if (addr) {
71564 addr = PAGE_ALIGN(addr);
71565 - vma = find_vma(mm, addr);
71566 - if (TASK_SIZE - len >= addr &&
71567 - (!vma || addr + len <= vma->vm_start))
71568 - return addr;
71569 + if (TASK_SIZE - len >= addr) {
71570 + vma = find_vma(mm, addr);
71571 + if (check_heap_stack_gap(vma, addr, len))
71572 + return addr;
71573 + }
71574 }
71575
71576 /* check if free_area_cache is useful for us */
71577 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71578 /* make sure it can fit in the remaining address space */
71579 if (addr > len) {
71580 vma = find_vma(mm, addr-len);
71581 - if (!vma || addr <= vma->vm_start)
71582 + if (check_heap_stack_gap(vma, addr - len, len))
71583 /* remember the address as a hint for next time */
71584 return (mm->free_area_cache = addr-len);
71585 }
71586 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71587 * return with success:
71588 */
71589 vma = find_vma(mm, addr);
71590 - if (!vma || addr+len <= vma->vm_start)
71591 + if (check_heap_stack_gap(vma, addr, len))
71592 /* remember the address as a hint for next time */
71593 return (mm->free_area_cache = addr);
71594
71595 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71596 mm->cached_hole_size = vma->vm_start - addr;
71597
71598 /* try just below the current vma->vm_start */
71599 - addr = vma->vm_start-len;
71600 - } while (len < vma->vm_start);
71601 + addr = skip_heap_stack_gap(vma, len);
71602 + } while (!IS_ERR_VALUE(addr));
71603
71604 bottomup:
71605 /*
71606 @@ -1507,13 +1717,21 @@ bottomup:
71607 * can happen with large stack limits and large mmap()
71608 * allocations.
71609 */
71610 + mm->mmap_base = TASK_UNMAPPED_BASE;
71611 +
71612 +#ifdef CONFIG_PAX_RANDMMAP
71613 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71614 + mm->mmap_base += mm->delta_mmap;
71615 +#endif
71616 +
71617 + mm->free_area_cache = mm->mmap_base;
71618 mm->cached_hole_size = ~0UL;
71619 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71620 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71621 /*
71622 * Restore the topdown base:
71623 */
71624 - mm->free_area_cache = mm->mmap_base;
71625 + mm->mmap_base = base;
71626 + mm->free_area_cache = base;
71627 mm->cached_hole_size = ~0UL;
71628
71629 return addr;
71630 @@ -1522,6 +1740,12 @@ bottomup:
71631
71632 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71633 {
71634 +
71635 +#ifdef CONFIG_PAX_SEGMEXEC
71636 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71637 + return;
71638 +#endif
71639 +
71640 /*
71641 * Is this a new hole at the highest possible address?
71642 */
71643 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71644 mm->free_area_cache = addr;
71645
71646 /* dont allow allocations above current base */
71647 - if (mm->free_area_cache > mm->mmap_base)
71648 + if (mm->free_area_cache > mm->mmap_base) {
71649 mm->free_area_cache = mm->mmap_base;
71650 + mm->cached_hole_size = ~0UL;
71651 + }
71652 }
71653
71654 unsigned long
71655 @@ -1638,6 +1864,28 @@ out:
71656 return prev ? prev->vm_next : vma;
71657 }
71658
71659 +#ifdef CONFIG_PAX_SEGMEXEC
71660 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71661 +{
71662 + struct vm_area_struct *vma_m;
71663 +
71664 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71665 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71666 + BUG_ON(vma->vm_mirror);
71667 + return NULL;
71668 + }
71669 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71670 + vma_m = vma->vm_mirror;
71671 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71672 + BUG_ON(vma->vm_file != vma_m->vm_file);
71673 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71674 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71675 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71676 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71677 + return vma_m;
71678 +}
71679 +#endif
71680 +
71681 /*
71682 * Verify that the stack growth is acceptable and
71683 * update accounting. This is shared with both the
71684 @@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71685 return -ENOMEM;
71686
71687 /* Stack limit test */
71688 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71689 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71690 return -ENOMEM;
71691
71692 @@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71693 locked = mm->locked_vm + grow;
71694 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71695 limit >>= PAGE_SHIFT;
71696 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71697 if (locked > limit && !capable(CAP_IPC_LOCK))
71698 return -ENOMEM;
71699 }
71700 @@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71701 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71702 * vma is the last one with address > vma->vm_end. Have to extend vma.
71703 */
71704 +#ifndef CONFIG_IA64
71705 +static
71706 +#endif
71707 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71708 {
71709 int error;
71710 + bool locknext;
71711
71712 if (!(vma->vm_flags & VM_GROWSUP))
71713 return -EFAULT;
71714
71715 + /* Also guard against wrapping around to address 0. */
71716 + if (address < PAGE_ALIGN(address+1))
71717 + address = PAGE_ALIGN(address+1);
71718 + else
71719 + return -ENOMEM;
71720 +
71721 /*
71722 * We must make sure the anon_vma is allocated
71723 * so that the anon_vma locking is not a noop.
71724 */
71725 if (unlikely(anon_vma_prepare(vma)))
71726 return -ENOMEM;
71727 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71728 + if (locknext && anon_vma_prepare(vma->vm_next))
71729 + return -ENOMEM;
71730 vma_lock_anon_vma(vma);
71731 + if (locknext)
71732 + vma_lock_anon_vma(vma->vm_next);
71733
71734 /*
71735 * vma->vm_start/vm_end cannot change under us because the caller
71736 * is required to hold the mmap_sem in read mode. We need the
71737 - * anon_vma lock to serialize against concurrent expand_stacks.
71738 - * Also guard against wrapping around to address 0.
71739 + * anon_vma locks to serialize against concurrent expand_stacks
71740 + * and expand_upwards.
71741 */
71742 - if (address < PAGE_ALIGN(address+4))
71743 - address = PAGE_ALIGN(address+4);
71744 - else {
71745 - vma_unlock_anon_vma(vma);
71746 - return -ENOMEM;
71747 - }
71748 error = 0;
71749
71750 /* Somebody else might have raced and expanded it already */
71751 - if (address > vma->vm_end) {
71752 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71753 + error = -ENOMEM;
71754 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71755 unsigned long size, grow;
71756
71757 size = address - vma->vm_start;
71758 @@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71759 }
71760 }
71761 }
71762 + if (locknext)
71763 + vma_unlock_anon_vma(vma->vm_next);
71764 vma_unlock_anon_vma(vma);
71765 khugepaged_enter_vma_merge(vma);
71766 return error;
71767 @@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
71768 unsigned long address)
71769 {
71770 int error;
71771 + bool lockprev = false;
71772 + struct vm_area_struct *prev;
71773
71774 /*
71775 * We must make sure the anon_vma is allocated
71776 @@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
71777 if (error)
71778 return error;
71779
71780 + prev = vma->vm_prev;
71781 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71782 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71783 +#endif
71784 + if (lockprev && anon_vma_prepare(prev))
71785 + return -ENOMEM;
71786 + if (lockprev)
71787 + vma_lock_anon_vma(prev);
71788 +
71789 vma_lock_anon_vma(vma);
71790
71791 /*
71792 @@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
71793 */
71794
71795 /* Somebody else might have raced and expanded it already */
71796 - if (address < vma->vm_start) {
71797 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71798 + error = -ENOMEM;
71799 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71800 unsigned long size, grow;
71801
71802 +#ifdef CONFIG_PAX_SEGMEXEC
71803 + struct vm_area_struct *vma_m;
71804 +
71805 + vma_m = pax_find_mirror_vma(vma);
71806 +#endif
71807 +
71808 size = vma->vm_end - address;
71809 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71810
71811 @@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
71812 if (!error) {
71813 vma->vm_start = address;
71814 vma->vm_pgoff -= grow;
71815 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71816 +
71817 +#ifdef CONFIG_PAX_SEGMEXEC
71818 + if (vma_m) {
71819 + vma_m->vm_start -= grow << PAGE_SHIFT;
71820 + vma_m->vm_pgoff -= grow;
71821 + }
71822 +#endif
71823 +
71824 perf_event_mmap(vma);
71825 }
71826 }
71827 }
71828 vma_unlock_anon_vma(vma);
71829 + if (lockprev)
71830 + vma_unlock_anon_vma(prev);
71831 khugepaged_enter_vma_merge(vma);
71832 return error;
71833 }
71834 @@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71835 do {
71836 long nrpages = vma_pages(vma);
71837
71838 +#ifdef CONFIG_PAX_SEGMEXEC
71839 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71840 + vma = remove_vma(vma);
71841 + continue;
71842 + }
71843 +#endif
71844 +
71845 mm->total_vm -= nrpages;
71846 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71847 vma = remove_vma(vma);
71848 @@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71849 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71850 vma->vm_prev = NULL;
71851 do {
71852 +
71853 +#ifdef CONFIG_PAX_SEGMEXEC
71854 + if (vma->vm_mirror) {
71855 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71856 + vma->vm_mirror->vm_mirror = NULL;
71857 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71858 + vma->vm_mirror = NULL;
71859 + }
71860 +#endif
71861 +
71862 rb_erase(&vma->vm_rb, &mm->mm_rb);
71863 mm->map_count--;
71864 tail_vma = vma;
71865 @@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71866 struct vm_area_struct *new;
71867 int err = -ENOMEM;
71868
71869 +#ifdef CONFIG_PAX_SEGMEXEC
71870 + struct vm_area_struct *vma_m, *new_m = NULL;
71871 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71872 +#endif
71873 +
71874 if (is_vm_hugetlb_page(vma) && (addr &
71875 ~(huge_page_mask(hstate_vma(vma)))))
71876 return -EINVAL;
71877
71878 +#ifdef CONFIG_PAX_SEGMEXEC
71879 + vma_m = pax_find_mirror_vma(vma);
71880 +#endif
71881 +
71882 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71883 if (!new)
71884 goto out_err;
71885
71886 +#ifdef CONFIG_PAX_SEGMEXEC
71887 + if (vma_m) {
71888 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71889 + if (!new_m) {
71890 + kmem_cache_free(vm_area_cachep, new);
71891 + goto out_err;
71892 + }
71893 + }
71894 +#endif
71895 +
71896 /* most fields are the same, copy all, and then fixup */
71897 *new = *vma;
71898
71899 @@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71900 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71901 }
71902
71903 +#ifdef CONFIG_PAX_SEGMEXEC
71904 + if (vma_m) {
71905 + *new_m = *vma_m;
71906 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71907 + new_m->vm_mirror = new;
71908 + new->vm_mirror = new_m;
71909 +
71910 + if (new_below)
71911 + new_m->vm_end = addr_m;
71912 + else {
71913 + new_m->vm_start = addr_m;
71914 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71915 + }
71916 + }
71917 +#endif
71918 +
71919 pol = mpol_dup(vma_policy(vma));
71920 if (IS_ERR(pol)) {
71921 err = PTR_ERR(pol);
71922 @@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71923 else
71924 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71925
71926 +#ifdef CONFIG_PAX_SEGMEXEC
71927 + if (!err && vma_m) {
71928 + if (anon_vma_clone(new_m, vma_m))
71929 + goto out_free_mpol;
71930 +
71931 + mpol_get(pol);
71932 + vma_set_policy(new_m, pol);
71933 +
71934 + if (new_m->vm_file) {
71935 + get_file(new_m->vm_file);
71936 + if (vma_m->vm_flags & VM_EXECUTABLE)
71937 + added_exe_file_vma(mm);
71938 + }
71939 +
71940 + if (new_m->vm_ops && new_m->vm_ops->open)
71941 + new_m->vm_ops->open(new_m);
71942 +
71943 + if (new_below)
71944 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71945 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71946 + else
71947 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71948 +
71949 + if (err) {
71950 + if (new_m->vm_ops && new_m->vm_ops->close)
71951 + new_m->vm_ops->close(new_m);
71952 + if (new_m->vm_file) {
71953 + if (vma_m->vm_flags & VM_EXECUTABLE)
71954 + removed_exe_file_vma(mm);
71955 + fput(new_m->vm_file);
71956 + }
71957 + mpol_put(pol);
71958 + }
71959 + }
71960 +#endif
71961 +
71962 /* Success. */
71963 if (!err)
71964 return 0;
71965 @@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71966 removed_exe_file_vma(mm);
71967 fput(new->vm_file);
71968 }
71969 - unlink_anon_vmas(new);
71970 out_free_mpol:
71971 mpol_put(pol);
71972 out_free_vma:
71973 +
71974 +#ifdef CONFIG_PAX_SEGMEXEC
71975 + if (new_m) {
71976 + unlink_anon_vmas(new_m);
71977 + kmem_cache_free(vm_area_cachep, new_m);
71978 + }
71979 +#endif
71980 +
71981 + unlink_anon_vmas(new);
71982 kmem_cache_free(vm_area_cachep, new);
71983 out_err:
71984 return err;
71985 @@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71986 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71987 unsigned long addr, int new_below)
71988 {
71989 +
71990 +#ifdef CONFIG_PAX_SEGMEXEC
71991 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71992 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71993 + if (mm->map_count >= sysctl_max_map_count-1)
71994 + return -ENOMEM;
71995 + } else
71996 +#endif
71997 +
71998 if (mm->map_count >= sysctl_max_map_count)
71999 return -ENOMEM;
72000
72001 @@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72002 * work. This now handles partial unmappings.
72003 * Jeremy Fitzhardinge <jeremy@goop.org>
72004 */
72005 +#ifdef CONFIG_PAX_SEGMEXEC
72006 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72007 {
72008 + int ret = __do_munmap(mm, start, len);
72009 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72010 + return ret;
72011 +
72012 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72013 +}
72014 +
72015 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72016 +#else
72017 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72018 +#endif
72019 +{
72020 unsigned long end;
72021 struct vm_area_struct *vma, *prev, *last;
72022
72023 + /*
72024 + * mm->mmap_sem is required to protect against another thread
72025 + * changing the mappings in case we sleep.
72026 + */
72027 + verify_mm_writelocked(mm);
72028 +
72029 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72030 return -EINVAL;
72031
72032 @@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72033 /* Fix up all other VM information */
72034 remove_vma_list(mm, vma);
72035
72036 + track_exec_limit(mm, start, end, 0UL);
72037 +
72038 return 0;
72039 }
72040
72041 @@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72042
72043 profile_munmap(addr);
72044
72045 +#ifdef CONFIG_PAX_SEGMEXEC
72046 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72047 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
72048 + return -EINVAL;
72049 +#endif
72050 +
72051 down_write(&mm->mmap_sem);
72052 ret = do_munmap(mm, addr, len);
72053 up_write(&mm->mmap_sem);
72054 return ret;
72055 }
72056
72057 -static inline void verify_mm_writelocked(struct mm_struct *mm)
72058 -{
72059 -#ifdef CONFIG_DEBUG_VM
72060 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72061 - WARN_ON(1);
72062 - up_read(&mm->mmap_sem);
72063 - }
72064 -#endif
72065 -}
72066 -
72067 /*
72068 * this is really a simplified "do_mmap". it only handles
72069 * anonymous maps. eventually we may be able to do some
72070 @@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72071 struct rb_node ** rb_link, * rb_parent;
72072 pgoff_t pgoff = addr >> PAGE_SHIFT;
72073 int error;
72074 + unsigned long charged;
72075
72076 len = PAGE_ALIGN(len);
72077 if (!len)
72078 @@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72079
72080 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72081
72082 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72083 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72084 + flags &= ~VM_EXEC;
72085 +
72086 +#ifdef CONFIG_PAX_MPROTECT
72087 + if (mm->pax_flags & MF_PAX_MPROTECT)
72088 + flags &= ~VM_MAYEXEC;
72089 +#endif
72090 +
72091 + }
72092 +#endif
72093 +
72094 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72095 if (error & ~PAGE_MASK)
72096 return error;
72097
72098 + charged = len >> PAGE_SHIFT;
72099 +
72100 /*
72101 * mlock MCL_FUTURE?
72102 */
72103 if (mm->def_flags & VM_LOCKED) {
72104 unsigned long locked, lock_limit;
72105 - locked = len >> PAGE_SHIFT;
72106 + locked = charged;
72107 locked += mm->locked_vm;
72108 lock_limit = rlimit(RLIMIT_MEMLOCK);
72109 lock_limit >>= PAGE_SHIFT;
72110 @@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72111 /*
72112 * Clear old maps. this also does some error checking for us
72113 */
72114 - munmap_back:
72115 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72116 if (vma && vma->vm_start < addr + len) {
72117 if (do_munmap(mm, addr, len))
72118 return -ENOMEM;
72119 - goto munmap_back;
72120 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72121 + BUG_ON(vma && vma->vm_start < addr + len);
72122 }
72123
72124 /* Check against address space limits *after* clearing old maps... */
72125 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72126 + if (!may_expand_vm(mm, charged))
72127 return -ENOMEM;
72128
72129 if (mm->map_count > sysctl_max_map_count)
72130 return -ENOMEM;
72131
72132 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
72133 + if (security_vm_enough_memory(charged))
72134 return -ENOMEM;
72135
72136 /* Can we just expand an old private anonymous mapping? */
72137 @@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72138 */
72139 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72140 if (!vma) {
72141 - vm_unacct_memory(len >> PAGE_SHIFT);
72142 + vm_unacct_memory(charged);
72143 return -ENOMEM;
72144 }
72145
72146 @@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72147 vma_link(mm, vma, prev, rb_link, rb_parent);
72148 out:
72149 perf_event_mmap(vma);
72150 - mm->total_vm += len >> PAGE_SHIFT;
72151 + mm->total_vm += charged;
72152 if (flags & VM_LOCKED) {
72153 if (!mlock_vma_pages_range(vma, addr, addr + len))
72154 - mm->locked_vm += (len >> PAGE_SHIFT);
72155 + mm->locked_vm += charged;
72156 }
72157 + track_exec_limit(mm, addr, addr + len, flags);
72158 return addr;
72159 }
72160
72161 @@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
72162 * Walk the list again, actually closing and freeing it,
72163 * with preemption enabled, without holding any MM locks.
72164 */
72165 - while (vma)
72166 + while (vma) {
72167 + vma->vm_mirror = NULL;
72168 vma = remove_vma(vma);
72169 + }
72170
72171 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72172 }
72173 @@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72174 struct vm_area_struct * __vma, * prev;
72175 struct rb_node ** rb_link, * rb_parent;
72176
72177 +#ifdef CONFIG_PAX_SEGMEXEC
72178 + struct vm_area_struct *vma_m = NULL;
72179 +#endif
72180 +
72181 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72182 + return -EPERM;
72183 +
72184 /*
72185 * The vm_pgoff of a purely anonymous vma should be irrelevant
72186 * until its first write fault, when page's anon_vma and index
72187 @@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72188 if ((vma->vm_flags & VM_ACCOUNT) &&
72189 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72190 return -ENOMEM;
72191 +
72192 +#ifdef CONFIG_PAX_SEGMEXEC
72193 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72194 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72195 + if (!vma_m)
72196 + return -ENOMEM;
72197 + }
72198 +#endif
72199 +
72200 vma_link(mm, vma, prev, rb_link, rb_parent);
72201 +
72202 +#ifdef CONFIG_PAX_SEGMEXEC
72203 + if (vma_m)
72204 + BUG_ON(pax_mirror_vma(vma_m, vma));
72205 +#endif
72206 +
72207 return 0;
72208 }
72209
72210 @@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72211 struct rb_node **rb_link, *rb_parent;
72212 struct mempolicy *pol;
72213
72214 + BUG_ON(vma->vm_mirror);
72215 +
72216 /*
72217 * If anonymous vma has not yet been faulted, update new pgoff
72218 * to match new location, to increase its chance of merging.
72219 @@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72220 return NULL;
72221 }
72222
72223 +#ifdef CONFIG_PAX_SEGMEXEC
72224 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72225 +{
72226 + struct vm_area_struct *prev_m;
72227 + struct rb_node **rb_link_m, *rb_parent_m;
72228 + struct mempolicy *pol_m;
72229 +
72230 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72231 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72232 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72233 + *vma_m = *vma;
72234 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72235 + if (anon_vma_clone(vma_m, vma))
72236 + return -ENOMEM;
72237 + pol_m = vma_policy(vma_m);
72238 + mpol_get(pol_m);
72239 + vma_set_policy(vma_m, pol_m);
72240 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72241 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72242 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72243 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72244 + if (vma_m->vm_file)
72245 + get_file(vma_m->vm_file);
72246 + if (vma_m->vm_ops && vma_m->vm_ops->open)
72247 + vma_m->vm_ops->open(vma_m);
72248 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72249 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72250 + vma_m->vm_mirror = vma;
72251 + vma->vm_mirror = vma_m;
72252 + return 0;
72253 +}
72254 +#endif
72255 +
72256 /*
72257 * Return true if the calling process may expand its vm space by the passed
72258 * number of pages
72259 @@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72260 unsigned long lim;
72261
72262 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72263 -
72264 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72265 if (cur + npages > lim)
72266 return 0;
72267 return 1;
72268 @@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
72269 vma->vm_start = addr;
72270 vma->vm_end = addr + len;
72271
72272 +#ifdef CONFIG_PAX_MPROTECT
72273 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72274 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72275 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72276 + return -EPERM;
72277 + if (!(vm_flags & VM_EXEC))
72278 + vm_flags &= ~VM_MAYEXEC;
72279 +#else
72280 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72281 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72282 +#endif
72283 + else
72284 + vm_flags &= ~VM_MAYWRITE;
72285 + }
72286 +#endif
72287 +
72288 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72289 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72290
72291 diff --git a/mm/mprotect.c b/mm/mprotect.c
72292 index 5a688a2..27e031c 100644
72293 --- a/mm/mprotect.c
72294 +++ b/mm/mprotect.c
72295 @@ -23,10 +23,16 @@
72296 #include <linux/mmu_notifier.h>
72297 #include <linux/migrate.h>
72298 #include <linux/perf_event.h>
72299 +
72300 +#ifdef CONFIG_PAX_MPROTECT
72301 +#include <linux/elf.h>
72302 +#endif
72303 +
72304 #include <asm/uaccess.h>
72305 #include <asm/pgtable.h>
72306 #include <asm/cacheflush.h>
72307 #include <asm/tlbflush.h>
72308 +#include <asm/mmu_context.h>
72309
72310 #ifndef pgprot_modify
72311 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72312 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
72313 flush_tlb_range(vma, start, end);
72314 }
72315
72316 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72317 +/* called while holding the mmap semaphor for writing except stack expansion */
72318 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72319 +{
72320 + unsigned long oldlimit, newlimit = 0UL;
72321 +
72322 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72323 + return;
72324 +
72325 + spin_lock(&mm->page_table_lock);
72326 + oldlimit = mm->context.user_cs_limit;
72327 + if ((prot & VM_EXEC) && oldlimit < end)
72328 + /* USER_CS limit moved up */
72329 + newlimit = end;
72330 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72331 + /* USER_CS limit moved down */
72332 + newlimit = start;
72333 +
72334 + if (newlimit) {
72335 + mm->context.user_cs_limit = newlimit;
72336 +
72337 +#ifdef CONFIG_SMP
72338 + wmb();
72339 + cpus_clear(mm->context.cpu_user_cs_mask);
72340 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72341 +#endif
72342 +
72343 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72344 + }
72345 + spin_unlock(&mm->page_table_lock);
72346 + if (newlimit == end) {
72347 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
72348 +
72349 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
72350 + if (is_vm_hugetlb_page(vma))
72351 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72352 + else
72353 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72354 + }
72355 +}
72356 +#endif
72357 +
72358 int
72359 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72360 unsigned long start, unsigned long end, unsigned long newflags)
72361 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72362 int error;
72363 int dirty_accountable = 0;
72364
72365 +#ifdef CONFIG_PAX_SEGMEXEC
72366 + struct vm_area_struct *vma_m = NULL;
72367 + unsigned long start_m, end_m;
72368 +
72369 + start_m = start + SEGMEXEC_TASK_SIZE;
72370 + end_m = end + SEGMEXEC_TASK_SIZE;
72371 +#endif
72372 +
72373 if (newflags == oldflags) {
72374 *pprev = vma;
72375 return 0;
72376 }
72377
72378 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72379 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72380 +
72381 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72382 + return -ENOMEM;
72383 +
72384 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72385 + return -ENOMEM;
72386 + }
72387 +
72388 /*
72389 * If we make a private mapping writable we increase our commit;
72390 * but (without finer accounting) cannot reduce our commit if we
72391 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72392 }
72393 }
72394
72395 +#ifdef CONFIG_PAX_SEGMEXEC
72396 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72397 + if (start != vma->vm_start) {
72398 + error = split_vma(mm, vma, start, 1);
72399 + if (error)
72400 + goto fail;
72401 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72402 + *pprev = (*pprev)->vm_next;
72403 + }
72404 +
72405 + if (end != vma->vm_end) {
72406 + error = split_vma(mm, vma, end, 0);
72407 + if (error)
72408 + goto fail;
72409 + }
72410 +
72411 + if (pax_find_mirror_vma(vma)) {
72412 + error = __do_munmap(mm, start_m, end_m - start_m);
72413 + if (error)
72414 + goto fail;
72415 + } else {
72416 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72417 + if (!vma_m) {
72418 + error = -ENOMEM;
72419 + goto fail;
72420 + }
72421 + vma->vm_flags = newflags;
72422 + error = pax_mirror_vma(vma_m, vma);
72423 + if (error) {
72424 + vma->vm_flags = oldflags;
72425 + goto fail;
72426 + }
72427 + }
72428 + }
72429 +#endif
72430 +
72431 /*
72432 * First try to merge with previous and/or next vma.
72433 */
72434 @@ -204,9 +306,21 @@ success:
72435 * vm_flags and vm_page_prot are protected by the mmap_sem
72436 * held in write mode.
72437 */
72438 +
72439 +#ifdef CONFIG_PAX_SEGMEXEC
72440 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72441 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72442 +#endif
72443 +
72444 vma->vm_flags = newflags;
72445 +
72446 +#ifdef CONFIG_PAX_MPROTECT
72447 + if (mm->binfmt && mm->binfmt->handle_mprotect)
72448 + mm->binfmt->handle_mprotect(vma, newflags);
72449 +#endif
72450 +
72451 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72452 - vm_get_page_prot(newflags));
72453 + vm_get_page_prot(vma->vm_flags));
72454
72455 if (vma_wants_writenotify(vma)) {
72456 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72457 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72458 end = start + len;
72459 if (end <= start)
72460 return -ENOMEM;
72461 +
72462 +#ifdef CONFIG_PAX_SEGMEXEC
72463 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72464 + if (end > SEGMEXEC_TASK_SIZE)
72465 + return -EINVAL;
72466 + } else
72467 +#endif
72468 +
72469 + if (end > TASK_SIZE)
72470 + return -EINVAL;
72471 +
72472 if (!arch_validate_prot(prot))
72473 return -EINVAL;
72474
72475 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72476 /*
72477 * Does the application expect PROT_READ to imply PROT_EXEC:
72478 */
72479 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72480 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72481 prot |= PROT_EXEC;
72482
72483 vm_flags = calc_vm_prot_bits(prot);
72484 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72485 if (start > vma->vm_start)
72486 prev = vma;
72487
72488 +#ifdef CONFIG_PAX_MPROTECT
72489 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72490 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72491 +#endif
72492 +
72493 for (nstart = start ; ; ) {
72494 unsigned long newflags;
72495
72496 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72497
72498 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72499 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72500 + if (prot & (PROT_WRITE | PROT_EXEC))
72501 + gr_log_rwxmprotect(vma->vm_file);
72502 +
72503 + error = -EACCES;
72504 + goto out;
72505 + }
72506 +
72507 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72508 error = -EACCES;
72509 goto out;
72510 }
72511 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72512 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72513 if (error)
72514 goto out;
72515 +
72516 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72517 +
72518 nstart = tmp;
72519
72520 if (nstart < prev->vm_end)
72521 diff --git a/mm/mremap.c b/mm/mremap.c
72522 index 506fa44..ccc0ba9 100644
72523 --- a/mm/mremap.c
72524 +++ b/mm/mremap.c
72525 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72526 continue;
72527 pte = ptep_clear_flush(vma, old_addr, old_pte);
72528 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72529 +
72530 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72531 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72532 + pte = pte_exprotect(pte);
72533 +#endif
72534 +
72535 set_pte_at(mm, new_addr, new_pte, pte);
72536 }
72537
72538 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72539 if (is_vm_hugetlb_page(vma))
72540 goto Einval;
72541
72542 +#ifdef CONFIG_PAX_SEGMEXEC
72543 + if (pax_find_mirror_vma(vma))
72544 + goto Einval;
72545 +#endif
72546 +
72547 /* We can't remap across vm area boundaries */
72548 if (old_len > vma->vm_end - addr)
72549 goto Efault;
72550 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned long addr,
72551 unsigned long ret = -EINVAL;
72552 unsigned long charged = 0;
72553 unsigned long map_flags;
72554 + unsigned long pax_task_size = TASK_SIZE;
72555
72556 if (new_addr & ~PAGE_MASK)
72557 goto out;
72558
72559 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72560 +#ifdef CONFIG_PAX_SEGMEXEC
72561 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72562 + pax_task_size = SEGMEXEC_TASK_SIZE;
72563 +#endif
72564 +
72565 + pax_task_size -= PAGE_SIZE;
72566 +
72567 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72568 goto out;
72569
72570 /* Check if the location we're moving into overlaps the
72571 * old location at all, and fail if it does.
72572 */
72573 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72574 - goto out;
72575 -
72576 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72577 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72578 goto out;
72579
72580 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72581 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long addr,
72582 struct vm_area_struct *vma;
72583 unsigned long ret = -EINVAL;
72584 unsigned long charged = 0;
72585 + unsigned long pax_task_size = TASK_SIZE;
72586
72587 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72588 goto out;
72589 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long addr,
72590 if (!new_len)
72591 goto out;
72592
72593 +#ifdef CONFIG_PAX_SEGMEXEC
72594 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72595 + pax_task_size = SEGMEXEC_TASK_SIZE;
72596 +#endif
72597 +
72598 + pax_task_size -= PAGE_SIZE;
72599 +
72600 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72601 + old_len > pax_task_size || addr > pax_task_size-old_len)
72602 + goto out;
72603 +
72604 if (flags & MREMAP_FIXED) {
72605 if (flags & MREMAP_MAYMOVE)
72606 ret = mremap_to(addr, old_len, new_addr, new_len);
72607 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long addr,
72608 addr + new_len);
72609 }
72610 ret = addr;
72611 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72612 goto out;
72613 }
72614 }
72615 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long addr,
72616 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72617 if (ret)
72618 goto out;
72619 +
72620 + map_flags = vma->vm_flags;
72621 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72622 + if (!(ret & ~PAGE_MASK)) {
72623 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72624 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72625 + }
72626 }
72627 out:
72628 if (ret & ~PAGE_MASK)
72629 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
72630 index 6e93dc7..c98df0c 100644
72631 --- a/mm/nobootmem.c
72632 +++ b/mm/nobootmem.c
72633 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
72634 unsigned long __init free_all_memory_core_early(int nodeid)
72635 {
72636 int i;
72637 - u64 start, end;
72638 + u64 start, end, startrange, endrange;
72639 unsigned long count = 0;
72640 - struct range *range = NULL;
72641 + struct range *range = NULL, rangerange = { 0, 0 };
72642 int nr_range;
72643
72644 nr_range = get_free_all_memory_range(&range, nodeid);
72645 + startrange = __pa(range) >> PAGE_SHIFT;
72646 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
72647
72648 for (i = 0; i < nr_range; i++) {
72649 start = range[i].start;
72650 end = range[i].end;
72651 + if (start <= endrange && startrange < end) {
72652 + BUG_ON(rangerange.start | rangerange.end);
72653 + rangerange = range[i];
72654 + continue;
72655 + }
72656 count += end - start;
72657 __free_pages_memory(start, end);
72658 }
72659 + start = rangerange.start;
72660 + end = rangerange.end;
72661 + count += end - start;
72662 + __free_pages_memory(start, end);
72663
72664 return count;
72665 }
72666 diff --git a/mm/nommu.c b/mm/nommu.c
72667 index 4358032..e79b99f 100644
72668 --- a/mm/nommu.c
72669 +++ b/mm/nommu.c
72670 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72671 int sysctl_overcommit_ratio = 50; /* default is 50% */
72672 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72673 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72674 -int heap_stack_gap = 0;
72675
72676 atomic_long_t mmap_pages_allocated;
72677
72678 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72679 EXPORT_SYMBOL(find_vma);
72680
72681 /*
72682 - * find a VMA
72683 - * - we don't extend stack VMAs under NOMMU conditions
72684 - */
72685 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72686 -{
72687 - return find_vma(mm, addr);
72688 -}
72689 -
72690 -/*
72691 * expand a stack to a given address
72692 * - not supported under NOMMU conditions
72693 */
72694 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72695
72696 /* most fields are the same, copy all, and then fixup */
72697 *new = *vma;
72698 + INIT_LIST_HEAD(&new->anon_vma_chain);
72699 *region = *vma->vm_region;
72700 new->vm_region = region;
72701
72702 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
72703 index 626303b..e9a1785 100644
72704 --- a/mm/oom_kill.c
72705 +++ b/mm/oom_kill.c
72706 @@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
72707 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
72708 const nodemask_t *nodemask, unsigned long totalpages)
72709 {
72710 - int points;
72711 + long points;
72712
72713 if (oom_unkillable_task(p, mem, nodemask))
72714 return 0;
72715 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72716 index 6e8ecb6..50b8879 100644
72717 --- a/mm/page_alloc.c
72718 +++ b/mm/page_alloc.c
72719 @@ -340,7 +340,7 @@ out:
72720 * This usage means that zero-order pages may not be compound.
72721 */
72722
72723 -static void free_compound_page(struct page *page)
72724 +void free_compound_page(struct page *page)
72725 {
72726 __free_pages_ok(page, compound_order(page));
72727 }
72728 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72729 int i;
72730 int bad = 0;
72731
72732 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72733 + unsigned long index = 1UL << order;
72734 +#endif
72735 +
72736 trace_mm_page_free_direct(page, order);
72737 kmemcheck_free_shadow(page, order);
72738
72739 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72740 debug_check_no_obj_freed(page_address(page),
72741 PAGE_SIZE << order);
72742 }
72743 +
72744 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72745 + for (; index; --index)
72746 + sanitize_highpage(page + index - 1);
72747 +#endif
72748 +
72749 arch_free_page(page, order);
72750 kernel_map_pages(page, 1 << order, 0);
72751
72752 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72753 arch_alloc_page(page, order);
72754 kernel_map_pages(page, 1 << order, 1);
72755
72756 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72757 if (gfp_flags & __GFP_ZERO)
72758 prep_zero_page(page, order, gfp_flags);
72759 +#endif
72760
72761 if (order && (gfp_flags & __GFP_COMP))
72762 prep_compound_page(page, order);
72763 @@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter)
72764 int cpu;
72765 struct zone *zone;
72766
72767 + pax_track_stack();
72768 +
72769 for_each_populated_zone(zone) {
72770 if (skip_free_areas_node(filter, zone_to_nid(zone)))
72771 continue;
72772 @@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72773 unsigned long pfn;
72774
72775 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72776 +#ifdef CONFIG_X86_32
72777 + /* boot failures in VMware 8 on 32bit vanilla since
72778 + this change */
72779 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72780 +#else
72781 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72782 +#endif
72783 return 1;
72784 }
72785 return 0;
72786 diff --git a/mm/percpu.c b/mm/percpu.c
72787 index bf80e55..c7c3f9a 100644
72788 --- a/mm/percpu.c
72789 +++ b/mm/percpu.c
72790 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu __read_mostly;
72791 static unsigned int pcpu_last_unit_cpu __read_mostly;
72792
72793 /* the address of the first chunk which starts with the kernel static area */
72794 -void *pcpu_base_addr __read_mostly;
72795 +void *pcpu_base_addr __read_only;
72796 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72797
72798 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72799 diff --git a/mm/rmap.c b/mm/rmap.c
72800 index 8005080..198c2cd 100644
72801 --- a/mm/rmap.c
72802 +++ b/mm/rmap.c
72803 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72804 struct anon_vma *anon_vma = vma->anon_vma;
72805 struct anon_vma_chain *avc;
72806
72807 +#ifdef CONFIG_PAX_SEGMEXEC
72808 + struct anon_vma_chain *avc_m = NULL;
72809 +#endif
72810 +
72811 might_sleep();
72812 if (unlikely(!anon_vma)) {
72813 struct mm_struct *mm = vma->vm_mm;
72814 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72815 if (!avc)
72816 goto out_enomem;
72817
72818 +#ifdef CONFIG_PAX_SEGMEXEC
72819 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72820 + if (!avc_m)
72821 + goto out_enomem_free_avc;
72822 +#endif
72823 +
72824 anon_vma = find_mergeable_anon_vma(vma);
72825 allocated = NULL;
72826 if (!anon_vma) {
72827 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72828 /* page_table_lock to protect against threads */
72829 spin_lock(&mm->page_table_lock);
72830 if (likely(!vma->anon_vma)) {
72831 +
72832 +#ifdef CONFIG_PAX_SEGMEXEC
72833 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72834 +
72835 + if (vma_m) {
72836 + BUG_ON(vma_m->anon_vma);
72837 + vma_m->anon_vma = anon_vma;
72838 + avc_m->anon_vma = anon_vma;
72839 + avc_m->vma = vma;
72840 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
72841 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
72842 + avc_m = NULL;
72843 + }
72844 +#endif
72845 +
72846 vma->anon_vma = anon_vma;
72847 avc->anon_vma = anon_vma;
72848 avc->vma = vma;
72849 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72850
72851 if (unlikely(allocated))
72852 put_anon_vma(allocated);
72853 +
72854 +#ifdef CONFIG_PAX_SEGMEXEC
72855 + if (unlikely(avc_m))
72856 + anon_vma_chain_free(avc_m);
72857 +#endif
72858 +
72859 if (unlikely(avc))
72860 anon_vma_chain_free(avc);
72861 }
72862 return 0;
72863
72864 out_enomem_free_avc:
72865 +
72866 +#ifdef CONFIG_PAX_SEGMEXEC
72867 + if (avc_m)
72868 + anon_vma_chain_free(avc_m);
72869 +#endif
72870 +
72871 anon_vma_chain_free(avc);
72872 out_enomem:
72873 return -ENOMEM;
72874 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
72875 * Attach the anon_vmas from src to dst.
72876 * Returns 0 on success, -ENOMEM on failure.
72877 */
72878 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72879 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72880 {
72881 struct anon_vma_chain *avc, *pavc;
72882 struct anon_vma *root = NULL;
72883 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72884 * the corresponding VMA in the parent process is attached to.
72885 * Returns 0 on success, non-zero on failure.
72886 */
72887 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72888 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72889 {
72890 struct anon_vma_chain *avc;
72891 struct anon_vma *anon_vma;
72892 diff --git a/mm/shmem.c b/mm/shmem.c
72893 index 32f6763..431c405 100644
72894 --- a/mm/shmem.c
72895 +++ b/mm/shmem.c
72896 @@ -31,7 +31,7 @@
72897 #include <linux/module.h>
72898 #include <linux/swap.h>
72899
72900 -static struct vfsmount *shm_mnt;
72901 +struct vfsmount *shm_mnt;
72902
72903 #ifdef CONFIG_SHMEM
72904 /*
72905 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72906 #define BOGO_DIRENT_SIZE 20
72907
72908 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72909 -#define SHORT_SYMLINK_LEN 128
72910 +#define SHORT_SYMLINK_LEN 64
72911
72912 struct shmem_xattr {
72913 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72914 @@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
72915 struct mempolicy mpol, *spol;
72916 struct vm_area_struct pvma;
72917
72918 + pax_track_stack();
72919 +
72920 spol = mpol_cond_copy(&mpol,
72921 mpol_shared_policy_lookup(&info->policy, index));
72922
72923 @@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72924 int err = -ENOMEM;
72925
72926 /* Round up to L1_CACHE_BYTES to resist false sharing */
72927 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72928 - L1_CACHE_BYTES), GFP_KERNEL);
72929 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72930 if (!sbinfo)
72931 return -ENOMEM;
72932
72933 diff --git a/mm/slab.c b/mm/slab.c
72934 index 6d90a09..3cab423 100644
72935 --- a/mm/slab.c
72936 +++ b/mm/slab.c
72937 @@ -151,7 +151,7 @@
72938
72939 /* Legal flag mask for kmem_cache_create(). */
72940 #if DEBUG
72941 -# define CREATE_MASK (SLAB_RED_ZONE | \
72942 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72943 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72944 SLAB_CACHE_DMA | \
72945 SLAB_STORE_USER | \
72946 @@ -159,7 +159,7 @@
72947 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72948 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72949 #else
72950 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72951 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72952 SLAB_CACHE_DMA | \
72953 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72954 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72955 @@ -288,7 +288,7 @@ struct kmem_list3 {
72956 * Need this for bootstrapping a per node allocator.
72957 */
72958 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72959 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72960 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72961 #define CACHE_CACHE 0
72962 #define SIZE_AC MAX_NUMNODES
72963 #define SIZE_L3 (2 * MAX_NUMNODES)
72964 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72965 if ((x)->max_freeable < i) \
72966 (x)->max_freeable = i; \
72967 } while (0)
72968 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72969 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72970 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72971 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72972 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72973 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72974 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72975 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72976 #else
72977 #define STATS_INC_ACTIVE(x) do { } while (0)
72978 #define STATS_DEC_ACTIVE(x) do { } while (0)
72979 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72980 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72981 */
72982 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72983 - const struct slab *slab, void *obj)
72984 + const struct slab *slab, const void *obj)
72985 {
72986 u32 offset = (obj - slab->s_mem);
72987 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72988 @@ -564,7 +564,7 @@ struct cache_names {
72989 static struct cache_names __initdata cache_names[] = {
72990 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72991 #include <linux/kmalloc_sizes.h>
72992 - {NULL,}
72993 + {NULL}
72994 #undef CACHE
72995 };
72996
72997 @@ -1571,7 +1571,7 @@ void __init kmem_cache_init(void)
72998 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72999 sizes[INDEX_AC].cs_size,
73000 ARCH_KMALLOC_MINALIGN,
73001 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73002 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73003 NULL);
73004
73005 if (INDEX_AC != INDEX_L3) {
73006 @@ -1579,7 +1579,7 @@ void __init kmem_cache_init(void)
73007 kmem_cache_create(names[INDEX_L3].name,
73008 sizes[INDEX_L3].cs_size,
73009 ARCH_KMALLOC_MINALIGN,
73010 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73011 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73012 NULL);
73013 }
73014
73015 @@ -1597,7 +1597,7 @@ void __init kmem_cache_init(void)
73016 sizes->cs_cachep = kmem_cache_create(names->name,
73017 sizes->cs_size,
73018 ARCH_KMALLOC_MINALIGN,
73019 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73020 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73021 NULL);
73022 }
73023 #ifdef CONFIG_ZONE_DMA
73024 @@ -4324,10 +4324,10 @@ static int s_show(struct seq_file *m, void *p)
73025 }
73026 /* cpu stats */
73027 {
73028 - unsigned long allochit = atomic_read(&cachep->allochit);
73029 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73030 - unsigned long freehit = atomic_read(&cachep->freehit);
73031 - unsigned long freemiss = atomic_read(&cachep->freemiss);
73032 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73033 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73034 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73035 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73036
73037 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73038 allochit, allocmiss, freehit, freemiss);
73039 @@ -4584,15 +4584,70 @@ static const struct file_operations proc_slabstats_operations = {
73040
73041 static int __init slab_proc_init(void)
73042 {
73043 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
73044 + mode_t gr_mode = S_IRUGO;
73045 +
73046 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73047 + gr_mode = S_IRUSR;
73048 +#endif
73049 +
73050 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
73051 #ifdef CONFIG_DEBUG_SLAB_LEAK
73052 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73053 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
73054 #endif
73055 return 0;
73056 }
73057 module_init(slab_proc_init);
73058 #endif
73059
73060 +void check_object_size(const void *ptr, unsigned long n, bool to)
73061 +{
73062 +
73063 +#ifdef CONFIG_PAX_USERCOPY
73064 + struct page *page;
73065 + struct kmem_cache *cachep = NULL;
73066 + struct slab *slabp;
73067 + unsigned int objnr;
73068 + unsigned long offset;
73069 + const char *type;
73070 +
73071 + if (!n)
73072 + return;
73073 +
73074 + type = "<null>";
73075 + if (ZERO_OR_NULL_PTR(ptr))
73076 + goto report;
73077 +
73078 + if (!virt_addr_valid(ptr))
73079 + return;
73080 +
73081 + page = virt_to_head_page(ptr);
73082 +
73083 + type = "<process stack>";
73084 + if (!PageSlab(page)) {
73085 + if (object_is_on_stack(ptr, n) == -1)
73086 + goto report;
73087 + return;
73088 + }
73089 +
73090 + cachep = page_get_cache(page);
73091 + type = cachep->name;
73092 + if (!(cachep->flags & SLAB_USERCOPY))
73093 + goto report;
73094 +
73095 + slabp = page_get_slab(page);
73096 + objnr = obj_to_index(cachep, slabp, ptr);
73097 + BUG_ON(objnr >= cachep->num);
73098 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73099 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73100 + return;
73101 +
73102 +report:
73103 + pax_report_usercopy(ptr, n, to, type);
73104 +#endif
73105 +
73106 +}
73107 +EXPORT_SYMBOL(check_object_size);
73108 +
73109 /**
73110 * ksize - get the actual amount of memory allocated for a given object
73111 * @objp: Pointer to the object
73112 diff --git a/mm/slob.c b/mm/slob.c
73113 index bf39181..727f7a3 100644
73114 --- a/mm/slob.c
73115 +++ b/mm/slob.c
73116 @@ -29,7 +29,7 @@
73117 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73118 * alloc_pages() directly, allocating compound pages so the page order
73119 * does not have to be separately tracked, and also stores the exact
73120 - * allocation size in page->private so that it can be used to accurately
73121 + * allocation size in slob_page->size so that it can be used to accurately
73122 * provide ksize(). These objects are detected in kfree() because slob_page()
73123 * is false for them.
73124 *
73125 @@ -58,6 +58,7 @@
73126 */
73127
73128 #include <linux/kernel.h>
73129 +#include <linux/sched.h>
73130 #include <linux/slab.h>
73131 #include <linux/mm.h>
73132 #include <linux/swap.h> /* struct reclaim_state */
73133 @@ -102,7 +103,8 @@ struct slob_page {
73134 unsigned long flags; /* mandatory */
73135 atomic_t _count; /* mandatory */
73136 slobidx_t units; /* free units left in page */
73137 - unsigned long pad[2];
73138 + unsigned long pad[1];
73139 + unsigned long size; /* size when >=PAGE_SIZE */
73140 slob_t *free; /* first free slob_t in page */
73141 struct list_head list; /* linked list of free pages */
73142 };
73143 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73144 */
73145 static inline int is_slob_page(struct slob_page *sp)
73146 {
73147 - return PageSlab((struct page *)sp);
73148 + return PageSlab((struct page *)sp) && !sp->size;
73149 }
73150
73151 static inline void set_slob_page(struct slob_page *sp)
73152 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73153
73154 static inline struct slob_page *slob_page(const void *addr)
73155 {
73156 - return (struct slob_page *)virt_to_page(addr);
73157 + return (struct slob_page *)virt_to_head_page(addr);
73158 }
73159
73160 /*
73161 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73162 /*
73163 * Return the size of a slob block.
73164 */
73165 -static slobidx_t slob_units(slob_t *s)
73166 +static slobidx_t slob_units(const slob_t *s)
73167 {
73168 if (s->units > 0)
73169 return s->units;
73170 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73171 /*
73172 * Return the next free slob block pointer after this one.
73173 */
73174 -static slob_t *slob_next(slob_t *s)
73175 +static slob_t *slob_next(const slob_t *s)
73176 {
73177 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73178 slobidx_t next;
73179 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73180 /*
73181 * Returns true if s is the last free block in its page.
73182 */
73183 -static int slob_last(slob_t *s)
73184 +static int slob_last(const slob_t *s)
73185 {
73186 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73187 }
73188 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73189 if (!page)
73190 return NULL;
73191
73192 + set_slob_page(page);
73193 return page_address(page);
73194 }
73195
73196 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73197 if (!b)
73198 return NULL;
73199 sp = slob_page(b);
73200 - set_slob_page(sp);
73201
73202 spin_lock_irqsave(&slob_lock, flags);
73203 sp->units = SLOB_UNITS(PAGE_SIZE);
73204 sp->free = b;
73205 + sp->size = 0;
73206 INIT_LIST_HEAD(&sp->list);
73207 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73208 set_slob_page_free(sp, slob_list);
73209 @@ -476,10 +479,9 @@ out:
73210 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73211 */
73212
73213 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73214 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73215 {
73216 - unsigned int *m;
73217 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73218 + slob_t *m;
73219 void *ret;
73220
73221 gfp &= gfp_allowed_mask;
73222 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73223
73224 if (!m)
73225 return NULL;
73226 - *m = size;
73227 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73228 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73229 + m[0].units = size;
73230 + m[1].units = align;
73231 ret = (void *)m + align;
73232
73233 trace_kmalloc_node(_RET_IP_, ret,
73234 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73235 gfp |= __GFP_COMP;
73236 ret = slob_new_pages(gfp, order, node);
73237 if (ret) {
73238 - struct page *page;
73239 - page = virt_to_page(ret);
73240 - page->private = size;
73241 + struct slob_page *sp;
73242 + sp = slob_page(ret);
73243 + sp->size = size;
73244 }
73245
73246 trace_kmalloc_node(_RET_IP_, ret,
73247 size, PAGE_SIZE << order, gfp, node);
73248 }
73249
73250 - kmemleak_alloc(ret, size, 1, gfp);
73251 + return ret;
73252 +}
73253 +
73254 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73255 +{
73256 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73257 + void *ret = __kmalloc_node_align(size, gfp, node, align);
73258 +
73259 + if (!ZERO_OR_NULL_PTR(ret))
73260 + kmemleak_alloc(ret, size, 1, gfp);
73261 return ret;
73262 }
73263 EXPORT_SYMBOL(__kmalloc_node);
73264 @@ -533,13 +547,92 @@ void kfree(const void *block)
73265 sp = slob_page(block);
73266 if (is_slob_page(sp)) {
73267 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73268 - unsigned int *m = (unsigned int *)(block - align);
73269 - slob_free(m, *m + align);
73270 - } else
73271 + slob_t *m = (slob_t *)(block - align);
73272 + slob_free(m, m[0].units + align);
73273 + } else {
73274 + clear_slob_page(sp);
73275 + free_slob_page(sp);
73276 + sp->size = 0;
73277 put_page(&sp->page);
73278 + }
73279 }
73280 EXPORT_SYMBOL(kfree);
73281
73282 +void check_object_size(const void *ptr, unsigned long n, bool to)
73283 +{
73284 +
73285 +#ifdef CONFIG_PAX_USERCOPY
73286 + struct slob_page *sp;
73287 + const slob_t *free;
73288 + const void *base;
73289 + unsigned long flags;
73290 + const char *type;
73291 +
73292 + if (!n)
73293 + return;
73294 +
73295 + type = "<null>";
73296 + if (ZERO_OR_NULL_PTR(ptr))
73297 + goto report;
73298 +
73299 + if (!virt_addr_valid(ptr))
73300 + return;
73301 +
73302 + type = "<process stack>";
73303 + sp = slob_page(ptr);
73304 + if (!PageSlab((struct page*)sp)) {
73305 + if (object_is_on_stack(ptr, n) == -1)
73306 + goto report;
73307 + return;
73308 + }
73309 +
73310 + type = "<slob>";
73311 + if (sp->size) {
73312 + base = page_address(&sp->page);
73313 + if (base <= ptr && n <= sp->size - (ptr - base))
73314 + return;
73315 + goto report;
73316 + }
73317 +
73318 + /* some tricky double walking to find the chunk */
73319 + spin_lock_irqsave(&slob_lock, flags);
73320 + base = (void *)((unsigned long)ptr & PAGE_MASK);
73321 + free = sp->free;
73322 +
73323 + while (!slob_last(free) && (void *)free <= ptr) {
73324 + base = free + slob_units(free);
73325 + free = slob_next(free);
73326 + }
73327 +
73328 + while (base < (void *)free) {
73329 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73330 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
73331 + int offset;
73332 +
73333 + if (ptr < base + align)
73334 + break;
73335 +
73336 + offset = ptr - base - align;
73337 + if (offset >= m) {
73338 + base += size;
73339 + continue;
73340 + }
73341 +
73342 + if (n > m - offset)
73343 + break;
73344 +
73345 + spin_unlock_irqrestore(&slob_lock, flags);
73346 + return;
73347 + }
73348 +
73349 + spin_unlock_irqrestore(&slob_lock, flags);
73350 +report:
73351 + pax_report_usercopy(ptr, n, to, type);
73352 +#endif
73353 +
73354 +}
73355 +EXPORT_SYMBOL(check_object_size);
73356 +
73357 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73358 size_t ksize(const void *block)
73359 {
73360 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
73361 sp = slob_page(block);
73362 if (is_slob_page(sp)) {
73363 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73364 - unsigned int *m = (unsigned int *)(block - align);
73365 - return SLOB_UNITS(*m) * SLOB_UNIT;
73366 + slob_t *m = (slob_t *)(block - align);
73367 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73368 } else
73369 - return sp->page.private;
73370 + return sp->size;
73371 }
73372 EXPORT_SYMBOL(ksize);
73373
73374 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73375 {
73376 struct kmem_cache *c;
73377
73378 +#ifdef CONFIG_PAX_USERCOPY
73379 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
73380 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73381 +#else
73382 c = slob_alloc(sizeof(struct kmem_cache),
73383 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73384 +#endif
73385
73386 if (c) {
73387 c->name = name;
73388 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73389
73390 lockdep_trace_alloc(flags);
73391
73392 +#ifdef CONFIG_PAX_USERCOPY
73393 + b = __kmalloc_node_align(c->size, flags, node, c->align);
73394 +#else
73395 if (c->size < PAGE_SIZE) {
73396 b = slob_alloc(c->size, flags, c->align, node);
73397 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73398 SLOB_UNITS(c->size) * SLOB_UNIT,
73399 flags, node);
73400 } else {
73401 + struct slob_page *sp;
73402 +
73403 b = slob_new_pages(flags, get_order(c->size), node);
73404 + sp = slob_page(b);
73405 + sp->size = c->size;
73406 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73407 PAGE_SIZE << get_order(c->size),
73408 flags, node);
73409 }
73410 +#endif
73411
73412 if (c->ctor)
73413 c->ctor(b);
73414 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73415
73416 static void __kmem_cache_free(void *b, int size)
73417 {
73418 - if (size < PAGE_SIZE)
73419 + struct slob_page *sp = slob_page(b);
73420 +
73421 + if (is_slob_page(sp))
73422 slob_free(b, size);
73423 - else
73424 + else {
73425 + clear_slob_page(sp);
73426 + free_slob_page(sp);
73427 + sp->size = 0;
73428 slob_free_pages(b, get_order(size));
73429 + }
73430 }
73431
73432 static void kmem_rcu_free(struct rcu_head *head)
73433 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73434
73435 void kmem_cache_free(struct kmem_cache *c, void *b)
73436 {
73437 + int size = c->size;
73438 +
73439 +#ifdef CONFIG_PAX_USERCOPY
73440 + if (size + c->align < PAGE_SIZE) {
73441 + size += c->align;
73442 + b -= c->align;
73443 + }
73444 +#endif
73445 +
73446 kmemleak_free_recursive(b, c->flags);
73447 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73448 struct slob_rcu *slob_rcu;
73449 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73450 - slob_rcu->size = c->size;
73451 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73452 + slob_rcu->size = size;
73453 call_rcu(&slob_rcu->head, kmem_rcu_free);
73454 } else {
73455 - __kmem_cache_free(b, c->size);
73456 + __kmem_cache_free(b, size);
73457 }
73458
73459 +#ifdef CONFIG_PAX_USERCOPY
73460 + trace_kfree(_RET_IP_, b);
73461 +#else
73462 trace_kmem_cache_free(_RET_IP_, b);
73463 +#endif
73464 +
73465 }
73466 EXPORT_SYMBOL(kmem_cache_free);
73467
73468 diff --git a/mm/slub.c b/mm/slub.c
73469 index 7c54fe8..0bb4ac5 100644
73470 --- a/mm/slub.c
73471 +++ b/mm/slub.c
73472 @@ -208,7 +208,7 @@ struct track {
73473
73474 enum track_item { TRACK_ALLOC, TRACK_FREE };
73475
73476 -#ifdef CONFIG_SYSFS
73477 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73478 static int sysfs_slab_add(struct kmem_cache *);
73479 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73480 static void sysfs_slab_remove(struct kmem_cache *);
73481 @@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
73482 if (!t->addr)
73483 return;
73484
73485 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73486 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73487 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73488 #ifdef CONFIG_STACKTRACE
73489 {
73490 @@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73491
73492 page = virt_to_head_page(x);
73493
73494 + BUG_ON(!PageSlab(page));
73495 +
73496 slab_free(s, page, x, _RET_IP_);
73497
73498 trace_kmem_cache_free(_RET_IP_, x);
73499 @@ -2489,7 +2491,7 @@ static int slub_min_objects;
73500 * Merge control. If this is set then no merging of slab caches will occur.
73501 * (Could be removed. This was introduced to pacify the merge skeptics.)
73502 */
73503 -static int slub_nomerge;
73504 +static int slub_nomerge = 1;
73505
73506 /*
73507 * Calculate the order of allocation given an slab object size.
73508 @@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73509 * list to avoid pounding the page allocator excessively.
73510 */
73511 set_min_partial(s, ilog2(s->size));
73512 - s->refcount = 1;
73513 + atomic_set(&s->refcount, 1);
73514 #ifdef CONFIG_NUMA
73515 s->remote_node_defrag_ratio = 1000;
73516 #endif
73517 @@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73518 void kmem_cache_destroy(struct kmem_cache *s)
73519 {
73520 down_write(&slub_lock);
73521 - s->refcount--;
73522 - if (!s->refcount) {
73523 + if (atomic_dec_and_test(&s->refcount)) {
73524 list_del(&s->list);
73525 if (kmem_cache_close(s)) {
73526 printk(KERN_ERR "SLUB %s: %s called for cache that "
73527 @@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73528 EXPORT_SYMBOL(__kmalloc_node);
73529 #endif
73530
73531 +void check_object_size(const void *ptr, unsigned long n, bool to)
73532 +{
73533 +
73534 +#ifdef CONFIG_PAX_USERCOPY
73535 + struct page *page;
73536 + struct kmem_cache *s = NULL;
73537 + unsigned long offset;
73538 + const char *type;
73539 +
73540 + if (!n)
73541 + return;
73542 +
73543 + type = "<null>";
73544 + if (ZERO_OR_NULL_PTR(ptr))
73545 + goto report;
73546 +
73547 + if (!virt_addr_valid(ptr))
73548 + return;
73549 +
73550 + page = virt_to_head_page(ptr);
73551 +
73552 + type = "<process stack>";
73553 + if (!PageSlab(page)) {
73554 + if (object_is_on_stack(ptr, n) == -1)
73555 + goto report;
73556 + return;
73557 + }
73558 +
73559 + s = page->slab;
73560 + type = s->name;
73561 + if (!(s->flags & SLAB_USERCOPY))
73562 + goto report;
73563 +
73564 + offset = (ptr - page_address(page)) % s->size;
73565 + if (offset <= s->objsize && n <= s->objsize - offset)
73566 + return;
73567 +
73568 +report:
73569 + pax_report_usercopy(ptr, n, to, type);
73570 +#endif
73571 +
73572 +}
73573 +EXPORT_SYMBOL(check_object_size);
73574 +
73575 size_t ksize(const void *object)
73576 {
73577 struct page *page;
73578 @@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73579 int node;
73580
73581 list_add(&s->list, &slab_caches);
73582 - s->refcount = -1;
73583 + atomic_set(&s->refcount, -1);
73584
73585 for_each_node_state(node, N_NORMAL_MEMORY) {
73586 struct kmem_cache_node *n = get_node(s, node);
73587 @@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
73588
73589 /* Caches that are not of the two-to-the-power-of size */
73590 if (KMALLOC_MIN_SIZE <= 32) {
73591 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73592 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73593 caches++;
73594 }
73595
73596 if (KMALLOC_MIN_SIZE <= 64) {
73597 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73598 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73599 caches++;
73600 }
73601
73602 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73603 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73604 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73605 caches++;
73606 }
73607
73608 @@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73609 /*
73610 * We may have set a slab to be unmergeable during bootstrap.
73611 */
73612 - if (s->refcount < 0)
73613 + if (atomic_read(&s->refcount) < 0)
73614 return 1;
73615
73616 return 0;
73617 @@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73618 down_write(&slub_lock);
73619 s = find_mergeable(size, align, flags, name, ctor);
73620 if (s) {
73621 - s->refcount++;
73622 + atomic_inc(&s->refcount);
73623 /*
73624 * Adjust the object sizes so that we clear
73625 * the complete object on kzalloc.
73626 @@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73627 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73628
73629 if (sysfs_slab_alias(s, name)) {
73630 - s->refcount--;
73631 + atomic_dec(&s->refcount);
73632 goto err;
73633 }
73634 up_write(&slub_lock);
73635 @@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73636 }
73637 #endif
73638
73639 -#ifdef CONFIG_SYSFS
73640 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73641 static int count_inuse(struct page *page)
73642 {
73643 return page->inuse;
73644 @@ -4280,12 +4325,12 @@ static void resiliency_test(void)
73645 validate_slab_cache(kmalloc_caches[9]);
73646 }
73647 #else
73648 -#ifdef CONFIG_SYSFS
73649 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73650 static void resiliency_test(void) {};
73651 #endif
73652 #endif
73653
73654 -#ifdef CONFIG_SYSFS
73655 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73656 enum slab_stat_type {
73657 SL_ALL, /* All slabs */
73658 SL_PARTIAL, /* Only partially allocated slabs */
73659 @@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
73660
73661 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73662 {
73663 - return sprintf(buf, "%d\n", s->refcount - 1);
73664 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73665 }
73666 SLAB_ATTR_RO(aliases);
73667
73668 @@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kmem_cache *s)
73669 return name;
73670 }
73671
73672 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73673 static int sysfs_slab_add(struct kmem_cache *s)
73674 {
73675 int err;
73676 @@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73677 kobject_del(&s->kobj);
73678 kobject_put(&s->kobj);
73679 }
73680 +#endif
73681
73682 /*
73683 * Need to buffer aliases during bootup until sysfs becomes
73684 @@ -5100,6 +5147,7 @@ struct saved_alias {
73685
73686 static struct saved_alias *alias_list;
73687
73688 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73689 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73690 {
73691 struct saved_alias *al;
73692 @@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73693 alias_list = al;
73694 return 0;
73695 }
73696 +#endif
73697
73698 static int __init slab_sysfs_init(void)
73699 {
73700 @@ -5257,7 +5306,13 @@ static const struct file_operations proc_slabinfo_operations = {
73701
73702 static int __init slab_proc_init(void)
73703 {
73704 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
73705 + mode_t gr_mode = S_IRUGO;
73706 +
73707 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73708 + gr_mode = S_IRUSR;
73709 +#endif
73710 +
73711 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
73712 return 0;
73713 }
73714 module_init(slab_proc_init);
73715 diff --git a/mm/swap.c b/mm/swap.c
73716 index 87627f1..8a9eb34 100644
73717 --- a/mm/swap.c
73718 +++ b/mm/swap.c
73719 @@ -31,6 +31,7 @@
73720 #include <linux/backing-dev.h>
73721 #include <linux/memcontrol.h>
73722 #include <linux/gfp.h>
73723 +#include <linux/hugetlb.h>
73724
73725 #include "internal.h"
73726
73727 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
73728
73729 __page_cache_release(page);
73730 dtor = get_compound_page_dtor(page);
73731 + if (!PageHuge(page))
73732 + BUG_ON(dtor != free_compound_page);
73733 (*dtor)(page);
73734 }
73735
73736 diff --git a/mm/swapfile.c b/mm/swapfile.c
73737 index 17bc224..1677059 100644
73738 --- a/mm/swapfile.c
73739 +++ b/mm/swapfile.c
73740 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
73741
73742 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73743 /* Activity counter to indicate that a swapon or swapoff has occurred */
73744 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73745 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73746
73747 static inline unsigned char swap_count(unsigned char ent)
73748 {
73749 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73750 }
73751 filp_close(swap_file, NULL);
73752 err = 0;
73753 - atomic_inc(&proc_poll_event);
73754 + atomic_inc_unchecked(&proc_poll_event);
73755 wake_up_interruptible(&proc_poll_wait);
73756
73757 out_dput:
73758 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73759
73760 poll_wait(file, &proc_poll_wait, wait);
73761
73762 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73763 - seq->poll_event = atomic_read(&proc_poll_event);
73764 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73765 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73766 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73767 }
73768
73769 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73770 return ret;
73771
73772 seq = file->private_data;
73773 - seq->poll_event = atomic_read(&proc_poll_event);
73774 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73775 return 0;
73776 }
73777
73778 @@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73779 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73780
73781 mutex_unlock(&swapon_mutex);
73782 - atomic_inc(&proc_poll_event);
73783 + atomic_inc_unchecked(&proc_poll_event);
73784 wake_up_interruptible(&proc_poll_wait);
73785
73786 if (S_ISREG(inode->i_mode))
73787 diff --git a/mm/util.c b/mm/util.c
73788 index 88ea1bd..0f1dfdb 100644
73789 --- a/mm/util.c
73790 +++ b/mm/util.c
73791 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
73792 * allocated buffer. Use this if you don't want to free the buffer immediately
73793 * like, for example, with RCU.
73794 */
73795 +#undef __krealloc
73796 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
73797 {
73798 void *ret;
73799 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
73800 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
73801 * %NULL pointer, the object pointed to is freed.
73802 */
73803 +#undef krealloc
73804 void *krealloc(const void *p, size_t new_size, gfp_t flags)
73805 {
73806 void *ret;
73807 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
73808 void arch_pick_mmap_layout(struct mm_struct *mm)
73809 {
73810 mm->mmap_base = TASK_UNMAPPED_BASE;
73811 +
73812 +#ifdef CONFIG_PAX_RANDMMAP
73813 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73814 + mm->mmap_base += mm->delta_mmap;
73815 +#endif
73816 +
73817 mm->get_unmapped_area = arch_get_unmapped_area;
73818 mm->unmap_area = arch_unmap_area;
73819 }
73820 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73821 index 56faf31..75c1a4c 100644
73822 --- a/mm/vmalloc.c
73823 +++ b/mm/vmalloc.c
73824 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73825
73826 pte = pte_offset_kernel(pmd, addr);
73827 do {
73828 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73829 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73830 +
73831 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73832 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73833 + BUG_ON(!pte_exec(*pte));
73834 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73835 + continue;
73836 + }
73837 +#endif
73838 +
73839 + {
73840 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73841 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73842 + }
73843 } while (pte++, addr += PAGE_SIZE, addr != end);
73844 }
73845
73846 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73847 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73848 {
73849 pte_t *pte;
73850 + int ret = -ENOMEM;
73851
73852 /*
73853 * nr is a running index into the array which helps higher level
73854 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73855 pte = pte_alloc_kernel(pmd, addr);
73856 if (!pte)
73857 return -ENOMEM;
73858 +
73859 + pax_open_kernel();
73860 do {
73861 struct page *page = pages[*nr];
73862
73863 - if (WARN_ON(!pte_none(*pte)))
73864 - return -EBUSY;
73865 - if (WARN_ON(!page))
73866 - return -ENOMEM;
73867 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73868 + if (pgprot_val(prot) & _PAGE_NX)
73869 +#endif
73870 +
73871 + if (WARN_ON(!pte_none(*pte))) {
73872 + ret = -EBUSY;
73873 + goto out;
73874 + }
73875 + if (WARN_ON(!page)) {
73876 + ret = -ENOMEM;
73877 + goto out;
73878 + }
73879 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73880 (*nr)++;
73881 } while (pte++, addr += PAGE_SIZE, addr != end);
73882 - return 0;
73883 + ret = 0;
73884 +out:
73885 + pax_close_kernel();
73886 + return ret;
73887 }
73888
73889 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73890 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73891 * and fall back on vmalloc() if that fails. Others
73892 * just put it in the vmalloc space.
73893 */
73894 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73895 +#ifdef CONFIG_MODULES
73896 +#ifdef MODULES_VADDR
73897 unsigned long addr = (unsigned long)x;
73898 if (addr >= MODULES_VADDR && addr < MODULES_END)
73899 return 1;
73900 #endif
73901 +
73902 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73903 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73904 + return 1;
73905 +#endif
73906 +
73907 +#endif
73908 +
73909 return is_vmalloc_addr(x);
73910 }
73911
73912 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73913
73914 if (!pgd_none(*pgd)) {
73915 pud_t *pud = pud_offset(pgd, addr);
73916 +#ifdef CONFIG_X86
73917 + if (!pud_large(*pud))
73918 +#endif
73919 if (!pud_none(*pud)) {
73920 pmd_t *pmd = pmd_offset(pud, addr);
73921 +#ifdef CONFIG_X86
73922 + if (!pmd_large(*pmd))
73923 +#endif
73924 if (!pmd_none(*pmd)) {
73925 pte_t *ptep, pte;
73926
73927 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73928 struct vm_struct *area;
73929
73930 BUG_ON(in_interrupt());
73931 +
73932 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73933 + if (flags & VM_KERNEXEC) {
73934 + if (start != VMALLOC_START || end != VMALLOC_END)
73935 + return NULL;
73936 + start = (unsigned long)MODULES_EXEC_VADDR;
73937 + end = (unsigned long)MODULES_EXEC_END;
73938 + }
73939 +#endif
73940 +
73941 if (flags & VM_IOREMAP) {
73942 int bit = fls(size);
73943
73944 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
73945 if (count > totalram_pages)
73946 return NULL;
73947
73948 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73949 + if (!(pgprot_val(prot) & _PAGE_NX))
73950 + flags |= VM_KERNEXEC;
73951 +#endif
73952 +
73953 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73954 __builtin_return_address(0));
73955 if (!area)
73956 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73957 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73958 return NULL;
73959
73960 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73961 + if (!(pgprot_val(prot) & _PAGE_NX))
73962 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73963 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73964 + else
73965 +#endif
73966 +
73967 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73968 start, end, node, gfp_mask, caller);
73969
73970 @@ -1672,6 +1734,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
73971 gfp_mask, prot, node, caller);
73972 }
73973
73974 +#undef __vmalloc
73975 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
73976 {
73977 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
73978 @@ -1695,6 +1758,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
73979 * For tight control over page level allocator and protection flags
73980 * use __vmalloc() instead.
73981 */
73982 +#undef vmalloc
73983 void *vmalloc(unsigned long size)
73984 {
73985 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
73986 @@ -1711,6 +1775,7 @@ EXPORT_SYMBOL(vmalloc);
73987 * For tight control over page level allocator and protection flags
73988 * use __vmalloc() instead.
73989 */
73990 +#undef vzalloc
73991 void *vzalloc(unsigned long size)
73992 {
73993 return __vmalloc_node_flags(size, -1,
73994 @@ -1725,6 +1790,7 @@ EXPORT_SYMBOL(vzalloc);
73995 * The resulting memory area is zeroed so it can be mapped to userspace
73996 * without leaking data.
73997 */
73998 +#undef vmalloc_user
73999 void *vmalloc_user(unsigned long size)
74000 {
74001 struct vm_struct *area;
74002 @@ -1752,6 +1818,7 @@ EXPORT_SYMBOL(vmalloc_user);
74003 * For tight control over page level allocator and protection flags
74004 * use __vmalloc() instead.
74005 */
74006 +#undef vmalloc_node
74007 void *vmalloc_node(unsigned long size, int node)
74008 {
74009 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
74010 @@ -1771,6 +1838,7 @@ EXPORT_SYMBOL(vmalloc_node);
74011 * For tight control over page level allocator and protection flags
74012 * use __vmalloc_node() instead.
74013 */
74014 +#undef vzalloc_node
74015 void *vzalloc_node(unsigned long size, int node)
74016 {
74017 return __vmalloc_node_flags(size, node,
74018 @@ -1793,10 +1861,10 @@ EXPORT_SYMBOL(vzalloc_node);
74019 * For tight control over page level allocator and protection flags
74020 * use __vmalloc() instead.
74021 */
74022 -
74023 +#undef vmalloc_exec
74024 void *vmalloc_exec(unsigned long size)
74025 {
74026 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74027 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74028 -1, __builtin_return_address(0));
74029 }
74030
74031 @@ -1815,6 +1883,7 @@ void *vmalloc_exec(unsigned long size)
74032 * Allocate enough 32bit PA addressable pages to cover @size from the
74033 * page level allocator and map them into contiguous kernel virtual space.
74034 */
74035 +#undef vmalloc_32
74036 void *vmalloc_32(unsigned long size)
74037 {
74038 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
74039 @@ -1829,6 +1898,7 @@ EXPORT_SYMBOL(vmalloc_32);
74040 * The resulting memory area is 32bit addressable and zeroed so it can be
74041 * mapped to userspace without leaking data.
74042 */
74043 +#undef vmalloc_32_user
74044 void *vmalloc_32_user(unsigned long size)
74045 {
74046 struct vm_struct *area;
74047 @@ -2091,6 +2161,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74048 unsigned long uaddr = vma->vm_start;
74049 unsigned long usize = vma->vm_end - vma->vm_start;
74050
74051 + BUG_ON(vma->vm_mirror);
74052 +
74053 if ((PAGE_SIZE-1) & (unsigned long)addr)
74054 return -EINVAL;
74055
74056 diff --git a/mm/vmstat.c b/mm/vmstat.c
74057 index d52b13d..381d1ac 100644
74058 --- a/mm/vmstat.c
74059 +++ b/mm/vmstat.c
74060 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74061 *
74062 * vm_stat contains the global counters
74063 */
74064 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
74065 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
74066 EXPORT_SYMBOL(vm_stat);
74067
74068 #ifdef CONFIG_SMP
74069 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74070 v = p->vm_stat_diff[i];
74071 p->vm_stat_diff[i] = 0;
74072 local_irq_restore(flags);
74073 - atomic_long_add(v, &zone->vm_stat[i]);
74074 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74075 global_diff[i] += v;
74076 #ifdef CONFIG_NUMA
74077 /* 3 seconds idle till flush */
74078 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74079
74080 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74081 if (global_diff[i])
74082 - atomic_long_add(global_diff[i], &vm_stat[i]);
74083 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74084 }
74085
74086 #endif
74087 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
74088 start_cpu_timer(cpu);
74089 #endif
74090 #ifdef CONFIG_PROC_FS
74091 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74092 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74093 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74094 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74095 + {
74096 + mode_t gr_mode = S_IRUGO;
74097 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74098 + gr_mode = S_IRUSR;
74099 +#endif
74100 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74101 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74102 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74103 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74104 +#else
74105 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74106 +#endif
74107 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74108 + }
74109 #endif
74110 return 0;
74111 }
74112 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74113 index 8970ba1..e3361fe 100644
74114 --- a/net/8021q/vlan.c
74115 +++ b/net/8021q/vlan.c
74116 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74117 err = -EPERM;
74118 if (!capable(CAP_NET_ADMIN))
74119 break;
74120 - if ((args.u.name_type >= 0) &&
74121 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74122 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74123 struct vlan_net *vn;
74124
74125 vn = net_generic(net, vlan_net_id);
74126 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74127 index fdfdb57..38d368c 100644
74128 --- a/net/9p/trans_fd.c
74129 +++ b/net/9p/trans_fd.c
74130 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74131 oldfs = get_fs();
74132 set_fs(get_ds());
74133 /* The cast to a user pointer is valid due to the set_fs() */
74134 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74135 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74136 set_fs(oldfs);
74137
74138 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74139 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
74140 index e317583..3c8aeaf 100644
74141 --- a/net/9p/trans_virtio.c
74142 +++ b/net/9p/trans_virtio.c
74143 @@ -327,7 +327,7 @@ req_retry_pinned:
74144 } else {
74145 char *pbuf;
74146 if (req->tc->pubuf)
74147 - pbuf = (__force char *) req->tc->pubuf;
74148 + pbuf = (char __force_kernel *) req->tc->pubuf;
74149 else
74150 pbuf = req->tc->pkbuf;
74151 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
74152 @@ -357,7 +357,7 @@ req_retry_pinned:
74153 } else {
74154 char *pbuf;
74155 if (req->tc->pubuf)
74156 - pbuf = (__force char *) req->tc->pubuf;
74157 + pbuf = (char __force_kernel *) req->tc->pubuf;
74158 else
74159 pbuf = req->tc->pkbuf;
74160
74161 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74162 index f41f026..fe76ea8 100644
74163 --- a/net/atm/atm_misc.c
74164 +++ b/net/atm/atm_misc.c
74165 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74166 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74167 return 1;
74168 atm_return(vcc, truesize);
74169 - atomic_inc(&vcc->stats->rx_drop);
74170 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74171 return 0;
74172 }
74173 EXPORT_SYMBOL(atm_charge);
74174 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74175 }
74176 }
74177 atm_return(vcc, guess);
74178 - atomic_inc(&vcc->stats->rx_drop);
74179 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74180 return NULL;
74181 }
74182 EXPORT_SYMBOL(atm_alloc_charge);
74183 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74184
74185 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74186 {
74187 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74188 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74189 __SONET_ITEMS
74190 #undef __HANDLE_ITEM
74191 }
74192 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74193
74194 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74195 {
74196 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74197 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74198 __SONET_ITEMS
74199 #undef __HANDLE_ITEM
74200 }
74201 diff --git a/net/atm/lec.h b/net/atm/lec.h
74202 index dfc0719..47c5322 100644
74203 --- a/net/atm/lec.h
74204 +++ b/net/atm/lec.h
74205 @@ -48,7 +48,7 @@ struct lane2_ops {
74206 const u8 *tlvs, u32 sizeoftlvs);
74207 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74208 const u8 *tlvs, u32 sizeoftlvs);
74209 -};
74210 +} __no_const;
74211
74212 /*
74213 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74214 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74215 index 0919a88..a23d54e 100644
74216 --- a/net/atm/mpc.h
74217 +++ b/net/atm/mpc.h
74218 @@ -33,7 +33,7 @@ struct mpoa_client {
74219 struct mpc_parameters parameters; /* parameters for this client */
74220
74221 const struct net_device_ops *old_ops;
74222 - struct net_device_ops new_ops;
74223 + net_device_ops_no_const new_ops;
74224 };
74225
74226
74227 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
74228 index d1b2d9a..7cc2219 100644
74229 --- a/net/atm/mpoa_caches.c
74230 +++ b/net/atm/mpoa_caches.c
74231 @@ -255,6 +255,8 @@ static void check_resolving_entries(struct mpoa_client *client)
74232 struct timeval now;
74233 struct k_message msg;
74234
74235 + pax_track_stack();
74236 +
74237 do_gettimeofday(&now);
74238
74239 read_lock_bh(&client->ingress_lock);
74240 diff --git a/net/atm/proc.c b/net/atm/proc.c
74241 index 0d020de..011c7bb 100644
74242 --- a/net/atm/proc.c
74243 +++ b/net/atm/proc.c
74244 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74245 const struct k_atm_aal_stats *stats)
74246 {
74247 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74248 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74249 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74250 - atomic_read(&stats->rx_drop));
74251 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74252 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74253 + atomic_read_unchecked(&stats->rx_drop));
74254 }
74255
74256 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74257 diff --git a/net/atm/resources.c b/net/atm/resources.c
74258 index 23f45ce..c748f1a 100644
74259 --- a/net/atm/resources.c
74260 +++ b/net/atm/resources.c
74261 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74262 static void copy_aal_stats(struct k_atm_aal_stats *from,
74263 struct atm_aal_stats *to)
74264 {
74265 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74266 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74267 __AAL_STAT_ITEMS
74268 #undef __HANDLE_ITEM
74269 }
74270 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74271 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74272 struct atm_aal_stats *to)
74273 {
74274 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74275 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74276 __AAL_STAT_ITEMS
74277 #undef __HANDLE_ITEM
74278 }
74279 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74280 index db7aacf..991e539 100644
74281 --- a/net/batman-adv/hard-interface.c
74282 +++ b/net/batman-adv/hard-interface.c
74283 @@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74284 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74285 dev_add_pack(&hard_iface->batman_adv_ptype);
74286
74287 - atomic_set(&hard_iface->seqno, 1);
74288 - atomic_set(&hard_iface->frag_seqno, 1);
74289 + atomic_set_unchecked(&hard_iface->seqno, 1);
74290 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74291 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74292 hard_iface->net_dev->name);
74293
74294 diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
74295 index 0f32c81..82d1895 100644
74296 --- a/net/batman-adv/routing.c
74297 +++ b/net/batman-adv/routing.c
74298 @@ -656,7 +656,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
74299 return;
74300
74301 /* could be changed by schedule_own_packet() */
74302 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
74303 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74304
74305 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
74306
74307 diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
74308 index 58d1447..2a66c8c 100644
74309 --- a/net/batman-adv/send.c
74310 +++ b/net/batman-adv/send.c
74311 @@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
74312
74313 /* change sequence number to network order */
74314 batman_packet->seqno =
74315 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
74316 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74317
74318 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
74319 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
74320 @@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
74321 else
74322 batman_packet->gw_flags = NO_FLAGS;
74323
74324 - atomic_inc(&hard_iface->seqno);
74325 + atomic_inc_unchecked(&hard_iface->seqno);
74326
74327 slide_own_bcast_window(hard_iface);
74328 send_time = own_send_time(bat_priv);
74329 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74330 index 05dd351..2ecd19b 100644
74331 --- a/net/batman-adv/soft-interface.c
74332 +++ b/net/batman-adv/soft-interface.c
74333 @@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74334
74335 /* set broadcast sequence number */
74336 bcast_packet->seqno =
74337 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74338 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74339
74340 add_bcast_packet_to_list(bat_priv, skb, 1);
74341
74342 @@ -824,7 +824,7 @@ struct net_device *softif_create(const char *name)
74343 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74344
74345 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74346 - atomic_set(&bat_priv->bcast_seqno, 1);
74347 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74348 atomic_set(&bat_priv->ttvn, 0);
74349 atomic_set(&bat_priv->tt_local_changes, 0);
74350 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74351 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74352 index 51a0db7..b8a62be 100644
74353 --- a/net/batman-adv/types.h
74354 +++ b/net/batman-adv/types.h
74355 @@ -38,8 +38,8 @@ struct hard_iface {
74356 int16_t if_num;
74357 char if_status;
74358 struct net_device *net_dev;
74359 - atomic_t seqno;
74360 - atomic_t frag_seqno;
74361 + atomic_unchecked_t seqno;
74362 + atomic_unchecked_t frag_seqno;
74363 unsigned char *packet_buff;
74364 int packet_len;
74365 struct kobject *hardif_obj;
74366 @@ -153,7 +153,7 @@ struct bat_priv {
74367 atomic_t orig_interval; /* uint */
74368 atomic_t hop_penalty; /* uint */
74369 atomic_t log_level; /* uint */
74370 - atomic_t bcast_seqno;
74371 + atomic_unchecked_t bcast_seqno;
74372 atomic_t bcast_queue_left;
74373 atomic_t batman_queue_left;
74374 atomic_t ttvn; /* tranlation table version number */
74375 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74376 index 32b125f..f1447e0 100644
74377 --- a/net/batman-adv/unicast.c
74378 +++ b/net/batman-adv/unicast.c
74379 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74380 frag1->flags = UNI_FRAG_HEAD | large_tail;
74381 frag2->flags = large_tail;
74382
74383 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74384 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74385 frag1->seqno = htons(seqno - 1);
74386 frag2->seqno = htons(seqno);
74387
74388 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74389 index ea7f031..0615edc 100644
74390 --- a/net/bluetooth/hci_conn.c
74391 +++ b/net/bluetooth/hci_conn.c
74392 @@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
74393 cp.handle = cpu_to_le16(conn->handle);
74394 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74395 cp.ediv = ediv;
74396 - memcpy(cp.rand, rand, sizeof(rand));
74397 + memcpy(cp.rand, rand, sizeof(cp.rand));
74398
74399 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
74400 }
74401 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74402 memset(&cp, 0, sizeof(cp));
74403
74404 cp.handle = cpu_to_le16(conn->handle);
74405 - memcpy(cp.ltk, ltk, sizeof(ltk));
74406 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74407
74408 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74409 }
74410 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
74411 index 995cbe0..c056d6c 100644
74412 --- a/net/bridge/br_multicast.c
74413 +++ b/net/bridge/br_multicast.c
74414 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
74415 nexthdr = ip6h->nexthdr;
74416 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
74417
74418 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
74419 + if (nexthdr != IPPROTO_ICMPV6)
74420 return 0;
74421
74422 /* Okay, we found ICMPv6 header */
74423 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74424 index 5864cc4..94cab18 100644
74425 --- a/net/bridge/netfilter/ebtables.c
74426 +++ b/net/bridge/netfilter/ebtables.c
74427 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74428 tmp.valid_hooks = t->table->valid_hooks;
74429 }
74430 mutex_unlock(&ebt_mutex);
74431 - if (copy_to_user(user, &tmp, *len) != 0){
74432 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74433 BUGPRINT("c2u Didn't work\n");
74434 ret = -EFAULT;
74435 break;
74436 @@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_user(struct ebt_table *t,
74437 int ret;
74438 void __user *pos;
74439
74440 + pax_track_stack();
74441 +
74442 memset(&tinfo, 0, sizeof(tinfo));
74443
74444 if (cmd == EBT_SO_GET_ENTRIES) {
74445 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
74446 index a986280..13444a1 100644
74447 --- a/net/caif/caif_socket.c
74448 +++ b/net/caif/caif_socket.c
74449 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
74450 #ifdef CONFIG_DEBUG_FS
74451 struct debug_fs_counter {
74452 atomic_t caif_nr_socks;
74453 - atomic_t caif_sock_create;
74454 - atomic_t num_connect_req;
74455 - atomic_t num_connect_resp;
74456 - atomic_t num_connect_fail_resp;
74457 - atomic_t num_disconnect;
74458 - atomic_t num_remote_shutdown_ind;
74459 - atomic_t num_tx_flow_off_ind;
74460 - atomic_t num_tx_flow_on_ind;
74461 - atomic_t num_rx_flow_off;
74462 - atomic_t num_rx_flow_on;
74463 + atomic_unchecked_t caif_sock_create;
74464 + atomic_unchecked_t num_connect_req;
74465 + atomic_unchecked_t num_connect_resp;
74466 + atomic_unchecked_t num_connect_fail_resp;
74467 + atomic_unchecked_t num_disconnect;
74468 + atomic_unchecked_t num_remote_shutdown_ind;
74469 + atomic_unchecked_t num_tx_flow_off_ind;
74470 + atomic_unchecked_t num_tx_flow_on_ind;
74471 + atomic_unchecked_t num_rx_flow_off;
74472 + atomic_unchecked_t num_rx_flow_on;
74473 };
74474 static struct debug_fs_counter cnt;
74475 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74476 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74477 #define dbfs_atomic_dec(v) atomic_dec_return(v)
74478 #else
74479 #define dbfs_atomic_inc(v) 0
74480 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74481 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74482 sk_rcvbuf_lowwater(cf_sk));
74483 set_rx_flow_off(cf_sk);
74484 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74485 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74486 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74487 }
74488
74489 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74490 set_rx_flow_off(cf_sk);
74491 if (net_ratelimit())
74492 pr_debug("sending flow OFF due to rmem_schedule\n");
74493 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74494 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74495 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74496 }
74497 skb->dev = NULL;
74498 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
74499 switch (flow) {
74500 case CAIF_CTRLCMD_FLOW_ON_IND:
74501 /* OK from modem to start sending again */
74502 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74503 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74504 set_tx_flow_on(cf_sk);
74505 cf_sk->sk.sk_state_change(&cf_sk->sk);
74506 break;
74507
74508 case CAIF_CTRLCMD_FLOW_OFF_IND:
74509 /* Modem asks us to shut up */
74510 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74511 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74512 set_tx_flow_off(cf_sk);
74513 cf_sk->sk.sk_state_change(&cf_sk->sk);
74514 break;
74515 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74516 /* We're now connected */
74517 caif_client_register_refcnt(&cf_sk->layer,
74518 cfsk_hold, cfsk_put);
74519 - dbfs_atomic_inc(&cnt.num_connect_resp);
74520 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74521 cf_sk->sk.sk_state = CAIF_CONNECTED;
74522 set_tx_flow_on(cf_sk);
74523 cf_sk->sk.sk_state_change(&cf_sk->sk);
74524 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74525
74526 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74527 /* Connect request failed */
74528 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74529 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74530 cf_sk->sk.sk_err = ECONNREFUSED;
74531 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74532 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74533 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74534
74535 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74536 /* Modem has closed this connection, or device is down. */
74537 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74538 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74539 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74540 cf_sk->sk.sk_err = ECONNRESET;
74541 set_rx_flow_on(cf_sk);
74542 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
74543 return;
74544
74545 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74546 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
74547 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74548 set_rx_flow_on(cf_sk);
74549 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74550 }
74551 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
74552 /*ifindex = id of the interface.*/
74553 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74554
74555 - dbfs_atomic_inc(&cnt.num_connect_req);
74556 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74557 cf_sk->layer.receive = caif_sktrecv_cb;
74558
74559 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
74560 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
74561 spin_unlock_bh(&sk->sk_receive_queue.lock);
74562 sock->sk = NULL;
74563
74564 - dbfs_atomic_inc(&cnt.num_disconnect);
74565 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74566
74567 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
74568 if (cf_sk->debugfs_socket_dir != NULL)
74569 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
74570 cf_sk->conn_req.protocol = protocol;
74571 /* Increase the number of sockets created. */
74572 dbfs_atomic_inc(&cnt.caif_nr_socks);
74573 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
74574 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74575 #ifdef CONFIG_DEBUG_FS
74576 if (!IS_ERR(debugfsdir)) {
74577
74578 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74579 index e22671b..6598ea0 100644
74580 --- a/net/caif/cfctrl.c
74581 +++ b/net/caif/cfctrl.c
74582 @@ -9,6 +9,7 @@
74583 #include <linux/stddef.h>
74584 #include <linux/spinlock.h>
74585 #include <linux/slab.h>
74586 +#include <linux/sched.h>
74587 #include <net/caif/caif_layer.h>
74588 #include <net/caif/cfpkt.h>
74589 #include <net/caif/cfctrl.h>
74590 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
74591 dev_info.id = 0xff;
74592 memset(this, 0, sizeof(*this));
74593 cfsrvl_init(&this->serv, 0, &dev_info, false);
74594 - atomic_set(&this->req_seq_no, 1);
74595 - atomic_set(&this->rsp_seq_no, 1);
74596 + atomic_set_unchecked(&this->req_seq_no, 1);
74597 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74598 this->serv.layer.receive = cfctrl_recv;
74599 sprintf(this->serv.layer.name, "ctrl");
74600 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74601 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74602 struct cfctrl_request_info *req)
74603 {
74604 spin_lock_bh(&ctrl->info_list_lock);
74605 - atomic_inc(&ctrl->req_seq_no);
74606 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74607 + atomic_inc_unchecked(&ctrl->req_seq_no);
74608 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74609 list_add_tail(&req->list, &ctrl->list);
74610 spin_unlock_bh(&ctrl->info_list_lock);
74611 }
74612 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74613 if (p != first)
74614 pr_warn("Requests are not received in order\n");
74615
74616 - atomic_set(&ctrl->rsp_seq_no,
74617 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74618 p->sequence_no);
74619 list_del(&p->list);
74620 goto out;
74621 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
74622 struct cfctrl *cfctrl = container_obj(layer);
74623 struct cfctrl_request_info rsp, *req;
74624
74625 + pax_track_stack();
74626
74627 cfpkt_extr_head(pkt, &cmdrsp, 1);
74628 cmd = cmdrsp & CFCTRL_CMD_MASK;
74629 diff --git a/net/compat.c b/net/compat.c
74630 index c578d93..257fab7 100644
74631 --- a/net/compat.c
74632 +++ b/net/compat.c
74633 @@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74634 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74635 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74636 return -EFAULT;
74637 - kmsg->msg_name = compat_ptr(tmp1);
74638 - kmsg->msg_iov = compat_ptr(tmp2);
74639 - kmsg->msg_control = compat_ptr(tmp3);
74640 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74641 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74642 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74643 return 0;
74644 }
74645
74646 @@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74647
74648 if (kern_msg->msg_namelen) {
74649 if (mode == VERIFY_READ) {
74650 - int err = move_addr_to_kernel(kern_msg->msg_name,
74651 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74652 kern_msg->msg_namelen,
74653 kern_address);
74654 if (err < 0)
74655 @@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74656 kern_msg->msg_name = NULL;
74657
74658 tot_len = iov_from_user_compat_to_kern(kern_iov,
74659 - (struct compat_iovec __user *)kern_msg->msg_iov,
74660 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74661 kern_msg->msg_iovlen);
74662 if (tot_len >= 0)
74663 kern_msg->msg_iov = kern_iov;
74664 @@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74665
74666 #define CMSG_COMPAT_FIRSTHDR(msg) \
74667 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74668 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74669 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74670 (struct compat_cmsghdr __user *)NULL)
74671
74672 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74673 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74674 (ucmlen) <= (unsigned long) \
74675 ((mhdr)->msg_controllen - \
74676 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74677 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74678
74679 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74680 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74681 {
74682 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74683 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74684 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74685 msg->msg_controllen)
74686 return NULL;
74687 return (struct compat_cmsghdr __user *)ptr;
74688 @@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74689 {
74690 struct compat_timeval ctv;
74691 struct compat_timespec cts[3];
74692 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74693 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74694 struct compat_cmsghdr cmhdr;
74695 int cmlen;
74696
74697 @@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74698
74699 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74700 {
74701 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74702 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74703 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74704 int fdnum = scm->fp->count;
74705 struct file **fp = scm->fp->fp;
74706 @@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74707 return -EFAULT;
74708 old_fs = get_fs();
74709 set_fs(KERNEL_DS);
74710 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74711 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74712 set_fs(old_fs);
74713
74714 return err;
74715 @@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74716 len = sizeof(ktime);
74717 old_fs = get_fs();
74718 set_fs(KERNEL_DS);
74719 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74720 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74721 set_fs(old_fs);
74722
74723 if (!err) {
74724 @@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74725 case MCAST_JOIN_GROUP:
74726 case MCAST_LEAVE_GROUP:
74727 {
74728 - struct compat_group_req __user *gr32 = (void *)optval;
74729 + struct compat_group_req __user *gr32 = (void __user *)optval;
74730 struct group_req __user *kgr =
74731 compat_alloc_user_space(sizeof(struct group_req));
74732 u32 interface;
74733 @@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74734 case MCAST_BLOCK_SOURCE:
74735 case MCAST_UNBLOCK_SOURCE:
74736 {
74737 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74738 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74739 struct group_source_req __user *kgsr = compat_alloc_user_space(
74740 sizeof(struct group_source_req));
74741 u32 interface;
74742 @@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74743 }
74744 case MCAST_MSFILTER:
74745 {
74746 - struct compat_group_filter __user *gf32 = (void *)optval;
74747 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74748 struct group_filter __user *kgf;
74749 u32 interface, fmode, numsrc;
74750
74751 @@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74752 char __user *optval, int __user *optlen,
74753 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74754 {
74755 - struct compat_group_filter __user *gf32 = (void *)optval;
74756 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74757 struct group_filter __user *kgf;
74758 int __user *koptlen;
74759 u32 interface, fmode, numsrc;
74760 diff --git a/net/core/datagram.c b/net/core/datagram.c
74761 index 18ac112..fe95ed9 100644
74762 --- a/net/core/datagram.c
74763 +++ b/net/core/datagram.c
74764 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74765 }
74766
74767 kfree_skb(skb);
74768 - atomic_inc(&sk->sk_drops);
74769 + atomic_inc_unchecked(&sk->sk_drops);
74770 sk_mem_reclaim_partial(sk);
74771
74772 return err;
74773 diff --git a/net/core/dev.c b/net/core/dev.c
74774 index ae5cf2d..2c950a1 100644
74775 --- a/net/core/dev.c
74776 +++ b/net/core/dev.c
74777 @@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const char *name)
74778 if (no_module && capable(CAP_NET_ADMIN))
74779 no_module = request_module("netdev-%s", name);
74780 if (no_module && capable(CAP_SYS_MODULE)) {
74781 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74782 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74783 +#else
74784 if (!request_module("%s", name))
74785 pr_err("Loading kernel module for a network device "
74786 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74787 "instead\n", name);
74788 +#endif
74789 }
74790 }
74791 EXPORT_SYMBOL(dev_load);
74792 @@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74793
74794 struct dev_gso_cb {
74795 void (*destructor)(struct sk_buff *skb);
74796 -};
74797 +} __no_const;
74798
74799 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74800
74801 @@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
74802 }
74803 EXPORT_SYMBOL(netif_rx_ni);
74804
74805 -static void net_tx_action(struct softirq_action *h)
74806 +static void net_tx_action(void)
74807 {
74808 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74809
74810 @@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *napi)
74811 }
74812 EXPORT_SYMBOL(netif_napi_del);
74813
74814 -static void net_rx_action(struct softirq_action *h)
74815 +static void net_rx_action(void)
74816 {
74817 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74818 unsigned long time_limit = jiffies + 2;
74819 diff --git a/net/core/flow.c b/net/core/flow.c
74820 index 555a456..de48421 100644
74821 --- a/net/core/flow.c
74822 +++ b/net/core/flow.c
74823 @@ -61,7 +61,7 @@ struct flow_cache {
74824 struct timer_list rnd_timer;
74825 };
74826
74827 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74828 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74829 EXPORT_SYMBOL(flow_cache_genid);
74830 static struct flow_cache flow_cache_global;
74831 static struct kmem_cache *flow_cachep __read_mostly;
74832 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74833
74834 static int flow_entry_valid(struct flow_cache_entry *fle)
74835 {
74836 - if (atomic_read(&flow_cache_genid) != fle->genid)
74837 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74838 return 0;
74839 if (fle->object && !fle->object->ops->check(fle->object))
74840 return 0;
74841 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74842 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74843 fcp->hash_count++;
74844 }
74845 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74846 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74847 flo = fle->object;
74848 if (!flo)
74849 goto ret_object;
74850 @@ -280,7 +280,7 @@ nocache:
74851 }
74852 flo = resolver(net, key, family, dir, flo, ctx);
74853 if (fle) {
74854 - fle->genid = atomic_read(&flow_cache_genid);
74855 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74856 if (!IS_ERR(flo))
74857 fle->object = flo;
74858 else
74859 diff --git a/net/core/iovec.c b/net/core/iovec.c
74860 index c40f27e..7f49254 100644
74861 --- a/net/core/iovec.c
74862 +++ b/net/core/iovec.c
74863 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74864 if (m->msg_namelen) {
74865 if (mode == VERIFY_READ) {
74866 void __user *namep;
74867 - namep = (void __user __force *) m->msg_name;
74868 + namep = (void __force_user *) m->msg_name;
74869 err = move_addr_to_kernel(namep, m->msg_namelen,
74870 address);
74871 if (err < 0)
74872 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74873 }
74874
74875 size = m->msg_iovlen * sizeof(struct iovec);
74876 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74877 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74878 return -EFAULT;
74879
74880 m->msg_iov = iov;
74881 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74882 index 99d9e95..209bae2 100644
74883 --- a/net/core/rtnetlink.c
74884 +++ b/net/core/rtnetlink.c
74885 @@ -57,7 +57,7 @@ struct rtnl_link {
74886 rtnl_doit_func doit;
74887 rtnl_dumpit_func dumpit;
74888 rtnl_calcit_func calcit;
74889 -};
74890 +} __no_const;
74891
74892 static DEFINE_MUTEX(rtnl_mutex);
74893 static u16 min_ifinfo_dump_size;
74894 diff --git a/net/core/scm.c b/net/core/scm.c
74895 index 811b53f..5d6c343 100644
74896 --- a/net/core/scm.c
74897 +++ b/net/core/scm.c
74898 @@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
74899 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74900 {
74901 struct cmsghdr __user *cm
74902 - = (__force struct cmsghdr __user *)msg->msg_control;
74903 + = (struct cmsghdr __force_user *)msg->msg_control;
74904 struct cmsghdr cmhdr;
74905 int cmlen = CMSG_LEN(len);
74906 int err;
74907 @@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74908 err = -EFAULT;
74909 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74910 goto out;
74911 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74912 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74913 goto out;
74914 cmlen = CMSG_SPACE(len);
74915 if (msg->msg_controllen < cmlen)
74916 @@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
74917 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74918 {
74919 struct cmsghdr __user *cm
74920 - = (__force struct cmsghdr __user*)msg->msg_control;
74921 + = (struct cmsghdr __force_user *)msg->msg_control;
74922
74923 int fdmax = 0;
74924 int fdnum = scm->fp->count;
74925 @@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74926 if (fdnum < fdmax)
74927 fdmax = fdnum;
74928
74929 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74930 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74931 i++, cmfptr++)
74932 {
74933 int new_fd;
74934 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
74935 index 387703f..035abcf 100644
74936 --- a/net/core/skbuff.c
74937 +++ b/net/core/skbuff.c
74938 @@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
74939 struct sock *sk = skb->sk;
74940 int ret = 0;
74941
74942 + pax_track_stack();
74943 +
74944 if (splice_grow_spd(pipe, &spd))
74945 return -ENOMEM;
74946
74947 diff --git a/net/core/sock.c b/net/core/sock.c
74948 index 11d67b3..df26d4b 100644
74949 --- a/net/core/sock.c
74950 +++ b/net/core/sock.c
74951 @@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74952 */
74953 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
74954 (unsigned)sk->sk_rcvbuf) {
74955 - atomic_inc(&sk->sk_drops);
74956 + atomic_inc_unchecked(&sk->sk_drops);
74957 trace_sock_rcvqueue_full(sk, skb);
74958 return -ENOMEM;
74959 }
74960 @@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74961 return err;
74962
74963 if (!sk_rmem_schedule(sk, skb->truesize)) {
74964 - atomic_inc(&sk->sk_drops);
74965 + atomic_inc_unchecked(&sk->sk_drops);
74966 return -ENOBUFS;
74967 }
74968
74969 @@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74970 skb_dst_force(skb);
74971
74972 spin_lock_irqsave(&list->lock, flags);
74973 - skb->dropcount = atomic_read(&sk->sk_drops);
74974 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74975 __skb_queue_tail(list, skb);
74976 spin_unlock_irqrestore(&list->lock, flags);
74977
74978 @@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74979 skb->dev = NULL;
74980
74981 if (sk_rcvqueues_full(sk, skb)) {
74982 - atomic_inc(&sk->sk_drops);
74983 + atomic_inc_unchecked(&sk->sk_drops);
74984 goto discard_and_relse;
74985 }
74986 if (nested)
74987 @@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74988 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74989 } else if (sk_add_backlog(sk, skb)) {
74990 bh_unlock_sock(sk);
74991 - atomic_inc(&sk->sk_drops);
74992 + atomic_inc_unchecked(&sk->sk_drops);
74993 goto discard_and_relse;
74994 }
74995
74996 @@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74997 if (len > sizeof(peercred))
74998 len = sizeof(peercred);
74999 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75000 - if (copy_to_user(optval, &peercred, len))
75001 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75002 return -EFAULT;
75003 goto lenout;
75004 }
75005 @@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75006 return -ENOTCONN;
75007 if (lv < len)
75008 return -EINVAL;
75009 - if (copy_to_user(optval, address, len))
75010 + if (len > sizeof(address) || copy_to_user(optval, address, len))
75011 return -EFAULT;
75012 goto lenout;
75013 }
75014 @@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75015
75016 if (len > lv)
75017 len = lv;
75018 - if (copy_to_user(optval, &v, len))
75019 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
75020 return -EFAULT;
75021 lenout:
75022 if (put_user(len, optlen))
75023 @@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75024 */
75025 smp_wmb();
75026 atomic_set(&sk->sk_refcnt, 1);
75027 - atomic_set(&sk->sk_drops, 0);
75028 + atomic_set_unchecked(&sk->sk_drops, 0);
75029 }
75030 EXPORT_SYMBOL(sock_init_data);
75031
75032 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75033 index 02e75d1..9a57a7c 100644
75034 --- a/net/decnet/sysctl_net_decnet.c
75035 +++ b/net/decnet/sysctl_net_decnet.c
75036 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75037
75038 if (len > *lenp) len = *lenp;
75039
75040 - if (copy_to_user(buffer, addr, len))
75041 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
75042 return -EFAULT;
75043
75044 *lenp = len;
75045 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75046
75047 if (len > *lenp) len = *lenp;
75048
75049 - if (copy_to_user(buffer, devname, len))
75050 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
75051 return -EFAULT;
75052
75053 *lenp = len;
75054 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75055 index 39a2d29..f39c0fe 100644
75056 --- a/net/econet/Kconfig
75057 +++ b/net/econet/Kconfig
75058 @@ -4,7 +4,7 @@
75059
75060 config ECONET
75061 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75062 - depends on EXPERIMENTAL && INET
75063 + depends on EXPERIMENTAL && INET && BROKEN
75064 ---help---
75065 Econet is a fairly old and slow networking protocol mainly used by
75066 Acorn computers to access file and print servers. It uses native
75067 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75068 index 92fc5f6..b790d91 100644
75069 --- a/net/ipv4/fib_frontend.c
75070 +++ b/net/ipv4/fib_frontend.c
75071 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75072 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75073 fib_sync_up(dev);
75074 #endif
75075 - atomic_inc(&net->ipv4.dev_addr_genid);
75076 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75077 rt_cache_flush(dev_net(dev), -1);
75078 break;
75079 case NETDEV_DOWN:
75080 fib_del_ifaddr(ifa, NULL);
75081 - atomic_inc(&net->ipv4.dev_addr_genid);
75082 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75083 if (ifa->ifa_dev->ifa_list == NULL) {
75084 /* Last address was deleted from this interface.
75085 * Disable IP.
75086 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75087 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75088 fib_sync_up(dev);
75089 #endif
75090 - atomic_inc(&net->ipv4.dev_addr_genid);
75091 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75092 rt_cache_flush(dev_net(dev), -1);
75093 break;
75094 case NETDEV_DOWN:
75095 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75096 index 80106d8..232e898 100644
75097 --- a/net/ipv4/fib_semantics.c
75098 +++ b/net/ipv4/fib_semantics.c
75099 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75100 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75101 nh->nh_gw,
75102 nh->nh_parent->fib_scope);
75103 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75104 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75105
75106 return nh->nh_saddr;
75107 }
75108 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
75109 index 389a2e6..ac1c1de 100644
75110 --- a/net/ipv4/inet_diag.c
75111 +++ b/net/ipv4/inet_diag.c
75112 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
75113 r->idiag_retrans = 0;
75114
75115 r->id.idiag_if = sk->sk_bound_dev_if;
75116 +
75117 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75118 + r->id.idiag_cookie[0] = 0;
75119 + r->id.idiag_cookie[1] = 0;
75120 +#else
75121 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
75122 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75123 +#endif
75124
75125 r->id.idiag_sport = inet->inet_sport;
75126 r->id.idiag_dport = inet->inet_dport;
75127 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
75128 r->idiag_family = tw->tw_family;
75129 r->idiag_retrans = 0;
75130 r->id.idiag_if = tw->tw_bound_dev_if;
75131 +
75132 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75133 + r->id.idiag_cookie[0] = 0;
75134 + r->id.idiag_cookie[1] = 0;
75135 +#else
75136 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
75137 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
75138 +#endif
75139 +
75140 r->id.idiag_sport = tw->tw_sport;
75141 r->id.idiag_dport = tw->tw_dport;
75142 r->id.idiag_src[0] = tw->tw_rcv_saddr;
75143 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
75144 if (sk == NULL)
75145 goto unlock;
75146
75147 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75148 err = -ESTALE;
75149 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
75150 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
75151 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
75152 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
75153 goto out;
75154 +#endif
75155
75156 err = -ENOMEM;
75157 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
75158 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
75159 r->idiag_retrans = req->retrans;
75160
75161 r->id.idiag_if = sk->sk_bound_dev_if;
75162 +
75163 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75164 + r->id.idiag_cookie[0] = 0;
75165 + r->id.idiag_cookie[1] = 0;
75166 +#else
75167 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
75168 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
75169 +#endif
75170
75171 tmo = req->expires - jiffies;
75172 if (tmo < 0)
75173 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75174 index 984ec65..97ac518 100644
75175 --- a/net/ipv4/inet_hashtables.c
75176 +++ b/net/ipv4/inet_hashtables.c
75177 @@ -18,12 +18,15 @@
75178 #include <linux/sched.h>
75179 #include <linux/slab.h>
75180 #include <linux/wait.h>
75181 +#include <linux/security.h>
75182
75183 #include <net/inet_connection_sock.h>
75184 #include <net/inet_hashtables.h>
75185 #include <net/secure_seq.h>
75186 #include <net/ip.h>
75187
75188 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75189 +
75190 /*
75191 * Allocate and initialize a new local port bind bucket.
75192 * The bindhash mutex for snum's hash chain must be held here.
75193 @@ -530,6 +533,8 @@ ok:
75194 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75195 spin_unlock(&head->lock);
75196
75197 + gr_update_task_in_ip_table(current, inet_sk(sk));
75198 +
75199 if (tw) {
75200 inet_twsk_deschedule(tw, death_row);
75201 while (twrefcnt) {
75202 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75203 index 86f13c67..0bce60f 100644
75204 --- a/net/ipv4/inetpeer.c
75205 +++ b/net/ipv4/inetpeer.c
75206 @@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
75207 unsigned int sequence;
75208 int invalidated, gccnt = 0;
75209
75210 + pax_track_stack();
75211 +
75212 /* Attempt a lockless lookup first.
75213 * Because of a concurrent writer, we might not find an existing entry.
75214 */
75215 @@ -436,8 +438,8 @@ relookup:
75216 if (p) {
75217 p->daddr = *daddr;
75218 atomic_set(&p->refcnt, 1);
75219 - atomic_set(&p->rid, 0);
75220 - atomic_set(&p->ip_id_count,
75221 + atomic_set_unchecked(&p->rid, 0);
75222 + atomic_set_unchecked(&p->ip_id_count,
75223 (daddr->family == AF_INET) ?
75224 secure_ip_id(daddr->addr.a4) :
75225 secure_ipv6_id(daddr->addr.a6));
75226 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75227 index 0e0ab98..2ed7dd5 100644
75228 --- a/net/ipv4/ip_fragment.c
75229 +++ b/net/ipv4/ip_fragment.c
75230 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75231 return 0;
75232
75233 start = qp->rid;
75234 - end = atomic_inc_return(&peer->rid);
75235 + end = atomic_inc_return_unchecked(&peer->rid);
75236 qp->rid = end;
75237
75238 rc = qp->q.fragments && (end - start) > max;
75239 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75240 index 8905e92..0b179fb 100644
75241 --- a/net/ipv4/ip_sockglue.c
75242 +++ b/net/ipv4/ip_sockglue.c
75243 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75244 int val;
75245 int len;
75246
75247 + pax_track_stack();
75248 +
75249 if (level != SOL_IP)
75250 return -EOPNOTSUPP;
75251
75252 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75253 len = min_t(unsigned int, len, opt->optlen);
75254 if (put_user(len, optlen))
75255 return -EFAULT;
75256 - if (copy_to_user(optval, opt->__data, len))
75257 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75258 + copy_to_user(optval, opt->__data, len))
75259 return -EFAULT;
75260 return 0;
75261 }
75262 @@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75263 if (sk->sk_type != SOCK_STREAM)
75264 return -ENOPROTOOPT;
75265
75266 - msg.msg_control = optval;
75267 + msg.msg_control = (void __force_kernel *)optval;
75268 msg.msg_controllen = len;
75269 msg.msg_flags = flags;
75270
75271 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75272 index 472a8c4..6507cd4 100644
75273 --- a/net/ipv4/ipconfig.c
75274 +++ b/net/ipv4/ipconfig.c
75275 @@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75276
75277 mm_segment_t oldfs = get_fs();
75278 set_fs(get_ds());
75279 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75280 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75281 set_fs(oldfs);
75282 return res;
75283 }
75284 @@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75285
75286 mm_segment_t oldfs = get_fs();
75287 set_fs(get_ds());
75288 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75289 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75290 set_fs(oldfs);
75291 return res;
75292 }
75293 @@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75294
75295 mm_segment_t oldfs = get_fs();
75296 set_fs(get_ds());
75297 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75298 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75299 set_fs(oldfs);
75300 return res;
75301 }
75302 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75303 index 076b7c8..9c8d038 100644
75304 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
75305 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75306 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
75307
75308 *len = 0;
75309
75310 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
75311 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
75312 if (*octets == NULL) {
75313 if (net_ratelimit())
75314 pr_notice("OOM in bsalg (%d)\n", __LINE__);
75315 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75316 index 39b403f..8e6a0a8 100644
75317 --- a/net/ipv4/ping.c
75318 +++ b/net/ipv4/ping.c
75319 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75320 sk_rmem_alloc_get(sp),
75321 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75322 atomic_read(&sp->sk_refcnt), sp,
75323 - atomic_read(&sp->sk_drops), len);
75324 + atomic_read_unchecked(&sp->sk_drops), len);
75325 }
75326
75327 static int ping_seq_show(struct seq_file *seq, void *v)
75328 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75329 index 61714bd..c9cee6d 100644
75330 --- a/net/ipv4/raw.c
75331 +++ b/net/ipv4/raw.c
75332 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75333 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75334 {
75335 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75336 - atomic_inc(&sk->sk_drops);
75337 + atomic_inc_unchecked(&sk->sk_drops);
75338 kfree_skb(skb);
75339 return NET_RX_DROP;
75340 }
75341 @@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
75342
75343 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75344 {
75345 + struct icmp_filter filter;
75346 +
75347 if (optlen > sizeof(struct icmp_filter))
75348 optlen = sizeof(struct icmp_filter);
75349 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75350 + if (copy_from_user(&filter, optval, optlen))
75351 return -EFAULT;
75352 + raw_sk(sk)->filter = filter;
75353 return 0;
75354 }
75355
75356 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75357 {
75358 int len, ret = -EFAULT;
75359 + struct icmp_filter filter;
75360
75361 if (get_user(len, optlen))
75362 goto out;
75363 @@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75364 if (len > sizeof(struct icmp_filter))
75365 len = sizeof(struct icmp_filter);
75366 ret = -EFAULT;
75367 - if (put_user(len, optlen) ||
75368 - copy_to_user(optval, &raw_sk(sk)->filter, len))
75369 + filter = raw_sk(sk)->filter;
75370 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75371 goto out;
75372 ret = 0;
75373 out: return ret;
75374 @@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75375 sk_wmem_alloc_get(sp),
75376 sk_rmem_alloc_get(sp),
75377 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75378 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75379 + atomic_read(&sp->sk_refcnt),
75380 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75381 + NULL,
75382 +#else
75383 + sp,
75384 +#endif
75385 + atomic_read_unchecked(&sp->sk_drops));
75386 }
75387
75388 static int raw_seq_show(struct seq_file *seq, void *v)
75389 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75390 index 075212e..8713a00 100644
75391 --- a/net/ipv4/route.c
75392 +++ b/net/ipv4/route.c
75393 @@ -308,7 +308,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75394
75395 static inline int rt_genid(struct net *net)
75396 {
75397 - return atomic_read(&net->ipv4.rt_genid);
75398 + return atomic_read_unchecked(&net->ipv4.rt_genid);
75399 }
75400
75401 #ifdef CONFIG_PROC_FS
75402 @@ -837,7 +837,7 @@ static void rt_cache_invalidate(struct net *net)
75403 unsigned char shuffle;
75404
75405 get_random_bytes(&shuffle, sizeof(shuffle));
75406 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75407 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75408 }
75409
75410 /*
75411 @@ -2872,7 +2872,7 @@ static int rt_fill_info(struct net *net,
75412 error = rt->dst.error;
75413 if (peer) {
75414 inet_peer_refcheck(rt->peer);
75415 - id = atomic_read(&peer->ip_id_count) & 0xffff;
75416 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75417 if (peer->tcp_ts_stamp) {
75418 ts = peer->tcp_ts;
75419 tsage = get_seconds() - peer->tcp_ts_stamp;
75420 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
75421 index 46febca..98b73a4 100644
75422 --- a/net/ipv4/tcp.c
75423 +++ b/net/ipv4/tcp.c
75424 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
75425 int val;
75426 int err = 0;
75427
75428 + pax_track_stack();
75429 +
75430 /* These are data/string values, all the others are ints */
75431 switch (optname) {
75432 case TCP_CONGESTION: {
75433 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
75434 struct tcp_sock *tp = tcp_sk(sk);
75435 int val, len;
75436
75437 + pax_track_stack();
75438 +
75439 if (get_user(len, optlen))
75440 return -EFAULT;
75441
75442 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75443 index 7963e03..c44f5d0 100644
75444 --- a/net/ipv4/tcp_ipv4.c
75445 +++ b/net/ipv4/tcp_ipv4.c
75446 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
75447 int sysctl_tcp_low_latency __read_mostly;
75448 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75449
75450 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75451 +extern int grsec_enable_blackhole;
75452 +#endif
75453
75454 #ifdef CONFIG_TCP_MD5SIG
75455 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
75456 @@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75457 return 0;
75458
75459 reset:
75460 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75461 + if (!grsec_enable_blackhole)
75462 +#endif
75463 tcp_v4_send_reset(rsk, skb);
75464 discard:
75465 kfree_skb(skb);
75466 @@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75467 TCP_SKB_CB(skb)->sacked = 0;
75468
75469 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75470 - if (!sk)
75471 + if (!sk) {
75472 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75473 + ret = 1;
75474 +#endif
75475 goto no_tcp_socket;
75476 -
75477 + }
75478 process:
75479 - if (sk->sk_state == TCP_TIME_WAIT)
75480 + if (sk->sk_state == TCP_TIME_WAIT) {
75481 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75482 + ret = 2;
75483 +#endif
75484 goto do_time_wait;
75485 + }
75486
75487 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75488 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75489 @@ -1739,6 +1752,10 @@ no_tcp_socket:
75490 bad_packet:
75491 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75492 } else {
75493 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75494 + if (!grsec_enable_blackhole || (ret == 1 &&
75495 + (skb->dev->flags & IFF_LOOPBACK)))
75496 +#endif
75497 tcp_v4_send_reset(NULL, skb);
75498 }
75499
75500 @@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
75501 0, /* non standard timer */
75502 0, /* open_requests have no inode */
75503 atomic_read(&sk->sk_refcnt),
75504 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75505 + NULL,
75506 +#else
75507 req,
75508 +#endif
75509 len);
75510 }
75511
75512 @@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75513 sock_i_uid(sk),
75514 icsk->icsk_probes_out,
75515 sock_i_ino(sk),
75516 - atomic_read(&sk->sk_refcnt), sk,
75517 + atomic_read(&sk->sk_refcnt),
75518 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75519 + NULL,
75520 +#else
75521 + sk,
75522 +#endif
75523 jiffies_to_clock_t(icsk->icsk_rto),
75524 jiffies_to_clock_t(icsk->icsk_ack.ato),
75525 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75526 @@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
75527 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75528 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75529 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75530 - atomic_read(&tw->tw_refcnt), tw, len);
75531 + atomic_read(&tw->tw_refcnt),
75532 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75533 + NULL,
75534 +#else
75535 + tw,
75536 +#endif
75537 + len);
75538 }
75539
75540 #define TMPSZ 150
75541 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75542 index 0ce3d06..e182e59 100644
75543 --- a/net/ipv4/tcp_minisocks.c
75544 +++ b/net/ipv4/tcp_minisocks.c
75545 @@ -27,6 +27,10 @@
75546 #include <net/inet_common.h>
75547 #include <net/xfrm.h>
75548
75549 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75550 +extern int grsec_enable_blackhole;
75551 +#endif
75552 +
75553 int sysctl_tcp_syncookies __read_mostly = 1;
75554 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75555
75556 @@ -750,6 +754,10 @@ listen_overflow:
75557
75558 embryonic_reset:
75559 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75560 +
75561 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75562 + if (!grsec_enable_blackhole)
75563 +#endif
75564 if (!(flg & TCP_FLAG_RST))
75565 req->rsk_ops->send_reset(sk, skb);
75566
75567 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
75568 index 882e0b0..2eba47f 100644
75569 --- a/net/ipv4/tcp_output.c
75570 +++ b/net/ipv4/tcp_output.c
75571 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
75572 int mss;
75573 int s_data_desired = 0;
75574
75575 + pax_track_stack();
75576 +
75577 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
75578 s_data_desired = cvp->s_data_desired;
75579 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
75580 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75581 index 85ee7eb..53277ab 100644
75582 --- a/net/ipv4/tcp_probe.c
75583 +++ b/net/ipv4/tcp_probe.c
75584 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75585 if (cnt + width >= len)
75586 break;
75587
75588 - if (copy_to_user(buf + cnt, tbuf, width))
75589 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75590 return -EFAULT;
75591 cnt += width;
75592 }
75593 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75594 index ecd44b0..b32fba6 100644
75595 --- a/net/ipv4/tcp_timer.c
75596 +++ b/net/ipv4/tcp_timer.c
75597 @@ -22,6 +22,10 @@
75598 #include <linux/gfp.h>
75599 #include <net/tcp.h>
75600
75601 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75602 +extern int grsec_lastack_retries;
75603 +#endif
75604 +
75605 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75606 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75607 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75608 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
75609 }
75610 }
75611
75612 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75613 + if ((sk->sk_state == TCP_LAST_ACK) &&
75614 + (grsec_lastack_retries > 0) &&
75615 + (grsec_lastack_retries < retry_until))
75616 + retry_until = grsec_lastack_retries;
75617 +#endif
75618 +
75619 if (retransmits_timed_out(sk, retry_until,
75620 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75621 /* Has it gone just too far? */
75622 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75623 index 1b5a193..bd354b0 100644
75624 --- a/net/ipv4/udp.c
75625 +++ b/net/ipv4/udp.c
75626 @@ -86,6 +86,7 @@
75627 #include <linux/types.h>
75628 #include <linux/fcntl.h>
75629 #include <linux/module.h>
75630 +#include <linux/security.h>
75631 #include <linux/socket.h>
75632 #include <linux/sockios.h>
75633 #include <linux/igmp.h>
75634 @@ -108,6 +109,10 @@
75635 #include <trace/events/udp.h>
75636 #include "udp_impl.h"
75637
75638 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75639 +extern int grsec_enable_blackhole;
75640 +#endif
75641 +
75642 struct udp_table udp_table __read_mostly;
75643 EXPORT_SYMBOL(udp_table);
75644
75645 @@ -565,6 +570,9 @@ found:
75646 return s;
75647 }
75648
75649 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75650 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75651 +
75652 /*
75653 * This routine is called by the ICMP module when it gets some
75654 * sort of error condition. If err < 0 then the socket should
75655 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75656 dport = usin->sin_port;
75657 if (dport == 0)
75658 return -EINVAL;
75659 +
75660 + err = gr_search_udp_sendmsg(sk, usin);
75661 + if (err)
75662 + return err;
75663 } else {
75664 if (sk->sk_state != TCP_ESTABLISHED)
75665 return -EDESTADDRREQ;
75666 +
75667 + err = gr_search_udp_sendmsg(sk, NULL);
75668 + if (err)
75669 + return err;
75670 +
75671 daddr = inet->inet_daddr;
75672 dport = inet->inet_dport;
75673 /* Open fast path for connected socket.
75674 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
75675 udp_lib_checksum_complete(skb)) {
75676 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75677 IS_UDPLITE(sk));
75678 - atomic_inc(&sk->sk_drops);
75679 + atomic_inc_unchecked(&sk->sk_drops);
75680 __skb_unlink(skb, rcvq);
75681 __skb_queue_tail(&list_kill, skb);
75682 }
75683 @@ -1185,6 +1202,10 @@ try_again:
75684 if (!skb)
75685 goto out;
75686
75687 + err = gr_search_udp_recvmsg(sk, skb);
75688 + if (err)
75689 + goto out_free;
75690 +
75691 ulen = skb->len - sizeof(struct udphdr);
75692 if (len > ulen)
75693 len = ulen;
75694 @@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75695
75696 drop:
75697 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75698 - atomic_inc(&sk->sk_drops);
75699 + atomic_inc_unchecked(&sk->sk_drops);
75700 kfree_skb(skb);
75701 return -1;
75702 }
75703 @@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75704 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75705
75706 if (!skb1) {
75707 - atomic_inc(&sk->sk_drops);
75708 + atomic_inc_unchecked(&sk->sk_drops);
75709 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75710 IS_UDPLITE(sk));
75711 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75712 @@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75713 goto csum_error;
75714
75715 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75716 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75717 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75718 +#endif
75719 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75720
75721 /*
75722 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75723 sk_wmem_alloc_get(sp),
75724 sk_rmem_alloc_get(sp),
75725 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75726 - atomic_read(&sp->sk_refcnt), sp,
75727 - atomic_read(&sp->sk_drops), len);
75728 + atomic_read(&sp->sk_refcnt),
75729 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75730 + NULL,
75731 +#else
75732 + sp,
75733 +#endif
75734 + atomic_read_unchecked(&sp->sk_drops), len);
75735 }
75736
75737 int udp4_seq_show(struct seq_file *seq, void *v)
75738 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75739 index 12368c5..fbf899f 100644
75740 --- a/net/ipv6/addrconf.c
75741 +++ b/net/ipv6/addrconf.c
75742 @@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75743 p.iph.ihl = 5;
75744 p.iph.protocol = IPPROTO_IPV6;
75745 p.iph.ttl = 64;
75746 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75747 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75748
75749 if (ops->ndo_do_ioctl) {
75750 mm_segment_t oldfs = get_fs();
75751 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75752 index 8a58e8c..8b5e631 100644
75753 --- a/net/ipv6/inet6_connection_sock.c
75754 +++ b/net/ipv6/inet6_connection_sock.c
75755 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75756 #ifdef CONFIG_XFRM
75757 {
75758 struct rt6_info *rt = (struct rt6_info *)dst;
75759 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75760 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75761 }
75762 #endif
75763 }
75764 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75765 #ifdef CONFIG_XFRM
75766 if (dst) {
75767 struct rt6_info *rt = (struct rt6_info *)dst;
75768 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75769 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75770 __sk_dst_reset(sk);
75771 dst = NULL;
75772 }
75773 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75774 index 2fbda5f..26ed683 100644
75775 --- a/net/ipv6/ipv6_sockglue.c
75776 +++ b/net/ipv6/ipv6_sockglue.c
75777 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
75778 int val, valbool;
75779 int retv = -ENOPROTOOPT;
75780
75781 + pax_track_stack();
75782 +
75783 if (optval == NULL)
75784 val=0;
75785 else {
75786 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75787 int len;
75788 int val;
75789
75790 + pax_track_stack();
75791 +
75792 if (ip6_mroute_opt(optname))
75793 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
75794
75795 @@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75796 if (sk->sk_type != SOCK_STREAM)
75797 return -ENOPROTOOPT;
75798
75799 - msg.msg_control = optval;
75800 + msg.msg_control = (void __force_kernel *)optval;
75801 msg.msg_controllen = len;
75802 msg.msg_flags = flags;
75803
75804 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75805 index 343852e..c92bd15 100644
75806 --- a/net/ipv6/raw.c
75807 +++ b/net/ipv6/raw.c
75808 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
75809 {
75810 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
75811 skb_checksum_complete(skb)) {
75812 - atomic_inc(&sk->sk_drops);
75813 + atomic_inc_unchecked(&sk->sk_drops);
75814 kfree_skb(skb);
75815 return NET_RX_DROP;
75816 }
75817 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75818 struct raw6_sock *rp = raw6_sk(sk);
75819
75820 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75821 - atomic_inc(&sk->sk_drops);
75822 + atomic_inc_unchecked(&sk->sk_drops);
75823 kfree_skb(skb);
75824 return NET_RX_DROP;
75825 }
75826 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75827
75828 if (inet->hdrincl) {
75829 if (skb_checksum_complete(skb)) {
75830 - atomic_inc(&sk->sk_drops);
75831 + atomic_inc_unchecked(&sk->sk_drops);
75832 kfree_skb(skb);
75833 return NET_RX_DROP;
75834 }
75835 @@ -601,7 +601,7 @@ out:
75836 return err;
75837 }
75838
75839 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75840 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75841 struct flowi6 *fl6, struct dst_entry **dstp,
75842 unsigned int flags)
75843 {
75844 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
75845 u16 proto;
75846 int err;
75847
75848 + pax_track_stack();
75849 +
75850 /* Rough check on arithmetic overflow,
75851 better check is made in ip6_append_data().
75852 */
75853 @@ -909,12 +911,15 @@ do_confirm:
75854 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75855 char __user *optval, int optlen)
75856 {
75857 + struct icmp6_filter filter;
75858 +
75859 switch (optname) {
75860 case ICMPV6_FILTER:
75861 if (optlen > sizeof(struct icmp6_filter))
75862 optlen = sizeof(struct icmp6_filter);
75863 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75864 + if (copy_from_user(&filter, optval, optlen))
75865 return -EFAULT;
75866 + raw6_sk(sk)->filter = filter;
75867 return 0;
75868 default:
75869 return -ENOPROTOOPT;
75870 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75871 char __user *optval, int __user *optlen)
75872 {
75873 int len;
75874 + struct icmp6_filter filter;
75875
75876 switch (optname) {
75877 case ICMPV6_FILTER:
75878 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75879 len = sizeof(struct icmp6_filter);
75880 if (put_user(len, optlen))
75881 return -EFAULT;
75882 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75883 + filter = raw6_sk(sk)->filter;
75884 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75885 return -EFAULT;
75886 return 0;
75887 default:
75888 @@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75889 0, 0L, 0,
75890 sock_i_uid(sp), 0,
75891 sock_i_ino(sp),
75892 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75893 + atomic_read(&sp->sk_refcnt),
75894 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75895 + NULL,
75896 +#else
75897 + sp,
75898 +#endif
75899 + atomic_read_unchecked(&sp->sk_drops));
75900 }
75901
75902 static int raw6_seq_show(struct seq_file *seq, void *v)
75903 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75904 index 7b8fc57..c6185da 100644
75905 --- a/net/ipv6/tcp_ipv6.c
75906 +++ b/net/ipv6/tcp_ipv6.c
75907 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75908 }
75909 #endif
75910
75911 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75912 +extern int grsec_enable_blackhole;
75913 +#endif
75914 +
75915 static void tcp_v6_hash(struct sock *sk)
75916 {
75917 if (sk->sk_state != TCP_CLOSE) {
75918 @@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75919 return 0;
75920
75921 reset:
75922 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75923 + if (!grsec_enable_blackhole)
75924 +#endif
75925 tcp_v6_send_reset(sk, skb);
75926 discard:
75927 if (opt_skb)
75928 @@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75929 TCP_SKB_CB(skb)->sacked = 0;
75930
75931 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75932 - if (!sk)
75933 + if (!sk) {
75934 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75935 + ret = 1;
75936 +#endif
75937 goto no_tcp_socket;
75938 + }
75939
75940 process:
75941 - if (sk->sk_state == TCP_TIME_WAIT)
75942 + if (sk->sk_state == TCP_TIME_WAIT) {
75943 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75944 + ret = 2;
75945 +#endif
75946 goto do_time_wait;
75947 + }
75948
75949 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75950 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75951 @@ -1779,6 +1794,10 @@ no_tcp_socket:
75952 bad_packet:
75953 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75954 } else {
75955 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75956 + if (!grsec_enable_blackhole || (ret == 1 &&
75957 + (skb->dev->flags & IFF_LOOPBACK)))
75958 +#endif
75959 tcp_v6_send_reset(NULL, skb);
75960 }
75961
75962 @@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file *seq,
75963 uid,
75964 0, /* non standard timer */
75965 0, /* open_requests have no inode */
75966 - 0, req);
75967 + 0,
75968 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75969 + NULL
75970 +#else
75971 + req
75972 +#endif
75973 + );
75974 }
75975
75976 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75977 @@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75978 sock_i_uid(sp),
75979 icsk->icsk_probes_out,
75980 sock_i_ino(sp),
75981 - atomic_read(&sp->sk_refcnt), sp,
75982 + atomic_read(&sp->sk_refcnt),
75983 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75984 + NULL,
75985 +#else
75986 + sp,
75987 +#endif
75988 jiffies_to_clock_t(icsk->icsk_rto),
75989 jiffies_to_clock_t(icsk->icsk_ack.ato),
75990 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75991 @@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75992 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75993 tw->tw_substate, 0, 0,
75994 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75995 - atomic_read(&tw->tw_refcnt), tw);
75996 + atomic_read(&tw->tw_refcnt),
75997 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75998 + NULL
75999 +#else
76000 + tw
76001 +#endif
76002 + );
76003 }
76004
76005 static int tcp6_seq_show(struct seq_file *seq, void *v)
76006 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76007 index bb95e8e..ae0ee80 100644
76008 --- a/net/ipv6/udp.c
76009 +++ b/net/ipv6/udp.c
76010 @@ -50,6 +50,10 @@
76011 #include <linux/seq_file.h>
76012 #include "udp_impl.h"
76013
76014 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76015 +extern int grsec_enable_blackhole;
76016 +#endif
76017 +
76018 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76019 {
76020 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76021 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76022
76023 return 0;
76024 drop:
76025 - atomic_inc(&sk->sk_drops);
76026 + atomic_inc_unchecked(&sk->sk_drops);
76027 drop_no_sk_drops_inc:
76028 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76029 kfree_skb(skb);
76030 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76031 continue;
76032 }
76033 drop:
76034 - atomic_inc(&sk->sk_drops);
76035 + atomic_inc_unchecked(&sk->sk_drops);
76036 UDP6_INC_STATS_BH(sock_net(sk),
76037 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76038 UDP6_INC_STATS_BH(sock_net(sk),
76039 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76040 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76041 proto == IPPROTO_UDPLITE);
76042
76043 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76044 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76045 +#endif
76046 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76047
76048 kfree_skb(skb);
76049 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76050 if (!sock_owned_by_user(sk))
76051 udpv6_queue_rcv_skb(sk, skb);
76052 else if (sk_add_backlog(sk, skb)) {
76053 - atomic_inc(&sk->sk_drops);
76054 + atomic_inc_unchecked(&sk->sk_drops);
76055 bh_unlock_sock(sk);
76056 sock_put(sk);
76057 goto discard;
76058 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76059 0, 0L, 0,
76060 sock_i_uid(sp), 0,
76061 sock_i_ino(sp),
76062 - atomic_read(&sp->sk_refcnt), sp,
76063 - atomic_read(&sp->sk_drops));
76064 + atomic_read(&sp->sk_refcnt),
76065 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76066 + NULL,
76067 +#else
76068 + sp,
76069 +#endif
76070 + atomic_read_unchecked(&sp->sk_drops));
76071 }
76072
76073 int udp6_seq_show(struct seq_file *seq, void *v)
76074 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76075 index b3cc8b3..baa02d0 100644
76076 --- a/net/irda/ircomm/ircomm_tty.c
76077 +++ b/net/irda/ircomm/ircomm_tty.c
76078 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76079 add_wait_queue(&self->open_wait, &wait);
76080
76081 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76082 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76083 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76084
76085 /* As far as I can see, we protect open_count - Jean II */
76086 spin_lock_irqsave(&self->spinlock, flags);
76087 if (!tty_hung_up_p(filp)) {
76088 extra_count = 1;
76089 - self->open_count--;
76090 + local_dec(&self->open_count);
76091 }
76092 spin_unlock_irqrestore(&self->spinlock, flags);
76093 - self->blocked_open++;
76094 + local_inc(&self->blocked_open);
76095
76096 while (1) {
76097 if (tty->termios->c_cflag & CBAUD) {
76098 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76099 }
76100
76101 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76102 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76103 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76104
76105 schedule();
76106 }
76107 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76108 if (extra_count) {
76109 /* ++ is not atomic, so this should be protected - Jean II */
76110 spin_lock_irqsave(&self->spinlock, flags);
76111 - self->open_count++;
76112 + local_inc(&self->open_count);
76113 spin_unlock_irqrestore(&self->spinlock, flags);
76114 }
76115 - self->blocked_open--;
76116 + local_dec(&self->blocked_open);
76117
76118 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76119 - __FILE__,__LINE__, tty->driver->name, self->open_count);
76120 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76121
76122 if (!retval)
76123 self->flags |= ASYNC_NORMAL_ACTIVE;
76124 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76125 }
76126 /* ++ is not atomic, so this should be protected - Jean II */
76127 spin_lock_irqsave(&self->spinlock, flags);
76128 - self->open_count++;
76129 + local_inc(&self->open_count);
76130
76131 tty->driver_data = self;
76132 self->tty = tty;
76133 spin_unlock_irqrestore(&self->spinlock, flags);
76134
76135 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76136 - self->line, self->open_count);
76137 + self->line, local_read(&self->open_count));
76138
76139 /* Not really used by us, but lets do it anyway */
76140 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76141 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76142 return;
76143 }
76144
76145 - if ((tty->count == 1) && (self->open_count != 1)) {
76146 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76147 /*
76148 * Uh, oh. tty->count is 1, which means that the tty
76149 * structure will be freed. state->count should always
76150 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76151 */
76152 IRDA_DEBUG(0, "%s(), bad serial port count; "
76153 "tty->count is 1, state->count is %d\n", __func__ ,
76154 - self->open_count);
76155 - self->open_count = 1;
76156 + local_read(&self->open_count));
76157 + local_set(&self->open_count, 1);
76158 }
76159
76160 - if (--self->open_count < 0) {
76161 + if (local_dec_return(&self->open_count) < 0) {
76162 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76163 - __func__, self->line, self->open_count);
76164 - self->open_count = 0;
76165 + __func__, self->line, local_read(&self->open_count));
76166 + local_set(&self->open_count, 0);
76167 }
76168 - if (self->open_count) {
76169 + if (local_read(&self->open_count)) {
76170 spin_unlock_irqrestore(&self->spinlock, flags);
76171
76172 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76173 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76174 tty->closing = 0;
76175 self->tty = NULL;
76176
76177 - if (self->blocked_open) {
76178 + if (local_read(&self->blocked_open)) {
76179 if (self->close_delay)
76180 schedule_timeout_interruptible(self->close_delay);
76181 wake_up_interruptible(&self->open_wait);
76182 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76183 spin_lock_irqsave(&self->spinlock, flags);
76184 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76185 self->tty = NULL;
76186 - self->open_count = 0;
76187 + local_set(&self->open_count, 0);
76188 spin_unlock_irqrestore(&self->spinlock, flags);
76189
76190 wake_up_interruptible(&self->open_wait);
76191 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76192 seq_putc(m, '\n');
76193
76194 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76195 - seq_printf(m, "Open count: %d\n", self->open_count);
76196 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76197 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76198 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76199
76200 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76201 index e2013e4..edfc1e3 100644
76202 --- a/net/iucv/af_iucv.c
76203 +++ b/net/iucv/af_iucv.c
76204 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct sock *sk)
76205
76206 write_lock_bh(&iucv_sk_list.lock);
76207
76208 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76209 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76210 while (__iucv_get_sock_by_name(name)) {
76211 sprintf(name, "%08x",
76212 - atomic_inc_return(&iucv_sk_list.autobind_name));
76213 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76214 }
76215
76216 write_unlock_bh(&iucv_sk_list.lock);
76217 diff --git a/net/key/af_key.c b/net/key/af_key.c
76218 index 1e733e9..c84de2f 100644
76219 --- a/net/key/af_key.c
76220 +++ b/net/key/af_key.c
76221 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
76222 struct xfrm_migrate m[XFRM_MAX_DEPTH];
76223 struct xfrm_kmaddress k;
76224
76225 + pax_track_stack();
76226 +
76227 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
76228 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
76229 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
76230 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76231 static u32 get_acqseq(void)
76232 {
76233 u32 res;
76234 - static atomic_t acqseq;
76235 + static atomic_unchecked_t acqseq;
76236
76237 do {
76238 - res = atomic_inc_return(&acqseq);
76239 + res = atomic_inc_return_unchecked(&acqseq);
76240 } while (!res);
76241 return res;
76242 }
76243 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
76244 index 956b7e4..f01d328 100644
76245 --- a/net/lapb/lapb_iface.c
76246 +++ b/net/lapb/lapb_iface.c
76247 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
76248 goto out;
76249
76250 lapb->dev = dev;
76251 - lapb->callbacks = *callbacks;
76252 + lapb->callbacks = callbacks;
76253
76254 __lapb_insert_cb(lapb);
76255
76256 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
76257
76258 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
76259 {
76260 - if (lapb->callbacks.connect_confirmation)
76261 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
76262 + if (lapb->callbacks->connect_confirmation)
76263 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
76264 }
76265
76266 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
76267 {
76268 - if (lapb->callbacks.connect_indication)
76269 - lapb->callbacks.connect_indication(lapb->dev, reason);
76270 + if (lapb->callbacks->connect_indication)
76271 + lapb->callbacks->connect_indication(lapb->dev, reason);
76272 }
76273
76274 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
76275 {
76276 - if (lapb->callbacks.disconnect_confirmation)
76277 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
76278 + if (lapb->callbacks->disconnect_confirmation)
76279 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
76280 }
76281
76282 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
76283 {
76284 - if (lapb->callbacks.disconnect_indication)
76285 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
76286 + if (lapb->callbacks->disconnect_indication)
76287 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
76288 }
76289
76290 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
76291 {
76292 - if (lapb->callbacks.data_indication)
76293 - return lapb->callbacks.data_indication(lapb->dev, skb);
76294 + if (lapb->callbacks->data_indication)
76295 + return lapb->callbacks->data_indication(lapb->dev, skb);
76296
76297 kfree_skb(skb);
76298 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
76299 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
76300 {
76301 int used = 0;
76302
76303 - if (lapb->callbacks.data_transmit) {
76304 - lapb->callbacks.data_transmit(lapb->dev, skb);
76305 + if (lapb->callbacks->data_transmit) {
76306 + lapb->callbacks->data_transmit(lapb->dev, skb);
76307 used = 1;
76308 }
76309
76310 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
76311 index a01d213..6a1f1ab 100644
76312 --- a/net/mac80211/debugfs_sta.c
76313 +++ b/net/mac80211/debugfs_sta.c
76314 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
76315 struct tid_ampdu_rx *tid_rx;
76316 struct tid_ampdu_tx *tid_tx;
76317
76318 + pax_track_stack();
76319 +
76320 rcu_read_lock();
76321
76322 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
76323 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
76324 struct sta_info *sta = file->private_data;
76325 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
76326
76327 + pax_track_stack();
76328 +
76329 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
76330 htc->ht_supported ? "" : "not ");
76331 if (htc->ht_supported) {
76332 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76333 index 9fab144..7f0fc14 100644
76334 --- a/net/mac80211/ieee80211_i.h
76335 +++ b/net/mac80211/ieee80211_i.h
76336 @@ -27,6 +27,7 @@
76337 #include <net/ieee80211_radiotap.h>
76338 #include <net/cfg80211.h>
76339 #include <net/mac80211.h>
76340 +#include <asm/local.h>
76341 #include "key.h"
76342 #include "sta_info.h"
76343
76344 @@ -754,7 +755,7 @@ struct ieee80211_local {
76345 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76346 spinlock_t queue_stop_reason_lock;
76347
76348 - int open_count;
76349 + local_t open_count;
76350 int monitors, cooked_mntrs;
76351 /* number of interfaces with corresponding FIF_ flags */
76352 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76353 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76354 index 556e7e6..120dcaf 100644
76355 --- a/net/mac80211/iface.c
76356 +++ b/net/mac80211/iface.c
76357 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76358 break;
76359 }
76360
76361 - if (local->open_count == 0) {
76362 + if (local_read(&local->open_count) == 0) {
76363 res = drv_start(local);
76364 if (res)
76365 goto err_del_bss;
76366 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76367 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76368
76369 if (!is_valid_ether_addr(dev->dev_addr)) {
76370 - if (!local->open_count)
76371 + if (!local_read(&local->open_count))
76372 drv_stop(local);
76373 return -EADDRNOTAVAIL;
76374 }
76375 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76376 mutex_unlock(&local->mtx);
76377
76378 if (coming_up)
76379 - local->open_count++;
76380 + local_inc(&local->open_count);
76381
76382 if (hw_reconf_flags) {
76383 ieee80211_hw_config(local, hw_reconf_flags);
76384 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76385 err_del_interface:
76386 drv_remove_interface(local, &sdata->vif);
76387 err_stop:
76388 - if (!local->open_count)
76389 + if (!local_read(&local->open_count))
76390 drv_stop(local);
76391 err_del_bss:
76392 sdata->bss = NULL;
76393 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76394 }
76395
76396 if (going_down)
76397 - local->open_count--;
76398 + local_dec(&local->open_count);
76399
76400 switch (sdata->vif.type) {
76401 case NL80211_IFTYPE_AP_VLAN:
76402 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76403
76404 ieee80211_recalc_ps(local, -1);
76405
76406 - if (local->open_count == 0) {
76407 + if (local_read(&local->open_count) == 0) {
76408 if (local->ops->napi_poll)
76409 napi_disable(&local->napi);
76410 ieee80211_clear_tx_pending(local);
76411 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76412 index acb4423..278c8e5 100644
76413 --- a/net/mac80211/main.c
76414 +++ b/net/mac80211/main.c
76415 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76416 local->hw.conf.power_level = power;
76417 }
76418
76419 - if (changed && local->open_count) {
76420 + if (changed && local_read(&local->open_count)) {
76421 ret = drv_config(local, changed);
76422 /*
76423 * Goal:
76424 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
76425 index 0f48368..d48e688 100644
76426 --- a/net/mac80211/mlme.c
76427 +++ b/net/mac80211/mlme.c
76428 @@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
76429 bool have_higher_than_11mbit = false;
76430 u16 ap_ht_cap_flags;
76431
76432 + pax_track_stack();
76433 +
76434 /* AssocResp and ReassocResp have identical structure */
76435
76436 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
76437 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76438 index 6326d34..7225f61 100644
76439 --- a/net/mac80211/pm.c
76440 +++ b/net/mac80211/pm.c
76441 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76442 struct ieee80211_sub_if_data *sdata;
76443 struct sta_info *sta;
76444
76445 - if (!local->open_count)
76446 + if (!local_read(&local->open_count))
76447 goto suspend;
76448
76449 ieee80211_scan_cancel(local);
76450 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76451 cancel_work_sync(&local->dynamic_ps_enable_work);
76452 del_timer_sync(&local->dynamic_ps_timer);
76453
76454 - local->wowlan = wowlan && local->open_count;
76455 + local->wowlan = wowlan && local_read(&local->open_count);
76456 if (local->wowlan) {
76457 int err = drv_suspend(local, wowlan);
76458 if (err < 0) {
76459 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76460 }
76461
76462 /* stop hardware - this must stop RX */
76463 - if (local->open_count)
76464 + if (local_read(&local->open_count))
76465 ieee80211_stop_device(local);
76466
76467 suspend:
76468 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76469 index 3d5a2cb..b17ad48 100644
76470 --- a/net/mac80211/rate.c
76471 +++ b/net/mac80211/rate.c
76472 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76473
76474 ASSERT_RTNL();
76475
76476 - if (local->open_count)
76477 + if (local_read(&local->open_count))
76478 return -EBUSY;
76479
76480 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76481 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76482 index 4851e9e..d860e05 100644
76483 --- a/net/mac80211/rc80211_pid_debugfs.c
76484 +++ b/net/mac80211/rc80211_pid_debugfs.c
76485 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76486
76487 spin_unlock_irqrestore(&events->lock, status);
76488
76489 - if (copy_to_user(buf, pb, p))
76490 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76491 return -EFAULT;
76492
76493 return p;
76494 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76495 index fd031e8..84fbfcf 100644
76496 --- a/net/mac80211/util.c
76497 +++ b/net/mac80211/util.c
76498 @@ -1170,7 +1170,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76499 drv_set_coverage_class(local, hw->wiphy->coverage_class);
76500
76501 /* everything else happens only if HW was up & running */
76502 - if (!local->open_count)
76503 + if (!local_read(&local->open_count))
76504 goto wake_up;
76505
76506 /*
76507 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76508 index 32bff6d..d0cf986 100644
76509 --- a/net/netfilter/Kconfig
76510 +++ b/net/netfilter/Kconfig
76511 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
76512
76513 To compile it as a module, choose M here. If unsure, say N.
76514
76515 +config NETFILTER_XT_MATCH_GRADM
76516 + tristate '"gradm" match support'
76517 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76518 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76519 + ---help---
76520 + The gradm match allows to match on grsecurity RBAC being enabled.
76521 + It is useful when iptables rules are applied early on bootup to
76522 + prevent connections to the machine (except from a trusted host)
76523 + while the RBAC system is disabled.
76524 +
76525 config NETFILTER_XT_MATCH_HASHLIMIT
76526 tristate '"hashlimit" match support'
76527 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76528 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76529 index 1a02853..5d8c22e 100644
76530 --- a/net/netfilter/Makefile
76531 +++ b/net/netfilter/Makefile
76532 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
76533 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76534 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76535 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76536 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76537 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76538 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76539 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76540 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76541 index 12571fb..fb73976 100644
76542 --- a/net/netfilter/ipvs/ip_vs_conn.c
76543 +++ b/net/netfilter/ipvs/ip_vs_conn.c
76544 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76545 /* Increase the refcnt counter of the dest */
76546 atomic_inc(&dest->refcnt);
76547
76548 - conn_flags = atomic_read(&dest->conn_flags);
76549 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
76550 if (cp->protocol != IPPROTO_UDP)
76551 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76552 /* Bind with the destination and its corresponding transmitter */
76553 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76554 atomic_set(&cp->refcnt, 1);
76555
76556 atomic_set(&cp->n_control, 0);
76557 - atomic_set(&cp->in_pkts, 0);
76558 + atomic_set_unchecked(&cp->in_pkts, 0);
76559
76560 atomic_inc(&ipvs->conn_count);
76561 if (flags & IP_VS_CONN_F_NO_CPORT)
76562 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76563
76564 /* Don't drop the entry if its number of incoming packets is not
76565 located in [0, 8] */
76566 - i = atomic_read(&cp->in_pkts);
76567 + i = atomic_read_unchecked(&cp->in_pkts);
76568 if (i > 8 || i < 0) return 0;
76569
76570 if (!todrop_rate[i]) return 0;
76571 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76572 index 4f77bb1..5d0bc26 100644
76573 --- a/net/netfilter/ipvs/ip_vs_core.c
76574 +++ b/net/netfilter/ipvs/ip_vs_core.c
76575 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76576 ret = cp->packet_xmit(skb, cp, pd->pp);
76577 /* do not touch skb anymore */
76578
76579 - atomic_inc(&cp->in_pkts);
76580 + atomic_inc_unchecked(&cp->in_pkts);
76581 ip_vs_conn_put(cp);
76582 return ret;
76583 }
76584 @@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76585 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76586 pkts = sysctl_sync_threshold(ipvs);
76587 else
76588 - pkts = atomic_add_return(1, &cp->in_pkts);
76589 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76590
76591 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76592 cp->protocol == IPPROTO_SCTP) {
76593 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76594 index e3be48b..d658c8c 100644
76595 --- a/net/netfilter/ipvs/ip_vs_ctl.c
76596 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
76597 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76598 ip_vs_rs_hash(ipvs, dest);
76599 write_unlock_bh(&ipvs->rs_lock);
76600 }
76601 - atomic_set(&dest->conn_flags, conn_flags);
76602 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
76603
76604 /* bind the service */
76605 if (!dest->svc) {
76606 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76607 " %-7s %-6d %-10d %-10d\n",
76608 &dest->addr.in6,
76609 ntohs(dest->port),
76610 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76611 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76612 atomic_read(&dest->weight),
76613 atomic_read(&dest->activeconns),
76614 atomic_read(&dest->inactconns));
76615 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76616 "%-7s %-6d %-10d %-10d\n",
76617 ntohl(dest->addr.ip),
76618 ntohs(dest->port),
76619 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76620 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76621 atomic_read(&dest->weight),
76622 atomic_read(&dest->activeconns),
76623 atomic_read(&dest->inactconns));
76624 @@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
76625 struct ip_vs_dest_user_kern udest;
76626 struct netns_ipvs *ipvs = net_ipvs(net);
76627
76628 + pax_track_stack();
76629 +
76630 if (!capable(CAP_NET_ADMIN))
76631 return -EPERM;
76632
76633 @@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76634
76635 entry.addr = dest->addr.ip;
76636 entry.port = dest->port;
76637 - entry.conn_flags = atomic_read(&dest->conn_flags);
76638 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76639 entry.weight = atomic_read(&dest->weight);
76640 entry.u_threshold = dest->u_threshold;
76641 entry.l_threshold = dest->l_threshold;
76642 @@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76643 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76644
76645 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76646 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76647 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76648 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76649 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76650 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76651 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76652 index 3cdd479..116afa8 100644
76653 --- a/net/netfilter/ipvs/ip_vs_sync.c
76654 +++ b/net/netfilter/ipvs/ip_vs_sync.c
76655 @@ -649,7 +649,7 @@ control:
76656 * i.e only increment in_pkts for Templates.
76657 */
76658 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76659 - int pkts = atomic_add_return(1, &cp->in_pkts);
76660 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76661
76662 if (pkts % sysctl_sync_period(ipvs) != 1)
76663 return;
76664 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76665
76666 if (opt)
76667 memcpy(&cp->in_seq, opt, sizeof(*opt));
76668 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76669 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76670 cp->state = state;
76671 cp->old_state = cp->state;
76672 /*
76673 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76674 index ee319a4..8a285ee 100644
76675 --- a/net/netfilter/ipvs/ip_vs_xmit.c
76676 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
76677 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76678 else
76679 rc = NF_ACCEPT;
76680 /* do not touch skb anymore */
76681 - atomic_inc(&cp->in_pkts);
76682 + atomic_inc_unchecked(&cp->in_pkts);
76683 goto out;
76684 }
76685
76686 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76687 else
76688 rc = NF_ACCEPT;
76689 /* do not touch skb anymore */
76690 - atomic_inc(&cp->in_pkts);
76691 + atomic_inc_unchecked(&cp->in_pkts);
76692 goto out;
76693 }
76694
76695 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76696 index 2d8158a..5dca296 100644
76697 --- a/net/netfilter/nfnetlink_log.c
76698 +++ b/net/netfilter/nfnetlink_log.c
76699 @@ -70,7 +70,7 @@ struct nfulnl_instance {
76700 };
76701
76702 static DEFINE_SPINLOCK(instances_lock);
76703 -static atomic_t global_seq;
76704 +static atomic_unchecked_t global_seq;
76705
76706 #define INSTANCE_BUCKETS 16
76707 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76708 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76709 /* global sequence number */
76710 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76711 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76712 - htonl(atomic_inc_return(&global_seq)));
76713 + htonl(atomic_inc_return_unchecked(&global_seq)));
76714
76715 if (data_len) {
76716 struct nlattr *nla;
76717 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76718 new file mode 100644
76719 index 0000000..6905327
76720 --- /dev/null
76721 +++ b/net/netfilter/xt_gradm.c
76722 @@ -0,0 +1,51 @@
76723 +/*
76724 + * gradm match for netfilter
76725 + * Copyright © Zbigniew Krzystolik, 2010
76726 + *
76727 + * This program is free software; you can redistribute it and/or modify
76728 + * it under the terms of the GNU General Public License; either version
76729 + * 2 or 3 as published by the Free Software Foundation.
76730 + */
76731 +#include <linux/module.h>
76732 +#include <linux/moduleparam.h>
76733 +#include <linux/skbuff.h>
76734 +#include <linux/netfilter/x_tables.h>
76735 +#include <linux/grsecurity.h>
76736 +#include <linux/netfilter/xt_gradm.h>
76737 +
76738 +static bool
76739 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
76740 +{
76741 + const struct xt_gradm_mtinfo *info = par->matchinfo;
76742 + bool retval = false;
76743 + if (gr_acl_is_enabled())
76744 + retval = true;
76745 + return retval ^ info->invflags;
76746 +}
76747 +
76748 +static struct xt_match gradm_mt_reg __read_mostly = {
76749 + .name = "gradm",
76750 + .revision = 0,
76751 + .family = NFPROTO_UNSPEC,
76752 + .match = gradm_mt,
76753 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
76754 + .me = THIS_MODULE,
76755 +};
76756 +
76757 +static int __init gradm_mt_init(void)
76758 +{
76759 + return xt_register_match(&gradm_mt_reg);
76760 +}
76761 +
76762 +static void __exit gradm_mt_exit(void)
76763 +{
76764 + xt_unregister_match(&gradm_mt_reg);
76765 +}
76766 +
76767 +module_init(gradm_mt_init);
76768 +module_exit(gradm_mt_exit);
76769 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76770 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76771 +MODULE_LICENSE("GPL");
76772 +MODULE_ALIAS("ipt_gradm");
76773 +MODULE_ALIAS("ip6t_gradm");
76774 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76775 index 42ecb71..8d687c0 100644
76776 --- a/net/netfilter/xt_statistic.c
76777 +++ b/net/netfilter/xt_statistic.c
76778 @@ -18,7 +18,7 @@
76779 #include <linux/netfilter/x_tables.h>
76780
76781 struct xt_statistic_priv {
76782 - atomic_t count;
76783 + atomic_unchecked_t count;
76784 } ____cacheline_aligned_in_smp;
76785
76786 MODULE_LICENSE("GPL");
76787 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76788 break;
76789 case XT_STATISTIC_MODE_NTH:
76790 do {
76791 - oval = atomic_read(&info->master->count);
76792 + oval = atomic_read_unchecked(&info->master->count);
76793 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76794 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76795 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76796 if (nval == 0)
76797 ret = !ret;
76798 break;
76799 @@ -63,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76800 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76801 if (info->master == NULL)
76802 return -ENOMEM;
76803 - atomic_set(&info->master->count, info->u.nth.count);
76804 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76805
76806 return 0;
76807 }
76808 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76809 index 0a4db02..604f748 100644
76810 --- a/net/netlink/af_netlink.c
76811 +++ b/net/netlink/af_netlink.c
76812 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
76813 sk->sk_error_report(sk);
76814 }
76815 }
76816 - atomic_inc(&sk->sk_drops);
76817 + atomic_inc_unchecked(&sk->sk_drops);
76818 }
76819
76820 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76821 @@ -2000,7 +2000,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
76822 sk_wmem_alloc_get(s),
76823 nlk->cb,
76824 atomic_read(&s->sk_refcnt),
76825 - atomic_read(&s->sk_drops),
76826 + atomic_read_unchecked(&s->sk_drops),
76827 sock_i_ino(s)
76828 );
76829
76830 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
76831 index 732152f..60bb09e 100644
76832 --- a/net/netrom/af_netrom.c
76833 +++ b/net/netrom/af_netrom.c
76834 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76835 struct sock *sk = sock->sk;
76836 struct nr_sock *nr = nr_sk(sk);
76837
76838 + memset(sax, 0, sizeof(*sax));
76839 lock_sock(sk);
76840 if (peer != 0) {
76841 if (sk->sk_state != TCP_ESTABLISHED) {
76842 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76843 *uaddr_len = sizeof(struct full_sockaddr_ax25);
76844 } else {
76845 sax->fsa_ax25.sax25_family = AF_NETROM;
76846 - sax->fsa_ax25.sax25_ndigis = 0;
76847 sax->fsa_ax25.sax25_call = nr->source_addr;
76848 *uaddr_len = sizeof(struct sockaddr_ax25);
76849 }
76850 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
76851 index fabb4fa..e146b73 100644
76852 --- a/net/packet/af_packet.c
76853 +++ b/net/packet/af_packet.c
76854 @@ -954,7 +954,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76855
76856 spin_lock(&sk->sk_receive_queue.lock);
76857 po->stats.tp_packets++;
76858 - skb->dropcount = atomic_read(&sk->sk_drops);
76859 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76860 __skb_queue_tail(&sk->sk_receive_queue, skb);
76861 spin_unlock(&sk->sk_receive_queue.lock);
76862 sk->sk_data_ready(sk, skb->len);
76863 @@ -963,7 +963,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76864 drop_n_acct:
76865 spin_lock(&sk->sk_receive_queue.lock);
76866 po->stats.tp_drops++;
76867 - atomic_inc(&sk->sk_drops);
76868 + atomic_inc_unchecked(&sk->sk_drops);
76869 spin_unlock(&sk->sk_receive_queue.lock);
76870
76871 drop_n_restore:
76872 @@ -2479,7 +2479,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76873 case PACKET_HDRLEN:
76874 if (len > sizeof(int))
76875 len = sizeof(int);
76876 - if (copy_from_user(&val, optval, len))
76877 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
76878 return -EFAULT;
76879 switch (val) {
76880 case TPACKET_V1:
76881 @@ -2526,7 +2526,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76882
76883 if (put_user(len, optlen))
76884 return -EFAULT;
76885 - if (copy_to_user(optval, data, len))
76886 + if (len > sizeof(st) || copy_to_user(optval, data, len))
76887 return -EFAULT;
76888 return 0;
76889 }
76890 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
76891 index c6fffd9..a7ffa0c 100644
76892 --- a/net/phonet/af_phonet.c
76893 +++ b/net/phonet/af_phonet.c
76894 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
76895 {
76896 struct phonet_protocol *pp;
76897
76898 - if (protocol >= PHONET_NPROTO)
76899 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76900 return NULL;
76901
76902 rcu_read_lock();
76903 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76904 {
76905 int err = 0;
76906
76907 - if (protocol >= PHONET_NPROTO)
76908 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76909 return -EINVAL;
76910
76911 err = proto_register(pp->prot, 1);
76912 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
76913 index f17fd84..edffce8 100644
76914 --- a/net/phonet/pep.c
76915 +++ b/net/phonet/pep.c
76916 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76917
76918 case PNS_PEP_CTRL_REQ:
76919 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
76920 - atomic_inc(&sk->sk_drops);
76921 + atomic_inc_unchecked(&sk->sk_drops);
76922 break;
76923 }
76924 __skb_pull(skb, 4);
76925 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76926 }
76927
76928 if (pn->rx_credits == 0) {
76929 - atomic_inc(&sk->sk_drops);
76930 + atomic_inc_unchecked(&sk->sk_drops);
76931 err = -ENOBUFS;
76932 break;
76933 }
76934 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76935 }
76936
76937 if (pn->rx_credits == 0) {
76938 - atomic_inc(&sk->sk_drops);
76939 + atomic_inc_unchecked(&sk->sk_drops);
76940 err = NET_RX_DROP;
76941 break;
76942 }
76943 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76944 index ab07711..9d4ac5d 100644
76945 --- a/net/phonet/socket.c
76946 +++ b/net/phonet/socket.c
76947 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76948 pn->resource, sk->sk_state,
76949 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76950 sock_i_uid(sk), sock_i_ino(sk),
76951 - atomic_read(&sk->sk_refcnt), sk,
76952 - atomic_read(&sk->sk_drops), &len);
76953 + atomic_read(&sk->sk_refcnt),
76954 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76955 + NULL,
76956 +#else
76957 + sk,
76958 +#endif
76959 + atomic_read_unchecked(&sk->sk_drops), &len);
76960 }
76961 seq_printf(seq, "%*s\n", 127 - len, "");
76962 return 0;
76963 diff --git a/net/rds/cong.c b/net/rds/cong.c
76964 index 6daaa49..fbf6af5 100644
76965 --- a/net/rds/cong.c
76966 +++ b/net/rds/cong.c
76967 @@ -77,7 +77,7 @@
76968 * finds that the saved generation number is smaller than the global generation
76969 * number, it wakes up the process.
76970 */
76971 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76972 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76973
76974 /*
76975 * Congestion monitoring
76976 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76977 rdsdebug("waking map %p for %pI4\n",
76978 map, &map->m_addr);
76979 rds_stats_inc(s_cong_update_received);
76980 - atomic_inc(&rds_cong_generation);
76981 + atomic_inc_unchecked(&rds_cong_generation);
76982 if (waitqueue_active(&map->m_waitq))
76983 wake_up(&map->m_waitq);
76984 if (waitqueue_active(&rds_poll_waitq))
76985 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76986
76987 int rds_cong_updated_since(unsigned long *recent)
76988 {
76989 - unsigned long gen = atomic_read(&rds_cong_generation);
76990 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76991
76992 if (likely(*recent == gen))
76993 return 0;
76994 diff --git a/net/rds/ib.h b/net/rds/ib.h
76995 index edfaaaf..8c89879 100644
76996 --- a/net/rds/ib.h
76997 +++ b/net/rds/ib.h
76998 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76999 /* sending acks */
77000 unsigned long i_ack_flags;
77001 #ifdef KERNEL_HAS_ATOMIC64
77002 - atomic64_t i_ack_next; /* next ACK to send */
77003 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77004 #else
77005 spinlock_t i_ack_lock; /* protect i_ack_next */
77006 u64 i_ack_next; /* next ACK to send */
77007 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
77008 index cd67026..0b9a54a 100644
77009 --- a/net/rds/ib_cm.c
77010 +++ b/net/rds/ib_cm.c
77011 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
77012 /* Clear the ACK state */
77013 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77014 #ifdef KERNEL_HAS_ATOMIC64
77015 - atomic64_set(&ic->i_ack_next, 0);
77016 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77017 #else
77018 ic->i_ack_next = 0;
77019 #endif
77020 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
77021 index e29e0ca..fa3a6a3 100644
77022 --- a/net/rds/ib_recv.c
77023 +++ b/net/rds/ib_recv.c
77024 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77025 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
77026 int ack_required)
77027 {
77028 - atomic64_set(&ic->i_ack_next, seq);
77029 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77030 if (ack_required) {
77031 smp_mb__before_clear_bit();
77032 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77033 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77034 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77035 smp_mb__after_clear_bit();
77036
77037 - return atomic64_read(&ic->i_ack_next);
77038 + return atomic64_read_unchecked(&ic->i_ack_next);
77039 }
77040 #endif
77041
77042 diff --git a/net/rds/iw.h b/net/rds/iw.h
77043 index 04ce3b1..48119a6 100644
77044 --- a/net/rds/iw.h
77045 +++ b/net/rds/iw.h
77046 @@ -134,7 +134,7 @@ struct rds_iw_connection {
77047 /* sending acks */
77048 unsigned long i_ack_flags;
77049 #ifdef KERNEL_HAS_ATOMIC64
77050 - atomic64_t i_ack_next; /* next ACK to send */
77051 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77052 #else
77053 spinlock_t i_ack_lock; /* protect i_ack_next */
77054 u64 i_ack_next; /* next ACK to send */
77055 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
77056 index 9556d28..f046d0e 100644
77057 --- a/net/rds/iw_cm.c
77058 +++ b/net/rds/iw_cm.c
77059 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
77060 /* Clear the ACK state */
77061 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77062 #ifdef KERNEL_HAS_ATOMIC64
77063 - atomic64_set(&ic->i_ack_next, 0);
77064 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77065 #else
77066 ic->i_ack_next = 0;
77067 #endif
77068 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
77069 index 4e1de17..d121708 100644
77070 --- a/net/rds/iw_rdma.c
77071 +++ b/net/rds/iw_rdma.c
77072 @@ -184,6 +184,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
77073 struct rdma_cm_id *pcm_id;
77074 int rc;
77075
77076 + pax_track_stack();
77077 +
77078 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
77079 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
77080
77081 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
77082 index 5e57347..3916042 100644
77083 --- a/net/rds/iw_recv.c
77084 +++ b/net/rds/iw_recv.c
77085 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77086 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
77087 int ack_required)
77088 {
77089 - atomic64_set(&ic->i_ack_next, seq);
77090 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77091 if (ack_required) {
77092 smp_mb__before_clear_bit();
77093 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77094 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77095 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77096 smp_mb__after_clear_bit();
77097
77098 - return atomic64_read(&ic->i_ack_next);
77099 + return atomic64_read_unchecked(&ic->i_ack_next);
77100 }
77101 #endif
77102
77103 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
77104 index 8e0a320..ee8e38f 100644
77105 --- a/net/rds/tcp.c
77106 +++ b/net/rds/tcp.c
77107 @@ -58,7 +58,7 @@ void rds_tcp_nonagle(struct socket *sock)
77108 int val = 1;
77109
77110 set_fs(KERNEL_DS);
77111 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
77112 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
77113 sizeof(val));
77114 set_fs(oldfs);
77115 }
77116 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
77117 index 1b4fd68..2234175 100644
77118 --- a/net/rds/tcp_send.c
77119 +++ b/net/rds/tcp_send.c
77120 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
77121
77122 oldfs = get_fs();
77123 set_fs(KERNEL_DS);
77124 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
77125 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
77126 sizeof(val));
77127 set_fs(oldfs);
77128 }
77129 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
77130 index 74c064c..fdec26f 100644
77131 --- a/net/rxrpc/af_rxrpc.c
77132 +++ b/net/rxrpc/af_rxrpc.c
77133 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
77134 __be32 rxrpc_epoch;
77135
77136 /* current debugging ID */
77137 -atomic_t rxrpc_debug_id;
77138 +atomic_unchecked_t rxrpc_debug_id;
77139
77140 /* count of skbs currently in use */
77141 atomic_t rxrpc_n_skbs;
77142 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
77143 index f99cfce..3682692 100644
77144 --- a/net/rxrpc/ar-ack.c
77145 +++ b/net/rxrpc/ar-ack.c
77146 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77147
77148 _enter("{%d,%d,%d,%d},",
77149 call->acks_hard, call->acks_unacked,
77150 - atomic_read(&call->sequence),
77151 + atomic_read_unchecked(&call->sequence),
77152 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
77153
77154 stop = 0;
77155 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77156
77157 /* each Tx packet has a new serial number */
77158 sp->hdr.serial =
77159 - htonl(atomic_inc_return(&call->conn->serial));
77160 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77161
77162 hdr = (struct rxrpc_header *) txb->head;
77163 hdr->serial = sp->hdr.serial;
77164 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77165 */
77166 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77167 {
77168 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77169 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77170 }
77171
77172 /*
77173 @@ -629,7 +629,7 @@ process_further:
77174
77175 latest = ntohl(sp->hdr.serial);
77176 hard = ntohl(ack.firstPacket);
77177 - tx = atomic_read(&call->sequence);
77178 + tx = atomic_read_unchecked(&call->sequence);
77179
77180 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77181 latest,
77182 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_struct *work)
77183 u32 abort_code = RX_PROTOCOL_ERROR;
77184 u8 *acks = NULL;
77185
77186 + pax_track_stack();
77187 +
77188 //printk("\n--------------------\n");
77189 _enter("{%d,%s,%lx} [%lu]",
77190 call->debug_id, rxrpc_call_states[call->state], call->events,
77191 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_struct *work)
77192 goto maybe_reschedule;
77193
77194 send_ACK_with_skew:
77195 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77196 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77197 ntohl(ack.serial));
77198 send_ACK:
77199 mtu = call->conn->trans->peer->if_mtu;
77200 @@ -1173,7 +1175,7 @@ send_ACK:
77201 ackinfo.rxMTU = htonl(5692);
77202 ackinfo.jumbo_max = htonl(4);
77203
77204 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77205 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77206 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77207 ntohl(hdr.serial),
77208 ntohs(ack.maxSkew),
77209 @@ -1191,7 +1193,7 @@ send_ACK:
77210 send_message:
77211 _debug("send message");
77212
77213 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77214 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77215 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77216 send_message_2:
77217
77218 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77219 index bf656c2..48f9d27 100644
77220 --- a/net/rxrpc/ar-call.c
77221 +++ b/net/rxrpc/ar-call.c
77222 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77223 spin_lock_init(&call->lock);
77224 rwlock_init(&call->state_lock);
77225 atomic_set(&call->usage, 1);
77226 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77227 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77228 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77229
77230 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77231 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77232 index 4106ca9..a338d7a 100644
77233 --- a/net/rxrpc/ar-connection.c
77234 +++ b/net/rxrpc/ar-connection.c
77235 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77236 rwlock_init(&conn->lock);
77237 spin_lock_init(&conn->state_lock);
77238 atomic_set(&conn->usage, 1);
77239 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77240 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77241 conn->avail_calls = RXRPC_MAXCALLS;
77242 conn->size_align = 4;
77243 conn->header_size = sizeof(struct rxrpc_header);
77244 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77245 index e7ed43a..6afa140 100644
77246 --- a/net/rxrpc/ar-connevent.c
77247 +++ b/net/rxrpc/ar-connevent.c
77248 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77249
77250 len = iov[0].iov_len + iov[1].iov_len;
77251
77252 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77253 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77254 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77255
77256 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77257 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77258 index 1a2b0633..e8d1382 100644
77259 --- a/net/rxrpc/ar-input.c
77260 +++ b/net/rxrpc/ar-input.c
77261 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77262 /* track the latest serial number on this connection for ACK packet
77263 * information */
77264 serial = ntohl(sp->hdr.serial);
77265 - hi_serial = atomic_read(&call->conn->hi_serial);
77266 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77267 while (serial > hi_serial)
77268 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77269 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77270 serial);
77271
77272 /* request ACK generation for any ACK or DATA packet that requests
77273 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77274 index 8e22bd3..f66d1c0 100644
77275 --- a/net/rxrpc/ar-internal.h
77276 +++ b/net/rxrpc/ar-internal.h
77277 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77278 int error; /* error code for local abort */
77279 int debug_id; /* debug ID for printks */
77280 unsigned call_counter; /* call ID counter */
77281 - atomic_t serial; /* packet serial number counter */
77282 - atomic_t hi_serial; /* highest serial number received */
77283 + atomic_unchecked_t serial; /* packet serial number counter */
77284 + atomic_unchecked_t hi_serial; /* highest serial number received */
77285 u8 avail_calls; /* number of calls available */
77286 u8 size_align; /* data size alignment (for security) */
77287 u8 header_size; /* rxrpc + security header size */
77288 @@ -346,7 +346,7 @@ struct rxrpc_call {
77289 spinlock_t lock;
77290 rwlock_t state_lock; /* lock for state transition */
77291 atomic_t usage;
77292 - atomic_t sequence; /* Tx data packet sequence counter */
77293 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77294 u32 abort_code; /* local/remote abort code */
77295 enum { /* current state of call */
77296 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77297 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77298 */
77299 extern atomic_t rxrpc_n_skbs;
77300 extern __be32 rxrpc_epoch;
77301 -extern atomic_t rxrpc_debug_id;
77302 +extern atomic_unchecked_t rxrpc_debug_id;
77303 extern struct workqueue_struct *rxrpc_workqueue;
77304
77305 /*
77306 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77307 index 87f7135..74d3703 100644
77308 --- a/net/rxrpc/ar-local.c
77309 +++ b/net/rxrpc/ar-local.c
77310 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77311 spin_lock_init(&local->lock);
77312 rwlock_init(&local->services_lock);
77313 atomic_set(&local->usage, 1);
77314 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77315 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77316 memcpy(&local->srx, srx, sizeof(*srx));
77317 }
77318
77319 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
77320 index 5f22e26..e5bd20f 100644
77321 --- a/net/rxrpc/ar-output.c
77322 +++ b/net/rxrpc/ar-output.c
77323 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
77324 sp->hdr.cid = call->cid;
77325 sp->hdr.callNumber = call->call_id;
77326 sp->hdr.seq =
77327 - htonl(atomic_inc_return(&call->sequence));
77328 + htonl(atomic_inc_return_unchecked(&call->sequence));
77329 sp->hdr.serial =
77330 - htonl(atomic_inc_return(&conn->serial));
77331 + htonl(atomic_inc_return_unchecked(&conn->serial));
77332 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
77333 sp->hdr.userStatus = 0;
77334 sp->hdr.securityIndex = conn->security_ix;
77335 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
77336 index 2754f09..b20e38f 100644
77337 --- a/net/rxrpc/ar-peer.c
77338 +++ b/net/rxrpc/ar-peer.c
77339 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
77340 INIT_LIST_HEAD(&peer->error_targets);
77341 spin_lock_init(&peer->lock);
77342 atomic_set(&peer->usage, 1);
77343 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
77344 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77345 memcpy(&peer->srx, srx, sizeof(*srx));
77346
77347 rxrpc_assess_MTU_size(peer);
77348 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
77349 index 38047f7..9f48511 100644
77350 --- a/net/rxrpc/ar-proc.c
77351 +++ b/net/rxrpc/ar-proc.c
77352 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
77353 atomic_read(&conn->usage),
77354 rxrpc_conn_states[conn->state],
77355 key_serial(conn->key),
77356 - atomic_read(&conn->serial),
77357 - atomic_read(&conn->hi_serial));
77358 + atomic_read_unchecked(&conn->serial),
77359 + atomic_read_unchecked(&conn->hi_serial));
77360
77361 return 0;
77362 }
77363 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
77364 index 92df566..87ec1bf 100644
77365 --- a/net/rxrpc/ar-transport.c
77366 +++ b/net/rxrpc/ar-transport.c
77367 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
77368 spin_lock_init(&trans->client_lock);
77369 rwlock_init(&trans->conn_lock);
77370 atomic_set(&trans->usage, 1);
77371 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
77372 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77373
77374 if (peer->srx.transport.family == AF_INET) {
77375 switch (peer->srx.transport_type) {
77376 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
77377 index 7635107..5000b71 100644
77378 --- a/net/rxrpc/rxkad.c
77379 +++ b/net/rxrpc/rxkad.c
77380 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
77381 u16 check;
77382 int nsg;
77383
77384 + pax_track_stack();
77385 +
77386 sp = rxrpc_skb(skb);
77387
77388 _enter("");
77389 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
77390 u16 check;
77391 int nsg;
77392
77393 + pax_track_stack();
77394 +
77395 _enter("");
77396
77397 sp = rxrpc_skb(skb);
77398 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
77399
77400 len = iov[0].iov_len + iov[1].iov_len;
77401
77402 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77403 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77404 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
77405
77406 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77407 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
77408
77409 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
77410
77411 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
77412 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77413 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
77414
77415 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
77416 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
77417 index 865e68f..bf81204 100644
77418 --- a/net/sctp/auth.c
77419 +++ b/net/sctp/auth.c
77420 @@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
77421 struct sctp_auth_bytes *key;
77422
77423 /* Verify that we are not going to overflow INT_MAX */
77424 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
77425 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
77426 return NULL;
77427
77428 /* Allocate the shared key */
77429 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
77430 index 05a6ce2..c8bf836 100644
77431 --- a/net/sctp/proc.c
77432 +++ b/net/sctp/proc.c
77433 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
77434 seq_printf(seq,
77435 "%8pK %8pK %-3d %-3d %-2d %-4d "
77436 "%4d %8d %8d %7d %5lu %-5d %5d ",
77437 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
77438 + assoc, sk,
77439 + sctp_sk(sk)->type, sk->sk_state,
77440 assoc->state, hash,
77441 assoc->assoc_id,
77442 assoc->sndbuf_used,
77443 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
77444 index 836aa63..d779d7b 100644
77445 --- a/net/sctp/socket.c
77446 +++ b/net/sctp/socket.c
77447 @@ -4575,7 +4575,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
77448 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
77449 if (space_left < addrlen)
77450 return -ENOMEM;
77451 - if (copy_to_user(to, &temp, addrlen))
77452 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
77453 return -EFAULT;
77454 to += addrlen;
77455 cnt++;
77456 diff --git a/net/socket.c b/net/socket.c
77457 index ffe92ca..8057b85 100644
77458 --- a/net/socket.c
77459 +++ b/net/socket.c
77460 @@ -88,6 +88,7 @@
77461 #include <linux/nsproxy.h>
77462 #include <linux/magic.h>
77463 #include <linux/slab.h>
77464 +#include <linux/in.h>
77465
77466 #include <asm/uaccess.h>
77467 #include <asm/unistd.h>
77468 @@ -105,6 +106,8 @@
77469 #include <linux/sockios.h>
77470 #include <linux/atalk.h>
77471
77472 +#include <linux/grsock.h>
77473 +
77474 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
77475 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
77476 unsigned long nr_segs, loff_t pos);
77477 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
77478 &sockfs_dentry_operations, SOCKFS_MAGIC);
77479 }
77480
77481 -static struct vfsmount *sock_mnt __read_mostly;
77482 +struct vfsmount *sock_mnt __read_mostly;
77483
77484 static struct file_system_type sock_fs_type = {
77485 .name = "sockfs",
77486 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
77487 return -EAFNOSUPPORT;
77488 if (type < 0 || type >= SOCK_MAX)
77489 return -EINVAL;
77490 + if (protocol < 0)
77491 + return -EINVAL;
77492
77493 /* Compatibility.
77494
77495 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
77496 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
77497 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
77498
77499 + if(!gr_search_socket(family, type, protocol)) {
77500 + retval = -EACCES;
77501 + goto out;
77502 + }
77503 +
77504 + if (gr_handle_sock_all(family, type, protocol)) {
77505 + retval = -EACCES;
77506 + goto out;
77507 + }
77508 +
77509 retval = sock_create(family, type, protocol, &sock);
77510 if (retval < 0)
77511 goto out;
77512 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77513 if (sock) {
77514 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
77515 if (err >= 0) {
77516 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
77517 + err = -EACCES;
77518 + goto error;
77519 + }
77520 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
77521 + if (err)
77522 + goto error;
77523 +
77524 err = security_socket_bind(sock,
77525 (struct sockaddr *)&address,
77526 addrlen);
77527 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77528 (struct sockaddr *)
77529 &address, addrlen);
77530 }
77531 +error:
77532 fput_light(sock->file, fput_needed);
77533 }
77534 return err;
77535 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
77536 if ((unsigned)backlog > somaxconn)
77537 backlog = somaxconn;
77538
77539 + if (gr_handle_sock_server_other(sock->sk)) {
77540 + err = -EPERM;
77541 + goto error;
77542 + }
77543 +
77544 + err = gr_search_listen(sock);
77545 + if (err)
77546 + goto error;
77547 +
77548 err = security_socket_listen(sock, backlog);
77549 if (!err)
77550 err = sock->ops->listen(sock, backlog);
77551
77552 +error:
77553 fput_light(sock->file, fput_needed);
77554 }
77555 return err;
77556 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77557 newsock->type = sock->type;
77558 newsock->ops = sock->ops;
77559
77560 + if (gr_handle_sock_server_other(sock->sk)) {
77561 + err = -EPERM;
77562 + sock_release(newsock);
77563 + goto out_put;
77564 + }
77565 +
77566 + err = gr_search_accept(sock);
77567 + if (err) {
77568 + sock_release(newsock);
77569 + goto out_put;
77570 + }
77571 +
77572 /*
77573 * We don't need try_module_get here, as the listening socket (sock)
77574 * has the protocol module (sock->ops->owner) held.
77575 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77576 fd_install(newfd, newfile);
77577 err = newfd;
77578
77579 + gr_attach_curr_ip(newsock->sk);
77580 +
77581 out_put:
77582 fput_light(sock->file, fput_needed);
77583 out:
77584 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77585 int, addrlen)
77586 {
77587 struct socket *sock;
77588 + struct sockaddr *sck;
77589 struct sockaddr_storage address;
77590 int err, fput_needed;
77591
77592 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77593 if (err < 0)
77594 goto out_put;
77595
77596 + sck = (struct sockaddr *)&address;
77597 +
77598 + if (gr_handle_sock_client(sck)) {
77599 + err = -EACCES;
77600 + goto out_put;
77601 + }
77602 +
77603 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
77604 + if (err)
77605 + goto out_put;
77606 +
77607 err =
77608 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
77609 if (err)
77610 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77611 unsigned char *ctl_buf = ctl;
77612 int err, ctl_len, iov_size, total_len;
77613
77614 + pax_track_stack();
77615 +
77616 err = -EFAULT;
77617 if (MSG_CMSG_COMPAT & flags) {
77618 if (get_compat_msghdr(msg_sys, msg_compat))
77619 @@ -1950,7 +2012,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77620 * checking falls down on this.
77621 */
77622 if (copy_from_user(ctl_buf,
77623 - (void __user __force *)msg_sys->msg_control,
77624 + (void __force_user *)msg_sys->msg_control,
77625 ctl_len))
77626 goto out_freectl;
77627 msg_sys->msg_control = ctl_buf;
77628 @@ -2120,7 +2182,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
77629 * kernel msghdr to use the kernel address space)
77630 */
77631
77632 - uaddr = (__force void __user *)msg_sys->msg_name;
77633 + uaddr = (void __force_user *)msg_sys->msg_name;
77634 uaddr_len = COMPAT_NAMELEN(msg);
77635 if (MSG_CMSG_COMPAT & flags) {
77636 err = verify_compat_iovec(msg_sys, iov,
77637 @@ -2748,7 +2810,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77638 }
77639
77640 ifr = compat_alloc_user_space(buf_size);
77641 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
77642 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
77643
77644 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
77645 return -EFAULT;
77646 @@ -2772,12 +2834,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77647 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
77648
77649 if (copy_in_user(rxnfc, compat_rxnfc,
77650 - (void *)(&rxnfc->fs.m_ext + 1) -
77651 - (void *)rxnfc) ||
77652 + (void __user *)(&rxnfc->fs.m_ext + 1) -
77653 + (void __user *)rxnfc) ||
77654 copy_in_user(&rxnfc->fs.ring_cookie,
77655 &compat_rxnfc->fs.ring_cookie,
77656 - (void *)(&rxnfc->fs.location + 1) -
77657 - (void *)&rxnfc->fs.ring_cookie) ||
77658 + (void __user *)(&rxnfc->fs.location + 1) -
77659 + (void __user *)&rxnfc->fs.ring_cookie) ||
77660 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
77661 sizeof(rxnfc->rule_cnt)))
77662 return -EFAULT;
77663 @@ -2789,12 +2851,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77664
77665 if (convert_out) {
77666 if (copy_in_user(compat_rxnfc, rxnfc,
77667 - (const void *)(&rxnfc->fs.m_ext + 1) -
77668 - (const void *)rxnfc) ||
77669 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
77670 + (const void __user *)rxnfc) ||
77671 copy_in_user(&compat_rxnfc->fs.ring_cookie,
77672 &rxnfc->fs.ring_cookie,
77673 - (const void *)(&rxnfc->fs.location + 1) -
77674 - (const void *)&rxnfc->fs.ring_cookie) ||
77675 + (const void __user *)(&rxnfc->fs.location + 1) -
77676 + (const void __user *)&rxnfc->fs.ring_cookie) ||
77677 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
77678 sizeof(rxnfc->rule_cnt)))
77679 return -EFAULT;
77680 @@ -2864,7 +2926,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
77681 old_fs = get_fs();
77682 set_fs(KERNEL_DS);
77683 err = dev_ioctl(net, cmd,
77684 - (struct ifreq __user __force *) &kifr);
77685 + (struct ifreq __force_user *) &kifr);
77686 set_fs(old_fs);
77687
77688 return err;
77689 @@ -2973,7 +3035,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
77690
77691 old_fs = get_fs();
77692 set_fs(KERNEL_DS);
77693 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
77694 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
77695 set_fs(old_fs);
77696
77697 if (cmd == SIOCGIFMAP && !err) {
77698 @@ -3078,7 +3140,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
77699 ret |= __get_user(rtdev, &(ur4->rt_dev));
77700 if (rtdev) {
77701 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
77702 - r4.rt_dev = (char __user __force *)devname;
77703 + r4.rt_dev = (char __force_user *)devname;
77704 devname[15] = 0;
77705 } else
77706 r4.rt_dev = NULL;
77707 @@ -3318,8 +3380,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
77708 int __user *uoptlen;
77709 int err;
77710
77711 - uoptval = (char __user __force *) optval;
77712 - uoptlen = (int __user __force *) optlen;
77713 + uoptval = (char __force_user *) optval;
77714 + uoptlen = (int __force_user *) optlen;
77715
77716 set_fs(KERNEL_DS);
77717 if (level == SOL_SOCKET)
77718 @@ -3339,7 +3401,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
77719 char __user *uoptval;
77720 int err;
77721
77722 - uoptval = (char __user __force *) optval;
77723 + uoptval = (char __force_user *) optval;
77724
77725 set_fs(KERNEL_DS);
77726 if (level == SOL_SOCKET)
77727 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
77728 index d12ffa5..0b5a6e2 100644
77729 --- a/net/sunrpc/sched.c
77730 +++ b/net/sunrpc/sched.c
77731 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
77732 #ifdef RPC_DEBUG
77733 static void rpc_task_set_debuginfo(struct rpc_task *task)
77734 {
77735 - static atomic_t rpc_pid;
77736 + static atomic_unchecked_t rpc_pid;
77737
77738 - task->tk_pid = atomic_inc_return(&rpc_pid);
77739 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
77740 }
77741 #else
77742 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
77743 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
77744 index 767d494..fe17e9d 100644
77745 --- a/net/sunrpc/svcsock.c
77746 +++ b/net/sunrpc/svcsock.c
77747 @@ -394,7 +394,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
77748 int buflen, unsigned int base)
77749 {
77750 size_t save_iovlen;
77751 - void __user *save_iovbase;
77752 + void *save_iovbase;
77753 unsigned int i;
77754 int ret;
77755
77756 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
77757 index 09af4fa..77110a9 100644
77758 --- a/net/sunrpc/xprtrdma/svc_rdma.c
77759 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
77760 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
77761 static unsigned int min_max_inline = 4096;
77762 static unsigned int max_max_inline = 65536;
77763
77764 -atomic_t rdma_stat_recv;
77765 -atomic_t rdma_stat_read;
77766 -atomic_t rdma_stat_write;
77767 -atomic_t rdma_stat_sq_starve;
77768 -atomic_t rdma_stat_rq_starve;
77769 -atomic_t rdma_stat_rq_poll;
77770 -atomic_t rdma_stat_rq_prod;
77771 -atomic_t rdma_stat_sq_poll;
77772 -atomic_t rdma_stat_sq_prod;
77773 +atomic_unchecked_t rdma_stat_recv;
77774 +atomic_unchecked_t rdma_stat_read;
77775 +atomic_unchecked_t rdma_stat_write;
77776 +atomic_unchecked_t rdma_stat_sq_starve;
77777 +atomic_unchecked_t rdma_stat_rq_starve;
77778 +atomic_unchecked_t rdma_stat_rq_poll;
77779 +atomic_unchecked_t rdma_stat_rq_prod;
77780 +atomic_unchecked_t rdma_stat_sq_poll;
77781 +atomic_unchecked_t rdma_stat_sq_prod;
77782
77783 /* Temporary NFS request map and context caches */
77784 struct kmem_cache *svc_rdma_map_cachep;
77785 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
77786 len -= *ppos;
77787 if (len > *lenp)
77788 len = *lenp;
77789 - if (len && copy_to_user(buffer, str_buf, len))
77790 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
77791 return -EFAULT;
77792 *lenp = len;
77793 *ppos += len;
77794 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
77795 {
77796 .procname = "rdma_stat_read",
77797 .data = &rdma_stat_read,
77798 - .maxlen = sizeof(atomic_t),
77799 + .maxlen = sizeof(atomic_unchecked_t),
77800 .mode = 0644,
77801 .proc_handler = read_reset_stat,
77802 },
77803 {
77804 .procname = "rdma_stat_recv",
77805 .data = &rdma_stat_recv,
77806 - .maxlen = sizeof(atomic_t),
77807 + .maxlen = sizeof(atomic_unchecked_t),
77808 .mode = 0644,
77809 .proc_handler = read_reset_stat,
77810 },
77811 {
77812 .procname = "rdma_stat_write",
77813 .data = &rdma_stat_write,
77814 - .maxlen = sizeof(atomic_t),
77815 + .maxlen = sizeof(atomic_unchecked_t),
77816 .mode = 0644,
77817 .proc_handler = read_reset_stat,
77818 },
77819 {
77820 .procname = "rdma_stat_sq_starve",
77821 .data = &rdma_stat_sq_starve,
77822 - .maxlen = sizeof(atomic_t),
77823 + .maxlen = sizeof(atomic_unchecked_t),
77824 .mode = 0644,
77825 .proc_handler = read_reset_stat,
77826 },
77827 {
77828 .procname = "rdma_stat_rq_starve",
77829 .data = &rdma_stat_rq_starve,
77830 - .maxlen = sizeof(atomic_t),
77831 + .maxlen = sizeof(atomic_unchecked_t),
77832 .mode = 0644,
77833 .proc_handler = read_reset_stat,
77834 },
77835 {
77836 .procname = "rdma_stat_rq_poll",
77837 .data = &rdma_stat_rq_poll,
77838 - .maxlen = sizeof(atomic_t),
77839 + .maxlen = sizeof(atomic_unchecked_t),
77840 .mode = 0644,
77841 .proc_handler = read_reset_stat,
77842 },
77843 {
77844 .procname = "rdma_stat_rq_prod",
77845 .data = &rdma_stat_rq_prod,
77846 - .maxlen = sizeof(atomic_t),
77847 + .maxlen = sizeof(atomic_unchecked_t),
77848 .mode = 0644,
77849 .proc_handler = read_reset_stat,
77850 },
77851 {
77852 .procname = "rdma_stat_sq_poll",
77853 .data = &rdma_stat_sq_poll,
77854 - .maxlen = sizeof(atomic_t),
77855 + .maxlen = sizeof(atomic_unchecked_t),
77856 .mode = 0644,
77857 .proc_handler = read_reset_stat,
77858 },
77859 {
77860 .procname = "rdma_stat_sq_prod",
77861 .data = &rdma_stat_sq_prod,
77862 - .maxlen = sizeof(atomic_t),
77863 + .maxlen = sizeof(atomic_unchecked_t),
77864 .mode = 0644,
77865 .proc_handler = read_reset_stat,
77866 },
77867 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77868 index df67211..c354b13 100644
77869 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77870 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77871 @@ -499,7 +499,7 @@ next_sge:
77872 svc_rdma_put_context(ctxt, 0);
77873 goto out;
77874 }
77875 - atomic_inc(&rdma_stat_read);
77876 + atomic_inc_unchecked(&rdma_stat_read);
77877
77878 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
77879 chl_map->ch[ch_no].count -= read_wr.num_sge;
77880 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77881 dto_q);
77882 list_del_init(&ctxt->dto_q);
77883 } else {
77884 - atomic_inc(&rdma_stat_rq_starve);
77885 + atomic_inc_unchecked(&rdma_stat_rq_starve);
77886 clear_bit(XPT_DATA, &xprt->xpt_flags);
77887 ctxt = NULL;
77888 }
77889 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77890 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
77891 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
77892 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
77893 - atomic_inc(&rdma_stat_recv);
77894 + atomic_inc_unchecked(&rdma_stat_recv);
77895
77896 /* Build up the XDR from the receive buffers. */
77897 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
77898 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77899 index 249a835..fb2794b 100644
77900 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77901 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77902 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
77903 write_wr.wr.rdma.remote_addr = to;
77904
77905 /* Post It */
77906 - atomic_inc(&rdma_stat_write);
77907 + atomic_inc_unchecked(&rdma_stat_write);
77908 if (svc_rdma_send(xprt, &write_wr))
77909 goto err;
77910 return 0;
77911 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77912 index a385430..32254ea 100644
77913 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
77914 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77915 @@ -299,7 +299,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77916 return;
77917
77918 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
77919 - atomic_inc(&rdma_stat_rq_poll);
77920 + atomic_inc_unchecked(&rdma_stat_rq_poll);
77921
77922 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
77923 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
77924 @@ -321,7 +321,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77925 }
77926
77927 if (ctxt)
77928 - atomic_inc(&rdma_stat_rq_prod);
77929 + atomic_inc_unchecked(&rdma_stat_rq_prod);
77930
77931 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
77932 /*
77933 @@ -393,7 +393,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77934 return;
77935
77936 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
77937 - atomic_inc(&rdma_stat_sq_poll);
77938 + atomic_inc_unchecked(&rdma_stat_sq_poll);
77939 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
77940 if (wc.status != IB_WC_SUCCESS)
77941 /* Close the transport */
77942 @@ -411,7 +411,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77943 }
77944
77945 if (ctxt)
77946 - atomic_inc(&rdma_stat_sq_prod);
77947 + atomic_inc_unchecked(&rdma_stat_sq_prod);
77948 }
77949
77950 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
77951 @@ -1273,7 +1273,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
77952 spin_lock_bh(&xprt->sc_lock);
77953 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
77954 spin_unlock_bh(&xprt->sc_lock);
77955 - atomic_inc(&rdma_stat_sq_starve);
77956 + atomic_inc_unchecked(&rdma_stat_sq_starve);
77957
77958 /* See if we can opportunistically reap SQ WR to make room */
77959 sq_cq_reap(xprt);
77960 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
77961 index ca84212..3aa338f 100644
77962 --- a/net/sysctl_net.c
77963 +++ b/net/sysctl_net.c
77964 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
77965 struct ctl_table *table)
77966 {
77967 /* Allow network administrator to have same access as root. */
77968 - if (capable(CAP_NET_ADMIN)) {
77969 + if (capable_nolog(CAP_NET_ADMIN)) {
77970 int mode = (table->mode >> 6) & 7;
77971 return (mode << 6) | (mode << 3) | mode;
77972 }
77973 diff --git a/net/tipc/link.c b/net/tipc/link.c
77974 index f89570c..016cf63 100644
77975 --- a/net/tipc/link.c
77976 +++ b/net/tipc/link.c
77977 @@ -1170,7 +1170,7 @@ static int link_send_sections_long(struct tipc_port *sender,
77978 struct tipc_msg fragm_hdr;
77979 struct sk_buff *buf, *buf_chain, *prev;
77980 u32 fragm_crs, fragm_rest, hsz, sect_rest;
77981 - const unchar *sect_crs;
77982 + const unchar __user *sect_crs;
77983 int curr_sect;
77984 u32 fragm_no;
77985
77986 @@ -1214,7 +1214,7 @@ again:
77987
77988 if (!sect_rest) {
77989 sect_rest = msg_sect[++curr_sect].iov_len;
77990 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
77991 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
77992 }
77993
77994 if (sect_rest < fragm_rest)
77995 @@ -1233,7 +1233,7 @@ error:
77996 }
77997 } else
77998 skb_copy_to_linear_data_offset(buf, fragm_crs,
77999 - sect_crs, sz);
78000 + (const void __force_kernel *)sect_crs, sz);
78001 sect_crs += sz;
78002 sect_rest -= sz;
78003 fragm_crs += sz;
78004 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
78005 index 83d5096..dcba497 100644
78006 --- a/net/tipc/msg.c
78007 +++ b/net/tipc/msg.c
78008 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
78009 msg_sect[cnt].iov_len);
78010 else
78011 skb_copy_to_linear_data_offset(*buf, pos,
78012 - msg_sect[cnt].iov_base,
78013 + (const void __force_kernel *)msg_sect[cnt].iov_base,
78014 msg_sect[cnt].iov_len);
78015 pos += msg_sect[cnt].iov_len;
78016 }
78017 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
78018 index 6cf7268..7a488ce 100644
78019 --- a/net/tipc/subscr.c
78020 +++ b/net/tipc/subscr.c
78021 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
78022 {
78023 struct iovec msg_sect;
78024
78025 - msg_sect.iov_base = (void *)&sub->evt;
78026 + msg_sect.iov_base = (void __force_user *)&sub->evt;
78027 msg_sect.iov_len = sizeof(struct tipc_event);
78028
78029 sub->evt.event = htohl(event, sub->swap);
78030 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
78031 index ec68e1c..fdd792f 100644
78032 --- a/net/unix/af_unix.c
78033 +++ b/net/unix/af_unix.c
78034 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
78035 err = -ECONNREFUSED;
78036 if (!S_ISSOCK(inode->i_mode))
78037 goto put_fail;
78038 +
78039 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
78040 + err = -EACCES;
78041 + goto put_fail;
78042 + }
78043 +
78044 u = unix_find_socket_byinode(inode);
78045 if (!u)
78046 goto put_fail;
78047 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
78048 if (u) {
78049 struct dentry *dentry;
78050 dentry = unix_sk(u)->dentry;
78051 +
78052 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
78053 + err = -EPERM;
78054 + sock_put(u);
78055 + goto fail;
78056 + }
78057 +
78058 if (dentry)
78059 touch_atime(unix_sk(u)->mnt, dentry);
78060 } else
78061 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
78062 err = security_path_mknod(&path, dentry, mode, 0);
78063 if (err)
78064 goto out_mknod_drop_write;
78065 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
78066 + err = -EACCES;
78067 + goto out_mknod_drop_write;
78068 + }
78069 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
78070 out_mknod_drop_write:
78071 mnt_drop_write(path.mnt);
78072 if (err)
78073 goto out_mknod_dput;
78074 +
78075 + gr_handle_create(dentry, path.mnt);
78076 +
78077 mutex_unlock(&path.dentry->d_inode->i_mutex);
78078 dput(path.dentry);
78079 path.dentry = dentry;
78080 diff --git a/net/wireless/core.h b/net/wireless/core.h
78081 index 8672e02..48782dd 100644
78082 --- a/net/wireless/core.h
78083 +++ b/net/wireless/core.h
78084 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
78085 struct mutex mtx;
78086
78087 /* rfkill support */
78088 - struct rfkill_ops rfkill_ops;
78089 + rfkill_ops_no_const rfkill_ops;
78090 struct rfkill *rfkill;
78091 struct work_struct rfkill_sync;
78092
78093 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
78094 index fdbc23c..212d53e 100644
78095 --- a/net/wireless/wext-core.c
78096 +++ b/net/wireless/wext-core.c
78097 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78098 */
78099
78100 /* Support for very large requests */
78101 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
78102 - (user_length > descr->max_tokens)) {
78103 + if (user_length > descr->max_tokens) {
78104 /* Allow userspace to GET more than max so
78105 * we can support any size GET requests.
78106 * There is still a limit : -ENOMEM.
78107 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78108 }
78109 }
78110
78111 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
78112 - /*
78113 - * If this is a GET, but not NOMAX, it means that the extra
78114 - * data is not bounded by userspace, but by max_tokens. Thus
78115 - * set the length to max_tokens. This matches the extra data
78116 - * allocation.
78117 - * The driver should fill it with the number of tokens it
78118 - * provided, and it may check iwp->length rather than having
78119 - * knowledge of max_tokens. If the driver doesn't change the
78120 - * iwp->length, this ioctl just copies back max_token tokens
78121 - * filled with zeroes. Hopefully the driver isn't claiming
78122 - * them to be valid data.
78123 - */
78124 - iwp->length = descr->max_tokens;
78125 - }
78126 -
78127 err = handler(dev, info, (union iwreq_data *) iwp, extra);
78128
78129 iwp->length += essid_compat;
78130 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
78131 index 552df27..8e7f238 100644
78132 --- a/net/xfrm/xfrm_policy.c
78133 +++ b/net/xfrm/xfrm_policy.c
78134 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
78135 {
78136 policy->walk.dead = 1;
78137
78138 - atomic_inc(&policy->genid);
78139 + atomic_inc_unchecked(&policy->genid);
78140
78141 if (del_timer(&policy->timer))
78142 xfrm_pol_put(policy);
78143 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
78144 hlist_add_head(&policy->bydst, chain);
78145 xfrm_pol_hold(policy);
78146 net->xfrm.policy_count[dir]++;
78147 - atomic_inc(&flow_cache_genid);
78148 + atomic_inc_unchecked(&flow_cache_genid);
78149 if (delpol)
78150 __xfrm_policy_unlink(delpol, dir);
78151 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
78152 @@ -1530,7 +1530,7 @@ free_dst:
78153 goto out;
78154 }
78155
78156 -static int inline
78157 +static inline int
78158 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78159 {
78160 if (!*target) {
78161 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78162 return 0;
78163 }
78164
78165 -static int inline
78166 +static inline int
78167 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78168 {
78169 #ifdef CONFIG_XFRM_SUB_POLICY
78170 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78171 #endif
78172 }
78173
78174 -static int inline
78175 +static inline int
78176 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78177 {
78178 #ifdef CONFIG_XFRM_SUB_POLICY
78179 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78180
78181 xdst->num_pols = num_pols;
78182 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78183 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78184 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78185
78186 return xdst;
78187 }
78188 @@ -2335,7 +2335,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78189 if (xdst->xfrm_genid != dst->xfrm->genid)
78190 return 0;
78191 if (xdst->num_pols > 0 &&
78192 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78193 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78194 return 0;
78195
78196 mtu = dst_mtu(dst->child);
78197 @@ -2870,7 +2870,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78198 sizeof(pol->xfrm_vec[i].saddr));
78199 pol->xfrm_vec[i].encap_family = mp->new_family;
78200 /* flush bundles */
78201 - atomic_inc(&pol->genid);
78202 + atomic_inc_unchecked(&pol->genid);
78203 }
78204 }
78205
78206 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
78207 index 0256b8a..9341ef6 100644
78208 --- a/net/xfrm/xfrm_user.c
78209 +++ b/net/xfrm/xfrm_user.c
78210 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
78211 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
78212 int i;
78213
78214 + pax_track_stack();
78215 +
78216 if (xp->xfrm_nr == 0)
78217 return 0;
78218
78219 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
78220 int err;
78221 int n = 0;
78222
78223 + pax_track_stack();
78224 +
78225 if (attrs[XFRMA_MIGRATE] == NULL)
78226 return -EINVAL;
78227
78228 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78229 index a0fd502..a8e6e83 100644
78230 --- a/scripts/Makefile.build
78231 +++ b/scripts/Makefile.build
78232 @@ -109,7 +109,7 @@ endif
78233 endif
78234
78235 # Do not include host rules unless needed
78236 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78237 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
78238 include scripts/Makefile.host
78239 endif
78240
78241 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78242 index 686cb0d..9d653bf 100644
78243 --- a/scripts/Makefile.clean
78244 +++ b/scripts/Makefile.clean
78245 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78246 __clean-files := $(extra-y) $(always) \
78247 $(targets) $(clean-files) \
78248 $(host-progs) \
78249 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78250 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78251 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78252
78253 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78254
78255 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78256 index 1ac414f..a1c1451 100644
78257 --- a/scripts/Makefile.host
78258 +++ b/scripts/Makefile.host
78259 @@ -31,6 +31,7 @@
78260 # Note: Shared libraries consisting of C++ files are not supported
78261
78262 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78263 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78264
78265 # C code
78266 # Executables compiled from a single .c file
78267 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78268 # Shared libaries (only .c supported)
78269 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78270 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78271 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78272 # Remove .so files from "xxx-objs"
78273 host-cobjs := $(filter-out %.so,$(host-cobjs))
78274
78275 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78276 index 291228e..6c55203 100644
78277 --- a/scripts/basic/fixdep.c
78278 +++ b/scripts/basic/fixdep.c
78279 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78280 /*
78281 * Lookup a value in the configuration string.
78282 */
78283 -static int is_defined_config(const char *name, int len, unsigned int hash)
78284 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78285 {
78286 struct item *aux;
78287
78288 @@ -211,10 +211,10 @@ static void clear_config(void)
78289 /*
78290 * Record the use of a CONFIG_* word.
78291 */
78292 -static void use_config(const char *m, int slen)
78293 +static void use_config(const char *m, unsigned int slen)
78294 {
78295 unsigned int hash = strhash(m, slen);
78296 - int c, i;
78297 + unsigned int c, i;
78298
78299 if (is_defined_config(m, slen, hash))
78300 return;
78301 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78302
78303 static void parse_config_file(const char *map, size_t len)
78304 {
78305 - const int *end = (const int *) (map + len);
78306 + const unsigned int *end = (const unsigned int *) (map + len);
78307 /* start at +1, so that p can never be < map */
78308 - const int *m = (const int *) map + 1;
78309 + const unsigned int *m = (const unsigned int *) map + 1;
78310 const char *p, *q;
78311
78312 for (; m < end; m++) {
78313 @@ -405,7 +405,7 @@ static void print_deps(void)
78314 static void traps(void)
78315 {
78316 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78317 - int *p = (int *)test;
78318 + unsigned int *p = (unsigned int *)test;
78319
78320 if (*p != INT_CONF) {
78321 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
78322 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78323 new file mode 100644
78324 index 0000000..8729101
78325 --- /dev/null
78326 +++ b/scripts/gcc-plugin.sh
78327 @@ -0,0 +1,2 @@
78328 +#!/bin/sh
78329 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
78330 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78331 index e26e2fb..f84937b 100644
78332 --- a/scripts/mod/file2alias.c
78333 +++ b/scripts/mod/file2alias.c
78334 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
78335 unsigned long size, unsigned long id_size,
78336 void *symval)
78337 {
78338 - int i;
78339 + unsigned int i;
78340
78341 if (size % id_size || size < id_size) {
78342 if (cross_build != 0)
78343 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
78344 /* USB is special because the bcdDevice can be matched against a numeric range */
78345 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78346 static void do_usb_entry(struct usb_device_id *id,
78347 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78348 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78349 unsigned char range_lo, unsigned char range_hi,
78350 unsigned char max, struct module *mod)
78351 {
78352 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78353 {
78354 unsigned int devlo, devhi;
78355 unsigned char chi, clo, max;
78356 - int ndigits;
78357 + unsigned int ndigits;
78358
78359 id->match_flags = TO_NATIVE(id->match_flags);
78360 id->idVendor = TO_NATIVE(id->idVendor);
78361 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78362 for (i = 0; i < count; i++) {
78363 const char *id = (char *)devs[i].id;
78364 char acpi_id[sizeof(devs[0].id)];
78365 - int j;
78366 + unsigned int j;
78367
78368 buf_printf(&mod->dev_table_buf,
78369 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78370 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78371
78372 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78373 const char *id = (char *)card->devs[j].id;
78374 - int i2, j2;
78375 + unsigned int i2, j2;
78376 int dup = 0;
78377
78378 if (!id[0])
78379 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78380 /* add an individual alias for every device entry */
78381 if (!dup) {
78382 char acpi_id[sizeof(card->devs[0].id)];
78383 - int k;
78384 + unsigned int k;
78385
78386 buf_printf(&mod->dev_table_buf,
78387 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78388 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, const char *s)
78389 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
78390 char *alias)
78391 {
78392 - int i, j;
78393 + unsigned int i, j;
78394
78395 sprintf(alias, "dmi*");
78396
78397 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
78398 index a509ff8..5822633 100644
78399 --- a/scripts/mod/modpost.c
78400 +++ b/scripts/mod/modpost.c
78401 @@ -919,6 +919,7 @@ enum mismatch {
78402 ANY_INIT_TO_ANY_EXIT,
78403 ANY_EXIT_TO_ANY_INIT,
78404 EXPORT_TO_INIT_EXIT,
78405 + DATA_TO_TEXT
78406 };
78407
78408 struct sectioncheck {
78409 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
78410 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
78411 .mismatch = EXPORT_TO_INIT_EXIT,
78412 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
78413 +},
78414 +/* Do not reference code from writable data */
78415 +{
78416 + .fromsec = { DATA_SECTIONS, NULL },
78417 + .tosec = { TEXT_SECTIONS, NULL },
78418 + .mismatch = DATA_TO_TEXT
78419 }
78420 };
78421
78422 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
78423 continue;
78424 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
78425 continue;
78426 - if (sym->st_value == addr)
78427 - return sym;
78428 /* Find a symbol nearby - addr are maybe negative */
78429 d = sym->st_value - addr;
78430 + if (d == 0)
78431 + return sym;
78432 if (d < 0)
78433 d = addr - sym->st_value;
78434 if (d < distance) {
78435 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
78436 tosym, prl_to, prl_to, tosym);
78437 free(prl_to);
78438 break;
78439 + case DATA_TO_TEXT:
78440 +/*
78441 + fprintf(stderr,
78442 + "The variable %s references\n"
78443 + "the %s %s%s%s\n",
78444 + fromsym, to, sec2annotation(tosec), tosym, to_p);
78445 +*/
78446 + break;
78447 }
78448 fprintf(stderr, "\n");
78449 }
78450 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
78451 static void check_sec_ref(struct module *mod, const char *modname,
78452 struct elf_info *elf)
78453 {
78454 - int i;
78455 + unsigned int i;
78456 Elf_Shdr *sechdrs = elf->sechdrs;
78457
78458 /* Walk through all sections */
78459 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
78460 va_end(ap);
78461 }
78462
78463 -void buf_write(struct buffer *buf, const char *s, int len)
78464 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
78465 {
78466 if (buf->size - buf->pos < len) {
78467 buf->size += len + SZ;
78468 @@ -1966,7 +1981,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
78469 if (fstat(fileno(file), &st) < 0)
78470 goto close_write;
78471
78472 - if (st.st_size != b->pos)
78473 + if (st.st_size != (off_t)b->pos)
78474 goto close_write;
78475
78476 tmp = NOFAIL(malloc(b->pos));
78477 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
78478 index 2031119..b5433af 100644
78479 --- a/scripts/mod/modpost.h
78480 +++ b/scripts/mod/modpost.h
78481 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
78482
78483 struct buffer {
78484 char *p;
78485 - int pos;
78486 - int size;
78487 + unsigned int pos;
78488 + unsigned int size;
78489 };
78490
78491 void __attribute__((format(printf, 2, 3)))
78492 buf_printf(struct buffer *buf, const char *fmt, ...);
78493
78494 void
78495 -buf_write(struct buffer *buf, const char *s, int len);
78496 +buf_write(struct buffer *buf, const char *s, unsigned int len);
78497
78498 struct module {
78499 struct module *next;
78500 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
78501 index 9dfcd6d..099068e 100644
78502 --- a/scripts/mod/sumversion.c
78503 +++ b/scripts/mod/sumversion.c
78504 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
78505 goto out;
78506 }
78507
78508 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
78509 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
78510 warn("writing sum in %s failed: %s\n",
78511 filename, strerror(errno));
78512 goto out;
78513 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
78514 index 5c11312..72742b5 100644
78515 --- a/scripts/pnmtologo.c
78516 +++ b/scripts/pnmtologo.c
78517 @@ -237,14 +237,14 @@ static void write_header(void)
78518 fprintf(out, " * Linux logo %s\n", logoname);
78519 fputs(" */\n\n", out);
78520 fputs("#include <linux/linux_logo.h>\n\n", out);
78521 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
78522 + fprintf(out, "static unsigned char %s_data[] = {\n",
78523 logoname);
78524 }
78525
78526 static void write_footer(void)
78527 {
78528 fputs("\n};\n\n", out);
78529 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
78530 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
78531 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
78532 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
78533 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
78534 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
78535 fputs("\n};\n\n", out);
78536
78537 /* write logo clut */
78538 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
78539 + fprintf(out, "static unsigned char %s_clut[] = {\n",
78540 logoname);
78541 write_hex_cnt = 0;
78542 for (i = 0; i < logo_clutsize; i++) {
78543 diff --git a/security/Kconfig b/security/Kconfig
78544 index e0f08b5..7388edd 100644
78545 --- a/security/Kconfig
78546 +++ b/security/Kconfig
78547 @@ -4,6 +4,586 @@
78548
78549 menu "Security options"
78550
78551 +source grsecurity/Kconfig
78552 +
78553 +menu "PaX"
78554 +
78555 + config ARCH_TRACK_EXEC_LIMIT
78556 + bool
78557 +
78558 + config PAX_KERNEXEC_PLUGIN
78559 + bool
78560 +
78561 + config PAX_PER_CPU_PGD
78562 + bool
78563 +
78564 + config TASK_SIZE_MAX_SHIFT
78565 + int
78566 + depends on X86_64
78567 + default 47 if !PAX_PER_CPU_PGD
78568 + default 42 if PAX_PER_CPU_PGD
78569 +
78570 + config PAX_ENABLE_PAE
78571 + bool
78572 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
78573 +
78574 +config PAX
78575 + bool "Enable various PaX features"
78576 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
78577 + help
78578 + This allows you to enable various PaX features. PaX adds
78579 + intrusion prevention mechanisms to the kernel that reduce
78580 + the risks posed by exploitable memory corruption bugs.
78581 +
78582 +menu "PaX Control"
78583 + depends on PAX
78584 +
78585 +config PAX_SOFTMODE
78586 + bool 'Support soft mode'
78587 + select PAX_PT_PAX_FLAGS
78588 + help
78589 + Enabling this option will allow you to run PaX in soft mode, that
78590 + is, PaX features will not be enforced by default, only on executables
78591 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
78592 + is the only way to mark executables for soft mode use.
78593 +
78594 + Soft mode can be activated by using the "pax_softmode=1" kernel command
78595 + line option on boot. Furthermore you can control various PaX features
78596 + at runtime via the entries in /proc/sys/kernel/pax.
78597 +
78598 +config PAX_EI_PAX
78599 + bool 'Use legacy ELF header marking'
78600 + help
78601 + Enabling this option will allow you to control PaX features on
78602 + a per executable basis via the 'chpax' utility available at
78603 + http://pax.grsecurity.net/. The control flags will be read from
78604 + an otherwise reserved part of the ELF header. This marking has
78605 + numerous drawbacks (no support for soft-mode, toolchain does not
78606 + know about the non-standard use of the ELF header) therefore it
78607 + has been deprecated in favour of PT_PAX_FLAGS support.
78608 +
78609 + Note that if you enable PT_PAX_FLAGS marking support as well,
78610 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
78611 +
78612 +config PAX_PT_PAX_FLAGS
78613 + bool 'Use ELF program header marking'
78614 + help
78615 + Enabling this option will allow you to control PaX features on
78616 + a per executable basis via the 'paxctl' utility available at
78617 + http://pax.grsecurity.net/. The control flags will be read from
78618 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
78619 + has the benefits of supporting both soft mode and being fully
78620 + integrated into the toolchain (the binutils patch is available
78621 + from http://pax.grsecurity.net).
78622 +
78623 + If your toolchain does not support PT_PAX_FLAGS markings,
78624 + you can create one in most cases with 'paxctl -C'.
78625 +
78626 + Note that if you enable the legacy EI_PAX marking support as well,
78627 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
78628 +
78629 +choice
78630 + prompt 'MAC system integration'
78631 + default PAX_HAVE_ACL_FLAGS
78632 + help
78633 + Mandatory Access Control systems have the option of controlling
78634 + PaX flags on a per executable basis, choose the method supported
78635 + by your particular system.
78636 +
78637 + - "none": if your MAC system does not interact with PaX,
78638 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
78639 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
78640 +
78641 + NOTE: this option is for developers/integrators only.
78642 +
78643 + config PAX_NO_ACL_FLAGS
78644 + bool 'none'
78645 +
78646 + config PAX_HAVE_ACL_FLAGS
78647 + bool 'direct'
78648 +
78649 + config PAX_HOOK_ACL_FLAGS
78650 + bool 'hook'
78651 +endchoice
78652 +
78653 +endmenu
78654 +
78655 +menu "Non-executable pages"
78656 + depends on PAX
78657 +
78658 +config PAX_NOEXEC
78659 + bool "Enforce non-executable pages"
78660 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
78661 + help
78662 + By design some architectures do not allow for protecting memory
78663 + pages against execution or even if they do, Linux does not make
78664 + use of this feature. In practice this means that if a page is
78665 + readable (such as the stack or heap) it is also executable.
78666 +
78667 + There is a well known exploit technique that makes use of this
78668 + fact and a common programming mistake where an attacker can
78669 + introduce code of his choice somewhere in the attacked program's
78670 + memory (typically the stack or the heap) and then execute it.
78671 +
78672 + If the attacked program was running with different (typically
78673 + higher) privileges than that of the attacker, then he can elevate
78674 + his own privilege level (e.g. get a root shell, write to files for
78675 + which he does not have write access to, etc).
78676 +
78677 + Enabling this option will let you choose from various features
78678 + that prevent the injection and execution of 'foreign' code in
78679 + a program.
78680 +
78681 + This will also break programs that rely on the old behaviour and
78682 + expect that dynamically allocated memory via the malloc() family
78683 + of functions is executable (which it is not). Notable examples
78684 + are the XFree86 4.x server, the java runtime and wine.
78685 +
78686 +config PAX_PAGEEXEC
78687 + bool "Paging based non-executable pages"
78688 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
78689 + select S390_SWITCH_AMODE if S390
78690 + select S390_EXEC_PROTECT if S390
78691 + select ARCH_TRACK_EXEC_LIMIT if X86_32
78692 + help
78693 + This implementation is based on the paging feature of the CPU.
78694 + On i386 without hardware non-executable bit support there is a
78695 + variable but usually low performance impact, however on Intel's
78696 + P4 core based CPUs it is very high so you should not enable this
78697 + for kernels meant to be used on such CPUs.
78698 +
78699 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
78700 + with hardware non-executable bit support there is no performance
78701 + impact, on ppc the impact is negligible.
78702 +
78703 + Note that several architectures require various emulations due to
78704 + badly designed userland ABIs, this will cause a performance impact
78705 + but will disappear as soon as userland is fixed. For example, ppc
78706 + userland MUST have been built with secure-plt by a recent toolchain.
78707 +
78708 +config PAX_SEGMEXEC
78709 + bool "Segmentation based non-executable pages"
78710 + depends on PAX_NOEXEC && X86_32
78711 + help
78712 + This implementation is based on the segmentation feature of the
78713 + CPU and has a very small performance impact, however applications
78714 + will be limited to a 1.5 GB address space instead of the normal
78715 + 3 GB.
78716 +
78717 +config PAX_EMUTRAMP
78718 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
78719 + default y if PARISC
78720 + help
78721 + There are some programs and libraries that for one reason or
78722 + another attempt to execute special small code snippets from
78723 + non-executable memory pages. Most notable examples are the
78724 + signal handler return code generated by the kernel itself and
78725 + the GCC trampolines.
78726 +
78727 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
78728 + such programs will no longer work under your kernel.
78729 +
78730 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
78731 + utilities to enable trampoline emulation for the affected programs
78732 + yet still have the protection provided by the non-executable pages.
78733 +
78734 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
78735 + your system will not even boot.
78736 +
78737 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
78738 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
78739 + for the affected files.
78740 +
78741 + NOTE: enabling this feature *may* open up a loophole in the
78742 + protection provided by non-executable pages that an attacker
78743 + could abuse. Therefore the best solution is to not have any
78744 + files on your system that would require this option. This can
78745 + be achieved by not using libc5 (which relies on the kernel
78746 + signal handler return code) and not using or rewriting programs
78747 + that make use of the nested function implementation of GCC.
78748 + Skilled users can just fix GCC itself so that it implements
78749 + nested function calls in a way that does not interfere with PaX.
78750 +
78751 +config PAX_EMUSIGRT
78752 + bool "Automatically emulate sigreturn trampolines"
78753 + depends on PAX_EMUTRAMP && PARISC
78754 + default y
78755 + help
78756 + Enabling this option will have the kernel automatically detect
78757 + and emulate signal return trampolines executing on the stack
78758 + that would otherwise lead to task termination.
78759 +
78760 + This solution is intended as a temporary one for users with
78761 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
78762 + Modula-3 runtime, etc) or executables linked to such, basically
78763 + everything that does not specify its own SA_RESTORER function in
78764 + normal executable memory like glibc 2.1+ does.
78765 +
78766 + On parisc you MUST enable this option, otherwise your system will
78767 + not even boot.
78768 +
78769 + NOTE: this feature cannot be disabled on a per executable basis
78770 + and since it *does* open up a loophole in the protection provided
78771 + by non-executable pages, the best solution is to not have any
78772 + files on your system that would require this option.
78773 +
78774 +config PAX_MPROTECT
78775 + bool "Restrict mprotect()"
78776 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
78777 + help
78778 + Enabling this option will prevent programs from
78779 + - changing the executable status of memory pages that were
78780 + not originally created as executable,
78781 + - making read-only executable pages writable again,
78782 + - creating executable pages from anonymous memory,
78783 + - making read-only-after-relocations (RELRO) data pages writable again.
78784 +
78785 + You should say Y here to complete the protection provided by
78786 + the enforcement of non-executable pages.
78787 +
78788 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78789 + this feature on a per file basis.
78790 +
78791 +config PAX_MPROTECT_COMPAT
78792 + bool "Use legacy/compat protection demoting (read help)"
78793 + depends on PAX_MPROTECT
78794 + default n
78795 + help
78796 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
78797 + by sending the proper error code to the application. For some broken
78798 + userland, this can cause problems with Python or other applications. The
78799 + current implementation however allows for applications like clamav to
78800 + detect if JIT compilation/execution is allowed and to fall back gracefully
78801 + to an interpreter-based mode if it does not. While we encourage everyone
78802 + to use the current implementation as-is and push upstream to fix broken
78803 + userland (note that the RWX logging option can assist with this), in some
78804 + environments this may not be possible. Having to disable MPROTECT
78805 + completely on certain binaries reduces the security benefit of PaX,
78806 + so this option is provided for those environments to revert to the old
78807 + behavior.
78808 +
78809 +config PAX_ELFRELOCS
78810 + bool "Allow ELF text relocations (read help)"
78811 + depends on PAX_MPROTECT
78812 + default n
78813 + help
78814 + Non-executable pages and mprotect() restrictions are effective
78815 + in preventing the introduction of new executable code into an
78816 + attacked task's address space. There remain only two venues
78817 + for this kind of attack: if the attacker can execute already
78818 + existing code in the attacked task then he can either have it
78819 + create and mmap() a file containing his code or have it mmap()
78820 + an already existing ELF library that does not have position
78821 + independent code in it and use mprotect() on it to make it
78822 + writable and copy his code there. While protecting against
78823 + the former approach is beyond PaX, the latter can be prevented
78824 + by having only PIC ELF libraries on one's system (which do not
78825 + need to relocate their code). If you are sure this is your case,
78826 + as is the case with all modern Linux distributions, then leave
78827 + this option disabled. You should say 'n' here.
78828 +
78829 +config PAX_ETEXECRELOCS
78830 + bool "Allow ELF ET_EXEC text relocations"
78831 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
78832 + select PAX_ELFRELOCS
78833 + default y
78834 + help
78835 + On some architectures there are incorrectly created applications
78836 + that require text relocations and would not work without enabling
78837 + this option. If you are an alpha, ia64 or parisc user, you should
78838 + enable this option and disable it once you have made sure that
78839 + none of your applications need it.
78840 +
78841 +config PAX_EMUPLT
78842 + bool "Automatically emulate ELF PLT"
78843 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
78844 + default y
78845 + help
78846 + Enabling this option will have the kernel automatically detect
78847 + and emulate the Procedure Linkage Table entries in ELF files.
78848 + On some architectures such entries are in writable memory, and
78849 + become non-executable leading to task termination. Therefore
78850 + it is mandatory that you enable this option on alpha, parisc,
78851 + sparc and sparc64, otherwise your system would not even boot.
78852 +
78853 + NOTE: this feature *does* open up a loophole in the protection
78854 + provided by the non-executable pages, therefore the proper
78855 + solution is to modify the toolchain to produce a PLT that does
78856 + not need to be writable.
78857 +
78858 +config PAX_DLRESOLVE
78859 + bool 'Emulate old glibc resolver stub'
78860 + depends on PAX_EMUPLT && SPARC
78861 + default n
78862 + help
78863 + This option is needed if userland has an old glibc (before 2.4)
78864 + that puts a 'save' instruction into the runtime generated resolver
78865 + stub that needs special emulation.
78866 +
78867 +config PAX_KERNEXEC
78868 + bool "Enforce non-executable kernel pages"
78869 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
78870 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
78871 + select PAX_KERNEXEC_PLUGIN if X86_64
78872 + help
78873 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
78874 + that is, enabling this option will make it harder to inject
78875 + and execute 'foreign' code in kernel memory itself.
78876 +
78877 + Note that on x86_64 kernels there is a known regression when
78878 + this feature and KVM/VMX are both enabled in the host kernel.
78879 +
78880 +choice
78881 + prompt "Return Address Instrumentation Method"
78882 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
78883 + depends on PAX_KERNEXEC_PLUGIN
78884 + help
78885 + Select the method used to instrument function pointer dereferences.
78886 + Note that binary modules cannot be instrumented by this approach.
78887 +
78888 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
78889 + bool "bts"
78890 + help
78891 + This method is compatible with binary only modules but has
78892 + a higher runtime overhead.
78893 +
78894 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
78895 + bool "or"
78896 + depends on !PARAVIRT
78897 + help
78898 + This method is incompatible with binary only modules but has
78899 + a lower runtime overhead.
78900 +endchoice
78901 +
78902 +config PAX_KERNEXEC_PLUGIN_METHOD
78903 + string
78904 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
78905 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
78906 + default ""
78907 +
78908 +config PAX_KERNEXEC_MODULE_TEXT
78909 + int "Minimum amount of memory reserved for module code"
78910 + default "4"
78911 + depends on PAX_KERNEXEC && X86_32 && MODULES
78912 + help
78913 + Due to implementation details the kernel must reserve a fixed
78914 + amount of memory for module code at compile time that cannot be
78915 + changed at runtime. Here you can specify the minimum amount
78916 + in MB that will be reserved. Due to the same implementation
78917 + details this size will always be rounded up to the next 2/4 MB
78918 + boundary (depends on PAE) so the actually available memory for
78919 + module code will usually be more than this minimum.
78920 +
78921 + The default 4 MB should be enough for most users but if you have
78922 + an excessive number of modules (e.g., most distribution configs
78923 + compile many drivers as modules) or use huge modules such as
78924 + nvidia's kernel driver, you will need to adjust this amount.
78925 + A good rule of thumb is to look at your currently loaded kernel
78926 + modules and add up their sizes.
78927 +
78928 +endmenu
78929 +
78930 +menu "Address Space Layout Randomization"
78931 + depends on PAX
78932 +
78933 +config PAX_ASLR
78934 + bool "Address Space Layout Randomization"
78935 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
78936 + help
78937 + Many if not most exploit techniques rely on the knowledge of
78938 + certain addresses in the attacked program. The following options
78939 + will allow the kernel to apply a certain amount of randomization
78940 + to specific parts of the program thereby forcing an attacker to
78941 + guess them in most cases. Any failed guess will most likely crash
78942 + the attacked program which allows the kernel to detect such attempts
78943 + and react on them. PaX itself provides no reaction mechanisms,
78944 + instead it is strongly encouraged that you make use of Nergal's
78945 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
78946 + (http://www.grsecurity.net/) built-in crash detection features or
78947 + develop one yourself.
78948 +
78949 + By saying Y here you can choose to randomize the following areas:
78950 + - top of the task's kernel stack
78951 + - top of the task's userland stack
78952 + - base address for mmap() requests that do not specify one
78953 + (this includes all libraries)
78954 + - base address of the main executable
78955 +
78956 + It is strongly recommended to say Y here as address space layout
78957 + randomization has negligible impact on performance yet it provides
78958 + a very effective protection.
78959 +
78960 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78961 + this feature on a per file basis.
78962 +
78963 +config PAX_RANDKSTACK
78964 + bool "Randomize kernel stack base"
78965 + depends on X86_TSC && X86
78966 + help
78967 + By saying Y here the kernel will randomize every task's kernel
78968 + stack on every system call. This will not only force an attacker
78969 + to guess it but also prevent him from making use of possible
78970 + leaked information about it.
78971 +
78972 + Since the kernel stack is a rather scarce resource, randomization
78973 + may cause unexpected stack overflows, therefore you should very
78974 + carefully test your system. Note that once enabled in the kernel
78975 + configuration, this feature cannot be disabled on a per file basis.
78976 +
78977 +config PAX_RANDUSTACK
78978 + bool "Randomize user stack base"
78979 + depends on PAX_ASLR
78980 + help
78981 + By saying Y here the kernel will randomize every task's userland
78982 + stack. The randomization is done in two steps where the second
78983 + one may apply a big amount of shift to the top of the stack and
78984 + cause problems for programs that want to use lots of memory (more
78985 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
78986 + For this reason the second step can be controlled by 'chpax' or
78987 + 'paxctl' on a per file basis.
78988 +
78989 +config PAX_RANDMMAP
78990 + bool "Randomize mmap() base"
78991 + depends on PAX_ASLR
78992 + help
78993 + By saying Y here the kernel will use a randomized base address for
78994 + mmap() requests that do not specify one themselves. As a result
78995 + all dynamically loaded libraries will appear at random addresses
78996 + and therefore be harder to exploit by a technique where an attacker
78997 + attempts to execute library code for his purposes (e.g. spawn a
78998 + shell from an exploited program that is running at an elevated
78999 + privilege level).
79000 +
79001 + Furthermore, if a program is relinked as a dynamic ELF file, its
79002 + base address will be randomized as well, completing the full
79003 + randomization of the address space layout. Attacking such programs
79004 + becomes a guess game. You can find an example of doing this at
79005 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
79006 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
79007 +
79008 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
79009 + feature on a per file basis.
79010 +
79011 +endmenu
79012 +
79013 +menu "Miscellaneous hardening features"
79014 +
79015 +config PAX_MEMORY_SANITIZE
79016 + bool "Sanitize all freed memory"
79017 + help
79018 + By saying Y here the kernel will erase memory pages as soon as they
79019 + are freed. This in turn reduces the lifetime of data stored in the
79020 + pages, making it less likely that sensitive information such as
79021 + passwords, cryptographic secrets, etc stay in memory for too long.
79022 +
79023 + This is especially useful for programs whose runtime is short, long
79024 + lived processes and the kernel itself benefit from this as long as
79025 + they operate on whole memory pages and ensure timely freeing of pages
79026 + that may hold sensitive information.
79027 +
79028 + The tradeoff is performance impact, on a single CPU system kernel
79029 + compilation sees a 3% slowdown, other systems and workloads may vary
79030 + and you are advised to test this feature on your expected workload
79031 + before deploying it.
79032 +
79033 + Note that this feature does not protect data stored in live pages,
79034 + e.g., process memory swapped to disk may stay there for a long time.
79035 +
79036 +config PAX_MEMORY_STACKLEAK
79037 + bool "Sanitize kernel stack"
79038 + depends on X86
79039 + help
79040 + By saying Y here the kernel will erase the kernel stack before it
79041 + returns from a system call. This in turn reduces the information
79042 + that a kernel stack leak bug can reveal.
79043 +
79044 + Note that such a bug can still leak information that was put on
79045 + the stack by the current system call (the one eventually triggering
79046 + the bug) but traces of earlier system calls on the kernel stack
79047 + cannot leak anymore.
79048 +
79049 + The tradeoff is performance impact: on a single CPU system kernel
79050 + compilation sees a 1% slowdown, other systems and workloads may vary
79051 + and you are advised to test this feature on your expected workload
79052 + before deploying it.
79053 +
79054 + Note: full support for this feature requires gcc with plugin support
79055 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
79056 + is not supported). Using older gcc versions means that functions
79057 + with large enough stack frames may leave uninitialized memory behind
79058 + that may be exposed to a later syscall leaking the stack.
79059 +
79060 +config PAX_MEMORY_UDEREF
79061 + bool "Prevent invalid userland pointer dereference"
79062 + depends on X86 && !UML_X86 && !XEN
79063 + select PAX_PER_CPU_PGD if X86_64
79064 + help
79065 + By saying Y here the kernel will be prevented from dereferencing
79066 + userland pointers in contexts where the kernel expects only kernel
79067 + pointers. This is both a useful runtime debugging feature and a
79068 + security measure that prevents exploiting a class of kernel bugs.
79069 +
79070 + The tradeoff is that some virtualization solutions may experience
79071 + a huge slowdown and therefore you should not enable this feature
79072 + for kernels meant to run in such environments. Whether a given VM
79073 + solution is affected or not is best determined by simply trying it
79074 + out, the performance impact will be obvious right on boot as this
79075 + mechanism engages from very early on. A good rule of thumb is that
79076 + VMs running on CPUs without hardware virtualization support (i.e.,
79077 + the majority of IA-32 CPUs) will likely experience the slowdown.
79078 +
79079 +config PAX_REFCOUNT
79080 + bool "Prevent various kernel object reference counter overflows"
79081 + depends on GRKERNSEC && (X86 || SPARC64)
79082 + help
79083 + By saying Y here the kernel will detect and prevent overflowing
79084 + various (but not all) kinds of object reference counters. Such
79085 + overflows can normally occur due to bugs only and are often, if
79086 + not always, exploitable.
79087 +
79088 + The tradeoff is that data structures protected by an overflowed
79089 + refcount will never be freed and therefore will leak memory. Note
79090 + that this leak also happens even without this protection but in
79091 + that case the overflow can eventually trigger the freeing of the
79092 + data structure while it is still being used elsewhere, resulting
79093 + in the exploitable situation that this feature prevents.
79094 +
79095 + Since this has a negligible performance impact, you should enable
79096 + this feature.
79097 +
79098 +config PAX_USERCOPY
79099 + bool "Harden heap object copies between kernel and userland"
79100 + depends on X86 || PPC || SPARC || ARM
79101 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
79102 + help
79103 + By saying Y here the kernel will enforce the size of heap objects
79104 + when they are copied in either direction between the kernel and
79105 + userland, even if only a part of the heap object is copied.
79106 +
79107 + Specifically, this checking prevents information leaking from the
79108 + kernel heap during kernel to userland copies (if the kernel heap
79109 + object is otherwise fully initialized) and prevents kernel heap
79110 + overflows during userland to kernel copies.
79111 +
79112 + Note that the current implementation provides the strictest bounds
79113 + checks for the SLUB allocator.
79114 +
79115 + Enabling this option also enables per-slab cache protection against
79116 + data in a given cache being copied into/out of via userland
79117 + accessors. Though the whitelist of regions will be reduced over
79118 + time, it notably protects important data structures like task structs.
79119 +
79120 + If frame pointers are enabled on x86, this option will also restrict
79121 + copies into and out of the kernel stack to local variables within a
79122 + single frame.
79123 +
79124 + Since this has a negligible performance impact, you should enable
79125 + this feature.
79126 +
79127 +endmenu
79128 +
79129 +endmenu
79130 +
79131 config KEYS
79132 bool "Enable access key retention support"
79133 help
79134 @@ -167,7 +747,7 @@ config INTEL_TXT
79135 config LSM_MMAP_MIN_ADDR
79136 int "Low address space for LSM to protect from user allocation"
79137 depends on SECURITY && SECURITY_SELINUX
79138 - default 32768 if ARM
79139 + default 32768 if ALPHA || ARM || PARISC || SPARC32
79140 default 65536
79141 help
79142 This is the portion of low virtual memory which should be protected
79143 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
79144 index 3783202..1852837 100644
79145 --- a/security/apparmor/lsm.c
79146 +++ b/security/apparmor/lsm.c
79147 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
79148 return error;
79149 }
79150
79151 -static struct security_operations apparmor_ops = {
79152 +static struct security_operations apparmor_ops __read_only = {
79153 .name = "apparmor",
79154
79155 .ptrace_access_check = apparmor_ptrace_access_check,
79156 diff --git a/security/commoncap.c b/security/commoncap.c
79157 index a93b3b7..4410df9 100644
79158 --- a/security/commoncap.c
79159 +++ b/security/commoncap.c
79160 @@ -28,6 +28,7 @@
79161 #include <linux/prctl.h>
79162 #include <linux/securebits.h>
79163 #include <linux/user_namespace.h>
79164 +#include <net/sock.h>
79165
79166 /*
79167 * If a non-root user executes a setuid-root binary in
79168 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
79169
79170 int cap_netlink_recv(struct sk_buff *skb, int cap)
79171 {
79172 - if (!cap_raised(current_cap(), cap))
79173 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
79174 return -EPERM;
79175 return 0;
79176 }
79177 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79178 {
79179 const struct cred *cred = current_cred();
79180
79181 + if (gr_acl_enable_at_secure())
79182 + return 1;
79183 +
79184 if (cred->uid != 0) {
79185 if (bprm->cap_effective)
79186 return 1;
79187 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79188 index 08408bd..67e6e78 100644
79189 --- a/security/integrity/ima/ima.h
79190 +++ b/security/integrity/ima/ima.h
79191 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79192 extern spinlock_t ima_queue_lock;
79193
79194 struct ima_h_table {
79195 - atomic_long_t len; /* number of stored measurements in the list */
79196 - atomic_long_t violations;
79197 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79198 + atomic_long_unchecked_t violations;
79199 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79200 };
79201 extern struct ima_h_table ima_htable;
79202 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79203 index da36d2c..e1e1965 100644
79204 --- a/security/integrity/ima/ima_api.c
79205 +++ b/security/integrity/ima/ima_api.c
79206 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79207 int result;
79208
79209 /* can overflow, only indicator */
79210 - atomic_long_inc(&ima_htable.violations);
79211 + atomic_long_inc_unchecked(&ima_htable.violations);
79212
79213 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79214 if (!entry) {
79215 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79216 index ef21b96..d53e674 100644
79217 --- a/security/integrity/ima/ima_fs.c
79218 +++ b/security/integrity/ima/ima_fs.c
79219 @@ -28,12 +28,12 @@
79220 static int valid_policy = 1;
79221 #define TMPBUFLEN 12
79222 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79223 - loff_t *ppos, atomic_long_t *val)
79224 + loff_t *ppos, atomic_long_unchecked_t *val)
79225 {
79226 char tmpbuf[TMPBUFLEN];
79227 ssize_t len;
79228
79229 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79230 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79231 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79232 }
79233
79234 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79235 index 8e28f04..d5951b1 100644
79236 --- a/security/integrity/ima/ima_queue.c
79237 +++ b/security/integrity/ima/ima_queue.c
79238 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79239 INIT_LIST_HEAD(&qe->later);
79240 list_add_tail_rcu(&qe->later, &ima_measurements);
79241
79242 - atomic_long_inc(&ima_htable.len);
79243 + atomic_long_inc_unchecked(&ima_htable.len);
79244 key = ima_hash_key(entry->digest);
79245 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79246 return 0;
79247 diff --git a/security/keys/compat.c b/security/keys/compat.c
79248 index 338b510..a235861 100644
79249 --- a/security/keys/compat.c
79250 +++ b/security/keys/compat.c
79251 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79252 if (ret == 0)
79253 goto no_payload_free;
79254
79255 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79256 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79257
79258 if (iov != iovstack)
79259 kfree(iov);
79260 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79261 index eca5191..da9c7f0 100644
79262 --- a/security/keys/keyctl.c
79263 +++ b/security/keys/keyctl.c
79264 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79265 /*
79266 * Copy the iovec data from userspace
79267 */
79268 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79269 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79270 unsigned ioc)
79271 {
79272 for (; ioc > 0; ioc--) {
79273 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79274 * If successful, 0 will be returned.
79275 */
79276 long keyctl_instantiate_key_common(key_serial_t id,
79277 - const struct iovec *payload_iov,
79278 + const struct iovec __user *payload_iov,
79279 unsigned ioc,
79280 size_t plen,
79281 key_serial_t ringid)
79282 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
79283 [0].iov_len = plen
79284 };
79285
79286 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79287 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79288 }
79289
79290 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79291 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79292 if (ret == 0)
79293 goto no_payload_free;
79294
79295 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79296 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79297
79298 if (iov != iovstack)
79299 kfree(iov);
79300 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79301 index 30e242f..ec111ab 100644
79302 --- a/security/keys/keyring.c
79303 +++ b/security/keys/keyring.c
79304 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79305 ret = -EFAULT;
79306
79307 for (loop = 0; loop < klist->nkeys; loop++) {
79308 + key_serial_t serial;
79309 key = klist->keys[loop];
79310 + serial = key->serial;
79311
79312 tmp = sizeof(key_serial_t);
79313 if (tmp > buflen)
79314 tmp = buflen;
79315
79316 - if (copy_to_user(buffer,
79317 - &key->serial,
79318 - tmp) != 0)
79319 + if (copy_to_user(buffer, &serial, tmp))
79320 goto error;
79321
79322 buflen -= tmp;
79323 diff --git a/security/min_addr.c b/security/min_addr.c
79324 index f728728..6457a0c 100644
79325 --- a/security/min_addr.c
79326 +++ b/security/min_addr.c
79327 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
79328 */
79329 static void update_mmap_min_addr(void)
79330 {
79331 +#ifndef SPARC
79332 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
79333 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
79334 mmap_min_addr = dac_mmap_min_addr;
79335 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
79336 #else
79337 mmap_min_addr = dac_mmap_min_addr;
79338 #endif
79339 +#endif
79340 }
79341
79342 /*
79343 diff --git a/security/security.c b/security/security.c
79344 index d9e1533..91427f2 100644
79345 --- a/security/security.c
79346 +++ b/security/security.c
79347 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
79348 /* things that live in capability.c */
79349 extern void __init security_fixup_ops(struct security_operations *ops);
79350
79351 -static struct security_operations *security_ops;
79352 -static struct security_operations default_security_ops = {
79353 +static struct security_operations *security_ops __read_only;
79354 +static struct security_operations default_security_ops __read_only = {
79355 .name = "default",
79356 };
79357
79358 @@ -67,7 +67,9 @@ int __init security_init(void)
79359
79360 void reset_security_ops(void)
79361 {
79362 + pax_open_kernel();
79363 security_ops = &default_security_ops;
79364 + pax_close_kernel();
79365 }
79366
79367 /* Save user chosen LSM */
79368 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
79369 index 266a229..61bd553 100644
79370 --- a/security/selinux/hooks.c
79371 +++ b/security/selinux/hooks.c
79372 @@ -93,7 +93,6 @@
79373 #define NUM_SEL_MNT_OPTS 5
79374
79375 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
79376 -extern struct security_operations *security_ops;
79377
79378 /* SECMARK reference count */
79379 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
79380 @@ -5455,7 +5454,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
79381
79382 #endif
79383
79384 -static struct security_operations selinux_ops = {
79385 +static struct security_operations selinux_ops __read_only = {
79386 .name = "selinux",
79387
79388 .ptrace_access_check = selinux_ptrace_access_check,
79389 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
79390 index b43813c..74be837 100644
79391 --- a/security/selinux/include/xfrm.h
79392 +++ b/security/selinux/include/xfrm.h
79393 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
79394
79395 static inline void selinux_xfrm_notify_policyload(void)
79396 {
79397 - atomic_inc(&flow_cache_genid);
79398 + atomic_inc_unchecked(&flow_cache_genid);
79399 }
79400 #else
79401 static inline int selinux_xfrm_enabled(void)
79402 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
79403 index f6917bc..8e8713e 100644
79404 --- a/security/selinux/ss/services.c
79405 +++ b/security/selinux/ss/services.c
79406 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, size_t len)
79407 int rc = 0;
79408 struct policy_file file = { data, len }, *fp = &file;
79409
79410 + pax_track_stack();
79411 +
79412 if (!ss_initialized) {
79413 avtab_cache_init();
79414 rc = policydb_read(&policydb, fp);
79415 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
79416 index b9c5e14..20ab779 100644
79417 --- a/security/smack/smack_lsm.c
79418 +++ b/security/smack/smack_lsm.c
79419 @@ -3393,7 +3393,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
79420 return 0;
79421 }
79422
79423 -struct security_operations smack_ops = {
79424 +struct security_operations smack_ops __read_only = {
79425 .name = "smack",
79426
79427 .ptrace_access_check = smack_ptrace_access_check,
79428 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
79429 index f776400..f95b158c 100644
79430 --- a/security/tomoyo/tomoyo.c
79431 +++ b/security/tomoyo/tomoyo.c
79432 @@ -446,7 +446,7 @@ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
79433 * tomoyo_security_ops is a "struct security_operations" which is used for
79434 * registering TOMOYO.
79435 */
79436 -static struct security_operations tomoyo_security_ops = {
79437 +static struct security_operations tomoyo_security_ops __read_only = {
79438 .name = "tomoyo",
79439 .cred_alloc_blank = tomoyo_cred_alloc_blank,
79440 .cred_prepare = tomoyo_cred_prepare,
79441 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
79442 index 3687a6c..652565e 100644
79443 --- a/sound/aoa/codecs/onyx.c
79444 +++ b/sound/aoa/codecs/onyx.c
79445 @@ -54,7 +54,7 @@ struct onyx {
79446 spdif_locked:1,
79447 analog_locked:1,
79448 original_mute:2;
79449 - int open_count;
79450 + local_t open_count;
79451 struct codec_info *codec_info;
79452
79453 /* mutex serializes concurrent access to the device
79454 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
79455 struct onyx *onyx = cii->codec_data;
79456
79457 mutex_lock(&onyx->mutex);
79458 - onyx->open_count++;
79459 + local_inc(&onyx->open_count);
79460 mutex_unlock(&onyx->mutex);
79461
79462 return 0;
79463 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
79464 struct onyx *onyx = cii->codec_data;
79465
79466 mutex_lock(&onyx->mutex);
79467 - onyx->open_count--;
79468 - if (!onyx->open_count)
79469 + if (local_dec_and_test(&onyx->open_count))
79470 onyx->spdif_locked = onyx->analog_locked = 0;
79471 mutex_unlock(&onyx->mutex);
79472
79473 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
79474 index ffd2025..df062c9 100644
79475 --- a/sound/aoa/codecs/onyx.h
79476 +++ b/sound/aoa/codecs/onyx.h
79477 @@ -11,6 +11,7 @@
79478 #include <linux/i2c.h>
79479 #include <asm/pmac_low_i2c.h>
79480 #include <asm/prom.h>
79481 +#include <asm/local.h>
79482
79483 /* PCM3052 register definitions */
79484
79485 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
79486 index 23c34a0..a2673a5 100644
79487 --- a/sound/core/oss/pcm_oss.c
79488 +++ b/sound/core/oss/pcm_oss.c
79489 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
79490 if (in_kernel) {
79491 mm_segment_t fs;
79492 fs = snd_enter_user();
79493 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79494 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79495 snd_leave_user(fs);
79496 } else {
79497 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79498 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79499 }
79500 if (ret != -EPIPE && ret != -ESTRPIPE)
79501 break;
79502 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
79503 if (in_kernel) {
79504 mm_segment_t fs;
79505 fs = snd_enter_user();
79506 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79507 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79508 snd_leave_user(fs);
79509 } else {
79510 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79511 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79512 }
79513 if (ret == -EPIPE) {
79514 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
79515 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
79516 struct snd_pcm_plugin_channel *channels;
79517 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
79518 if (!in_kernel) {
79519 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
79520 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
79521 return -EFAULT;
79522 buf = runtime->oss.buffer;
79523 }
79524 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
79525 }
79526 } else {
79527 tmp = snd_pcm_oss_write2(substream,
79528 - (const char __force *)buf,
79529 + (const char __force_kernel *)buf,
79530 runtime->oss.period_bytes, 0);
79531 if (tmp <= 0)
79532 goto err;
79533 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
79534 struct snd_pcm_runtime *runtime = substream->runtime;
79535 snd_pcm_sframes_t frames, frames1;
79536 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
79537 - char __user *final_dst = (char __force __user *)buf;
79538 + char __user *final_dst = (char __force_user *)buf;
79539 if (runtime->oss.plugin_first) {
79540 struct snd_pcm_plugin_channel *channels;
79541 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
79542 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
79543 xfer += tmp;
79544 runtime->oss.buffer_used -= tmp;
79545 } else {
79546 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
79547 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
79548 runtime->oss.period_bytes, 0);
79549 if (tmp <= 0)
79550 goto err;
79551 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
79552 size1);
79553 size1 /= runtime->channels; /* frames */
79554 fs = snd_enter_user();
79555 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
79556 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
79557 snd_leave_user(fs);
79558 }
79559 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
79560 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
79561 index 91cdf94..4085161 100644
79562 --- a/sound/core/pcm_compat.c
79563 +++ b/sound/core/pcm_compat.c
79564 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
79565 int err;
79566
79567 fs = snd_enter_user();
79568 - err = snd_pcm_delay(substream, &delay);
79569 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
79570 snd_leave_user(fs);
79571 if (err < 0)
79572 return err;
79573 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
79574 index 1c6be91..c761a59 100644
79575 --- a/sound/core/pcm_native.c
79576 +++ b/sound/core/pcm_native.c
79577 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
79578 switch (substream->stream) {
79579 case SNDRV_PCM_STREAM_PLAYBACK:
79580 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
79581 - (void __user *)arg);
79582 + (void __force_user *)arg);
79583 break;
79584 case SNDRV_PCM_STREAM_CAPTURE:
79585 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
79586 - (void __user *)arg);
79587 + (void __force_user *)arg);
79588 break;
79589 default:
79590 result = -EINVAL;
79591 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
79592 index 1f99767..14636533 100644
79593 --- a/sound/core/seq/seq_device.c
79594 +++ b/sound/core/seq/seq_device.c
79595 @@ -63,7 +63,7 @@ struct ops_list {
79596 int argsize; /* argument size */
79597
79598 /* operators */
79599 - struct snd_seq_dev_ops ops;
79600 + struct snd_seq_dev_ops *ops;
79601
79602 /* registred devices */
79603 struct list_head dev_list; /* list of devices */
79604 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
79605
79606 mutex_lock(&ops->reg_mutex);
79607 /* copy driver operators */
79608 - ops->ops = *entry;
79609 + ops->ops = entry;
79610 ops->driver |= DRIVER_LOADED;
79611 ops->argsize = argsize;
79612
79613 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
79614 dev->name, ops->id, ops->argsize, dev->argsize);
79615 return -EINVAL;
79616 }
79617 - if (ops->ops.init_device(dev) >= 0) {
79618 + if (ops->ops->init_device(dev) >= 0) {
79619 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
79620 ops->num_init_devices++;
79621 } else {
79622 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
79623 dev->name, ops->id, ops->argsize, dev->argsize);
79624 return -EINVAL;
79625 }
79626 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
79627 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
79628 dev->status = SNDRV_SEQ_DEVICE_FREE;
79629 dev->driver_data = NULL;
79630 ops->num_init_devices--;
79631 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
79632 index 8539ab0..be8a121 100644
79633 --- a/sound/drivers/mts64.c
79634 +++ b/sound/drivers/mts64.c
79635 @@ -28,6 +28,7 @@
79636 #include <sound/initval.h>
79637 #include <sound/rawmidi.h>
79638 #include <sound/control.h>
79639 +#include <asm/local.h>
79640
79641 #define CARD_NAME "Miditerminal 4140"
79642 #define DRIVER_NAME "MTS64"
79643 @@ -66,7 +67,7 @@ struct mts64 {
79644 struct pardevice *pardev;
79645 int pardev_claimed;
79646
79647 - int open_count;
79648 + local_t open_count;
79649 int current_midi_output_port;
79650 int current_midi_input_port;
79651 u8 mode[MTS64_NUM_INPUT_PORTS];
79652 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79653 {
79654 struct mts64 *mts = substream->rmidi->private_data;
79655
79656 - if (mts->open_count == 0) {
79657 + if (local_read(&mts->open_count) == 0) {
79658 /* We don't need a spinlock here, because this is just called
79659 if the device has not been opened before.
79660 So there aren't any IRQs from the device */
79661 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79662
79663 msleep(50);
79664 }
79665 - ++(mts->open_count);
79666 + local_inc(&mts->open_count);
79667
79668 return 0;
79669 }
79670 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79671 struct mts64 *mts = substream->rmidi->private_data;
79672 unsigned long flags;
79673
79674 - --(mts->open_count);
79675 - if (mts->open_count == 0) {
79676 + if (local_dec_return(&mts->open_count) == 0) {
79677 /* We need the spinlock_irqsave here because we can still
79678 have IRQs at this point */
79679 spin_lock_irqsave(&mts->lock, flags);
79680 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79681
79682 msleep(500);
79683
79684 - } else if (mts->open_count < 0)
79685 - mts->open_count = 0;
79686 + } else if (local_read(&mts->open_count) < 0)
79687 + local_set(&mts->open_count, 0);
79688
79689 return 0;
79690 }
79691 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
79692 index f07e38d..7aae69a 100644
79693 --- a/sound/drivers/opl4/opl4_lib.c
79694 +++ b/sound/drivers/opl4/opl4_lib.c
79695 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
79696 MODULE_DESCRIPTION("OPL4 driver");
79697 MODULE_LICENSE("GPL");
79698
79699 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
79700 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
79701 {
79702 int timeout = 10;
79703 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
79704 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
79705 index f2b0ba2..429efc5 100644
79706 --- a/sound/drivers/portman2x4.c
79707 +++ b/sound/drivers/portman2x4.c
79708 @@ -47,6 +47,7 @@
79709 #include <sound/initval.h>
79710 #include <sound/rawmidi.h>
79711 #include <sound/control.h>
79712 +#include <asm/local.h>
79713
79714 #define CARD_NAME "Portman 2x4"
79715 #define DRIVER_NAME "portman"
79716 @@ -84,7 +85,7 @@ struct portman {
79717 struct pardevice *pardev;
79718 int pardev_claimed;
79719
79720 - int open_count;
79721 + local_t open_count;
79722 int mode[PORTMAN_NUM_INPUT_PORTS];
79723 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
79724 };
79725 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
79726 index 87657dd..a8268d4 100644
79727 --- a/sound/firewire/amdtp.c
79728 +++ b/sound/firewire/amdtp.c
79729 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
79730 ptr = s->pcm_buffer_pointer + data_blocks;
79731 if (ptr >= pcm->runtime->buffer_size)
79732 ptr -= pcm->runtime->buffer_size;
79733 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
79734 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
79735
79736 s->pcm_period_pointer += data_blocks;
79737 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
79738 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
79739 */
79740 void amdtp_out_stream_update(struct amdtp_out_stream *s)
79741 {
79742 - ACCESS_ONCE(s->source_node_id_field) =
79743 + ACCESS_ONCE_RW(s->source_node_id_field) =
79744 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
79745 }
79746 EXPORT_SYMBOL(amdtp_out_stream_update);
79747 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
79748 index 537a9cb..8e8c8e9 100644
79749 --- a/sound/firewire/amdtp.h
79750 +++ b/sound/firewire/amdtp.h
79751 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
79752 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
79753 struct snd_pcm_substream *pcm)
79754 {
79755 - ACCESS_ONCE(s->pcm) = pcm;
79756 + ACCESS_ONCE_RW(s->pcm) = pcm;
79757 }
79758
79759 /**
79760 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
79761 index 4400308..261e9f3 100644
79762 --- a/sound/firewire/isight.c
79763 +++ b/sound/firewire/isight.c
79764 @@ -97,7 +97,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
79765 ptr += count;
79766 if (ptr >= runtime->buffer_size)
79767 ptr -= runtime->buffer_size;
79768 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
79769 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
79770
79771 isight->period_counter += count;
79772 if (isight->period_counter >= runtime->period_size) {
79773 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
79774 if (err < 0)
79775 return err;
79776
79777 - ACCESS_ONCE(isight->pcm_active) = true;
79778 + ACCESS_ONCE_RW(isight->pcm_active) = true;
79779
79780 return 0;
79781 }
79782 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
79783 {
79784 struct isight *isight = substream->private_data;
79785
79786 - ACCESS_ONCE(isight->pcm_active) = false;
79787 + ACCESS_ONCE_RW(isight->pcm_active) = false;
79788
79789 mutex_lock(&isight->mutex);
79790 isight_stop_streaming(isight);
79791 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
79792
79793 switch (cmd) {
79794 case SNDRV_PCM_TRIGGER_START:
79795 - ACCESS_ONCE(isight->pcm_running) = true;
79796 + ACCESS_ONCE_RW(isight->pcm_running) = true;
79797 break;
79798 case SNDRV_PCM_TRIGGER_STOP:
79799 - ACCESS_ONCE(isight->pcm_running) = false;
79800 + ACCESS_ONCE_RW(isight->pcm_running) = false;
79801 break;
79802 default:
79803 return -EINVAL;
79804 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
79805 index fe79a16..4d9714e 100644
79806 --- a/sound/isa/cmi8330.c
79807 +++ b/sound/isa/cmi8330.c
79808 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
79809
79810 struct snd_pcm *pcm;
79811 struct snd_cmi8330_stream {
79812 - struct snd_pcm_ops ops;
79813 + snd_pcm_ops_no_const ops;
79814 snd_pcm_open_callback_t open;
79815 void *private_data; /* sb or wss */
79816 } streams[2];
79817 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
79818 index 733b014..56ce96f 100644
79819 --- a/sound/oss/sb_audio.c
79820 +++ b/sound/oss/sb_audio.c
79821 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
79822 buf16 = (signed short *)(localbuf + localoffs);
79823 while (c)
79824 {
79825 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79826 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79827 if (copy_from_user(lbuf8,
79828 userbuf+useroffs + p,
79829 locallen))
79830 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
79831 index 09d4648..cf234c7 100644
79832 --- a/sound/oss/swarm_cs4297a.c
79833 +++ b/sound/oss/swarm_cs4297a.c
79834 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
79835 {
79836 struct cs4297a_state *s;
79837 u32 pwr, id;
79838 - mm_segment_t fs;
79839 int rval;
79840 #ifndef CONFIG_BCM_CS4297A_CSWARM
79841 u64 cfg;
79842 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
79843 if (!rval) {
79844 char *sb1250_duart_present;
79845
79846 +#if 0
79847 + mm_segment_t fs;
79848 fs = get_fs();
79849 set_fs(KERNEL_DS);
79850 -#if 0
79851 val = SOUND_MASK_LINE;
79852 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
79853 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
79854 val = initvol[i].vol;
79855 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
79856 }
79857 + set_fs(fs);
79858 // cs4297a_write_ac97(s, 0x18, 0x0808);
79859 #else
79860 // cs4297a_write_ac97(s, 0x5e, 0x180);
79861 cs4297a_write_ac97(s, 0x02, 0x0808);
79862 cs4297a_write_ac97(s, 0x18, 0x0808);
79863 #endif
79864 - set_fs(fs);
79865
79866 list_add(&s->list, &cs4297a_devs);
79867
79868 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
79869 index 755f2b0..5c12361 100644
79870 --- a/sound/pci/hda/hda_codec.h
79871 +++ b/sound/pci/hda/hda_codec.h
79872 @@ -611,7 +611,7 @@ struct hda_bus_ops {
79873 /* notify power-up/down from codec to controller */
79874 void (*pm_notify)(struct hda_bus *bus);
79875 #endif
79876 -};
79877 +} __no_const;
79878
79879 /* template to pass to the bus constructor */
79880 struct hda_bus_template {
79881 @@ -713,6 +713,7 @@ struct hda_codec_ops {
79882 #endif
79883 void (*reboot_notify)(struct hda_codec *codec);
79884 };
79885 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
79886
79887 /* record for amp information cache */
79888 struct hda_cache_head {
79889 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
79890 struct snd_pcm_substream *substream);
79891 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
79892 struct snd_pcm_substream *substream);
79893 -};
79894 +} __no_const;
79895
79896 /* PCM information for each substream */
79897 struct hda_pcm_stream {
79898 @@ -801,7 +802,7 @@ struct hda_codec {
79899 const char *modelname; /* model name for preset */
79900
79901 /* set by patch */
79902 - struct hda_codec_ops patch_ops;
79903 + hda_codec_ops_no_const patch_ops;
79904
79905 /* PCM to create, set by patch_ops.build_pcms callback */
79906 unsigned int num_pcms;
79907 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
79908 index 0da778a..bc38b84 100644
79909 --- a/sound/pci/ice1712/ice1712.h
79910 +++ b/sound/pci/ice1712/ice1712.h
79911 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
79912 unsigned int mask_flags; /* total mask bits */
79913 struct snd_akm4xxx_ops {
79914 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
79915 - } ops;
79916 + } __no_const ops;
79917 };
79918
79919 struct snd_ice1712_spdif {
79920 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
79921 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79922 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79923 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79924 - } ops;
79925 + } __no_const ops;
79926 };
79927
79928
79929 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
79930 index f3260e6..4a285d8 100644
79931 --- a/sound/pci/ymfpci/ymfpci_main.c
79932 +++ b/sound/pci/ymfpci/ymfpci_main.c
79933 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
79934 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
79935 break;
79936 }
79937 - if (atomic_read(&chip->interrupt_sleep_count)) {
79938 - atomic_set(&chip->interrupt_sleep_count, 0);
79939 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79940 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79941 wake_up(&chip->interrupt_sleep);
79942 }
79943 __end:
79944 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
79945 continue;
79946 init_waitqueue_entry(&wait, current);
79947 add_wait_queue(&chip->interrupt_sleep, &wait);
79948 - atomic_inc(&chip->interrupt_sleep_count);
79949 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
79950 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
79951 remove_wait_queue(&chip->interrupt_sleep, &wait);
79952 }
79953 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
79954 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
79955 spin_unlock(&chip->reg_lock);
79956
79957 - if (atomic_read(&chip->interrupt_sleep_count)) {
79958 - atomic_set(&chip->interrupt_sleep_count, 0);
79959 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79960 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79961 wake_up(&chip->interrupt_sleep);
79962 }
79963 }
79964 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
79965 spin_lock_init(&chip->reg_lock);
79966 spin_lock_init(&chip->voice_lock);
79967 init_waitqueue_head(&chip->interrupt_sleep);
79968 - atomic_set(&chip->interrupt_sleep_count, 0);
79969 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79970 chip->card = card;
79971 chip->pci = pci;
79972 chip->irq = -1;
79973 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
79974 index 2879c88..224159e 100644
79975 --- a/sound/soc/soc-pcm.c
79976 +++ b/sound/soc/soc-pcm.c
79977 @@ -568,7 +568,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
79978 }
79979
79980 /* ASoC PCM operations */
79981 -static struct snd_pcm_ops soc_pcm_ops = {
79982 +static snd_pcm_ops_no_const soc_pcm_ops = {
79983 .open = soc_pcm_open,
79984 .close = soc_pcm_close,
79985 .hw_params = soc_pcm_hw_params,
79986 diff --git a/sound/usb/card.h b/sound/usb/card.h
79987 index ae4251d..0961361 100644
79988 --- a/sound/usb/card.h
79989 +++ b/sound/usb/card.h
79990 @@ -44,6 +44,7 @@ struct snd_urb_ops {
79991 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79992 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79993 };
79994 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79995
79996 struct snd_usb_substream {
79997 struct snd_usb_stream *stream;
79998 @@ -93,7 +94,7 @@ struct snd_usb_substream {
79999 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
80000 spinlock_t lock;
80001
80002 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
80003 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
80004 };
80005
80006 struct snd_usb_stream {
80007 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
80008 new file mode 100644
80009 index 0000000..b044b80
80010 --- /dev/null
80011 +++ b/tools/gcc/Makefile
80012 @@ -0,0 +1,21 @@
80013 +#CC := gcc
80014 +#PLUGIN_SOURCE_FILES := pax_plugin.c
80015 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
80016 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
80017 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
80018 +
80019 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
80020 +
80021 +hostlibs-y := constify_plugin.so
80022 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
80023 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
80024 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
80025 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
80026 +
80027 +always := $(hostlibs-y)
80028 +
80029 +constify_plugin-objs := constify_plugin.o
80030 +stackleak_plugin-objs := stackleak_plugin.o
80031 +kallocstat_plugin-objs := kallocstat_plugin.o
80032 +kernexec_plugin-objs := kernexec_plugin.o
80033 +checker_plugin-objs := checker_plugin.o
80034 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
80035 new file mode 100644
80036 index 0000000..d41b5af
80037 --- /dev/null
80038 +++ b/tools/gcc/checker_plugin.c
80039 @@ -0,0 +1,171 @@
80040 +/*
80041 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80042 + * Licensed under the GPL v2
80043 + *
80044 + * Note: the choice of the license means that the compilation process is
80045 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80046 + * but for the kernel it doesn't matter since it doesn't link against
80047 + * any of the gcc libraries
80048 + *
80049 + * gcc plugin to implement various sparse (source code checker) features
80050 + *
80051 + * TODO:
80052 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
80053 + *
80054 + * BUGS:
80055 + * - none known
80056 + */
80057 +#include "gcc-plugin.h"
80058 +#include "config.h"
80059 +#include "system.h"
80060 +#include "coretypes.h"
80061 +#include "tree.h"
80062 +#include "tree-pass.h"
80063 +#include "flags.h"
80064 +#include "intl.h"
80065 +#include "toplev.h"
80066 +#include "plugin.h"
80067 +//#include "expr.h" where are you...
80068 +#include "diagnostic.h"
80069 +#include "plugin-version.h"
80070 +#include "tm.h"
80071 +#include "function.h"
80072 +#include "basic-block.h"
80073 +#include "gimple.h"
80074 +#include "rtl.h"
80075 +#include "emit-rtl.h"
80076 +#include "tree-flow.h"
80077 +#include "target.h"
80078 +
80079 +extern void c_register_addr_space (const char *str, addr_space_t as);
80080 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
80081 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
80082 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
80083 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
80084 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
80085 +
80086 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80087 +extern rtx emit_move_insn(rtx x, rtx y);
80088 +
80089 +int plugin_is_GPL_compatible;
80090 +
80091 +static struct plugin_info checker_plugin_info = {
80092 + .version = "201111150100",
80093 +};
80094 +
80095 +#define ADDR_SPACE_KERNEL 0
80096 +#define ADDR_SPACE_FORCE_KERNEL 1
80097 +#define ADDR_SPACE_USER 2
80098 +#define ADDR_SPACE_FORCE_USER 3
80099 +#define ADDR_SPACE_IOMEM 0
80100 +#define ADDR_SPACE_FORCE_IOMEM 0
80101 +#define ADDR_SPACE_PERCPU 0
80102 +#define ADDR_SPACE_FORCE_PERCPU 0
80103 +#define ADDR_SPACE_RCU 0
80104 +#define ADDR_SPACE_FORCE_RCU 0
80105 +
80106 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
80107 +{
80108 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
80109 +}
80110 +
80111 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
80112 +{
80113 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
80114 +}
80115 +
80116 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
80117 +{
80118 + return default_addr_space_valid_pointer_mode(mode, as);
80119 +}
80120 +
80121 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
80122 +{
80123 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
80124 +}
80125 +
80126 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
80127 +{
80128 + return default_addr_space_legitimize_address(x, oldx, mode, as);
80129 +}
80130 +
80131 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
80132 +{
80133 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
80134 + return true;
80135 +
80136 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
80137 + return true;
80138 +
80139 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
80140 + return true;
80141 +
80142 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
80143 + return true;
80144 +
80145 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
80146 + return true;
80147 +
80148 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
80149 + return true;
80150 +
80151 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
80152 + return true;
80153 +
80154 + return subset == superset;
80155 +}
80156 +
80157 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
80158 +{
80159 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80160 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80161 +
80162 + return op;
80163 +}
80164 +
80165 +static void register_checker_address_spaces(void *event_data, void *data)
80166 +{
80167 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80168 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80169 + c_register_addr_space("__user", ADDR_SPACE_USER);
80170 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80171 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80172 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80173 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80174 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80175 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80176 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80177 +
80178 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80179 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80180 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80181 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80182 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80183 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80184 + targetm.addr_space.convert = checker_addr_space_convert;
80185 +}
80186 +
80187 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80188 +{
80189 + const char * const plugin_name = plugin_info->base_name;
80190 + const int argc = plugin_info->argc;
80191 + const struct plugin_argument * const argv = plugin_info->argv;
80192 + int i;
80193 +
80194 + if (!plugin_default_version_check(version, &gcc_version)) {
80195 + error(G_("incompatible gcc/plugin versions"));
80196 + return 1;
80197 + }
80198 +
80199 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80200 +
80201 + for (i = 0; i < argc; ++i)
80202 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80203 +
80204 + if (TARGET_64BIT == 0)
80205 + return 0;
80206 +
80207 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80208 +
80209 + return 0;
80210 +}
80211 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
80212 new file mode 100644
80213 index 0000000..704a564
80214 --- /dev/null
80215 +++ b/tools/gcc/constify_plugin.c
80216 @@ -0,0 +1,303 @@
80217 +/*
80218 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
80219 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
80220 + * Licensed under the GPL v2, or (at your option) v3
80221 + *
80222 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
80223 + *
80224 + * Homepage:
80225 + * http://www.grsecurity.net/~ephox/const_plugin/
80226 + *
80227 + * Usage:
80228 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
80229 + * $ gcc -fplugin=constify_plugin.so test.c -O2
80230 + */
80231 +
80232 +#include "gcc-plugin.h"
80233 +#include "config.h"
80234 +#include "system.h"
80235 +#include "coretypes.h"
80236 +#include "tree.h"
80237 +#include "tree-pass.h"
80238 +#include "flags.h"
80239 +#include "intl.h"
80240 +#include "toplev.h"
80241 +#include "plugin.h"
80242 +#include "diagnostic.h"
80243 +#include "plugin-version.h"
80244 +#include "tm.h"
80245 +#include "function.h"
80246 +#include "basic-block.h"
80247 +#include "gimple.h"
80248 +#include "rtl.h"
80249 +#include "emit-rtl.h"
80250 +#include "tree-flow.h"
80251 +
80252 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
80253 +
80254 +int plugin_is_GPL_compatible;
80255 +
80256 +static struct plugin_info const_plugin_info = {
80257 + .version = "201111150100",
80258 + .help = "no-constify\tturn off constification\n",
80259 +};
80260 +
80261 +static void constify_type(tree type);
80262 +static bool walk_struct(tree node);
80263 +
80264 +static tree deconstify_type(tree old_type)
80265 +{
80266 + tree new_type, field;
80267 +
80268 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
80269 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
80270 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
80271 + DECL_FIELD_CONTEXT(field) = new_type;
80272 + TYPE_READONLY(new_type) = 0;
80273 + C_TYPE_FIELDS_READONLY(new_type) = 0;
80274 + return new_type;
80275 +}
80276 +
80277 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80278 +{
80279 + tree type;
80280 +
80281 + *no_add_attrs = true;
80282 + if (TREE_CODE(*node) == FUNCTION_DECL) {
80283 + error("%qE attribute does not apply to functions", name);
80284 + return NULL_TREE;
80285 + }
80286 +
80287 + if (TREE_CODE(*node) == VAR_DECL) {
80288 + error("%qE attribute does not apply to variables", name);
80289 + return NULL_TREE;
80290 + }
80291 +
80292 + if (TYPE_P(*node)) {
80293 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
80294 + *no_add_attrs = false;
80295 + else
80296 + error("%qE attribute applies to struct and union types only", name);
80297 + return NULL_TREE;
80298 + }
80299 +
80300 + type = TREE_TYPE(*node);
80301 +
80302 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
80303 + error("%qE attribute applies to struct and union types only", name);
80304 + return NULL_TREE;
80305 + }
80306 +
80307 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
80308 + error("%qE attribute is already applied to the type", name);
80309 + return NULL_TREE;
80310 + }
80311 +
80312 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
80313 + error("%qE attribute used on type that is not constified", name);
80314 + return NULL_TREE;
80315 + }
80316 +
80317 + if (TREE_CODE(*node) == TYPE_DECL) {
80318 + TREE_TYPE(*node) = deconstify_type(type);
80319 + TREE_READONLY(*node) = 0;
80320 + return NULL_TREE;
80321 + }
80322 +
80323 + return NULL_TREE;
80324 +}
80325 +
80326 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80327 +{
80328 + *no_add_attrs = true;
80329 + if (!TYPE_P(*node)) {
80330 + error("%qE attribute applies to types only", name);
80331 + return NULL_TREE;
80332 + }
80333 +
80334 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
80335 + error("%qE attribute applies to struct and union types only", name);
80336 + return NULL_TREE;
80337 + }
80338 +
80339 + *no_add_attrs = false;
80340 + constify_type(*node);
80341 + return NULL_TREE;
80342 +}
80343 +
80344 +static struct attribute_spec no_const_attr = {
80345 + .name = "no_const",
80346 + .min_length = 0,
80347 + .max_length = 0,
80348 + .decl_required = false,
80349 + .type_required = false,
80350 + .function_type_required = false,
80351 + .handler = handle_no_const_attribute,
80352 +#if BUILDING_GCC_VERSION >= 4007
80353 + .affects_type_identity = true
80354 +#endif
80355 +};
80356 +
80357 +static struct attribute_spec do_const_attr = {
80358 + .name = "do_const",
80359 + .min_length = 0,
80360 + .max_length = 0,
80361 + .decl_required = false,
80362 + .type_required = false,
80363 + .function_type_required = false,
80364 + .handler = handle_do_const_attribute,
80365 +#if BUILDING_GCC_VERSION >= 4007
80366 + .affects_type_identity = true
80367 +#endif
80368 +};
80369 +
80370 +static void register_attributes(void *event_data, void *data)
80371 +{
80372 + register_attribute(&no_const_attr);
80373 + register_attribute(&do_const_attr);
80374 +}
80375 +
80376 +static void constify_type(tree type)
80377 +{
80378 + TYPE_READONLY(type) = 1;
80379 + C_TYPE_FIELDS_READONLY(type) = 1;
80380 +}
80381 +
80382 +static bool is_fptr(tree field)
80383 +{
80384 + tree ptr = TREE_TYPE(field);
80385 +
80386 + if (TREE_CODE(ptr) != POINTER_TYPE)
80387 + return false;
80388 +
80389 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
80390 +}
80391 +
80392 +static bool walk_struct(tree node)
80393 +{
80394 + tree field;
80395 +
80396 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
80397 + return false;
80398 +
80399 + if (TYPE_FIELDS(node) == NULL_TREE)
80400 + return false;
80401 +
80402 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
80403 + tree type = TREE_TYPE(field);
80404 + enum tree_code code = TREE_CODE(type);
80405 + if (code == RECORD_TYPE || code == UNION_TYPE) {
80406 + if (!(walk_struct(type)))
80407 + return false;
80408 + } else if (!is_fptr(field) && !TREE_READONLY(field))
80409 + return false;
80410 + }
80411 + return true;
80412 +}
80413 +
80414 +static void finish_type(void *event_data, void *data)
80415 +{
80416 + tree type = (tree)event_data;
80417 +
80418 + if (type == NULL_TREE)
80419 + return;
80420 +
80421 + if (TYPE_READONLY(type))
80422 + return;
80423 +
80424 + if (walk_struct(type))
80425 + constify_type(type);
80426 +}
80427 +
80428 +static unsigned int check_local_variables(void);
80429 +
80430 +struct gimple_opt_pass pass_local_variable = {
80431 + {
80432 + .type = GIMPLE_PASS,
80433 + .name = "check_local_variables",
80434 + .gate = NULL,
80435 + .execute = check_local_variables,
80436 + .sub = NULL,
80437 + .next = NULL,
80438 + .static_pass_number = 0,
80439 + .tv_id = TV_NONE,
80440 + .properties_required = 0,
80441 + .properties_provided = 0,
80442 + .properties_destroyed = 0,
80443 + .todo_flags_start = 0,
80444 + .todo_flags_finish = 0
80445 + }
80446 +};
80447 +
80448 +static unsigned int check_local_variables(void)
80449 +{
80450 + tree var;
80451 + referenced_var_iterator rvi;
80452 +
80453 +#if BUILDING_GCC_VERSION == 4005
80454 + FOR_EACH_REFERENCED_VAR(var, rvi) {
80455 +#else
80456 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
80457 +#endif
80458 + tree type = TREE_TYPE(var);
80459 +
80460 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
80461 + continue;
80462 +
80463 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80464 + continue;
80465 +
80466 + if (!TYPE_READONLY(type))
80467 + continue;
80468 +
80469 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
80470 +// continue;
80471 +
80472 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
80473 +// continue;
80474 +
80475 + if (walk_struct(type)) {
80476 + error("constified variable %qE cannot be local", var);
80477 + return 1;
80478 + }
80479 + }
80480 + return 0;
80481 +}
80482 +
80483 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80484 +{
80485 + const char * const plugin_name = plugin_info->base_name;
80486 + const int argc = plugin_info->argc;
80487 + const struct plugin_argument * const argv = plugin_info->argv;
80488 + int i;
80489 + bool constify = true;
80490 +
80491 + struct register_pass_info local_variable_pass_info = {
80492 + .pass = &pass_local_variable.pass,
80493 + .reference_pass_name = "*referenced_vars",
80494 + .ref_pass_instance_number = 0,
80495 + .pos_op = PASS_POS_INSERT_AFTER
80496 + };
80497 +
80498 + if (!plugin_default_version_check(version, &gcc_version)) {
80499 + error(G_("incompatible gcc/plugin versions"));
80500 + return 1;
80501 + }
80502 +
80503 + for (i = 0; i < argc; ++i) {
80504 + if (!(strcmp(argv[i].key, "no-constify"))) {
80505 + constify = false;
80506 + continue;
80507 + }
80508 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80509 + }
80510 +
80511 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
80512 + if (constify) {
80513 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
80514 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
80515 + }
80516 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
80517 +
80518 + return 0;
80519 +}
80520 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
80521 new file mode 100644
80522 index 0000000..a5eabce
80523 --- /dev/null
80524 +++ b/tools/gcc/kallocstat_plugin.c
80525 @@ -0,0 +1,167 @@
80526 +/*
80527 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80528 + * Licensed under the GPL v2
80529 + *
80530 + * Note: the choice of the license means that the compilation process is
80531 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80532 + * but for the kernel it doesn't matter since it doesn't link against
80533 + * any of the gcc libraries
80534 + *
80535 + * gcc plugin to find the distribution of k*alloc sizes
80536 + *
80537 + * TODO:
80538 + *
80539 + * BUGS:
80540 + * - none known
80541 + */
80542 +#include "gcc-plugin.h"
80543 +#include "config.h"
80544 +#include "system.h"
80545 +#include "coretypes.h"
80546 +#include "tree.h"
80547 +#include "tree-pass.h"
80548 +#include "flags.h"
80549 +#include "intl.h"
80550 +#include "toplev.h"
80551 +#include "plugin.h"
80552 +//#include "expr.h" where are you...
80553 +#include "diagnostic.h"
80554 +#include "plugin-version.h"
80555 +#include "tm.h"
80556 +#include "function.h"
80557 +#include "basic-block.h"
80558 +#include "gimple.h"
80559 +#include "rtl.h"
80560 +#include "emit-rtl.h"
80561 +
80562 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80563 +
80564 +int plugin_is_GPL_compatible;
80565 +
80566 +static const char * const kalloc_functions[] = {
80567 + "__kmalloc",
80568 + "kmalloc",
80569 + "kmalloc_large",
80570 + "kmalloc_node",
80571 + "kmalloc_order",
80572 + "kmalloc_order_trace",
80573 + "kmalloc_slab",
80574 + "kzalloc",
80575 + "kzalloc_node",
80576 +};
80577 +
80578 +static struct plugin_info kallocstat_plugin_info = {
80579 + .version = "201111150100",
80580 +};
80581 +
80582 +static unsigned int execute_kallocstat(void);
80583 +
80584 +static struct gimple_opt_pass kallocstat_pass = {
80585 + .pass = {
80586 + .type = GIMPLE_PASS,
80587 + .name = "kallocstat",
80588 + .gate = NULL,
80589 + .execute = execute_kallocstat,
80590 + .sub = NULL,
80591 + .next = NULL,
80592 + .static_pass_number = 0,
80593 + .tv_id = TV_NONE,
80594 + .properties_required = 0,
80595 + .properties_provided = 0,
80596 + .properties_destroyed = 0,
80597 + .todo_flags_start = 0,
80598 + .todo_flags_finish = 0
80599 + }
80600 +};
80601 +
80602 +static bool is_kalloc(const char *fnname)
80603 +{
80604 + size_t i;
80605 +
80606 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
80607 + if (!strcmp(fnname, kalloc_functions[i]))
80608 + return true;
80609 + return false;
80610 +}
80611 +
80612 +static unsigned int execute_kallocstat(void)
80613 +{
80614 + basic_block bb;
80615 +
80616 + // 1. loop through BBs and GIMPLE statements
80617 + FOR_EACH_BB(bb) {
80618 + gimple_stmt_iterator gsi;
80619 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80620 + // gimple match:
80621 + tree fndecl, size;
80622 + gimple call_stmt;
80623 + const char *fnname;
80624 +
80625 + // is it a call
80626 + call_stmt = gsi_stmt(gsi);
80627 + if (!is_gimple_call(call_stmt))
80628 + continue;
80629 + fndecl = gimple_call_fndecl(call_stmt);
80630 + if (fndecl == NULL_TREE)
80631 + continue;
80632 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
80633 + continue;
80634 +
80635 + // is it a call to k*alloc
80636 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
80637 + if (!is_kalloc(fnname))
80638 + continue;
80639 +
80640 + // is the size arg the result of a simple const assignment
80641 + size = gimple_call_arg(call_stmt, 0);
80642 + while (true) {
80643 + gimple def_stmt;
80644 + expanded_location xloc;
80645 + size_t size_val;
80646 +
80647 + if (TREE_CODE(size) != SSA_NAME)
80648 + break;
80649 + def_stmt = SSA_NAME_DEF_STMT(size);
80650 + if (!def_stmt || !is_gimple_assign(def_stmt))
80651 + break;
80652 + if (gimple_num_ops(def_stmt) != 2)
80653 + break;
80654 + size = gimple_assign_rhs1(def_stmt);
80655 + if (!TREE_CONSTANT(size))
80656 + continue;
80657 + xloc = expand_location(gimple_location(def_stmt));
80658 + if (!xloc.file)
80659 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
80660 + size_val = TREE_INT_CST_LOW(size);
80661 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
80662 + break;
80663 + }
80664 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80665 +//debug_tree(gimple_call_fn(call_stmt));
80666 +//print_node(stderr, "pax", fndecl, 4);
80667 + }
80668 + }
80669 +
80670 + return 0;
80671 +}
80672 +
80673 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80674 +{
80675 + const char * const plugin_name = plugin_info->base_name;
80676 + struct register_pass_info kallocstat_pass_info = {
80677 + .pass = &kallocstat_pass.pass,
80678 + .reference_pass_name = "ssa",
80679 + .ref_pass_instance_number = 0,
80680 + .pos_op = PASS_POS_INSERT_AFTER
80681 + };
80682 +
80683 + if (!plugin_default_version_check(version, &gcc_version)) {
80684 + error(G_("incompatible gcc/plugin versions"));
80685 + return 1;
80686 + }
80687 +
80688 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
80689 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
80690 +
80691 + return 0;
80692 +}
80693 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
80694 new file mode 100644
80695 index 0000000..51f747e
80696 --- /dev/null
80697 +++ b/tools/gcc/kernexec_plugin.c
80698 @@ -0,0 +1,348 @@
80699 +/*
80700 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80701 + * Licensed under the GPL v2
80702 + *
80703 + * Note: the choice of the license means that the compilation process is
80704 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80705 + * but for the kernel it doesn't matter since it doesn't link against
80706 + * any of the gcc libraries
80707 + *
80708 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
80709 + *
80710 + * TODO:
80711 + *
80712 + * BUGS:
80713 + * - none known
80714 + */
80715 +#include "gcc-plugin.h"
80716 +#include "config.h"
80717 +#include "system.h"
80718 +#include "coretypes.h"
80719 +#include "tree.h"
80720 +#include "tree-pass.h"
80721 +#include "flags.h"
80722 +#include "intl.h"
80723 +#include "toplev.h"
80724 +#include "plugin.h"
80725 +//#include "expr.h" where are you...
80726 +#include "diagnostic.h"
80727 +#include "plugin-version.h"
80728 +#include "tm.h"
80729 +#include "function.h"
80730 +#include "basic-block.h"
80731 +#include "gimple.h"
80732 +#include "rtl.h"
80733 +#include "emit-rtl.h"
80734 +#include "tree-flow.h"
80735 +
80736 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80737 +extern rtx emit_move_insn(rtx x, rtx y);
80738 +
80739 +int plugin_is_GPL_compatible;
80740 +
80741 +static struct plugin_info kernexec_plugin_info = {
80742 + .version = "201111291120",
80743 + .help = "method=[bts|or]\tinstrumentation method\n"
80744 +};
80745 +
80746 +static unsigned int execute_kernexec_fptr(void);
80747 +static unsigned int execute_kernexec_retaddr(void);
80748 +static bool kernexec_cmodel_check(void);
80749 +
80750 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
80751 +static void (*kernexec_instrument_retaddr)(rtx);
80752 +
80753 +static struct gimple_opt_pass kernexec_fptr_pass = {
80754 + .pass = {
80755 + .type = GIMPLE_PASS,
80756 + .name = "kernexec_fptr",
80757 + .gate = kernexec_cmodel_check,
80758 + .execute = execute_kernexec_fptr,
80759 + .sub = NULL,
80760 + .next = NULL,
80761 + .static_pass_number = 0,
80762 + .tv_id = TV_NONE,
80763 + .properties_required = 0,
80764 + .properties_provided = 0,
80765 + .properties_destroyed = 0,
80766 + .todo_flags_start = 0,
80767 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80768 + }
80769 +};
80770 +
80771 +static struct rtl_opt_pass kernexec_retaddr_pass = {
80772 + .pass = {
80773 + .type = RTL_PASS,
80774 + .name = "kernexec_retaddr",
80775 + .gate = kernexec_cmodel_check,
80776 + .execute = execute_kernexec_retaddr,
80777 + .sub = NULL,
80778 + .next = NULL,
80779 + .static_pass_number = 0,
80780 + .tv_id = TV_NONE,
80781 + .properties_required = 0,
80782 + .properties_provided = 0,
80783 + .properties_destroyed = 0,
80784 + .todo_flags_start = 0,
80785 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80786 + }
80787 +};
80788 +
80789 +static bool kernexec_cmodel_check(void)
80790 +{
80791 + tree section;
80792 +
80793 + if (ix86_cmodel != CM_KERNEL)
80794 + return false;
80795 +
80796 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80797 + if (!section || !TREE_VALUE(section))
80798 + return true;
80799 +
80800 + section = TREE_VALUE(TREE_VALUE(section));
80801 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80802 + return true;
80803 +
80804 + return false;
80805 +}
80806 +
80807 +/*
80808 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80809 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80810 + */
80811 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
80812 +{
80813 + gimple assign_intptr, assign_new_fptr, call_stmt;
80814 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80815 +
80816 + call_stmt = gsi_stmt(gsi);
80817 + old_fptr = gimple_call_fn(call_stmt);
80818 +
80819 + // create temporary unsigned long variable used for bitops and cast fptr to it
80820 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80821 + add_referenced_var(intptr);
80822 + mark_sym_for_renaming(intptr);
80823 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80824 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
80825 + update_stmt(assign_intptr);
80826 +
80827 + // apply logical or to temporary unsigned long and bitmask
80828 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80829 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80830 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80831 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
80832 + update_stmt(assign_intptr);
80833 +
80834 + // cast temporary unsigned long back to a temporary fptr variable
80835 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
80836 + add_referenced_var(new_fptr);
80837 + mark_sym_for_renaming(new_fptr);
80838 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80839 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
80840 + update_stmt(assign_new_fptr);
80841 +
80842 + // replace call stmt fn with the new fptr
80843 + gimple_call_set_fn(call_stmt, new_fptr);
80844 + update_stmt(call_stmt);
80845 +}
80846 +
80847 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
80848 +{
80849 + gimple asm_or_stmt, call_stmt;
80850 + tree old_fptr, new_fptr, input, output;
80851 + VEC(tree, gc) *inputs = NULL;
80852 + VEC(tree, gc) *outputs = NULL;
80853 +
80854 + call_stmt = gsi_stmt(gsi);
80855 + old_fptr = gimple_call_fn(call_stmt);
80856 +
80857 + // create temporary fptr variable
80858 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80859 + add_referenced_var(new_fptr);
80860 + mark_sym_for_renaming(new_fptr);
80861 +
80862 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
80863 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
80864 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
80865 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
80866 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
80867 + VEC_safe_push(tree, gc, inputs, input);
80868 + VEC_safe_push(tree, gc, outputs, output);
80869 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
80870 + gimple_asm_set_volatile(asm_or_stmt, true);
80871 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
80872 + update_stmt(asm_or_stmt);
80873 +
80874 + // replace call stmt fn with the new fptr
80875 + gimple_call_set_fn(call_stmt, new_fptr);
80876 + update_stmt(call_stmt);
80877 +}
80878 +
80879 +/*
80880 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
80881 + */
80882 +static unsigned int execute_kernexec_fptr(void)
80883 +{
80884 + basic_block bb;
80885 + gimple_stmt_iterator gsi;
80886 +
80887 + // 1. loop through BBs and GIMPLE statements
80888 + FOR_EACH_BB(bb) {
80889 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80890 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
80891 + tree fn;
80892 + gimple call_stmt;
80893 +
80894 + // is it a call ...
80895 + call_stmt = gsi_stmt(gsi);
80896 + if (!is_gimple_call(call_stmt))
80897 + continue;
80898 + fn = gimple_call_fn(call_stmt);
80899 + if (TREE_CODE(fn) == ADDR_EXPR)
80900 + continue;
80901 + if (TREE_CODE(fn) != SSA_NAME)
80902 + gcc_unreachable();
80903 +
80904 + // ... through a function pointer
80905 + fn = SSA_NAME_VAR(fn);
80906 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
80907 + continue;
80908 + fn = TREE_TYPE(fn);
80909 + if (TREE_CODE(fn) != POINTER_TYPE)
80910 + continue;
80911 + fn = TREE_TYPE(fn);
80912 + if (TREE_CODE(fn) != FUNCTION_TYPE)
80913 + continue;
80914 +
80915 + kernexec_instrument_fptr(gsi);
80916 +
80917 +//debug_tree(gimple_call_fn(call_stmt));
80918 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80919 + }
80920 + }
80921 +
80922 + return 0;
80923 +}
80924 +
80925 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
80926 +static void kernexec_instrument_retaddr_bts(rtx insn)
80927 +{
80928 + rtx btsq;
80929 + rtvec argvec, constraintvec, labelvec;
80930 + int line;
80931 +
80932 + // create asm volatile("btsq $63,(%%rsp)":::)
80933 + argvec = rtvec_alloc(0);
80934 + constraintvec = rtvec_alloc(0);
80935 + labelvec = rtvec_alloc(0);
80936 + line = expand_location(RTL_LOCATION(insn)).line;
80937 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80938 + MEM_VOLATILE_P(btsq) = 1;
80939 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
80940 + emit_insn_before(btsq, insn);
80941 +}
80942 +
80943 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
80944 +static void kernexec_instrument_retaddr_or(rtx insn)
80945 +{
80946 + rtx orq;
80947 + rtvec argvec, constraintvec, labelvec;
80948 + int line;
80949 +
80950 + // create asm volatile("orq %%r10,(%%rsp)":::)
80951 + argvec = rtvec_alloc(0);
80952 + constraintvec = rtvec_alloc(0);
80953 + labelvec = rtvec_alloc(0);
80954 + line = expand_location(RTL_LOCATION(insn)).line;
80955 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80956 + MEM_VOLATILE_P(orq) = 1;
80957 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
80958 + emit_insn_before(orq, insn);
80959 +}
80960 +
80961 +/*
80962 + * find all asm level function returns and forcibly set the highest bit of the return address
80963 + */
80964 +static unsigned int execute_kernexec_retaddr(void)
80965 +{
80966 + rtx insn;
80967 +
80968 + // 1. find function returns
80969 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80970 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
80971 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
80972 + rtx body;
80973 +
80974 + // is it a retn
80975 + if (!JUMP_P(insn))
80976 + continue;
80977 + body = PATTERN(insn);
80978 + if (GET_CODE(body) == PARALLEL)
80979 + body = XVECEXP(body, 0, 0);
80980 + if (GET_CODE(body) != RETURN)
80981 + continue;
80982 + kernexec_instrument_retaddr(insn);
80983 + }
80984 +
80985 +// print_simple_rtl(stderr, get_insns());
80986 +// print_rtl(stderr, get_insns());
80987 +
80988 + return 0;
80989 +}
80990 +
80991 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80992 +{
80993 + const char * const plugin_name = plugin_info->base_name;
80994 + const int argc = plugin_info->argc;
80995 + const struct plugin_argument * const argv = plugin_info->argv;
80996 + int i;
80997 + struct register_pass_info kernexec_fptr_pass_info = {
80998 + .pass = &kernexec_fptr_pass.pass,
80999 + .reference_pass_name = "ssa",
81000 + .ref_pass_instance_number = 0,
81001 + .pos_op = PASS_POS_INSERT_AFTER
81002 + };
81003 + struct register_pass_info kernexec_retaddr_pass_info = {
81004 + .pass = &kernexec_retaddr_pass.pass,
81005 + .reference_pass_name = "pro_and_epilogue",
81006 + .ref_pass_instance_number = 0,
81007 + .pos_op = PASS_POS_INSERT_AFTER
81008 + };
81009 +
81010 + if (!plugin_default_version_check(version, &gcc_version)) {
81011 + error(G_("incompatible gcc/plugin versions"));
81012 + return 1;
81013 + }
81014 +
81015 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
81016 +
81017 + if (TARGET_64BIT == 0)
81018 + return 0;
81019 +
81020 + for (i = 0; i < argc; ++i) {
81021 + if (!strcmp(argv[i].key, "method")) {
81022 + if (!argv[i].value) {
81023 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81024 + continue;
81025 + }
81026 + if (!strcmp(argv[i].value, "bts")) {
81027 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
81028 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
81029 + } else if (!strcmp(argv[i].value, "or")) {
81030 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
81031 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
81032 + fix_register("r10", 1, 1);
81033 + } else
81034 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81035 + continue;
81036 + }
81037 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81038 + }
81039 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
81040 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
81041 +
81042 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
81043 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
81044 +
81045 + return 0;
81046 +}
81047 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
81048 new file mode 100644
81049 index 0000000..d44f37c
81050 --- /dev/null
81051 +++ b/tools/gcc/stackleak_plugin.c
81052 @@ -0,0 +1,291 @@
81053 +/*
81054 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81055 + * Licensed under the GPL v2
81056 + *
81057 + * Note: the choice of the license means that the compilation process is
81058 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81059 + * but for the kernel it doesn't matter since it doesn't link against
81060 + * any of the gcc libraries
81061 + *
81062 + * gcc plugin to help implement various PaX features
81063 + *
81064 + * - track lowest stack pointer
81065 + *
81066 + * TODO:
81067 + * - initialize all local variables
81068 + *
81069 + * BUGS:
81070 + * - none known
81071 + */
81072 +#include "gcc-plugin.h"
81073 +#include "config.h"
81074 +#include "system.h"
81075 +#include "coretypes.h"
81076 +#include "tree.h"
81077 +#include "tree-pass.h"
81078 +#include "flags.h"
81079 +#include "intl.h"
81080 +#include "toplev.h"
81081 +#include "plugin.h"
81082 +//#include "expr.h" where are you...
81083 +#include "diagnostic.h"
81084 +#include "plugin-version.h"
81085 +#include "tm.h"
81086 +#include "function.h"
81087 +#include "basic-block.h"
81088 +#include "gimple.h"
81089 +#include "rtl.h"
81090 +#include "emit-rtl.h"
81091 +
81092 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81093 +
81094 +int plugin_is_GPL_compatible;
81095 +
81096 +static int track_frame_size = -1;
81097 +static const char track_function[] = "pax_track_stack";
81098 +static const char check_function[] = "pax_check_alloca";
81099 +static bool init_locals;
81100 +
81101 +static struct plugin_info stackleak_plugin_info = {
81102 + .version = "201111150100",
81103 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
81104 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
81105 +};
81106 +
81107 +static bool gate_stackleak_track_stack(void);
81108 +static unsigned int execute_stackleak_tree_instrument(void);
81109 +static unsigned int execute_stackleak_final(void);
81110 +
81111 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
81112 + .pass = {
81113 + .type = GIMPLE_PASS,
81114 + .name = "stackleak_tree_instrument",
81115 + .gate = gate_stackleak_track_stack,
81116 + .execute = execute_stackleak_tree_instrument,
81117 + .sub = NULL,
81118 + .next = NULL,
81119 + .static_pass_number = 0,
81120 + .tv_id = TV_NONE,
81121 + .properties_required = PROP_gimple_leh | PROP_cfg,
81122 + .properties_provided = 0,
81123 + .properties_destroyed = 0,
81124 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
81125 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
81126 + }
81127 +};
81128 +
81129 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
81130 + .pass = {
81131 + .type = RTL_PASS,
81132 + .name = "stackleak_final",
81133 + .gate = gate_stackleak_track_stack,
81134 + .execute = execute_stackleak_final,
81135 + .sub = NULL,
81136 + .next = NULL,
81137 + .static_pass_number = 0,
81138 + .tv_id = TV_NONE,
81139 + .properties_required = 0,
81140 + .properties_provided = 0,
81141 + .properties_destroyed = 0,
81142 + .todo_flags_start = 0,
81143 + .todo_flags_finish = TODO_dump_func
81144 + }
81145 +};
81146 +
81147 +static bool gate_stackleak_track_stack(void)
81148 +{
81149 + return track_frame_size >= 0;
81150 +}
81151 +
81152 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
81153 +{
81154 + gimple check_alloca;
81155 + tree fndecl, fntype, alloca_size;
81156 +
81157 + // insert call to void pax_check_alloca(unsigned long size)
81158 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
81159 + fndecl = build_fn_decl(check_function, fntype);
81160 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
81161 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
81162 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
81163 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
81164 +}
81165 +
81166 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
81167 +{
81168 + gimple track_stack;
81169 + tree fndecl, fntype;
81170 +
81171 + // insert call to void pax_track_stack(void)
81172 + fntype = build_function_type_list(void_type_node, NULL_TREE);
81173 + fndecl = build_fn_decl(track_function, fntype);
81174 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
81175 + track_stack = gimple_build_call(fndecl, 0);
81176 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
81177 +}
81178 +
81179 +#if BUILDING_GCC_VERSION == 4005
81180 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
81181 +{
81182 + tree fndecl;
81183 +
81184 + if (!is_gimple_call(stmt))
81185 + return false;
81186 + fndecl = gimple_call_fndecl(stmt);
81187 + if (!fndecl)
81188 + return false;
81189 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
81190 + return false;
81191 +// print_node(stderr, "pax", fndecl, 4);
81192 + return DECL_FUNCTION_CODE(fndecl) == code;
81193 +}
81194 +#endif
81195 +
81196 +static bool is_alloca(gimple stmt)
81197 +{
81198 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
81199 + return true;
81200 +
81201 +#if BUILDING_GCC_VERSION >= 4007
81202 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
81203 + return true;
81204 +#endif
81205 +
81206 + return false;
81207 +}
81208 +
81209 +static unsigned int execute_stackleak_tree_instrument(void)
81210 +{
81211 + basic_block bb, entry_bb;
81212 + bool prologue_instrumented = false;
81213 +
81214 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
81215 +
81216 + // 1. loop through BBs and GIMPLE statements
81217 + FOR_EACH_BB(bb) {
81218 + gimple_stmt_iterator gsi;
81219 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81220 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
81221 + if (!is_alloca(gsi_stmt(gsi)))
81222 + continue;
81223 +
81224 + // 2. insert stack overflow check before each __builtin_alloca call
81225 + stackleak_check_alloca(gsi);
81226 +
81227 + // 3. insert track call after each __builtin_alloca call
81228 + stackleak_add_instrumentation(gsi);
81229 + if (bb == entry_bb)
81230 + prologue_instrumented = true;
81231 + }
81232 + }
81233 +
81234 + // 4. insert track call at the beginning
81235 + if (!prologue_instrumented) {
81236 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
81237 + if (dom_info_available_p(CDI_DOMINATORS))
81238 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
81239 + stackleak_add_instrumentation(gsi_start_bb(bb));
81240 + }
81241 +
81242 + return 0;
81243 +}
81244 +
81245 +static unsigned int execute_stackleak_final(void)
81246 +{
81247 + rtx insn;
81248 +
81249 + if (cfun->calls_alloca)
81250 + return 0;
81251 +
81252 + // keep calls only if function frame is big enough
81253 + if (get_frame_size() >= track_frame_size)
81254 + return 0;
81255 +
81256 + // 1. find pax_track_stack calls
81257 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81258 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
81259 + rtx body;
81260 +
81261 + if (!CALL_P(insn))
81262 + continue;
81263 + body = PATTERN(insn);
81264 + if (GET_CODE(body) != CALL)
81265 + continue;
81266 + body = XEXP(body, 0);
81267 + if (GET_CODE(body) != MEM)
81268 + continue;
81269 + body = XEXP(body, 0);
81270 + if (GET_CODE(body) != SYMBOL_REF)
81271 + continue;
81272 + if (strcmp(XSTR(body, 0), track_function))
81273 + continue;
81274 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
81275 + // 2. delete call
81276 + insn = delete_insn_and_edges(insn);
81277 +#if BUILDING_GCC_VERSION >= 4007
81278 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
81279 + insn = delete_insn_and_edges(insn);
81280 +#endif
81281 + }
81282 +
81283 +// print_simple_rtl(stderr, get_insns());
81284 +// print_rtl(stderr, get_insns());
81285 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
81286 +
81287 + return 0;
81288 +}
81289 +
81290 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81291 +{
81292 + const char * const plugin_name = plugin_info->base_name;
81293 + const int argc = plugin_info->argc;
81294 + const struct plugin_argument * const argv = plugin_info->argv;
81295 + int i;
81296 + struct register_pass_info stackleak_tree_instrument_pass_info = {
81297 + .pass = &stackleak_tree_instrument_pass.pass,
81298 +// .reference_pass_name = "tree_profile",
81299 + .reference_pass_name = "optimized",
81300 + .ref_pass_instance_number = 0,
81301 + .pos_op = PASS_POS_INSERT_AFTER
81302 + };
81303 + struct register_pass_info stackleak_final_pass_info = {
81304 + .pass = &stackleak_final_rtl_opt_pass.pass,
81305 + .reference_pass_name = "final",
81306 + .ref_pass_instance_number = 0,
81307 + .pos_op = PASS_POS_INSERT_BEFORE
81308 + };
81309 +
81310 + if (!plugin_default_version_check(version, &gcc_version)) {
81311 + error(G_("incompatible gcc/plugin versions"));
81312 + return 1;
81313 + }
81314 +
81315 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
81316 +
81317 + for (i = 0; i < argc; ++i) {
81318 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
81319 + if (!argv[i].value) {
81320 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81321 + continue;
81322 + }
81323 + track_frame_size = atoi(argv[i].value);
81324 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
81325 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81326 + continue;
81327 + }
81328 + if (!strcmp(argv[i].key, "initialize-locals")) {
81329 + if (argv[i].value) {
81330 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81331 + continue;
81332 + }
81333 + init_locals = true;
81334 + continue;
81335 + }
81336 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81337 + }
81338 +
81339 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
81340 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
81341 +
81342 + return 0;
81343 +}
81344 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
81345 index 6789d78..4afd019 100644
81346 --- a/tools/perf/util/include/asm/alternative-asm.h
81347 +++ b/tools/perf/util/include/asm/alternative-asm.h
81348 @@ -5,4 +5,7 @@
81349
81350 #define altinstruction_entry #
81351
81352 + .macro pax_force_retaddr rip=0, reload=0
81353 + .endm
81354 +
81355 #endif
81356 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
81357 index af0f22f..9a7d479 100644
81358 --- a/usr/gen_init_cpio.c
81359 +++ b/usr/gen_init_cpio.c
81360 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
81361 int retval;
81362 int rc = -1;
81363 int namesize;
81364 - int i;
81365 + unsigned int i;
81366
81367 mode |= S_IFREG;
81368
81369 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
81370 *env_var = *expanded = '\0';
81371 strncat(env_var, start + 2, end - start - 2);
81372 strncat(expanded, new_location, start - new_location);
81373 - strncat(expanded, getenv(env_var), PATH_MAX);
81374 - strncat(expanded, end + 1, PATH_MAX);
81375 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
81376 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
81377 strncpy(new_location, expanded, PATH_MAX);
81378 + new_location[PATH_MAX] = 0;
81379 } else
81380 break;
81381 }
81382 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
81383 index aefdda3..8e8fbb9 100644
81384 --- a/virt/kvm/kvm_main.c
81385 +++ b/virt/kvm/kvm_main.c
81386 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
81387
81388 static cpumask_var_t cpus_hardware_enabled;
81389 static int kvm_usage_count = 0;
81390 -static atomic_t hardware_enable_failed;
81391 +static atomic_unchecked_t hardware_enable_failed;
81392
81393 struct kmem_cache *kvm_vcpu_cache;
81394 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
81395 @@ -2266,7 +2266,7 @@ static void hardware_enable_nolock(void *junk)
81396
81397 if (r) {
81398 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
81399 - atomic_inc(&hardware_enable_failed);
81400 + atomic_inc_unchecked(&hardware_enable_failed);
81401 printk(KERN_INFO "kvm: enabling virtualization on "
81402 "CPU%d failed\n", cpu);
81403 }
81404 @@ -2320,10 +2320,10 @@ static int hardware_enable_all(void)
81405
81406 kvm_usage_count++;
81407 if (kvm_usage_count == 1) {
81408 - atomic_set(&hardware_enable_failed, 0);
81409 + atomic_set_unchecked(&hardware_enable_failed, 0);
81410 on_each_cpu(hardware_enable_nolock, NULL, 1);
81411
81412 - if (atomic_read(&hardware_enable_failed)) {
81413 + if (atomic_read_unchecked(&hardware_enable_failed)) {
81414 hardware_disable_all_nolock();
81415 r = -EBUSY;
81416 }
81417 @@ -2588,7 +2588,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
81418 kvm_arch_vcpu_put(vcpu);
81419 }
81420
81421 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81422 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81423 struct module *module)
81424 {
81425 int r;
81426 @@ -2651,7 +2651,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81427 if (!vcpu_align)
81428 vcpu_align = __alignof__(struct kvm_vcpu);
81429 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
81430 - 0, NULL);
81431 + SLAB_USERCOPY, NULL);
81432 if (!kvm_vcpu_cache) {
81433 r = -ENOMEM;
81434 goto out_free_3;
81435 @@ -2661,9 +2661,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81436 if (r)
81437 goto out_free;
81438
81439 - kvm_chardev_ops.owner = module;
81440 - kvm_vm_fops.owner = module;
81441 - kvm_vcpu_fops.owner = module;
81442 + pax_open_kernel();
81443 + *(void **)&kvm_chardev_ops.owner = module;
81444 + *(void **)&kvm_vm_fops.owner = module;
81445 + *(void **)&kvm_vcpu_fops.owner = module;
81446 + pax_close_kernel();
81447
81448 r = misc_register(&kvm_dev);
81449 if (r) {