]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-3.1.4-201112041811.patch
Auto commit, 1 new patch.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-3.1.4-201112041811.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index dfa6fc6..0095943 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -5,6 +5,7 @@
6 *.cis
7 *.cpio
8 *.csp
9 +*.dbg
10 *.dsp
11 *.dvi
12 *.elf
13 @@ -14,6 +15,7 @@
14 *.gcov
15 *.gen.S
16 *.gif
17 +*.gmo
18 *.grep
19 *.grp
20 *.gz
21 @@ -48,9 +50,11 @@
22 *.tab.h
23 *.tex
24 *.ver
25 +*.vim
26 *.xml
27 *.xz
28 *_MODULES
29 +*_reg_safe.h
30 *_vga16.c
31 *~
32 \#*#
33 @@ -70,6 +74,7 @@ Kerntypes
34 Module.markers
35 Module.symvers
36 PENDING
37 +PERF*
38 SCCS
39 System.map*
40 TAGS
41 @@ -93,19 +98,24 @@ bounds.h
42 bsetup
43 btfixupprep
44 build
45 +builtin-policy.h
46 bvmlinux
47 bzImage*
48 capability_names.h
49 capflags.c
50 classlist.h*
51 +clut_vga16.c
52 +common-cmds.h
53 comp*.log
54 compile.h*
55 conf
56 config
57 config-*
58 config_data.h*
59 +config.c
60 config.mak
61 config.mak.autogen
62 +config.tmp
63 conmakehash
64 consolemap_deftbl.c*
65 cpustr.h
66 @@ -119,6 +129,7 @@ dslm
67 elf2ecoff
68 elfconfig.h*
69 evergreen_reg_safe.h
70 +exception_policy.conf
71 fixdep
72 flask.h
73 fore200e_mkfirm
74 @@ -126,12 +137,15 @@ fore200e_pca_fw.c*
75 gconf
76 gconf.glade.h
77 gen-devlist
78 +gen-kdb_cmds.c
79 gen_crc32table
80 gen_init_cpio
81 generated
82 genheaders
83 genksyms
84 *_gray256.c
85 +hash
86 +hid-example
87 hpet_example
88 hugepage-mmap
89 hugepage-shm
90 @@ -146,7 +160,7 @@ int32.c
91 int4.c
92 int8.c
93 kallsyms
94 -kconfig
95 +kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99 @@ -154,7 +168,6 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103 -linux
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107 @@ -166,14 +179,15 @@ machtypes.h
108 map
109 map_hugetlb
110 maui_boot.h
111 -media
112 mconf
113 +mdp
114 miboot*
115 mk_elfconfig
116 mkboot
117 mkbugboot
118 mkcpustr
119 mkdep
120 +mkpiggy
121 mkprep
122 mkregtable
123 mktables
124 @@ -209,6 +223,7 @@ r300_reg_safe.h
125 r420_reg_safe.h
126 r600_reg_safe.h
127 recordmcount
128 +regdb.c
129 relocs
130 rlim_names.h
131 rn50_reg_safe.h
132 @@ -219,6 +234,7 @@ setup
133 setup.bin
134 setup.elf
135 sImage
136 +slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140 @@ -229,6 +245,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144 +user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148 @@ -246,7 +263,9 @@ vmlinux
149 vmlinux-*
150 vmlinux.aout
151 vmlinux.bin.all
152 +vmlinux.bin.bz2
153 vmlinux.lds
154 +vmlinux.relocs
155 vmlinuz
156 voffset.h
157 vsyscall.lds
158 @@ -254,9 +273,11 @@ vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162 +utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168 +zconf.lex.c
169 zoffset.h
170 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
171 index d6e6724..a024ce8 100644
172 --- a/Documentation/kernel-parameters.txt
173 +++ b/Documentation/kernel-parameters.txt
174 @@ -1898,6 +1898,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
175 the specified number of seconds. This is to be used if
176 your oopses keep scrolling off the screen.
177
178 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
179 + virtualization environments that don't cope well with the
180 + expand down segment used by UDEREF on X86-32 or the frequent
181 + page table updates on X86-64.
182 +
183 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
184 +
185 pcbit= [HW,ISDN]
186
187 pcd. [PARIDE]
188 diff --git a/Makefile b/Makefile
189 index 7f8a93b..4435dc9 100644
190 --- a/Makefile
191 +++ b/Makefile
192 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
193
194 HOSTCC = gcc
195 HOSTCXX = g++
196 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
197 -HOSTCXXFLAGS = -O2
198 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
199 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
200 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
201
202 # Decide whether to build built-in, modular, or both.
203 # Normally, just do built-in.
204 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
205 # Rules shared between *config targets and build targets
206
207 # Basic helpers built in scripts/
208 -PHONY += scripts_basic
209 -scripts_basic:
210 +PHONY += scripts_basic gcc-plugins
211 +scripts_basic: gcc-plugins
212 $(Q)$(MAKE) $(build)=scripts/basic
213 $(Q)rm -f .tmp_quiet_recordmcount
214
215 @@ -564,6 +565,42 @@ else
216 KBUILD_CFLAGS += -O2
217 endif
218
219 +ifndef DISABLE_PAX_PLUGINS
220 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
221 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
222 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
223 +endif
224 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
225 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
226 +STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
227 +endif
228 +ifdef CONFIG_KALLOCSTAT_PLUGIN
229 +KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
230 +endif
231 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
232 +KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
233 +KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
234 +endif
235 +ifdef CONFIG_CHECKER_PLUGIN
236 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
237 +CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
238 +endif
239 +endif
240 +GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
241 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
242 +gcc-plugins:
243 + $(Q)$(MAKE) $(build)=tools/gcc
244 +else
245 +gcc-plugins:
246 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
247 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
248 +else
249 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
250 +endif
251 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
252 +endif
253 +endif
254 +
255 include $(srctree)/arch/$(SRCARCH)/Makefile
256
257 ifneq ($(CONFIG_FRAME_WARN),0)
258 @@ -708,7 +745,7 @@ export mod_strip_cmd
259
260
261 ifeq ($(KBUILD_EXTMOD),)
262 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
263 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
264
265 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
266 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
267 @@ -932,6 +969,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
268
269 # The actual objects are generated when descending,
270 # make sure no implicit rule kicks in
271 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
272 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
273
274 # Handle descending into subdirectories listed in $(vmlinux-dirs)
275 @@ -941,7 +979,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
276 # Error messages still appears in the original language
277
278 PHONY += $(vmlinux-dirs)
279 -$(vmlinux-dirs): prepare scripts
280 +$(vmlinux-dirs): gcc-plugins prepare scripts
281 $(Q)$(MAKE) $(build)=$@
282
283 # Store (new) KERNELRELASE string in include/config/kernel.release
284 @@ -986,6 +1024,7 @@ prepare0: archprepare FORCE
285 $(Q)$(MAKE) $(build)=. missing-syscalls
286
287 # All the preparing..
288 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
289 prepare: prepare0
290
291 # Generate some files
292 @@ -1087,6 +1126,7 @@ all: modules
293 # using awk while concatenating to the final file.
294
295 PHONY += modules
296 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
297 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
298 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
299 @$(kecho) ' Building modules, stage 2.';
300 @@ -1102,7 +1142,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
301
302 # Target to prepare building external modules
303 PHONY += modules_prepare
304 -modules_prepare: prepare scripts
305 +modules_prepare: gcc-plugins prepare scripts
306
307 # Target to install modules
308 PHONY += modules_install
309 @@ -1198,7 +1238,7 @@ distclean: mrproper
310 @find $(srctree) $(RCS_FIND_IGNORE) \
311 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
312 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
313 - -o -name '.*.rej' -o -size 0 \
314 + -o -name '.*.rej' -o -name '*.so' -o -size 0 \
315 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
316 -type f -print | xargs rm -f
317
318 @@ -1360,6 +1400,7 @@ PHONY += $(module-dirs) modules
319 $(module-dirs): crmodverdir $(objtree)/Module.symvers
320 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
321
322 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
323 modules: $(module-dirs)
324 @$(kecho) ' Building modules, stage 2.';
325 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
326 @@ -1486,17 +1527,19 @@ else
327 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
328 endif
329
330 -%.s: %.c prepare scripts FORCE
331 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
332 +%.s: %.c gcc-plugins prepare scripts FORCE
333 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
334 %.i: %.c prepare scripts FORCE
335 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
336 -%.o: %.c prepare scripts FORCE
337 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
338 +%.o: %.c gcc-plugins prepare scripts FORCE
339 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
340 %.lst: %.c prepare scripts FORCE
341 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
342 -%.s: %.S prepare scripts FORCE
343 +%.s: %.S gcc-plugins prepare scripts FORCE
344 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
345 -%.o: %.S prepare scripts FORCE
346 +%.o: %.S gcc-plugins prepare scripts FORCE
347 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
348 %.symtypes: %.c prepare scripts FORCE
349 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
350 @@ -1506,11 +1549,13 @@ endif
351 $(cmd_crmodverdir)
352 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
353 $(build)=$(build-dir)
354 -%/: prepare scripts FORCE
355 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
356 +%/: gcc-plugins prepare scripts FORCE
357 $(cmd_crmodverdir)
358 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
359 $(build)=$(build-dir)
360 -%.ko: prepare scripts FORCE
361 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
362 +%.ko: gcc-plugins prepare scripts FORCE
363 $(cmd_crmodverdir)
364 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
365 $(build)=$(build-dir) $(@:.ko=.o)
366 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
367 index da5449e..7418343 100644
368 --- a/arch/alpha/include/asm/elf.h
369 +++ b/arch/alpha/include/asm/elf.h
370 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
371
372 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
373
374 +#ifdef CONFIG_PAX_ASLR
375 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
376 +
377 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
378 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
379 +#endif
380 +
381 /* $0 is set by ld.so to a pointer to a function which might be
382 registered using atexit. This provides a mean for the dynamic
383 linker to call DT_FINI functions for shared libraries that have
384 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
385 index de98a73..bd4f1f8 100644
386 --- a/arch/alpha/include/asm/pgtable.h
387 +++ b/arch/alpha/include/asm/pgtable.h
388 @@ -101,6 +101,17 @@ struct vm_area_struct;
389 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
390 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
391 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
392 +
393 +#ifdef CONFIG_PAX_PAGEEXEC
394 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
395 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
396 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
397 +#else
398 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
399 +# define PAGE_COPY_NOEXEC PAGE_COPY
400 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
401 +#endif
402 +
403 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
404
405 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
406 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
407 index 2fd00b7..cfd5069 100644
408 --- a/arch/alpha/kernel/module.c
409 +++ b/arch/alpha/kernel/module.c
410 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
411
412 /* The small sections were sorted to the end of the segment.
413 The following should definitely cover them. */
414 - gp = (u64)me->module_core + me->core_size - 0x8000;
415 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
416 got = sechdrs[me->arch.gotsecindex].sh_addr;
417
418 for (i = 0; i < n; i++) {
419 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
420 index 01e8715..be0e80f 100644
421 --- a/arch/alpha/kernel/osf_sys.c
422 +++ b/arch/alpha/kernel/osf_sys.c
423 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
424 /* At this point: (!vma || addr < vma->vm_end). */
425 if (limit - len < addr)
426 return -ENOMEM;
427 - if (!vma || addr + len <= vma->vm_start)
428 + if (check_heap_stack_gap(vma, addr, len))
429 return addr;
430 addr = vma->vm_end;
431 vma = vma->vm_next;
432 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
433 merely specific addresses, but regions of memory -- perhaps
434 this feature should be incorporated into all ports? */
435
436 +#ifdef CONFIG_PAX_RANDMMAP
437 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
438 +#endif
439 +
440 if (addr) {
441 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
442 if (addr != (unsigned long) -ENOMEM)
443 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
444 }
445
446 /* Next, try allocating at TASK_UNMAPPED_BASE. */
447 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
448 - len, limit);
449 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
450 +
451 if (addr != (unsigned long) -ENOMEM)
452 return addr;
453
454 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
455 index fadd5f8..904e73a 100644
456 --- a/arch/alpha/mm/fault.c
457 +++ b/arch/alpha/mm/fault.c
458 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
459 __reload_thread(pcb);
460 }
461
462 +#ifdef CONFIG_PAX_PAGEEXEC
463 +/*
464 + * PaX: decide what to do with offenders (regs->pc = fault address)
465 + *
466 + * returns 1 when task should be killed
467 + * 2 when patched PLT trampoline was detected
468 + * 3 when unpatched PLT trampoline was detected
469 + */
470 +static int pax_handle_fetch_fault(struct pt_regs *regs)
471 +{
472 +
473 +#ifdef CONFIG_PAX_EMUPLT
474 + int err;
475 +
476 + do { /* PaX: patched PLT emulation #1 */
477 + unsigned int ldah, ldq, jmp;
478 +
479 + err = get_user(ldah, (unsigned int *)regs->pc);
480 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
481 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
482 +
483 + if (err)
484 + break;
485 +
486 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
487 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
488 + jmp == 0x6BFB0000U)
489 + {
490 + unsigned long r27, addr;
491 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
492 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
493 +
494 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
495 + err = get_user(r27, (unsigned long *)addr);
496 + if (err)
497 + break;
498 +
499 + regs->r27 = r27;
500 + regs->pc = r27;
501 + return 2;
502 + }
503 + } while (0);
504 +
505 + do { /* PaX: patched PLT emulation #2 */
506 + unsigned int ldah, lda, br;
507 +
508 + err = get_user(ldah, (unsigned int *)regs->pc);
509 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
510 + err |= get_user(br, (unsigned int *)(regs->pc+8));
511 +
512 + if (err)
513 + break;
514 +
515 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
516 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
517 + (br & 0xFFE00000U) == 0xC3E00000U)
518 + {
519 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
520 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
521 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
522 +
523 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
524 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
525 + return 2;
526 + }
527 + } while (0);
528 +
529 + do { /* PaX: unpatched PLT emulation */
530 + unsigned int br;
531 +
532 + err = get_user(br, (unsigned int *)regs->pc);
533 +
534 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
535 + unsigned int br2, ldq, nop, jmp;
536 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
537 +
538 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
539 + err = get_user(br2, (unsigned int *)addr);
540 + err |= get_user(ldq, (unsigned int *)(addr+4));
541 + err |= get_user(nop, (unsigned int *)(addr+8));
542 + err |= get_user(jmp, (unsigned int *)(addr+12));
543 + err |= get_user(resolver, (unsigned long *)(addr+16));
544 +
545 + if (err)
546 + break;
547 +
548 + if (br2 == 0xC3600000U &&
549 + ldq == 0xA77B000CU &&
550 + nop == 0x47FF041FU &&
551 + jmp == 0x6B7B0000U)
552 + {
553 + regs->r28 = regs->pc+4;
554 + regs->r27 = addr+16;
555 + regs->pc = resolver;
556 + return 3;
557 + }
558 + }
559 + } while (0);
560 +#endif
561 +
562 + return 1;
563 +}
564 +
565 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
566 +{
567 + unsigned long i;
568 +
569 + printk(KERN_ERR "PAX: bytes at PC: ");
570 + for (i = 0; i < 5; i++) {
571 + unsigned int c;
572 + if (get_user(c, (unsigned int *)pc+i))
573 + printk(KERN_CONT "???????? ");
574 + else
575 + printk(KERN_CONT "%08x ", c);
576 + }
577 + printk("\n");
578 +}
579 +#endif
580
581 /*
582 * This routine handles page faults. It determines the address,
583 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
584 good_area:
585 si_code = SEGV_ACCERR;
586 if (cause < 0) {
587 - if (!(vma->vm_flags & VM_EXEC))
588 + if (!(vma->vm_flags & VM_EXEC)) {
589 +
590 +#ifdef CONFIG_PAX_PAGEEXEC
591 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
592 + goto bad_area;
593 +
594 + up_read(&mm->mmap_sem);
595 + switch (pax_handle_fetch_fault(regs)) {
596 +
597 +#ifdef CONFIG_PAX_EMUPLT
598 + case 2:
599 + case 3:
600 + return;
601 +#endif
602 +
603 + }
604 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
605 + do_group_exit(SIGKILL);
606 +#else
607 goto bad_area;
608 +#endif
609 +
610 + }
611 } else if (!cause) {
612 /* Allow reads even for write-only mappings */
613 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
614 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
615 index 0e9ce8d..6ef1e03 100644
616 --- a/arch/arm/include/asm/elf.h
617 +++ b/arch/arm/include/asm/elf.h
618 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
619 the loader. We need to make sure that it is out of the way of the program
620 that it will "exec", and that there is sufficient room for the brk. */
621
622 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
623 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
624 +
625 +#ifdef CONFIG_PAX_ASLR
626 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
627 +
628 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
629 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
630 +#endif
631
632 /* When the program starts, a1 contains a pointer to a function to be
633 registered with atexit, as per the SVR4 ABI. A value of 0 means we
634 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
635 extern void elf_set_personality(const struct elf32_hdr *);
636 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
637
638 -struct mm_struct;
639 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
640 -#define arch_randomize_brk arch_randomize_brk
641 -
642 extern int vectors_user_mapping(void);
643 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
644 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
645 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
646 index e51b1e8..32a3113 100644
647 --- a/arch/arm/include/asm/kmap_types.h
648 +++ b/arch/arm/include/asm/kmap_types.h
649 @@ -21,6 +21,7 @@ enum km_type {
650 KM_L1_CACHE,
651 KM_L2_CACHE,
652 KM_KDB,
653 + KM_CLEARPAGE,
654 KM_TYPE_NR
655 };
656
657 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
658 index b293616..96310e5 100644
659 --- a/arch/arm/include/asm/uaccess.h
660 +++ b/arch/arm/include/asm/uaccess.h
661 @@ -22,6 +22,8 @@
662 #define VERIFY_READ 0
663 #define VERIFY_WRITE 1
664
665 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
666 +
667 /*
668 * The exception table consists of pairs of addresses: the first is the
669 * address of an instruction that is allowed to fault, and the second is
670 @@ -387,8 +389,23 @@ do { \
671
672
673 #ifdef CONFIG_MMU
674 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
675 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
676 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
677 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
678 +
679 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
680 +{
681 + if (!__builtin_constant_p(n))
682 + check_object_size(to, n, false);
683 + return ___copy_from_user(to, from, n);
684 +}
685 +
686 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
687 +{
688 + if (!__builtin_constant_p(n))
689 + check_object_size(from, n, true);
690 + return ___copy_to_user(to, from, n);
691 +}
692 +
693 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
694 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
695 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
696 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
697
698 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
699 {
700 + if ((long)n < 0)
701 + return n;
702 +
703 if (access_ok(VERIFY_READ, from, n))
704 n = __copy_from_user(to, from, n);
705 else /* security hole - plug it */
706 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
707
708 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
709 {
710 + if ((long)n < 0)
711 + return n;
712 +
713 if (access_ok(VERIFY_WRITE, to, n))
714 n = __copy_to_user(to, from, n);
715 return n;
716 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
717 index aeef960..2966009 100644
718 --- a/arch/arm/kernel/armksyms.c
719 +++ b/arch/arm/kernel/armksyms.c
720 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
721 #ifdef CONFIG_MMU
722 EXPORT_SYMBOL(copy_page);
723
724 -EXPORT_SYMBOL(__copy_from_user);
725 -EXPORT_SYMBOL(__copy_to_user);
726 +EXPORT_SYMBOL(___copy_from_user);
727 +EXPORT_SYMBOL(___copy_to_user);
728 EXPORT_SYMBOL(__clear_user);
729
730 EXPORT_SYMBOL(__get_user_1);
731 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
732 index 1a347f4..8b4c8a1 100644
733 --- a/arch/arm/kernel/process.c
734 +++ b/arch/arm/kernel/process.c
735 @@ -28,7 +28,6 @@
736 #include <linux/tick.h>
737 #include <linux/utsname.h>
738 #include <linux/uaccess.h>
739 -#include <linux/random.h>
740 #include <linux/hw_breakpoint.h>
741 #include <linux/cpuidle.h>
742
743 @@ -481,12 +480,6 @@ unsigned long get_wchan(struct task_struct *p)
744 return 0;
745 }
746
747 -unsigned long arch_randomize_brk(struct mm_struct *mm)
748 -{
749 - unsigned long range_end = mm->brk + 0x02000000;
750 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
751 -}
752 -
753 #ifdef CONFIG_MMU
754 /*
755 * The vectors page is always readable from user space for the
756 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
757 index bc9f9da..c75d826 100644
758 --- a/arch/arm/kernel/traps.c
759 +++ b/arch/arm/kernel/traps.c
760 @@ -257,6 +257,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
761
762 static DEFINE_SPINLOCK(die_lock);
763
764 +extern void gr_handle_kernel_exploit(void);
765 +
766 /*
767 * This function is protected against re-entrancy.
768 */
769 @@ -284,6 +286,9 @@ void die(const char *str, struct pt_regs *regs, int err)
770 panic("Fatal exception in interrupt");
771 if (panic_on_oops)
772 panic("Fatal exception");
773 +
774 + gr_handle_kernel_exploit();
775 +
776 if (ret != NOTIFY_STOP)
777 do_exit(SIGSEGV);
778 }
779 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
780 index 66a477a..bee61d3 100644
781 --- a/arch/arm/lib/copy_from_user.S
782 +++ b/arch/arm/lib/copy_from_user.S
783 @@ -16,7 +16,7 @@
784 /*
785 * Prototype:
786 *
787 - * size_t __copy_from_user(void *to, const void *from, size_t n)
788 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
789 *
790 * Purpose:
791 *
792 @@ -84,11 +84,11 @@
793
794 .text
795
796 -ENTRY(__copy_from_user)
797 +ENTRY(___copy_from_user)
798
799 #include "copy_template.S"
800
801 -ENDPROC(__copy_from_user)
802 +ENDPROC(___copy_from_user)
803
804 .pushsection .fixup,"ax"
805 .align 0
806 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
807 index d066df6..df28194 100644
808 --- a/arch/arm/lib/copy_to_user.S
809 +++ b/arch/arm/lib/copy_to_user.S
810 @@ -16,7 +16,7 @@
811 /*
812 * Prototype:
813 *
814 - * size_t __copy_to_user(void *to, const void *from, size_t n)
815 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
816 *
817 * Purpose:
818 *
819 @@ -88,11 +88,11 @@
820 .text
821
822 ENTRY(__copy_to_user_std)
823 -WEAK(__copy_to_user)
824 +WEAK(___copy_to_user)
825
826 #include "copy_template.S"
827
828 -ENDPROC(__copy_to_user)
829 +ENDPROC(___copy_to_user)
830 ENDPROC(__copy_to_user_std)
831
832 .pushsection .fixup,"ax"
833 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
834 index d0ece2a..5ae2f39 100644
835 --- a/arch/arm/lib/uaccess.S
836 +++ b/arch/arm/lib/uaccess.S
837 @@ -20,7 +20,7 @@
838
839 #define PAGE_SHIFT 12
840
841 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
842 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
843 * Purpose : copy a block to user memory from kernel memory
844 * Params : to - user memory
845 * : from - kernel memory
846 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May fault
847 sub r2, r2, ip
848 b .Lc2u_dest_aligned
849
850 -ENTRY(__copy_to_user)
851 +ENTRY(___copy_to_user)
852 stmfd sp!, {r2, r4 - r7, lr}
853 cmp r2, #4
854 blt .Lc2u_not_enough
855 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May fault
856 ldrgtb r3, [r1], #0
857 USER( T(strgtb) r3, [r0], #1) @ May fault
858 b .Lc2u_finished
859 -ENDPROC(__copy_to_user)
860 +ENDPROC(___copy_to_user)
861
862 .pushsection .fixup,"ax"
863 .align 0
864 9001: ldmfd sp!, {r0, r4 - r7, pc}
865 .popsection
866
867 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
868 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
869 * Purpose : copy a block from user memory to kernel memory
870 * Params : to - kernel memory
871 * : from - user memory
872 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May fault
873 sub r2, r2, ip
874 b .Lcfu_dest_aligned
875
876 -ENTRY(__copy_from_user)
877 +ENTRY(___copy_from_user)
878 stmfd sp!, {r0, r2, r4 - r7, lr}
879 cmp r2, #4
880 blt .Lcfu_not_enough
881 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May fault
882 USER( T(ldrgtb) r3, [r1], #1) @ May fault
883 strgtb r3, [r0], #1
884 b .Lcfu_finished
885 -ENDPROC(__copy_from_user)
886 +ENDPROC(___copy_from_user)
887
888 .pushsection .fixup,"ax"
889 .align 0
890 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
891 index 8b9b136..70d5100 100644
892 --- a/arch/arm/lib/uaccess_with_memcpy.c
893 +++ b/arch/arm/lib/uaccess_with_memcpy.c
894 @@ -103,7 +103,7 @@ out:
895 }
896
897 unsigned long
898 -__copy_to_user(void __user *to, const void *from, unsigned long n)
899 +___copy_to_user(void __user *to, const void *from, unsigned long n)
900 {
901 /*
902 * This test is stubbed out of the main function above to keep
903 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
904 index 2b2d51c..0127490 100644
905 --- a/arch/arm/mach-ux500/mbox-db5500.c
906 +++ b/arch/arm/mach-ux500/mbox-db5500.c
907 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
908 return sprintf(buf, "0x%X\n", mbox_value);
909 }
910
911 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
912 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
913
914 static int mbox_show(struct seq_file *s, void *data)
915 {
916 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
917 index 3b5ea68..42fc9af 100644
918 --- a/arch/arm/mm/fault.c
919 +++ b/arch/arm/mm/fault.c
920 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
921 }
922 #endif
923
924 +#ifdef CONFIG_PAX_PAGEEXEC
925 + if (fsr & FSR_LNX_PF) {
926 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
927 + do_group_exit(SIGKILL);
928 + }
929 +#endif
930 +
931 tsk->thread.address = addr;
932 tsk->thread.error_code = fsr;
933 tsk->thread.trap_no = 14;
934 @@ -383,6 +390,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
935 }
936 #endif /* CONFIG_MMU */
937
938 +#ifdef CONFIG_PAX_PAGEEXEC
939 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
940 +{
941 + long i;
942 +
943 + printk(KERN_ERR "PAX: bytes at PC: ");
944 + for (i = 0; i < 20; i++) {
945 + unsigned char c;
946 + if (get_user(c, (__force unsigned char __user *)pc+i))
947 + printk(KERN_CONT "?? ");
948 + else
949 + printk(KERN_CONT "%02x ", c);
950 + }
951 + printk("\n");
952 +
953 + printk(KERN_ERR "PAX: bytes at SP-4: ");
954 + for (i = -1; i < 20; i++) {
955 + unsigned long c;
956 + if (get_user(c, (__force unsigned long __user *)sp+i))
957 + printk(KERN_CONT "???????? ");
958 + else
959 + printk(KERN_CONT "%08lx ", c);
960 + }
961 + printk("\n");
962 +}
963 +#endif
964 +
965 /*
966 * First Level Translation Fault Handler
967 *
968 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
969 index 74be05f..f605b8c 100644
970 --- a/arch/arm/mm/mmap.c
971 +++ b/arch/arm/mm/mmap.c
972 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
973 if (len > TASK_SIZE)
974 return -ENOMEM;
975
976 +#ifdef CONFIG_PAX_RANDMMAP
977 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
978 +#endif
979 +
980 if (addr) {
981 if (do_align)
982 addr = COLOUR_ALIGN(addr, pgoff);
983 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
984 addr = PAGE_ALIGN(addr);
985
986 vma = find_vma(mm, addr);
987 - if (TASK_SIZE - len >= addr &&
988 - (!vma || addr + len <= vma->vm_start))
989 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
990 return addr;
991 }
992 if (len > mm->cached_hole_size) {
993 - start_addr = addr = mm->free_area_cache;
994 + start_addr = addr = mm->free_area_cache;
995 } else {
996 - start_addr = addr = TASK_UNMAPPED_BASE;
997 - mm->cached_hole_size = 0;
998 + start_addr = addr = mm->mmap_base;
999 + mm->cached_hole_size = 0;
1000 }
1001 /* 8 bits of randomness in 20 address space bits */
1002 if ((current->flags & PF_RANDOMIZE) &&
1003 @@ -100,14 +103,14 @@ full_search:
1004 * Start a new search - just in case we missed
1005 * some holes.
1006 */
1007 - if (start_addr != TASK_UNMAPPED_BASE) {
1008 - start_addr = addr = TASK_UNMAPPED_BASE;
1009 + if (start_addr != mm->mmap_base) {
1010 + start_addr = addr = mm->mmap_base;
1011 mm->cached_hole_size = 0;
1012 goto full_search;
1013 }
1014 return -ENOMEM;
1015 }
1016 - if (!vma || addr + len <= vma->vm_start) {
1017 + if (check_heap_stack_gap(vma, addr, len)) {
1018 /*
1019 * Remember the place where we stopped the search:
1020 */
1021 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1022 index 3b3159b..425ea94 100644
1023 --- a/arch/avr32/include/asm/elf.h
1024 +++ b/arch/avr32/include/asm/elf.h
1025 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1026 the loader. We need to make sure that it is out of the way of the program
1027 that it will "exec", and that there is sufficient room for the brk. */
1028
1029 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1030 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1031
1032 +#ifdef CONFIG_PAX_ASLR
1033 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1034 +
1035 +#define PAX_DELTA_MMAP_LEN 15
1036 +#define PAX_DELTA_STACK_LEN 15
1037 +#endif
1038
1039 /* This yields a mask that user programs can use to figure out what
1040 instruction set this CPU supports. This could be done in user space,
1041 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1042 index b7f5c68..556135c 100644
1043 --- a/arch/avr32/include/asm/kmap_types.h
1044 +++ b/arch/avr32/include/asm/kmap_types.h
1045 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1046 D(11) KM_IRQ1,
1047 D(12) KM_SOFTIRQ0,
1048 D(13) KM_SOFTIRQ1,
1049 -D(14) KM_TYPE_NR
1050 +D(14) KM_CLEARPAGE,
1051 +D(15) KM_TYPE_NR
1052 };
1053
1054 #undef D
1055 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1056 index f7040a1..db9f300 100644
1057 --- a/arch/avr32/mm/fault.c
1058 +++ b/arch/avr32/mm/fault.c
1059 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1060
1061 int exception_trace = 1;
1062
1063 +#ifdef CONFIG_PAX_PAGEEXEC
1064 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1065 +{
1066 + unsigned long i;
1067 +
1068 + printk(KERN_ERR "PAX: bytes at PC: ");
1069 + for (i = 0; i < 20; i++) {
1070 + unsigned char c;
1071 + if (get_user(c, (unsigned char *)pc+i))
1072 + printk(KERN_CONT "???????? ");
1073 + else
1074 + printk(KERN_CONT "%02x ", c);
1075 + }
1076 + printk("\n");
1077 +}
1078 +#endif
1079 +
1080 /*
1081 * This routine handles page faults. It determines the address and the
1082 * problem, and then passes it off to one of the appropriate routines.
1083 @@ -156,6 +173,16 @@ bad_area:
1084 up_read(&mm->mmap_sem);
1085
1086 if (user_mode(regs)) {
1087 +
1088 +#ifdef CONFIG_PAX_PAGEEXEC
1089 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1090 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1091 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1092 + do_group_exit(SIGKILL);
1093 + }
1094 + }
1095 +#endif
1096 +
1097 if (exception_trace && printk_ratelimit())
1098 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1099 "sp %08lx ecr %lu\n",
1100 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1101 index f8e16b2..c73ff79 100644
1102 --- a/arch/frv/include/asm/kmap_types.h
1103 +++ b/arch/frv/include/asm/kmap_types.h
1104 @@ -23,6 +23,7 @@ enum km_type {
1105 KM_IRQ1,
1106 KM_SOFTIRQ0,
1107 KM_SOFTIRQ1,
1108 + KM_CLEARPAGE,
1109 KM_TYPE_NR
1110 };
1111
1112 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1113 index 385fd30..6c3d97e 100644
1114 --- a/arch/frv/mm/elf-fdpic.c
1115 +++ b/arch/frv/mm/elf-fdpic.c
1116 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1117 if (addr) {
1118 addr = PAGE_ALIGN(addr);
1119 vma = find_vma(current->mm, addr);
1120 - if (TASK_SIZE - len >= addr &&
1121 - (!vma || addr + len <= vma->vm_start))
1122 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1123 goto success;
1124 }
1125
1126 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1127 for (; vma; vma = vma->vm_next) {
1128 if (addr > limit)
1129 break;
1130 - if (addr + len <= vma->vm_start)
1131 + if (check_heap_stack_gap(vma, addr, len))
1132 goto success;
1133 addr = vma->vm_end;
1134 }
1135 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1136 for (; vma; vma = vma->vm_next) {
1137 if (addr > limit)
1138 break;
1139 - if (addr + len <= vma->vm_start)
1140 + if (check_heap_stack_gap(vma, addr, len))
1141 goto success;
1142 addr = vma->vm_end;
1143 }
1144 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1145 index b5298eb..67c6e62 100644
1146 --- a/arch/ia64/include/asm/elf.h
1147 +++ b/arch/ia64/include/asm/elf.h
1148 @@ -42,6 +42,13 @@
1149 */
1150 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1151
1152 +#ifdef CONFIG_PAX_ASLR
1153 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1154 +
1155 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1156 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1157 +#endif
1158 +
1159 #define PT_IA_64_UNWIND 0x70000001
1160
1161 /* IA-64 relocations: */
1162 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1163 index 1a97af3..7529d31 100644
1164 --- a/arch/ia64/include/asm/pgtable.h
1165 +++ b/arch/ia64/include/asm/pgtable.h
1166 @@ -12,7 +12,7 @@
1167 * David Mosberger-Tang <davidm@hpl.hp.com>
1168 */
1169
1170 -
1171 +#include <linux/const.h>
1172 #include <asm/mman.h>
1173 #include <asm/page.h>
1174 #include <asm/processor.h>
1175 @@ -143,6 +143,17 @@
1176 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1177 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1178 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1179 +
1180 +#ifdef CONFIG_PAX_PAGEEXEC
1181 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1182 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1183 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1184 +#else
1185 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1186 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1187 +# define PAGE_COPY_NOEXEC PAGE_COPY
1188 +#endif
1189 +
1190 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1191 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1192 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1193 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1194 index b77768d..e0795eb 100644
1195 --- a/arch/ia64/include/asm/spinlock.h
1196 +++ b/arch/ia64/include/asm/spinlock.h
1197 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
1198 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1199
1200 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1201 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1202 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1203 }
1204
1205 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
1206 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1207 index 449c8c0..432a3d2 100644
1208 --- a/arch/ia64/include/asm/uaccess.h
1209 +++ b/arch/ia64/include/asm/uaccess.h
1210 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1211 const void *__cu_from = (from); \
1212 long __cu_len = (n); \
1213 \
1214 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
1215 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1216 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1217 __cu_len; \
1218 })
1219 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1220 long __cu_len = (n); \
1221 \
1222 __chk_user_ptr(__cu_from); \
1223 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
1224 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1225 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1226 __cu_len; \
1227 })
1228 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1229 index 24603be..948052d 100644
1230 --- a/arch/ia64/kernel/module.c
1231 +++ b/arch/ia64/kernel/module.c
1232 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
1233 void
1234 module_free (struct module *mod, void *module_region)
1235 {
1236 - if (mod && mod->arch.init_unw_table &&
1237 - module_region == mod->module_init) {
1238 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1239 unw_remove_unwind_table(mod->arch.init_unw_table);
1240 mod->arch.init_unw_table = NULL;
1241 }
1242 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1243 }
1244
1245 static inline int
1246 +in_init_rx (const struct module *mod, uint64_t addr)
1247 +{
1248 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1249 +}
1250 +
1251 +static inline int
1252 +in_init_rw (const struct module *mod, uint64_t addr)
1253 +{
1254 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1255 +}
1256 +
1257 +static inline int
1258 in_init (const struct module *mod, uint64_t addr)
1259 {
1260 - return addr - (uint64_t) mod->module_init < mod->init_size;
1261 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1262 +}
1263 +
1264 +static inline int
1265 +in_core_rx (const struct module *mod, uint64_t addr)
1266 +{
1267 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1268 +}
1269 +
1270 +static inline int
1271 +in_core_rw (const struct module *mod, uint64_t addr)
1272 +{
1273 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1274 }
1275
1276 static inline int
1277 in_core (const struct module *mod, uint64_t addr)
1278 {
1279 - return addr - (uint64_t) mod->module_core < mod->core_size;
1280 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1281 }
1282
1283 static inline int
1284 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1285 break;
1286
1287 case RV_BDREL:
1288 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1289 + if (in_init_rx(mod, val))
1290 + val -= (uint64_t) mod->module_init_rx;
1291 + else if (in_init_rw(mod, val))
1292 + val -= (uint64_t) mod->module_init_rw;
1293 + else if (in_core_rx(mod, val))
1294 + val -= (uint64_t) mod->module_core_rx;
1295 + else if (in_core_rw(mod, val))
1296 + val -= (uint64_t) mod->module_core_rw;
1297 break;
1298
1299 case RV_LTV:
1300 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1301 * addresses have been selected...
1302 */
1303 uint64_t gp;
1304 - if (mod->core_size > MAX_LTOFF)
1305 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1306 /*
1307 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1308 * at the end of the module.
1309 */
1310 - gp = mod->core_size - MAX_LTOFF / 2;
1311 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1312 else
1313 - gp = mod->core_size / 2;
1314 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1315 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1316 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1317 mod->arch.gp = gp;
1318 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1319 }
1320 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1321 index 609d500..7dde2a8 100644
1322 --- a/arch/ia64/kernel/sys_ia64.c
1323 +++ b/arch/ia64/kernel/sys_ia64.c
1324 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1325 if (REGION_NUMBER(addr) == RGN_HPAGE)
1326 addr = 0;
1327 #endif
1328 +
1329 +#ifdef CONFIG_PAX_RANDMMAP
1330 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1331 + addr = mm->free_area_cache;
1332 + else
1333 +#endif
1334 +
1335 if (!addr)
1336 addr = mm->free_area_cache;
1337
1338 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1339 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1340 /* At this point: (!vma || addr < vma->vm_end). */
1341 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1342 - if (start_addr != TASK_UNMAPPED_BASE) {
1343 + if (start_addr != mm->mmap_base) {
1344 /* Start a new search --- just in case we missed some holes. */
1345 - addr = TASK_UNMAPPED_BASE;
1346 + addr = mm->mmap_base;
1347 goto full_search;
1348 }
1349 return -ENOMEM;
1350 }
1351 - if (!vma || addr + len <= vma->vm_start) {
1352 + if (check_heap_stack_gap(vma, addr, len)) {
1353 /* Remember the address where we stopped this search: */
1354 mm->free_area_cache = addr + len;
1355 return addr;
1356 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1357 index 53c0ba0..2accdde 100644
1358 --- a/arch/ia64/kernel/vmlinux.lds.S
1359 +++ b/arch/ia64/kernel/vmlinux.lds.S
1360 @@ -199,7 +199,7 @@ SECTIONS {
1361 /* Per-cpu data: */
1362 . = ALIGN(PERCPU_PAGE_SIZE);
1363 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
1364 - __phys_per_cpu_start = __per_cpu_load;
1365 + __phys_per_cpu_start = per_cpu_load;
1366 /*
1367 * ensure percpu data fits
1368 * into percpu page size
1369 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1370 index 20b3593..1ce77f0 100644
1371 --- a/arch/ia64/mm/fault.c
1372 +++ b/arch/ia64/mm/fault.c
1373 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
1374 return pte_present(pte);
1375 }
1376
1377 +#ifdef CONFIG_PAX_PAGEEXEC
1378 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1379 +{
1380 + unsigned long i;
1381 +
1382 + printk(KERN_ERR "PAX: bytes at PC: ");
1383 + for (i = 0; i < 8; i++) {
1384 + unsigned int c;
1385 + if (get_user(c, (unsigned int *)pc+i))
1386 + printk(KERN_CONT "???????? ");
1387 + else
1388 + printk(KERN_CONT "%08x ", c);
1389 + }
1390 + printk("\n");
1391 +}
1392 +#endif
1393 +
1394 void __kprobes
1395 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1396 {
1397 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1398 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1399 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1400
1401 - if ((vma->vm_flags & mask) != mask)
1402 + if ((vma->vm_flags & mask) != mask) {
1403 +
1404 +#ifdef CONFIG_PAX_PAGEEXEC
1405 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1406 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1407 + goto bad_area;
1408 +
1409 + up_read(&mm->mmap_sem);
1410 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1411 + do_group_exit(SIGKILL);
1412 + }
1413 +#endif
1414 +
1415 goto bad_area;
1416
1417 + }
1418 +
1419 /*
1420 * If for any reason at all we couldn't handle the fault, make
1421 * sure we exit gracefully rather than endlessly redo the
1422 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1423 index 5ca674b..e0e1b70 100644
1424 --- a/arch/ia64/mm/hugetlbpage.c
1425 +++ b/arch/ia64/mm/hugetlbpage.c
1426 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1427 /* At this point: (!vmm || addr < vmm->vm_end). */
1428 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1429 return -ENOMEM;
1430 - if (!vmm || (addr + len) <= vmm->vm_start)
1431 + if (check_heap_stack_gap(vmm, addr, len))
1432 return addr;
1433 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1434 }
1435 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1436 index 00cb0e2..2ad8024 100644
1437 --- a/arch/ia64/mm/init.c
1438 +++ b/arch/ia64/mm/init.c
1439 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
1440 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1441 vma->vm_end = vma->vm_start + PAGE_SIZE;
1442 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1443 +
1444 +#ifdef CONFIG_PAX_PAGEEXEC
1445 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1446 + vma->vm_flags &= ~VM_EXEC;
1447 +
1448 +#ifdef CONFIG_PAX_MPROTECT
1449 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1450 + vma->vm_flags &= ~VM_MAYEXEC;
1451 +#endif
1452 +
1453 + }
1454 +#endif
1455 +
1456 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1457 down_write(&current->mm->mmap_sem);
1458 if (insert_vm_struct(current->mm, vma)) {
1459 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1460 index 82abd15..d95ae5d 100644
1461 --- a/arch/m32r/lib/usercopy.c
1462 +++ b/arch/m32r/lib/usercopy.c
1463 @@ -14,6 +14,9 @@
1464 unsigned long
1465 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1466 {
1467 + if ((long)n < 0)
1468 + return n;
1469 +
1470 prefetch(from);
1471 if (access_ok(VERIFY_WRITE, to, n))
1472 __copy_user(to,from,n);
1473 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1474 unsigned long
1475 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1476 {
1477 + if ((long)n < 0)
1478 + return n;
1479 +
1480 prefetchw(to);
1481 if (access_ok(VERIFY_READ, from, n))
1482 __copy_user_zeroing(to,from,n);
1483 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1484 index 455c0ac..ad65fbe 100644
1485 --- a/arch/mips/include/asm/elf.h
1486 +++ b/arch/mips/include/asm/elf.h
1487 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1488 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1489 #endif
1490
1491 +#ifdef CONFIG_PAX_ASLR
1492 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1493 +
1494 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1495 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1496 +#endif
1497 +
1498 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1499 struct linux_binprm;
1500 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1501 int uses_interp);
1502
1503 -struct mm_struct;
1504 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1505 -#define arch_randomize_brk arch_randomize_brk
1506 -
1507 #endif /* _ASM_ELF_H */
1508 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1509 index e59cd1a..8e329d6 100644
1510 --- a/arch/mips/include/asm/page.h
1511 +++ b/arch/mips/include/asm/page.h
1512 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1513 #ifdef CONFIG_CPU_MIPS32
1514 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1515 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1516 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1517 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1518 #else
1519 typedef struct { unsigned long long pte; } pte_t;
1520 #define pte_val(x) ((x).pte)
1521 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1522 index 6018c80..7c37203 100644
1523 --- a/arch/mips/include/asm/system.h
1524 +++ b/arch/mips/include/asm/system.h
1525 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1526 */
1527 #define __ARCH_WANT_UNLOCKED_CTXSW
1528
1529 -extern unsigned long arch_align_stack(unsigned long sp);
1530 +#define arch_align_stack(x) ((x) & ~0xfUL)
1531
1532 #endif /* _ASM_SYSTEM_H */
1533 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1534 index 9fdd8bc..4bd7f1a 100644
1535 --- a/arch/mips/kernel/binfmt_elfn32.c
1536 +++ b/arch/mips/kernel/binfmt_elfn32.c
1537 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1538 #undef ELF_ET_DYN_BASE
1539 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1540
1541 +#ifdef CONFIG_PAX_ASLR
1542 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1543 +
1544 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1545 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1546 +#endif
1547 +
1548 #include <asm/processor.h>
1549 #include <linux/module.h>
1550 #include <linux/elfcore.h>
1551 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
1552 index ff44823..97f8906 100644
1553 --- a/arch/mips/kernel/binfmt_elfo32.c
1554 +++ b/arch/mips/kernel/binfmt_elfo32.c
1555 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1556 #undef ELF_ET_DYN_BASE
1557 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1558
1559 +#ifdef CONFIG_PAX_ASLR
1560 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1561 +
1562 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1563 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1564 +#endif
1565 +
1566 #include <asm/processor.h>
1567
1568 /*
1569 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
1570 index b30cb25..454c0a9 100644
1571 --- a/arch/mips/kernel/process.c
1572 +++ b/arch/mips/kernel/process.c
1573 @@ -481,15 +481,3 @@ unsigned long get_wchan(struct task_struct *task)
1574 out:
1575 return pc;
1576 }
1577 -
1578 -/*
1579 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1580 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1581 - */
1582 -unsigned long arch_align_stack(unsigned long sp)
1583 -{
1584 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1585 - sp -= get_random_int() & ~PAGE_MASK;
1586 -
1587 - return sp & ALMASK;
1588 -}
1589 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
1590 index 937cf33..adb39bb 100644
1591 --- a/arch/mips/mm/fault.c
1592 +++ b/arch/mips/mm/fault.c
1593 @@ -28,6 +28,23 @@
1594 #include <asm/highmem.h> /* For VMALLOC_END */
1595 #include <linux/kdebug.h>
1596
1597 +#ifdef CONFIG_PAX_PAGEEXEC
1598 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1599 +{
1600 + unsigned long i;
1601 +
1602 + printk(KERN_ERR "PAX: bytes at PC: ");
1603 + for (i = 0; i < 5; i++) {
1604 + unsigned int c;
1605 + if (get_user(c, (unsigned int *)pc+i))
1606 + printk(KERN_CONT "???????? ");
1607 + else
1608 + printk(KERN_CONT "%08x ", c);
1609 + }
1610 + printk("\n");
1611 +}
1612 +#endif
1613 +
1614 /*
1615 * This routine handles page faults. It determines the address,
1616 * and the problem, and then passes it off to one of the appropriate
1617 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
1618 index 302d779..7d35bf8 100644
1619 --- a/arch/mips/mm/mmap.c
1620 +++ b/arch/mips/mm/mmap.c
1621 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1622 do_color_align = 1;
1623
1624 /* requesting a specific address */
1625 +
1626 +#ifdef CONFIG_PAX_RANDMMAP
1627 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1628 +#endif
1629 +
1630 if (addr) {
1631 if (do_color_align)
1632 addr = COLOUR_ALIGN(addr, pgoff);
1633 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1634 addr = PAGE_ALIGN(addr);
1635
1636 vma = find_vma(mm, addr);
1637 - if (TASK_SIZE - len >= addr &&
1638 - (!vma || addr + len <= vma->vm_start))
1639 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
1640 return addr;
1641 }
1642
1643 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1644 /* At this point: (!vma || addr < vma->vm_end). */
1645 if (TASK_SIZE - len < addr)
1646 return -ENOMEM;
1647 - if (!vma || addr + len <= vma->vm_start)
1648 + if (check_heap_stack_gap(vmm, addr, len))
1649 return addr;
1650 addr = vma->vm_end;
1651 if (do_color_align)
1652 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1653 /* make sure it can fit in the remaining address space */
1654 if (likely(addr > len)) {
1655 vma = find_vma(mm, addr - len);
1656 - if (!vma || addr <= vma->vm_start) {
1657 + if (check_heap_stack_gap(vmm, addr - len, len))
1658 /* cache the address as a hint for next time */
1659 return mm->free_area_cache = addr - len;
1660 }
1661 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
1662 * return with success:
1663 */
1664 vma = find_vma(mm, addr);
1665 - if (likely(!vma || addr + len <= vma->vm_start)) {
1666 + if (check_heap_stack_gap(vmm, addr, len)) {
1667 /* cache the address as a hint for next time */
1668 return mm->free_area_cache = addr;
1669 }
1670 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1671 mm->unmap_area = arch_unmap_area_topdown;
1672 }
1673 }
1674 -
1675 -static inline unsigned long brk_rnd(void)
1676 -{
1677 - unsigned long rnd = get_random_int();
1678 -
1679 - rnd = rnd << PAGE_SHIFT;
1680 - /* 8MB for 32bit, 256MB for 64bit */
1681 - if (TASK_IS_32BIT_ADDR)
1682 - rnd = rnd & 0x7ffffful;
1683 - else
1684 - rnd = rnd & 0xffffffful;
1685 -
1686 - return rnd;
1687 -}
1688 -
1689 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1690 -{
1691 - unsigned long base = mm->brk;
1692 - unsigned long ret;
1693 -
1694 - ret = PAGE_ALIGN(base + brk_rnd());
1695 -
1696 - if (ret < mm->brk)
1697 - return mm->brk;
1698 -
1699 - return ret;
1700 -}
1701 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
1702 index 19f6cb1..6c78cf2 100644
1703 --- a/arch/parisc/include/asm/elf.h
1704 +++ b/arch/parisc/include/asm/elf.h
1705 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
1706
1707 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1708
1709 +#ifdef CONFIG_PAX_ASLR
1710 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1711 +
1712 +#define PAX_DELTA_MMAP_LEN 16
1713 +#define PAX_DELTA_STACK_LEN 16
1714 +#endif
1715 +
1716 /* This yields a mask that user programs can use to figure out what
1717 instruction set this CPU supports. This could be done in user space,
1718 but it's not easy, and we've already done it here. */
1719 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
1720 index 22dadeb..f6c2be4 100644
1721 --- a/arch/parisc/include/asm/pgtable.h
1722 +++ b/arch/parisc/include/asm/pgtable.h
1723 @@ -210,6 +210,17 @@ struct vm_area_struct;
1724 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1725 #define PAGE_COPY PAGE_EXECREAD
1726 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1727 +
1728 +#ifdef CONFIG_PAX_PAGEEXEC
1729 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1730 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1731 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1732 +#else
1733 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1734 +# define PAGE_COPY_NOEXEC PAGE_COPY
1735 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1736 +#endif
1737 +
1738 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1739 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
1740 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
1741 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
1742 index 5e34ccf..672bc9c 100644
1743 --- a/arch/parisc/kernel/module.c
1744 +++ b/arch/parisc/kernel/module.c
1745 @@ -98,16 +98,38 @@
1746
1747 /* three functions to determine where in the module core
1748 * or init pieces the location is */
1749 +static inline int in_init_rx(struct module *me, void *loc)
1750 +{
1751 + return (loc >= me->module_init_rx &&
1752 + loc < (me->module_init_rx + me->init_size_rx));
1753 +}
1754 +
1755 +static inline int in_init_rw(struct module *me, void *loc)
1756 +{
1757 + return (loc >= me->module_init_rw &&
1758 + loc < (me->module_init_rw + me->init_size_rw));
1759 +}
1760 +
1761 static inline int in_init(struct module *me, void *loc)
1762 {
1763 - return (loc >= me->module_init &&
1764 - loc <= (me->module_init + me->init_size));
1765 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1766 +}
1767 +
1768 +static inline int in_core_rx(struct module *me, void *loc)
1769 +{
1770 + return (loc >= me->module_core_rx &&
1771 + loc < (me->module_core_rx + me->core_size_rx));
1772 +}
1773 +
1774 +static inline int in_core_rw(struct module *me, void *loc)
1775 +{
1776 + return (loc >= me->module_core_rw &&
1777 + loc < (me->module_core_rw + me->core_size_rw));
1778 }
1779
1780 static inline int in_core(struct module *me, void *loc)
1781 {
1782 - return (loc >= me->module_core &&
1783 - loc <= (me->module_core + me->core_size));
1784 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1785 }
1786
1787 static inline int in_local(struct module *me, void *loc)
1788 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
1789 }
1790
1791 /* align things a bit */
1792 - me->core_size = ALIGN(me->core_size, 16);
1793 - me->arch.got_offset = me->core_size;
1794 - me->core_size += gots * sizeof(struct got_entry);
1795 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1796 + me->arch.got_offset = me->core_size_rw;
1797 + me->core_size_rw += gots * sizeof(struct got_entry);
1798
1799 - me->core_size = ALIGN(me->core_size, 16);
1800 - me->arch.fdesc_offset = me->core_size;
1801 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1802 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1803 + me->arch.fdesc_offset = me->core_size_rw;
1804 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1805
1806 me->arch.got_max = gots;
1807 me->arch.fdesc_max = fdescs;
1808 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1809
1810 BUG_ON(value == 0);
1811
1812 - got = me->module_core + me->arch.got_offset;
1813 + got = me->module_core_rw + me->arch.got_offset;
1814 for (i = 0; got[i].addr; i++)
1815 if (got[i].addr == value)
1816 goto out;
1817 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
1818 #ifdef CONFIG_64BIT
1819 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1820 {
1821 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1822 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1823
1824 if (!value) {
1825 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1826 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1827
1828 /* Create new one */
1829 fdesc->addr = value;
1830 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1831 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1832 return (Elf_Addr)fdesc;
1833 }
1834 #endif /* CONFIG_64BIT */
1835 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
1836
1837 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1838 end = table + sechdrs[me->arch.unwind_section].sh_size;
1839 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1840 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1841
1842 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1843 me->arch.unwind_section, table, end, gp);
1844 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
1845 index c9b9322..02d8940 100644
1846 --- a/arch/parisc/kernel/sys_parisc.c
1847 +++ b/arch/parisc/kernel/sys_parisc.c
1848 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
1849 /* At this point: (!vma || addr < vma->vm_end). */
1850 if (TASK_SIZE - len < addr)
1851 return -ENOMEM;
1852 - if (!vma || addr + len <= vma->vm_start)
1853 + if (check_heap_stack_gap(vma, addr, len))
1854 return addr;
1855 addr = vma->vm_end;
1856 }
1857 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
1858 /* At this point: (!vma || addr < vma->vm_end). */
1859 if (TASK_SIZE - len < addr)
1860 return -ENOMEM;
1861 - if (!vma || addr + len <= vma->vm_start)
1862 + if (check_heap_stack_gap(vma, addr, len))
1863 return addr;
1864 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1865 if (addr < vma->vm_end) /* handle wraparound */
1866 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
1867 if (flags & MAP_FIXED)
1868 return addr;
1869 if (!addr)
1870 - addr = TASK_UNMAPPED_BASE;
1871 + addr = current->mm->mmap_base;
1872
1873 if (filp) {
1874 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1875 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
1876 index f19e660..414fe24 100644
1877 --- a/arch/parisc/kernel/traps.c
1878 +++ b/arch/parisc/kernel/traps.c
1879 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
1880
1881 down_read(&current->mm->mmap_sem);
1882 vma = find_vma(current->mm,regs->iaoq[0]);
1883 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1884 - && (vma->vm_flags & VM_EXEC)) {
1885 -
1886 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1887 fault_address = regs->iaoq[0];
1888 fault_space = regs->iasq[0];
1889
1890 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
1891 index 18162ce..94de376 100644
1892 --- a/arch/parisc/mm/fault.c
1893 +++ b/arch/parisc/mm/fault.c
1894 @@ -15,6 +15,7 @@
1895 #include <linux/sched.h>
1896 #include <linux/interrupt.h>
1897 #include <linux/module.h>
1898 +#include <linux/unistd.h>
1899
1900 #include <asm/uaccess.h>
1901 #include <asm/traps.h>
1902 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
1903 static unsigned long
1904 parisc_acctyp(unsigned long code, unsigned int inst)
1905 {
1906 - if (code == 6 || code == 16)
1907 + if (code == 6 || code == 7 || code == 16)
1908 return VM_EXEC;
1909
1910 switch (inst & 0xf0000000) {
1911 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
1912 }
1913 #endif
1914
1915 +#ifdef CONFIG_PAX_PAGEEXEC
1916 +/*
1917 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1918 + *
1919 + * returns 1 when task should be killed
1920 + * 2 when rt_sigreturn trampoline was detected
1921 + * 3 when unpatched PLT trampoline was detected
1922 + */
1923 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1924 +{
1925 +
1926 +#ifdef CONFIG_PAX_EMUPLT
1927 + int err;
1928 +
1929 + do { /* PaX: unpatched PLT emulation */
1930 + unsigned int bl, depwi;
1931 +
1932 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1933 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1934 +
1935 + if (err)
1936 + break;
1937 +
1938 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1939 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1940 +
1941 + err = get_user(ldw, (unsigned int *)addr);
1942 + err |= get_user(bv, (unsigned int *)(addr+4));
1943 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1944 +
1945 + if (err)
1946 + break;
1947 +
1948 + if (ldw == 0x0E801096U &&
1949 + bv == 0xEAC0C000U &&
1950 + ldw2 == 0x0E881095U)
1951 + {
1952 + unsigned int resolver, map;
1953 +
1954 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1955 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1956 + if (err)
1957 + break;
1958 +
1959 + regs->gr[20] = instruction_pointer(regs)+8;
1960 + regs->gr[21] = map;
1961 + regs->gr[22] = resolver;
1962 + regs->iaoq[0] = resolver | 3UL;
1963 + regs->iaoq[1] = regs->iaoq[0] + 4;
1964 + return 3;
1965 + }
1966 + }
1967 + } while (0);
1968 +#endif
1969 +
1970 +#ifdef CONFIG_PAX_EMUTRAMP
1971 +
1972 +#ifndef CONFIG_PAX_EMUSIGRT
1973 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1974 + return 1;
1975 +#endif
1976 +
1977 + do { /* PaX: rt_sigreturn emulation */
1978 + unsigned int ldi1, ldi2, bel, nop;
1979 +
1980 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1981 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1982 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1983 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1984 +
1985 + if (err)
1986 + break;
1987 +
1988 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1989 + ldi2 == 0x3414015AU &&
1990 + bel == 0xE4008200U &&
1991 + nop == 0x08000240U)
1992 + {
1993 + regs->gr[25] = (ldi1 & 2) >> 1;
1994 + regs->gr[20] = __NR_rt_sigreturn;
1995 + regs->gr[31] = regs->iaoq[1] + 16;
1996 + regs->sr[0] = regs->iasq[1];
1997 + regs->iaoq[0] = 0x100UL;
1998 + regs->iaoq[1] = regs->iaoq[0] + 4;
1999 + regs->iasq[0] = regs->sr[2];
2000 + regs->iasq[1] = regs->sr[2];
2001 + return 2;
2002 + }
2003 + } while (0);
2004 +#endif
2005 +
2006 + return 1;
2007 +}
2008 +
2009 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2010 +{
2011 + unsigned long i;
2012 +
2013 + printk(KERN_ERR "PAX: bytes at PC: ");
2014 + for (i = 0; i < 5; i++) {
2015 + unsigned int c;
2016 + if (get_user(c, (unsigned int *)pc+i))
2017 + printk(KERN_CONT "???????? ");
2018 + else
2019 + printk(KERN_CONT "%08x ", c);
2020 + }
2021 + printk("\n");
2022 +}
2023 +#endif
2024 +
2025 int fixup_exception(struct pt_regs *regs)
2026 {
2027 const struct exception_table_entry *fix;
2028 @@ -192,8 +303,33 @@ good_area:
2029
2030 acc_type = parisc_acctyp(code,regs->iir);
2031
2032 - if ((vma->vm_flags & acc_type) != acc_type)
2033 + if ((vma->vm_flags & acc_type) != acc_type) {
2034 +
2035 +#ifdef CONFIG_PAX_PAGEEXEC
2036 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2037 + (address & ~3UL) == instruction_pointer(regs))
2038 + {
2039 + up_read(&mm->mmap_sem);
2040 + switch (pax_handle_fetch_fault(regs)) {
2041 +
2042 +#ifdef CONFIG_PAX_EMUPLT
2043 + case 3:
2044 + return;
2045 +#endif
2046 +
2047 +#ifdef CONFIG_PAX_EMUTRAMP
2048 + case 2:
2049 + return;
2050 +#endif
2051 +
2052 + }
2053 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2054 + do_group_exit(SIGKILL);
2055 + }
2056 +#endif
2057 +
2058 goto bad_area;
2059 + }
2060
2061 /*
2062 * If for any reason at all we couldn't handle the fault, make
2063 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2064 index 3bf9cca..e7457d0 100644
2065 --- a/arch/powerpc/include/asm/elf.h
2066 +++ b/arch/powerpc/include/asm/elf.h
2067 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2068 the loader. We need to make sure that it is out of the way of the program
2069 that it will "exec", and that there is sufficient room for the brk. */
2070
2071 -extern unsigned long randomize_et_dyn(unsigned long base);
2072 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2073 +#define ELF_ET_DYN_BASE (0x20000000)
2074 +
2075 +#ifdef CONFIG_PAX_ASLR
2076 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2077 +
2078 +#ifdef __powerpc64__
2079 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
2080 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
2081 +#else
2082 +#define PAX_DELTA_MMAP_LEN 15
2083 +#define PAX_DELTA_STACK_LEN 15
2084 +#endif
2085 +#endif
2086
2087 /*
2088 * Our registers are always unsigned longs, whether we're a 32 bit
2089 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2090 (0x7ff >> (PAGE_SHIFT - 12)) : \
2091 (0x3ffff >> (PAGE_SHIFT - 12)))
2092
2093 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2094 -#define arch_randomize_brk arch_randomize_brk
2095 -
2096 #endif /* __KERNEL__ */
2097
2098 /*
2099 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2100 index bca8fdc..61e9580 100644
2101 --- a/arch/powerpc/include/asm/kmap_types.h
2102 +++ b/arch/powerpc/include/asm/kmap_types.h
2103 @@ -27,6 +27,7 @@ enum km_type {
2104 KM_PPC_SYNC_PAGE,
2105 KM_PPC_SYNC_ICACHE,
2106 KM_KDB,
2107 + KM_CLEARPAGE,
2108 KM_TYPE_NR
2109 };
2110
2111 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
2112 index d4a7f64..451de1c 100644
2113 --- a/arch/powerpc/include/asm/mman.h
2114 +++ b/arch/powerpc/include/asm/mman.h
2115 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
2116 }
2117 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
2118
2119 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
2120 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
2121 {
2122 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
2123 }
2124 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2125 index 2cd664e..1d2e8a7 100644
2126 --- a/arch/powerpc/include/asm/page.h
2127 +++ b/arch/powerpc/include/asm/page.h
2128 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2129 * and needs to be executable. This means the whole heap ends
2130 * up being executable.
2131 */
2132 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2133 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2134 +#define VM_DATA_DEFAULT_FLAGS32 \
2135 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2136 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2137
2138 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2139 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2140 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2141 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2142 #endif
2143
2144 +#define ktla_ktva(addr) (addr)
2145 +#define ktva_ktla(addr) (addr)
2146 +
2147 #ifndef __ASSEMBLY__
2148
2149 #undef STRICT_MM_TYPECHECKS
2150 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2151 index 9356262..ea96148 100644
2152 --- a/arch/powerpc/include/asm/page_64.h
2153 +++ b/arch/powerpc/include/asm/page_64.h
2154 @@ -155,15 +155,18 @@ do { \
2155 * stack by default, so in the absence of a PT_GNU_STACK program header
2156 * we turn execute permission off.
2157 */
2158 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2159 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2160 +#define VM_STACK_DEFAULT_FLAGS32 \
2161 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2162 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2163
2164 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2165 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2166
2167 +#ifndef CONFIG_PAX_PAGEEXEC
2168 #define VM_STACK_DEFAULT_FLAGS \
2169 (is_32bit_task() ? \
2170 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2171 +#endif
2172
2173 #include <asm-generic/getorder.h>
2174
2175 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2176 index 88b0bd9..e32bc67 100644
2177 --- a/arch/powerpc/include/asm/pgtable.h
2178 +++ b/arch/powerpc/include/asm/pgtable.h
2179 @@ -2,6 +2,7 @@
2180 #define _ASM_POWERPC_PGTABLE_H
2181 #ifdef __KERNEL__
2182
2183 +#include <linux/const.h>
2184 #ifndef __ASSEMBLY__
2185 #include <asm/processor.h> /* For TASK_SIZE */
2186 #include <asm/mmu.h>
2187 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2188 index 4aad413..85d86bf 100644
2189 --- a/arch/powerpc/include/asm/pte-hash32.h
2190 +++ b/arch/powerpc/include/asm/pte-hash32.h
2191 @@ -21,6 +21,7 @@
2192 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2193 #define _PAGE_USER 0x004 /* usermode access allowed */
2194 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2195 +#define _PAGE_EXEC _PAGE_GUARDED
2196 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2197 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2198 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2199 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2200 index 559da19..7e5835c 100644
2201 --- a/arch/powerpc/include/asm/reg.h
2202 +++ b/arch/powerpc/include/asm/reg.h
2203 @@ -212,6 +212,7 @@
2204 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2205 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2206 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2207 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2208 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2209 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2210 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2211 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2212 index e30a13d..2b7d994 100644
2213 --- a/arch/powerpc/include/asm/system.h
2214 +++ b/arch/powerpc/include/asm/system.h
2215 @@ -530,7 +530,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2216 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2217 #endif
2218
2219 -extern unsigned long arch_align_stack(unsigned long sp);
2220 +#define arch_align_stack(x) ((x) & ~0xfUL)
2221
2222 /* Used in very early kernel initialization. */
2223 extern unsigned long reloc_offset(void);
2224 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2225 index bd0fb84..a42a14b 100644
2226 --- a/arch/powerpc/include/asm/uaccess.h
2227 +++ b/arch/powerpc/include/asm/uaccess.h
2228 @@ -13,6 +13,8 @@
2229 #define VERIFY_READ 0
2230 #define VERIFY_WRITE 1
2231
2232 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2233 +
2234 /*
2235 * The fs value determines whether argument validity checking should be
2236 * performed or not. If get_fs() == USER_DS, checking is performed, with
2237 @@ -327,52 +329,6 @@ do { \
2238 extern unsigned long __copy_tofrom_user(void __user *to,
2239 const void __user *from, unsigned long size);
2240
2241 -#ifndef __powerpc64__
2242 -
2243 -static inline unsigned long copy_from_user(void *to,
2244 - const void __user *from, unsigned long n)
2245 -{
2246 - unsigned long over;
2247 -
2248 - if (access_ok(VERIFY_READ, from, n))
2249 - return __copy_tofrom_user((__force void __user *)to, from, n);
2250 - if ((unsigned long)from < TASK_SIZE) {
2251 - over = (unsigned long)from + n - TASK_SIZE;
2252 - return __copy_tofrom_user((__force void __user *)to, from,
2253 - n - over) + over;
2254 - }
2255 - return n;
2256 -}
2257 -
2258 -static inline unsigned long copy_to_user(void __user *to,
2259 - const void *from, unsigned long n)
2260 -{
2261 - unsigned long over;
2262 -
2263 - if (access_ok(VERIFY_WRITE, to, n))
2264 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2265 - if ((unsigned long)to < TASK_SIZE) {
2266 - over = (unsigned long)to + n - TASK_SIZE;
2267 - return __copy_tofrom_user(to, (__force void __user *)from,
2268 - n - over) + over;
2269 - }
2270 - return n;
2271 -}
2272 -
2273 -#else /* __powerpc64__ */
2274 -
2275 -#define __copy_in_user(to, from, size) \
2276 - __copy_tofrom_user((to), (from), (size))
2277 -
2278 -extern unsigned long copy_from_user(void *to, const void __user *from,
2279 - unsigned long n);
2280 -extern unsigned long copy_to_user(void __user *to, const void *from,
2281 - unsigned long n);
2282 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2283 - unsigned long n);
2284 -
2285 -#endif /* __powerpc64__ */
2286 -
2287 static inline unsigned long __copy_from_user_inatomic(void *to,
2288 const void __user *from, unsigned long n)
2289 {
2290 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2291 if (ret == 0)
2292 return 0;
2293 }
2294 +
2295 + if (!__builtin_constant_p(n))
2296 + check_object_size(to, n, false);
2297 +
2298 return __copy_tofrom_user((__force void __user *)to, from, n);
2299 }
2300
2301 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2302 if (ret == 0)
2303 return 0;
2304 }
2305 +
2306 + if (!__builtin_constant_p(n))
2307 + check_object_size(from, n, true);
2308 +
2309 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2310 }
2311
2312 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2313 return __copy_to_user_inatomic(to, from, size);
2314 }
2315
2316 +#ifndef __powerpc64__
2317 +
2318 +static inline unsigned long __must_check copy_from_user(void *to,
2319 + const void __user *from, unsigned long n)
2320 +{
2321 + unsigned long over;
2322 +
2323 + if ((long)n < 0)
2324 + return n;
2325 +
2326 + if (access_ok(VERIFY_READ, from, n)) {
2327 + if (!__builtin_constant_p(n))
2328 + check_object_size(to, n, false);
2329 + return __copy_tofrom_user((__force void __user *)to, from, n);
2330 + }
2331 + if ((unsigned long)from < TASK_SIZE) {
2332 + over = (unsigned long)from + n - TASK_SIZE;
2333 + if (!__builtin_constant_p(n - over))
2334 + check_object_size(to, n - over, false);
2335 + return __copy_tofrom_user((__force void __user *)to, from,
2336 + n - over) + over;
2337 + }
2338 + return n;
2339 +}
2340 +
2341 +static inline unsigned long __must_check copy_to_user(void __user *to,
2342 + const void *from, unsigned long n)
2343 +{
2344 + unsigned long over;
2345 +
2346 + if ((long)n < 0)
2347 + return n;
2348 +
2349 + if (access_ok(VERIFY_WRITE, to, n)) {
2350 + if (!__builtin_constant_p(n))
2351 + check_object_size(from, n, true);
2352 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2353 + }
2354 + if ((unsigned long)to < TASK_SIZE) {
2355 + over = (unsigned long)to + n - TASK_SIZE;
2356 + if (!__builtin_constant_p(n))
2357 + check_object_size(from, n - over, true);
2358 + return __copy_tofrom_user(to, (__force void __user *)from,
2359 + n - over) + over;
2360 + }
2361 + return n;
2362 +}
2363 +
2364 +#else /* __powerpc64__ */
2365 +
2366 +#define __copy_in_user(to, from, size) \
2367 + __copy_tofrom_user((to), (from), (size))
2368 +
2369 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2370 +{
2371 + if ((long)n < 0 || n > INT_MAX)
2372 + return n;
2373 +
2374 + if (!__builtin_constant_p(n))
2375 + check_object_size(to, n, false);
2376 +
2377 + if (likely(access_ok(VERIFY_READ, from, n)))
2378 + n = __copy_from_user(to, from, n);
2379 + else
2380 + memset(to, 0, n);
2381 + return n;
2382 +}
2383 +
2384 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2385 +{
2386 + if ((long)n < 0 || n > INT_MAX)
2387 + return n;
2388 +
2389 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2390 + if (!__builtin_constant_p(n))
2391 + check_object_size(from, n, true);
2392 + n = __copy_to_user(to, from, n);
2393 + }
2394 + return n;
2395 +}
2396 +
2397 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2398 + unsigned long n);
2399 +
2400 +#endif /* __powerpc64__ */
2401 +
2402 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2403
2404 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2405 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
2406 index 429983c..7af363b 100644
2407 --- a/arch/powerpc/kernel/exceptions-64e.S
2408 +++ b/arch/powerpc/kernel/exceptions-64e.S
2409 @@ -587,6 +587,7 @@ storage_fault_common:
2410 std r14,_DAR(r1)
2411 std r15,_DSISR(r1)
2412 addi r3,r1,STACK_FRAME_OVERHEAD
2413 + bl .save_nvgprs
2414 mr r4,r14
2415 mr r5,r15
2416 ld r14,PACA_EXGEN+EX_R14(r13)
2417 @@ -596,8 +597,7 @@ storage_fault_common:
2418 cmpdi r3,0
2419 bne- 1f
2420 b .ret_from_except_lite
2421 -1: bl .save_nvgprs
2422 - mr r5,r3
2423 +1: mr r5,r3
2424 addi r3,r1,STACK_FRAME_OVERHEAD
2425 ld r4,_DAR(r1)
2426 bl .bad_page_fault
2427 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
2428 index 41b02c7..05e76fb 100644
2429 --- a/arch/powerpc/kernel/exceptions-64s.S
2430 +++ b/arch/powerpc/kernel/exceptions-64s.S
2431 @@ -1014,10 +1014,10 @@ handle_page_fault:
2432 11: ld r4,_DAR(r1)
2433 ld r5,_DSISR(r1)
2434 addi r3,r1,STACK_FRAME_OVERHEAD
2435 + bl .save_nvgprs
2436 bl .do_page_fault
2437 cmpdi r3,0
2438 beq+ 13f
2439 - bl .save_nvgprs
2440 mr r5,r3
2441 addi r3,r1,STACK_FRAME_OVERHEAD
2442 lwz r4,_DAR(r1)
2443 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
2444 index 0b6d796..d760ddb 100644
2445 --- a/arch/powerpc/kernel/module_32.c
2446 +++ b/arch/powerpc/kernel/module_32.c
2447 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
2448 me->arch.core_plt_section = i;
2449 }
2450 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2451 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2452 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2453 return -ENOEXEC;
2454 }
2455
2456 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
2457
2458 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2459 /* Init, or core PLT? */
2460 - if (location >= mod->module_core
2461 - && location < mod->module_core + mod->core_size)
2462 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2463 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2464 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2465 - else
2466 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2467 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2468 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2469 + else {
2470 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2471 + return ~0UL;
2472 + }
2473
2474 /* Find this entry, or if that fails, the next avail. entry */
2475 while (entry->jump[0]) {
2476 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
2477 index 8f53954..a704ad6 100644
2478 --- a/arch/powerpc/kernel/process.c
2479 +++ b/arch/powerpc/kernel/process.c
2480 @@ -682,8 +682,8 @@ void show_regs(struct pt_regs * regs)
2481 * Lookup NIP late so we have the best change of getting the
2482 * above info out without failing
2483 */
2484 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2485 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2486 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2487 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2488 #endif
2489 show_stack(current, (unsigned long *) regs->gpr[1]);
2490 if (!user_mode(regs))
2491 @@ -1187,10 +1187,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2492 newsp = stack[0];
2493 ip = stack[STACK_FRAME_LR_SAVE];
2494 if (!firstframe || ip != lr) {
2495 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2496 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2497 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2498 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2499 - printk(" (%pS)",
2500 + printk(" (%pA)",
2501 (void *)current->ret_stack[curr_frame].ret);
2502 curr_frame--;
2503 }
2504 @@ -1210,7 +1210,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
2505 struct pt_regs *regs = (struct pt_regs *)
2506 (sp + STACK_FRAME_OVERHEAD);
2507 lr = regs->link;
2508 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2509 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2510 regs->trap, (void *)regs->nip, (void *)lr);
2511 firstframe = 1;
2512 }
2513 @@ -1285,58 +1285,3 @@ void thread_info_cache_init(void)
2514 }
2515
2516 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2517 -
2518 -unsigned long arch_align_stack(unsigned long sp)
2519 -{
2520 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2521 - sp -= get_random_int() & ~PAGE_MASK;
2522 - return sp & ~0xf;
2523 -}
2524 -
2525 -static inline unsigned long brk_rnd(void)
2526 -{
2527 - unsigned long rnd = 0;
2528 -
2529 - /* 8MB for 32bit, 1GB for 64bit */
2530 - if (is_32bit_task())
2531 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2532 - else
2533 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2534 -
2535 - return rnd << PAGE_SHIFT;
2536 -}
2537 -
2538 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2539 -{
2540 - unsigned long base = mm->brk;
2541 - unsigned long ret;
2542 -
2543 -#ifdef CONFIG_PPC_STD_MMU_64
2544 - /*
2545 - * If we are using 1TB segments and we are allowed to randomise
2546 - * the heap, we can put it above 1TB so it is backed by a 1TB
2547 - * segment. Otherwise the heap will be in the bottom 1TB
2548 - * which always uses 256MB segments and this may result in a
2549 - * performance penalty.
2550 - */
2551 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2552 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2553 -#endif
2554 -
2555 - ret = PAGE_ALIGN(base + brk_rnd());
2556 -
2557 - if (ret < mm->brk)
2558 - return mm->brk;
2559 -
2560 - return ret;
2561 -}
2562 -
2563 -unsigned long randomize_et_dyn(unsigned long base)
2564 -{
2565 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2566 -
2567 - if (ret < base)
2568 - return base;
2569 -
2570 - return ret;
2571 -}
2572 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
2573 index 78b76dc..7f232ef 100644
2574 --- a/arch/powerpc/kernel/signal_32.c
2575 +++ b/arch/powerpc/kernel/signal_32.c
2576 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
2577 /* Save user registers on the stack */
2578 frame = &rt_sf->uc.uc_mcontext;
2579 addr = frame;
2580 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2581 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2582 if (save_user_regs(regs, frame, 0, 1))
2583 goto badframe;
2584 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2585 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
2586 index e91c736..742ec06 100644
2587 --- a/arch/powerpc/kernel/signal_64.c
2588 +++ b/arch/powerpc/kernel/signal_64.c
2589 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
2590 current->thread.fpscr.val = 0;
2591
2592 /* Set up to return from userspace. */
2593 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2594 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2595 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2596 } else {
2597 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2598 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
2599 index f19d977..8ac286e 100644
2600 --- a/arch/powerpc/kernel/traps.c
2601 +++ b/arch/powerpc/kernel/traps.c
2602 @@ -98,6 +98,8 @@ static void pmac_backlight_unblank(void)
2603 static inline void pmac_backlight_unblank(void) { }
2604 #endif
2605
2606 +extern void gr_handle_kernel_exploit(void);
2607 +
2608 int die(const char *str, struct pt_regs *regs, long err)
2609 {
2610 static struct {
2611 @@ -171,6 +173,8 @@ int die(const char *str, struct pt_regs *regs, long err)
2612 if (panic_on_oops)
2613 panic("Fatal exception");
2614
2615 + gr_handle_kernel_exploit();
2616 +
2617 oops_exit();
2618 do_exit(err);
2619
2620 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
2621 index 142ab10..236e61a 100644
2622 --- a/arch/powerpc/kernel/vdso.c
2623 +++ b/arch/powerpc/kernel/vdso.c
2624 @@ -36,6 +36,7 @@
2625 #include <asm/firmware.h>
2626 #include <asm/vdso.h>
2627 #include <asm/vdso_datapage.h>
2628 +#include <asm/mman.h>
2629
2630 #include "setup.h"
2631
2632 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2633 vdso_base = VDSO32_MBASE;
2634 #endif
2635
2636 - current->mm->context.vdso_base = 0;
2637 + current->mm->context.vdso_base = ~0UL;
2638
2639 /* vDSO has a problem and was disabled, just don't "enable" it for the
2640 * process
2641 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2642 vdso_base = get_unmapped_area(NULL, vdso_base,
2643 (vdso_pages << PAGE_SHIFT) +
2644 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2645 - 0, 0);
2646 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2647 if (IS_ERR_VALUE(vdso_base)) {
2648 rc = vdso_base;
2649 goto fail_mmapsem;
2650 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
2651 index 5eea6f3..5d10396 100644
2652 --- a/arch/powerpc/lib/usercopy_64.c
2653 +++ b/arch/powerpc/lib/usercopy_64.c
2654 @@ -9,22 +9,6 @@
2655 #include <linux/module.h>
2656 #include <asm/uaccess.h>
2657
2658 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2659 -{
2660 - if (likely(access_ok(VERIFY_READ, from, n)))
2661 - n = __copy_from_user(to, from, n);
2662 - else
2663 - memset(to, 0, n);
2664 - return n;
2665 -}
2666 -
2667 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2668 -{
2669 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2670 - n = __copy_to_user(to, from, n);
2671 - return n;
2672 -}
2673 -
2674 unsigned long copy_in_user(void __user *to, const void __user *from,
2675 unsigned long n)
2676 {
2677 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
2678 return n;
2679 }
2680
2681 -EXPORT_SYMBOL(copy_from_user);
2682 -EXPORT_SYMBOL(copy_to_user);
2683 EXPORT_SYMBOL(copy_in_user);
2684
2685 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
2686 index 5efe8c9..db9ceef 100644
2687 --- a/arch/powerpc/mm/fault.c
2688 +++ b/arch/powerpc/mm/fault.c
2689 @@ -32,6 +32,10 @@
2690 #include <linux/perf_event.h>
2691 #include <linux/magic.h>
2692 #include <linux/ratelimit.h>
2693 +#include <linux/slab.h>
2694 +#include <linux/pagemap.h>
2695 +#include <linux/compiler.h>
2696 +#include <linux/unistd.h>
2697
2698 #include <asm/firmware.h>
2699 #include <asm/page.h>
2700 @@ -43,6 +47,7 @@
2701 #include <asm/tlbflush.h>
2702 #include <asm/siginfo.h>
2703 #include <mm/mmu_decl.h>
2704 +#include <asm/ptrace.h>
2705
2706 #ifdef CONFIG_KPROBES
2707 static inline int notify_page_fault(struct pt_regs *regs)
2708 @@ -66,6 +71,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
2709 }
2710 #endif
2711
2712 +#ifdef CONFIG_PAX_PAGEEXEC
2713 +/*
2714 + * PaX: decide what to do with offenders (regs->nip = fault address)
2715 + *
2716 + * returns 1 when task should be killed
2717 + */
2718 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2719 +{
2720 + return 1;
2721 +}
2722 +
2723 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2724 +{
2725 + unsigned long i;
2726 +
2727 + printk(KERN_ERR "PAX: bytes at PC: ");
2728 + for (i = 0; i < 5; i++) {
2729 + unsigned int c;
2730 + if (get_user(c, (unsigned int __user *)pc+i))
2731 + printk(KERN_CONT "???????? ");
2732 + else
2733 + printk(KERN_CONT "%08x ", c);
2734 + }
2735 + printk("\n");
2736 +}
2737 +#endif
2738 +
2739 /*
2740 * Check whether the instruction at regs->nip is a store using
2741 * an update addressing form which will update r1.
2742 @@ -136,7 +168,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
2743 * indicate errors in DSISR but can validly be set in SRR1.
2744 */
2745 if (trap == 0x400)
2746 - error_code &= 0x48200000;
2747 + error_code &= 0x58200000;
2748 else
2749 is_write = error_code & DSISR_ISSTORE;
2750 #else
2751 @@ -259,7 +291,7 @@ good_area:
2752 * "undefined". Of those that can be set, this is the only
2753 * one which seems bad.
2754 */
2755 - if (error_code & 0x10000000)
2756 + if (error_code & DSISR_GUARDED)
2757 /* Guarded storage error. */
2758 goto bad_area;
2759 #endif /* CONFIG_8xx */
2760 @@ -274,7 +306,7 @@ good_area:
2761 * processors use the same I/D cache coherency mechanism
2762 * as embedded.
2763 */
2764 - if (error_code & DSISR_PROTFAULT)
2765 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2766 goto bad_area;
2767 #endif /* CONFIG_PPC_STD_MMU */
2768
2769 @@ -343,6 +375,23 @@ bad_area:
2770 bad_area_nosemaphore:
2771 /* User mode accesses cause a SIGSEGV */
2772 if (user_mode(regs)) {
2773 +
2774 +#ifdef CONFIG_PAX_PAGEEXEC
2775 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2776 +#ifdef CONFIG_PPC_STD_MMU
2777 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2778 +#else
2779 + if (is_exec && regs->nip == address) {
2780 +#endif
2781 + switch (pax_handle_fetch_fault(regs)) {
2782 + }
2783 +
2784 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2785 + do_group_exit(SIGKILL);
2786 + }
2787 + }
2788 +#endif
2789 +
2790 _exception(SIGSEGV, regs, code, address);
2791 return 0;
2792 }
2793 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
2794 index 5a783d8..c23e14b 100644
2795 --- a/arch/powerpc/mm/mmap_64.c
2796 +++ b/arch/powerpc/mm/mmap_64.c
2797 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2798 */
2799 if (mmap_is_legacy()) {
2800 mm->mmap_base = TASK_UNMAPPED_BASE;
2801 +
2802 +#ifdef CONFIG_PAX_RANDMMAP
2803 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2804 + mm->mmap_base += mm->delta_mmap;
2805 +#endif
2806 +
2807 mm->get_unmapped_area = arch_get_unmapped_area;
2808 mm->unmap_area = arch_unmap_area;
2809 } else {
2810 mm->mmap_base = mmap_base();
2811 +
2812 +#ifdef CONFIG_PAX_RANDMMAP
2813 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2814 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2815 +#endif
2816 +
2817 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2818 mm->unmap_area = arch_unmap_area_topdown;
2819 }
2820 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
2821 index ba51948..23009d9 100644
2822 --- a/arch/powerpc/mm/slice.c
2823 +++ b/arch/powerpc/mm/slice.c
2824 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
2825 if ((mm->task_size - len) < addr)
2826 return 0;
2827 vma = find_vma(mm, addr);
2828 - return (!vma || (addr + len) <= vma->vm_start);
2829 + return check_heap_stack_gap(vma, addr, len);
2830 }
2831
2832 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2833 @@ -256,7 +256,7 @@ full_search:
2834 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2835 continue;
2836 }
2837 - if (!vma || addr + len <= vma->vm_start) {
2838 + if (check_heap_stack_gap(vma, addr, len)) {
2839 /*
2840 * Remember the place where we stopped the search:
2841 */
2842 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2843 }
2844 }
2845
2846 - addr = mm->mmap_base;
2847 - while (addr > len) {
2848 + if (mm->mmap_base < len)
2849 + addr = -ENOMEM;
2850 + else
2851 + addr = mm->mmap_base - len;
2852 +
2853 + while (!IS_ERR_VALUE(addr)) {
2854 /* Go down by chunk size */
2855 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2856 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2857
2858 /* Check for hit with different page size */
2859 mask = slice_range_to_mask(addr, len);
2860 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2861 * return with success:
2862 */
2863 vma = find_vma(mm, addr);
2864 - if (!vma || (addr + len) <= vma->vm_start) {
2865 + if (check_heap_stack_gap(vma, addr, len)) {
2866 /* remember the address as a hint for next time */
2867 if (use_cache)
2868 mm->free_area_cache = addr;
2869 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
2870 mm->cached_hole_size = vma->vm_start - addr;
2871
2872 /* try just below the current vma->vm_start */
2873 - addr = vma->vm_start;
2874 + addr = skip_heap_stack_gap(vma, len);
2875 }
2876
2877 /*
2878 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
2879 if (fixed && addr > (mm->task_size - len))
2880 return -EINVAL;
2881
2882 +#ifdef CONFIG_PAX_RANDMMAP
2883 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2884 + addr = 0;
2885 +#endif
2886 +
2887 /* If hint, make sure it matches our alignment restrictions */
2888 if (!fixed && addr) {
2889 addr = _ALIGN_UP(addr, 1ul << pshift);
2890 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
2891 index 547f1a6..3fff354 100644
2892 --- a/arch/s390/include/asm/elf.h
2893 +++ b/arch/s390/include/asm/elf.h
2894 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2895 the loader. We need to make sure that it is out of the way of the program
2896 that it will "exec", and that there is sufficient room for the brk. */
2897
2898 -extern unsigned long randomize_et_dyn(unsigned long base);
2899 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2900 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2901 +
2902 +#ifdef CONFIG_PAX_ASLR
2903 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2904 +
2905 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2906 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2907 +#endif
2908
2909 /* This yields a mask that user programs can use to figure out what
2910 instruction set this CPU supports. */
2911 @@ -211,7 +217,4 @@ struct linux_binprm;
2912 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2913 int arch_setup_additional_pages(struct linux_binprm *, int);
2914
2915 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2916 -#define arch_randomize_brk arch_randomize_brk
2917 -
2918 #endif
2919 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
2920 index 6582f69..b69906f 100644
2921 --- a/arch/s390/include/asm/system.h
2922 +++ b/arch/s390/include/asm/system.h
2923 @@ -256,7 +256,7 @@ extern void (*_machine_restart)(char *command);
2924 extern void (*_machine_halt)(void);
2925 extern void (*_machine_power_off)(void);
2926
2927 -extern unsigned long arch_align_stack(unsigned long sp);
2928 +#define arch_align_stack(x) ((x) & ~0xfUL)
2929
2930 static inline int tprot(unsigned long addr)
2931 {
2932 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
2933 index 2b23885..e136e31 100644
2934 --- a/arch/s390/include/asm/uaccess.h
2935 +++ b/arch/s390/include/asm/uaccess.h
2936 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
2937 copy_to_user(void __user *to, const void *from, unsigned long n)
2938 {
2939 might_fault();
2940 +
2941 + if ((long)n < 0)
2942 + return n;
2943 +
2944 if (access_ok(VERIFY_WRITE, to, n))
2945 n = __copy_to_user(to, from, n);
2946 return n;
2947 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
2948 static inline unsigned long __must_check
2949 __copy_from_user(void *to, const void __user *from, unsigned long n)
2950 {
2951 + if ((long)n < 0)
2952 + return n;
2953 +
2954 if (__builtin_constant_p(n) && (n <= 256))
2955 return uaccess.copy_from_user_small(n, from, to);
2956 else
2957 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
2958 unsigned int sz = __compiletime_object_size(to);
2959
2960 might_fault();
2961 +
2962 + if ((long)n < 0)
2963 + return n;
2964 +
2965 if (unlikely(sz != -1 && sz < n)) {
2966 copy_from_user_overflow();
2967 return n;
2968 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
2969 index dfcb343..eda788a 100644
2970 --- a/arch/s390/kernel/module.c
2971 +++ b/arch/s390/kernel/module.c
2972 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2973
2974 /* Increase core size by size of got & plt and set start
2975 offsets for got and plt. */
2976 - me->core_size = ALIGN(me->core_size, 4);
2977 - me->arch.got_offset = me->core_size;
2978 - me->core_size += me->arch.got_size;
2979 - me->arch.plt_offset = me->core_size;
2980 - me->core_size += me->arch.plt_size;
2981 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2982 + me->arch.got_offset = me->core_size_rw;
2983 + me->core_size_rw += me->arch.got_size;
2984 + me->arch.plt_offset = me->core_size_rx;
2985 + me->core_size_rx += me->arch.plt_size;
2986 return 0;
2987 }
2988
2989 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
2990 if (info->got_initialized == 0) {
2991 Elf_Addr *gotent;
2992
2993 - gotent = me->module_core + me->arch.got_offset +
2994 + gotent = me->module_core_rw + me->arch.got_offset +
2995 info->got_offset;
2996 *gotent = val;
2997 info->got_initialized = 1;
2998 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
2999 else if (r_type == R_390_GOTENT ||
3000 r_type == R_390_GOTPLTENT)
3001 *(unsigned int *) loc =
3002 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3003 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3004 else if (r_type == R_390_GOT64 ||
3005 r_type == R_390_GOTPLT64)
3006 *(unsigned long *) loc = val;
3007 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3008 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3009 if (info->plt_initialized == 0) {
3010 unsigned int *ip;
3011 - ip = me->module_core + me->arch.plt_offset +
3012 + ip = me->module_core_rx + me->arch.plt_offset +
3013 info->plt_offset;
3014 #ifndef CONFIG_64BIT
3015 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3016 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3017 val - loc + 0xffffUL < 0x1ffffeUL) ||
3018 (r_type == R_390_PLT32DBL &&
3019 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3020 - val = (Elf_Addr) me->module_core +
3021 + val = (Elf_Addr) me->module_core_rx +
3022 me->arch.plt_offset +
3023 info->plt_offset;
3024 val += rela->r_addend - loc;
3025 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3026 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3027 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3028 val = val + rela->r_addend -
3029 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3030 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3031 if (r_type == R_390_GOTOFF16)
3032 *(unsigned short *) loc = val;
3033 else if (r_type == R_390_GOTOFF32)
3034 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3035 break;
3036 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3037 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3038 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3039 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3040 rela->r_addend - loc;
3041 if (r_type == R_390_GOTPC)
3042 *(unsigned int *) loc = val;
3043 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
3044 index 541a750..8739853 100644
3045 --- a/arch/s390/kernel/process.c
3046 +++ b/arch/s390/kernel/process.c
3047 @@ -319,39 +319,3 @@ unsigned long get_wchan(struct task_struct *p)
3048 }
3049 return 0;
3050 }
3051 -
3052 -unsigned long arch_align_stack(unsigned long sp)
3053 -{
3054 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3055 - sp -= get_random_int() & ~PAGE_MASK;
3056 - return sp & ~0xf;
3057 -}
3058 -
3059 -static inline unsigned long brk_rnd(void)
3060 -{
3061 - /* 8MB for 32bit, 1GB for 64bit */
3062 - if (is_32bit_task())
3063 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
3064 - else
3065 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
3066 -}
3067 -
3068 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3069 -{
3070 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
3071 -
3072 - if (ret < mm->brk)
3073 - return mm->brk;
3074 - return ret;
3075 -}
3076 -
3077 -unsigned long randomize_et_dyn(unsigned long base)
3078 -{
3079 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3080 -
3081 - if (!(current->flags & PF_RANDOMIZE))
3082 - return base;
3083 - if (ret < base)
3084 - return base;
3085 - return ret;
3086 -}
3087 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3088 index 7b371c3..ad06cf1 100644
3089 --- a/arch/s390/kernel/setup.c
3090 +++ b/arch/s390/kernel/setup.c
3091 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *p)
3092 }
3093 early_param("mem", early_parse_mem);
3094
3095 -unsigned int user_mode = HOME_SPACE_MODE;
3096 +unsigned int user_mode = SECONDARY_SPACE_MODE;
3097 EXPORT_SYMBOL_GPL(user_mode);
3098
3099 static int set_amode_and_uaccess(unsigned long user_amode,
3100 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
3101 index c9a9f7f..60d0315 100644
3102 --- a/arch/s390/mm/mmap.c
3103 +++ b/arch/s390/mm/mmap.c
3104 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3105 */
3106 if (mmap_is_legacy()) {
3107 mm->mmap_base = TASK_UNMAPPED_BASE;
3108 +
3109 +#ifdef CONFIG_PAX_RANDMMAP
3110 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3111 + mm->mmap_base += mm->delta_mmap;
3112 +#endif
3113 +
3114 mm->get_unmapped_area = arch_get_unmapped_area;
3115 mm->unmap_area = arch_unmap_area;
3116 } else {
3117 mm->mmap_base = mmap_base();
3118 +
3119 +#ifdef CONFIG_PAX_RANDMMAP
3120 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3121 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3122 +#endif
3123 +
3124 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3125 mm->unmap_area = arch_unmap_area_topdown;
3126 }
3127 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3128 */
3129 if (mmap_is_legacy()) {
3130 mm->mmap_base = TASK_UNMAPPED_BASE;
3131 +
3132 +#ifdef CONFIG_PAX_RANDMMAP
3133 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3134 + mm->mmap_base += mm->delta_mmap;
3135 +#endif
3136 +
3137 mm->get_unmapped_area = s390_get_unmapped_area;
3138 mm->unmap_area = arch_unmap_area;
3139 } else {
3140 mm->mmap_base = mmap_base();
3141 +
3142 +#ifdef CONFIG_PAX_RANDMMAP
3143 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3144 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3145 +#endif
3146 +
3147 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3148 mm->unmap_area = arch_unmap_area_topdown;
3149 }
3150 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
3151 index 589d5c7..669e274 100644
3152 --- a/arch/score/include/asm/system.h
3153 +++ b/arch/score/include/asm/system.h
3154 @@ -17,7 +17,7 @@ do { \
3155 #define finish_arch_switch(prev) do {} while (0)
3156
3157 typedef void (*vi_handler_t)(void);
3158 -extern unsigned long arch_align_stack(unsigned long sp);
3159 +#define arch_align_stack(x) (x)
3160
3161 #define mb() barrier()
3162 #define rmb() barrier()
3163 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
3164 index 25d0803..d6c8e36 100644
3165 --- a/arch/score/kernel/process.c
3166 +++ b/arch/score/kernel/process.c
3167 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
3168
3169 return task_pt_regs(task)->cp0_epc;
3170 }
3171 -
3172 -unsigned long arch_align_stack(unsigned long sp)
3173 -{
3174 - return sp;
3175 -}
3176 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
3177 index afeb710..d1d1289 100644
3178 --- a/arch/sh/mm/mmap.c
3179 +++ b/arch/sh/mm/mmap.c
3180 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3181 addr = PAGE_ALIGN(addr);
3182
3183 vma = find_vma(mm, addr);
3184 - if (TASK_SIZE - len >= addr &&
3185 - (!vma || addr + len <= vma->vm_start))
3186 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3187 return addr;
3188 }
3189
3190 @@ -106,7 +105,7 @@ full_search:
3191 }
3192 return -ENOMEM;
3193 }
3194 - if (likely(!vma || addr + len <= vma->vm_start)) {
3195 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3196 /*
3197 * Remember the place where we stopped the search:
3198 */
3199 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3200 addr = PAGE_ALIGN(addr);
3201
3202 vma = find_vma(mm, addr);
3203 - if (TASK_SIZE - len >= addr &&
3204 - (!vma || addr + len <= vma->vm_start))
3205 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3206 return addr;
3207 }
3208
3209 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3210 /* make sure it can fit in the remaining address space */
3211 if (likely(addr > len)) {
3212 vma = find_vma(mm, addr-len);
3213 - if (!vma || addr <= vma->vm_start) {
3214 + if (check_heap_stack_gap(vma, addr - len, len)) {
3215 /* remember the address as a hint for next time */
3216 return (mm->free_area_cache = addr-len);
3217 }
3218 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3219 if (unlikely(mm->mmap_base < len))
3220 goto bottomup;
3221
3222 - addr = mm->mmap_base-len;
3223 - if (do_colour_align)
3224 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3225 + addr = mm->mmap_base - len;
3226
3227 do {
3228 + if (do_colour_align)
3229 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3230 /*
3231 * Lookup failure means no vma is above this address,
3232 * else if new region fits below vma->vm_start,
3233 * return with success:
3234 */
3235 vma = find_vma(mm, addr);
3236 - if (likely(!vma || addr+len <= vma->vm_start)) {
3237 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3238 /* remember the address as a hint for next time */
3239 return (mm->free_area_cache = addr);
3240 }
3241 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3242 mm->cached_hole_size = vma->vm_start - addr;
3243
3244 /* try just below the current vma->vm_start */
3245 - addr = vma->vm_start-len;
3246 - if (do_colour_align)
3247 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3248 - } while (likely(len < vma->vm_start));
3249 + addr = skip_heap_stack_gap(vma, len);
3250 + } while (!IS_ERR_VALUE(addr));
3251
3252 bottomup:
3253 /*
3254 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
3255 index ad1fb5d..fc5315b 100644
3256 --- a/arch/sparc/Makefile
3257 +++ b/arch/sparc/Makefile
3258 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
3259 # Export what is needed by arch/sparc/boot/Makefile
3260 export VMLINUX_INIT VMLINUX_MAIN
3261 VMLINUX_INIT := $(head-y) $(init-y)
3262 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
3263 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
3264 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
3265 VMLINUX_MAIN += $(drivers-y) $(net-y)
3266
3267 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
3268 index 9f421df..b81fc12 100644
3269 --- a/arch/sparc/include/asm/atomic_64.h
3270 +++ b/arch/sparc/include/asm/atomic_64.h
3271 @@ -14,18 +14,40 @@
3272 #define ATOMIC64_INIT(i) { (i) }
3273
3274 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3275 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3276 +{
3277 + return v->counter;
3278 +}
3279 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3280 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3281 +{
3282 + return v->counter;
3283 +}
3284
3285 #define atomic_set(v, i) (((v)->counter) = i)
3286 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3287 +{
3288 + v->counter = i;
3289 +}
3290 #define atomic64_set(v, i) (((v)->counter) = i)
3291 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3292 +{
3293 + v->counter = i;
3294 +}
3295
3296 extern void atomic_add(int, atomic_t *);
3297 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3298 extern void atomic64_add(long, atomic64_t *);
3299 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3300 extern void atomic_sub(int, atomic_t *);
3301 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3302 extern void atomic64_sub(long, atomic64_t *);
3303 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3304
3305 extern int atomic_add_ret(int, atomic_t *);
3306 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3307 extern long atomic64_add_ret(long, atomic64_t *);
3308 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3309 extern int atomic_sub_ret(int, atomic_t *);
3310 extern long atomic64_sub_ret(long, atomic64_t *);
3311
3312 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3313 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3314
3315 #define atomic_inc_return(v) atomic_add_ret(1, v)
3316 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3317 +{
3318 + return atomic_add_ret_unchecked(1, v);
3319 +}
3320 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3321 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3322 +{
3323 + return atomic64_add_ret_unchecked(1, v);
3324 +}
3325
3326 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3327 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3328
3329 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3330 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3331 +{
3332 + return atomic_add_ret_unchecked(i, v);
3333 +}
3334 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3335 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3336 +{
3337 + return atomic64_add_ret_unchecked(i, v);
3338 +}
3339
3340 /*
3341 * atomic_inc_and_test - increment and test
3342 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3343 * other cases.
3344 */
3345 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3346 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
3347 +{
3348 + return atomic_inc_return_unchecked(v) == 0;
3349 +}
3350 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3351
3352 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3353 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
3354 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3355
3356 #define atomic_inc(v) atomic_add(1, v)
3357 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3358 +{
3359 + atomic_add_unchecked(1, v);
3360 +}
3361 #define atomic64_inc(v) atomic64_add(1, v)
3362 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3363 +{
3364 + atomic64_add_unchecked(1, v);
3365 +}
3366
3367 #define atomic_dec(v) atomic_sub(1, v)
3368 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3369 +{
3370 + atomic_sub_unchecked(1, v);
3371 +}
3372 #define atomic64_dec(v) atomic64_sub(1, v)
3373 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3374 +{
3375 + atomic64_sub_unchecked(1, v);
3376 +}
3377
3378 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3379 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3380
3381 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3382 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
3383 +{
3384 + return cmpxchg(&v->counter, old, new);
3385 +}
3386 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3387 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3388 +{
3389 + return xchg(&v->counter, new);
3390 +}
3391
3392 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3393 {
3394 - int c, old;
3395 + int c, old, new;
3396 c = atomic_read(v);
3397 for (;;) {
3398 - if (unlikely(c == (u)))
3399 + if (unlikely(c == u))
3400 break;
3401 - old = atomic_cmpxchg((v), c, c + (a));
3402 +
3403 + asm volatile("addcc %2, %0, %0\n"
3404 +
3405 +#ifdef CONFIG_PAX_REFCOUNT
3406 + "tvs %%icc, 6\n"
3407 +#endif
3408 +
3409 + : "=r" (new)
3410 + : "0" (c), "ir" (a)
3411 + : "cc");
3412 +
3413 + old = atomic_cmpxchg(v, c, new);
3414 if (likely(old == c))
3415 break;
3416 c = old;
3417 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
3418 #define atomic64_cmpxchg(v, o, n) \
3419 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3420 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3421 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3422 +{
3423 + return xchg(&v->counter, new);
3424 +}
3425
3426 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3427 {
3428 - long c, old;
3429 + long c, old, new;
3430 c = atomic64_read(v);
3431 for (;;) {
3432 - if (unlikely(c == (u)))
3433 + if (unlikely(c == u))
3434 break;
3435 - old = atomic64_cmpxchg((v), c, c + (a));
3436 +
3437 + asm volatile("addcc %2, %0, %0\n"
3438 +
3439 +#ifdef CONFIG_PAX_REFCOUNT
3440 + "tvs %%xcc, 6\n"
3441 +#endif
3442 +
3443 + : "=r" (new)
3444 + : "0" (c), "ir" (a)
3445 + : "cc");
3446 +
3447 + old = atomic64_cmpxchg(v, c, new);
3448 if (likely(old == c))
3449 break;
3450 c = old;
3451 }
3452 - return c != (u);
3453 + return c != u;
3454 }
3455
3456 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3457 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
3458 index 69358b5..17b4745 100644
3459 --- a/arch/sparc/include/asm/cache.h
3460 +++ b/arch/sparc/include/asm/cache.h
3461 @@ -10,7 +10,7 @@
3462 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3463
3464 #define L1_CACHE_SHIFT 5
3465 -#define L1_CACHE_BYTES 32
3466 +#define L1_CACHE_BYTES 32UL
3467
3468 #ifdef CONFIG_SPARC32
3469 #define SMP_CACHE_BYTES_SHIFT 5
3470 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
3471 index 4269ca6..e3da77f 100644
3472 --- a/arch/sparc/include/asm/elf_32.h
3473 +++ b/arch/sparc/include/asm/elf_32.h
3474 @@ -114,6 +114,13 @@ typedef struct {
3475
3476 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3477
3478 +#ifdef CONFIG_PAX_ASLR
3479 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3480 +
3481 +#define PAX_DELTA_MMAP_LEN 16
3482 +#define PAX_DELTA_STACK_LEN 16
3483 +#endif
3484 +
3485 /* This yields a mask that user programs can use to figure out what
3486 instruction set this cpu supports. This can NOT be done in userspace
3487 on Sparc. */
3488 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
3489 index 7df8b7f..4946269 100644
3490 --- a/arch/sparc/include/asm/elf_64.h
3491 +++ b/arch/sparc/include/asm/elf_64.h
3492 @@ -180,6 +180,13 @@ typedef struct {
3493 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3494 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3495
3496 +#ifdef CONFIG_PAX_ASLR
3497 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3498 +
3499 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3500 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3501 +#endif
3502 +
3503 extern unsigned long sparc64_elf_hwcap;
3504 #define ELF_HWCAP sparc64_elf_hwcap
3505
3506 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
3507 index 5b31a8e..1d92567 100644
3508 --- a/arch/sparc/include/asm/pgtable_32.h
3509 +++ b/arch/sparc/include/asm/pgtable_32.h
3510 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3511 BTFIXUPDEF_INT(page_none)
3512 BTFIXUPDEF_INT(page_copy)
3513 BTFIXUPDEF_INT(page_readonly)
3514 +
3515 +#ifdef CONFIG_PAX_PAGEEXEC
3516 +BTFIXUPDEF_INT(page_shared_noexec)
3517 +BTFIXUPDEF_INT(page_copy_noexec)
3518 +BTFIXUPDEF_INT(page_readonly_noexec)
3519 +#endif
3520 +
3521 BTFIXUPDEF_INT(page_kernel)
3522
3523 #define PMD_SHIFT SUN4C_PMD_SHIFT
3524 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
3525 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3526 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3527
3528 +#ifdef CONFIG_PAX_PAGEEXEC
3529 +extern pgprot_t PAGE_SHARED_NOEXEC;
3530 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3531 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3532 +#else
3533 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3534 +# define PAGE_COPY_NOEXEC PAGE_COPY
3535 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3536 +#endif
3537 +
3538 extern unsigned long page_kernel;
3539
3540 #ifdef MODULE
3541 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
3542 index f6ae2b2..b03ffc7 100644
3543 --- a/arch/sparc/include/asm/pgtsrmmu.h
3544 +++ b/arch/sparc/include/asm/pgtsrmmu.h
3545 @@ -115,6 +115,13 @@
3546 SRMMU_EXEC | SRMMU_REF)
3547 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3548 SRMMU_EXEC | SRMMU_REF)
3549 +
3550 +#ifdef CONFIG_PAX_PAGEEXEC
3551 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3552 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3553 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3554 +#endif
3555 +
3556 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3557 SRMMU_DIRTY | SRMMU_REF)
3558
3559 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
3560 index 9689176..63c18ea 100644
3561 --- a/arch/sparc/include/asm/spinlock_64.h
3562 +++ b/arch/sparc/include/asm/spinlock_64.h
3563 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
3564
3565 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3566
3567 -static void inline arch_read_lock(arch_rwlock_t *lock)
3568 +static inline void arch_read_lock(arch_rwlock_t *lock)
3569 {
3570 unsigned long tmp1, tmp2;
3571
3572 __asm__ __volatile__ (
3573 "1: ldsw [%2], %0\n"
3574 " brlz,pn %0, 2f\n"
3575 -"4: add %0, 1, %1\n"
3576 +"4: addcc %0, 1, %1\n"
3577 +
3578 +#ifdef CONFIG_PAX_REFCOUNT
3579 +" tvs %%icc, 6\n"
3580 +#endif
3581 +
3582 " cas [%2], %0, %1\n"
3583 " cmp %0, %1\n"
3584 " bne,pn %%icc, 1b\n"
3585 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
3586 " .previous"
3587 : "=&r" (tmp1), "=&r" (tmp2)
3588 : "r" (lock)
3589 - : "memory");
3590 + : "memory", "cc");
3591 }
3592
3593 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3594 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3595 {
3596 int tmp1, tmp2;
3597
3598 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3599 "1: ldsw [%2], %0\n"
3600 " brlz,a,pn %0, 2f\n"
3601 " mov 0, %0\n"
3602 -" add %0, 1, %1\n"
3603 +" addcc %0, 1, %1\n"
3604 +
3605 +#ifdef CONFIG_PAX_REFCOUNT
3606 +" tvs %%icc, 6\n"
3607 +#endif
3608 +
3609 " cas [%2], %0, %1\n"
3610 " cmp %0, %1\n"
3611 " bne,pn %%icc, 1b\n"
3612 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
3613 return tmp1;
3614 }
3615
3616 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3617 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3618 {
3619 unsigned long tmp1, tmp2;
3620
3621 __asm__ __volatile__(
3622 "1: lduw [%2], %0\n"
3623 -" sub %0, 1, %1\n"
3624 +" subcc %0, 1, %1\n"
3625 +
3626 +#ifdef CONFIG_PAX_REFCOUNT
3627 +" tvs %%icc, 6\n"
3628 +#endif
3629 +
3630 " cas [%2], %0, %1\n"
3631 " cmp %0, %1\n"
3632 " bne,pn %%xcc, 1b\n"
3633 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
3634 : "memory");
3635 }
3636
3637 -static void inline arch_write_lock(arch_rwlock_t *lock)
3638 +static inline void arch_write_lock(arch_rwlock_t *lock)
3639 {
3640 unsigned long mask, tmp1, tmp2;
3641
3642 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
3643 : "memory");
3644 }
3645
3646 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3647 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3648 {
3649 __asm__ __volatile__(
3650 " stw %%g0, [%0]"
3651 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
3652 : "memory");
3653 }
3654
3655 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3656 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3657 {
3658 unsigned long mask, tmp1, tmp2, result;
3659
3660 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
3661 index fa57532..e1a4c53 100644
3662 --- a/arch/sparc/include/asm/thread_info_32.h
3663 +++ b/arch/sparc/include/asm/thread_info_32.h
3664 @@ -50,6 +50,8 @@ struct thread_info {
3665 unsigned long w_saved;
3666
3667 struct restart_block restart_block;
3668 +
3669 + unsigned long lowest_stack;
3670 };
3671
3672 /*
3673 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
3674 index 60d86be..952dea1 100644
3675 --- a/arch/sparc/include/asm/thread_info_64.h
3676 +++ b/arch/sparc/include/asm/thread_info_64.h
3677 @@ -63,6 +63,8 @@ struct thread_info {
3678 struct pt_regs *kern_una_regs;
3679 unsigned int kern_una_insn;
3680
3681 + unsigned long lowest_stack;
3682 +
3683 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3684 };
3685
3686 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
3687 index e88fbe5..96b0ce5 100644
3688 --- a/arch/sparc/include/asm/uaccess.h
3689 +++ b/arch/sparc/include/asm/uaccess.h
3690 @@ -1,5 +1,13 @@
3691 #ifndef ___ASM_SPARC_UACCESS_H
3692 #define ___ASM_SPARC_UACCESS_H
3693 +
3694 +#ifdef __KERNEL__
3695 +#ifndef __ASSEMBLY__
3696 +#include <linux/types.h>
3697 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3698 +#endif
3699 +#endif
3700 +
3701 #if defined(__sparc__) && defined(__arch64__)
3702 #include <asm/uaccess_64.h>
3703 #else
3704 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
3705 index 8303ac4..07f333d 100644
3706 --- a/arch/sparc/include/asm/uaccess_32.h
3707 +++ b/arch/sparc/include/asm/uaccess_32.h
3708 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
3709
3710 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3711 {
3712 - if (n && __access_ok((unsigned long) to, n))
3713 + if ((long)n < 0)
3714 + return n;
3715 +
3716 + if (n && __access_ok((unsigned long) to, n)) {
3717 + if (!__builtin_constant_p(n))
3718 + check_object_size(from, n, true);
3719 return __copy_user(to, (__force void __user *) from, n);
3720 - else
3721 + } else
3722 return n;
3723 }
3724
3725 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3726 {
3727 + if ((long)n < 0)
3728 + return n;
3729 +
3730 + if (!__builtin_constant_p(n))
3731 + check_object_size(from, n, true);
3732 +
3733 return __copy_user(to, (__force void __user *) from, n);
3734 }
3735
3736 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3737 {
3738 - if (n && __access_ok((unsigned long) from, n))
3739 + if ((long)n < 0)
3740 + return n;
3741 +
3742 + if (n && __access_ok((unsigned long) from, n)) {
3743 + if (!__builtin_constant_p(n))
3744 + check_object_size(to, n, false);
3745 return __copy_user((__force void __user *) to, from, n);
3746 - else
3747 + } else
3748 return n;
3749 }
3750
3751 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3752 {
3753 + if ((long)n < 0)
3754 + return n;
3755 +
3756 return __copy_user((__force void __user *) to, from, n);
3757 }
3758
3759 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
3760 index 3e1449f..5293a0e 100644
3761 --- a/arch/sparc/include/asm/uaccess_64.h
3762 +++ b/arch/sparc/include/asm/uaccess_64.h
3763 @@ -10,6 +10,7 @@
3764 #include <linux/compiler.h>
3765 #include <linux/string.h>
3766 #include <linux/thread_info.h>
3767 +#include <linux/kernel.h>
3768 #include <asm/asi.h>
3769 #include <asm/system.h>
3770 #include <asm/spitfire.h>
3771 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
3772 static inline unsigned long __must_check
3773 copy_from_user(void *to, const void __user *from, unsigned long size)
3774 {
3775 - unsigned long ret = ___copy_from_user(to, from, size);
3776 + unsigned long ret;
3777
3778 + if ((long)size < 0 || size > INT_MAX)
3779 + return size;
3780 +
3781 + if (!__builtin_constant_p(size))
3782 + check_object_size(to, size, false);
3783 +
3784 + ret = ___copy_from_user(to, from, size);
3785 if (unlikely(ret))
3786 ret = copy_from_user_fixup(to, from, size);
3787
3788 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
3789 static inline unsigned long __must_check
3790 copy_to_user(void __user *to, const void *from, unsigned long size)
3791 {
3792 - unsigned long ret = ___copy_to_user(to, from, size);
3793 + unsigned long ret;
3794 +
3795 + if ((long)size < 0 || size > INT_MAX)
3796 + return size;
3797 +
3798 + if (!__builtin_constant_p(size))
3799 + check_object_size(from, size, true);
3800
3801 + ret = ___copy_to_user(to, from, size);
3802 if (unlikely(ret))
3803 ret = copy_to_user_fixup(to, from, size);
3804 return ret;
3805 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
3806 index cb85458..e063f17 100644
3807 --- a/arch/sparc/kernel/Makefile
3808 +++ b/arch/sparc/kernel/Makefile
3809 @@ -3,7 +3,7 @@
3810 #
3811
3812 asflags-y := -ansi
3813 -ccflags-y := -Werror
3814 +#ccflags-y := -Werror
3815
3816 extra-y := head_$(BITS).o
3817 extra-y += init_task.o
3818 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
3819 index f793742..4d880af 100644
3820 --- a/arch/sparc/kernel/process_32.c
3821 +++ b/arch/sparc/kernel/process_32.c
3822 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
3823 rw->ins[4], rw->ins[5],
3824 rw->ins[6],
3825 rw->ins[7]);
3826 - printk("%pS\n", (void *) rw->ins[7]);
3827 + printk("%pA\n", (void *) rw->ins[7]);
3828 rw = (struct reg_window32 *) rw->ins[6];
3829 }
3830 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3831 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
3832
3833 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3834 r->psr, r->pc, r->npc, r->y, print_tainted());
3835 - printk("PC: <%pS>\n", (void *) r->pc);
3836 + printk("PC: <%pA>\n", (void *) r->pc);
3837 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3838 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3839 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3840 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3841 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3842 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3843 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3844 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3845
3846 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3847 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3848 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
3849 rw = (struct reg_window32 *) fp;
3850 pc = rw->ins[7];
3851 printk("[%08lx : ", pc);
3852 - printk("%pS ] ", (void *) pc);
3853 + printk("%pA ] ", (void *) pc);
3854 fp = rw->ins[6];
3855 } while (++count < 16);
3856 printk("\n");
3857 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
3858 index d959cd0..7b42812 100644
3859 --- a/arch/sparc/kernel/process_64.c
3860 +++ b/arch/sparc/kernel/process_64.c
3861 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
3862 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3863 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3864 if (regs->tstate & TSTATE_PRIV)
3865 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3866 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3867 }
3868
3869 void show_regs(struct pt_regs *regs)
3870 {
3871 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3872 regs->tpc, regs->tnpc, regs->y, print_tainted());
3873 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3874 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3875 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3876 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3877 regs->u_regs[3]);
3878 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3879 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3880 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3881 regs->u_regs[15]);
3882 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3883 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3884 show_regwindow(regs);
3885 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3886 }
3887 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void)
3888 ((tp && tp->task) ? tp->task->pid : -1));
3889
3890 if (gp->tstate & TSTATE_PRIV) {
3891 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3892 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3893 (void *) gp->tpc,
3894 (void *) gp->o7,
3895 (void *) gp->i7,
3896 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
3897 index 42b282f..28ce9f2 100644
3898 --- a/arch/sparc/kernel/sys_sparc_32.c
3899 +++ b/arch/sparc/kernel/sys_sparc_32.c
3900 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3901 if (ARCH_SUN4C && len > 0x20000000)
3902 return -ENOMEM;
3903 if (!addr)
3904 - addr = TASK_UNMAPPED_BASE;
3905 + addr = current->mm->mmap_base;
3906
3907 if (flags & MAP_SHARED)
3908 addr = COLOUR_ALIGN(addr);
3909 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3910 }
3911 if (TASK_SIZE - PAGE_SIZE - len < addr)
3912 return -ENOMEM;
3913 - if (!vmm || addr + len <= vmm->vm_start)
3914 + if (check_heap_stack_gap(vmm, addr, len))
3915 return addr;
3916 addr = vmm->vm_end;
3917 if (flags & MAP_SHARED)
3918 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
3919 index 908b47a..aa9e584 100644
3920 --- a/arch/sparc/kernel/sys_sparc_64.c
3921 +++ b/arch/sparc/kernel/sys_sparc_64.c
3922 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3923 /* We do not accept a shared mapping if it would violate
3924 * cache aliasing constraints.
3925 */
3926 - if ((flags & MAP_SHARED) &&
3927 + if ((filp || (flags & MAP_SHARED)) &&
3928 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3929 return -EINVAL;
3930 return addr;
3931 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3932 if (filp || (flags & MAP_SHARED))
3933 do_color_align = 1;
3934
3935 +#ifdef CONFIG_PAX_RANDMMAP
3936 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3937 +#endif
3938 +
3939 if (addr) {
3940 if (do_color_align)
3941 addr = COLOUR_ALIGN(addr, pgoff);
3942 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
3943 addr = PAGE_ALIGN(addr);
3944
3945 vma = find_vma(mm, addr);
3946 - if (task_size - len >= addr &&
3947 - (!vma || addr + len <= vma->vm_start))
3948 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3949 return addr;
3950 }
3951
3952 if (len > mm->cached_hole_size) {
3953 - start_addr = addr = mm->free_area_cache;
3954 + start_addr = addr = mm->free_area_cache;
3955 } else {
3956 - start_addr = addr = TASK_UNMAPPED_BASE;
3957 + start_addr = addr = mm->mmap_base;
3958 mm->cached_hole_size = 0;
3959 }
3960
3961 @@ -174,14 +177,14 @@ full_search:
3962 vma = find_vma(mm, VA_EXCLUDE_END);
3963 }
3964 if (unlikely(task_size < addr)) {
3965 - if (start_addr != TASK_UNMAPPED_BASE) {
3966 - start_addr = addr = TASK_UNMAPPED_BASE;
3967 + if (start_addr != mm->mmap_base) {
3968 + start_addr = addr = mm->mmap_base;
3969 mm->cached_hole_size = 0;
3970 goto full_search;
3971 }
3972 return -ENOMEM;
3973 }
3974 - if (likely(!vma || addr + len <= vma->vm_start)) {
3975 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3976 /*
3977 * Remember the place where we stopped the search:
3978 */
3979 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3980 /* We do not accept a shared mapping if it would violate
3981 * cache aliasing constraints.
3982 */
3983 - if ((flags & MAP_SHARED) &&
3984 + if ((filp || (flags & MAP_SHARED)) &&
3985 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3986 return -EINVAL;
3987 return addr;
3988 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3989 addr = PAGE_ALIGN(addr);
3990
3991 vma = find_vma(mm, addr);
3992 - if (task_size - len >= addr &&
3993 - (!vma || addr + len <= vma->vm_start))
3994 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3995 return addr;
3996 }
3997
3998 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3999 /* make sure it can fit in the remaining address space */
4000 if (likely(addr > len)) {
4001 vma = find_vma(mm, addr-len);
4002 - if (!vma || addr <= vma->vm_start) {
4003 + if (check_heap_stack_gap(vma, addr - len, len)) {
4004 /* remember the address as a hint for next time */
4005 return (mm->free_area_cache = addr-len);
4006 }
4007 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4008 if (unlikely(mm->mmap_base < len))
4009 goto bottomup;
4010
4011 - addr = mm->mmap_base-len;
4012 - if (do_color_align)
4013 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4014 + addr = mm->mmap_base - len;
4015
4016 do {
4017 + if (do_color_align)
4018 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4019 /*
4020 * Lookup failure means no vma is above this address,
4021 * else if new region fits below vma->vm_start,
4022 * return with success:
4023 */
4024 vma = find_vma(mm, addr);
4025 - if (likely(!vma || addr+len <= vma->vm_start)) {
4026 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4027 /* remember the address as a hint for next time */
4028 return (mm->free_area_cache = addr);
4029 }
4030 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4031 mm->cached_hole_size = vma->vm_start - addr;
4032
4033 /* try just below the current vma->vm_start */
4034 - addr = vma->vm_start-len;
4035 - if (do_color_align)
4036 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4037 - } while (likely(len < vma->vm_start));
4038 + addr = skip_heap_stack_gap(vma, len);
4039 + } while (!IS_ERR_VALUE(addr));
4040
4041 bottomup:
4042 /*
4043 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4044 gap == RLIM_INFINITY ||
4045 sysctl_legacy_va_layout) {
4046 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4047 +
4048 +#ifdef CONFIG_PAX_RANDMMAP
4049 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4050 + mm->mmap_base += mm->delta_mmap;
4051 +#endif
4052 +
4053 mm->get_unmapped_area = arch_get_unmapped_area;
4054 mm->unmap_area = arch_unmap_area;
4055 } else {
4056 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4057 gap = (task_size / 6 * 5);
4058
4059 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4060 +
4061 +#ifdef CONFIG_PAX_RANDMMAP
4062 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4063 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4064 +#endif
4065 +
4066 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4067 mm->unmap_area = arch_unmap_area_topdown;
4068 }
4069 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
4070 index c0490c7..84959d1 100644
4071 --- a/arch/sparc/kernel/traps_32.c
4072 +++ b/arch/sparc/kernel/traps_32.c
4073 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
4074 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4075 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4076
4077 +extern void gr_handle_kernel_exploit(void);
4078 +
4079 void die_if_kernel(char *str, struct pt_regs *regs)
4080 {
4081 static int die_counter;
4082 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4083 count++ < 30 &&
4084 (((unsigned long) rw) >= PAGE_OFFSET) &&
4085 !(((unsigned long) rw) & 0x7)) {
4086 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4087 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4088 (void *) rw->ins[7]);
4089 rw = (struct reg_window32 *)rw->ins[6];
4090 }
4091 }
4092 printk("Instruction DUMP:");
4093 instruction_dump ((unsigned long *) regs->pc);
4094 - if(regs->psr & PSR_PS)
4095 + if(regs->psr & PSR_PS) {
4096 + gr_handle_kernel_exploit();
4097 do_exit(SIGKILL);
4098 + }
4099 do_exit(SIGSEGV);
4100 }
4101
4102 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
4103 index 0cbdaa4..438e4c9 100644
4104 --- a/arch/sparc/kernel/traps_64.c
4105 +++ b/arch/sparc/kernel/traps_64.c
4106 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
4107 i + 1,
4108 p->trapstack[i].tstate, p->trapstack[i].tpc,
4109 p->trapstack[i].tnpc, p->trapstack[i].tt);
4110 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4111 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4112 }
4113 }
4114
4115 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
4116
4117 lvl -= 0x100;
4118 if (regs->tstate & TSTATE_PRIV) {
4119 +
4120 +#ifdef CONFIG_PAX_REFCOUNT
4121 + if (lvl == 6)
4122 + pax_report_refcount_overflow(regs);
4123 +#endif
4124 +
4125 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4126 die_if_kernel(buffer, regs);
4127 }
4128 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
4129 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4130 {
4131 char buffer[32];
4132 -
4133 +
4134 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4135 0, lvl, SIGTRAP) == NOTIFY_STOP)
4136 return;
4137
4138 +#ifdef CONFIG_PAX_REFCOUNT
4139 + if (lvl == 6)
4140 + pax_report_refcount_overflow(regs);
4141 +#endif
4142 +
4143 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4144
4145 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4146 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
4147 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4148 printk("%s" "ERROR(%d): ",
4149 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4150 - printk("TPC<%pS>\n", (void *) regs->tpc);
4151 + printk("TPC<%pA>\n", (void *) regs->tpc);
4152 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4153 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4154 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4155 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4156 smp_processor_id(),
4157 (type & 0x1) ? 'I' : 'D',
4158 regs->tpc);
4159 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4160 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4161 panic("Irrecoverable Cheetah+ parity error.");
4162 }
4163
4164 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
4165 smp_processor_id(),
4166 (type & 0x1) ? 'I' : 'D',
4167 regs->tpc);
4168 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4169 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4170 }
4171
4172 struct sun4v_error_entry {
4173 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
4174
4175 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4176 regs->tpc, tl);
4177 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4178 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4179 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4180 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4181 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4182 (void *) regs->u_regs[UREG_I7]);
4183 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4184 "pte[%lx] error[%lx]\n",
4185 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
4186
4187 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4188 regs->tpc, tl);
4189 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4190 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4191 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4192 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4193 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4194 (void *) regs->u_regs[UREG_I7]);
4195 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4196 "pte[%lx] error[%lx]\n",
4197 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4198 fp = (unsigned long)sf->fp + STACK_BIAS;
4199 }
4200
4201 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4202 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4203 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4204 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
4205 int index = tsk->curr_ret_stack;
4206 if (tsk->ret_stack && index >= graph) {
4207 pc = tsk->ret_stack[index - graph].ret;
4208 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4209 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4210 graph++;
4211 }
4212 }
4213 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
4214 return (struct reg_window *) (fp + STACK_BIAS);
4215 }
4216
4217 +extern void gr_handle_kernel_exploit(void);
4218 +
4219 void die_if_kernel(char *str, struct pt_regs *regs)
4220 {
4221 static int die_counter;
4222 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4223 while (rw &&
4224 count++ < 30 &&
4225 kstack_valid(tp, (unsigned long) rw)) {
4226 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4227 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4228 (void *) rw->ins[7]);
4229
4230 rw = kernel_stack_up(rw);
4231 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
4232 }
4233 user_instruction_dump ((unsigned int __user *) regs->tpc);
4234 }
4235 - if (regs->tstate & TSTATE_PRIV)
4236 + if (regs->tstate & TSTATE_PRIV) {
4237 + gr_handle_kernel_exploit();
4238 do_exit(SIGKILL);
4239 + }
4240 do_exit(SIGSEGV);
4241 }
4242 EXPORT_SYMBOL(die_if_kernel);
4243 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
4244 index 76e4ac1..78f8bb1 100644
4245 --- a/arch/sparc/kernel/unaligned_64.c
4246 +++ b/arch/sparc/kernel/unaligned_64.c
4247 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
4248 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
4249
4250 if (__ratelimit(&ratelimit)) {
4251 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4252 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4253 regs->tpc, (void *) regs->tpc);
4254 }
4255 }
4256 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
4257 index a3fc437..fea9957 100644
4258 --- a/arch/sparc/lib/Makefile
4259 +++ b/arch/sparc/lib/Makefile
4260 @@ -2,7 +2,7 @@
4261 #
4262
4263 asflags-y := -ansi -DST_DIV0=0x02
4264 -ccflags-y := -Werror
4265 +#ccflags-y := -Werror
4266
4267 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4268 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4269 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
4270 index 59186e0..f747d7a 100644
4271 --- a/arch/sparc/lib/atomic_64.S
4272 +++ b/arch/sparc/lib/atomic_64.S
4273 @@ -18,7 +18,12 @@
4274 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4275 BACKOFF_SETUP(%o2)
4276 1: lduw [%o1], %g1
4277 - add %g1, %o0, %g7
4278 + addcc %g1, %o0, %g7
4279 +
4280 +#ifdef CONFIG_PAX_REFCOUNT
4281 + tvs %icc, 6
4282 +#endif
4283 +
4284 cas [%o1], %g1, %g7
4285 cmp %g1, %g7
4286 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4287 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4288 2: BACKOFF_SPIN(%o2, %o3, 1b)
4289 .size atomic_add, .-atomic_add
4290
4291 + .globl atomic_add_unchecked
4292 + .type atomic_add_unchecked,#function
4293 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4294 + BACKOFF_SETUP(%o2)
4295 +1: lduw [%o1], %g1
4296 + add %g1, %o0, %g7
4297 + cas [%o1], %g1, %g7
4298 + cmp %g1, %g7
4299 + bne,pn %icc, 2f
4300 + nop
4301 + retl
4302 + nop
4303 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4304 + .size atomic_add_unchecked, .-atomic_add_unchecked
4305 +
4306 .globl atomic_sub
4307 .type atomic_sub,#function
4308 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4309 BACKOFF_SETUP(%o2)
4310 1: lduw [%o1], %g1
4311 - sub %g1, %o0, %g7
4312 + subcc %g1, %o0, %g7
4313 +
4314 +#ifdef CONFIG_PAX_REFCOUNT
4315 + tvs %icc, 6
4316 +#endif
4317 +
4318 cas [%o1], %g1, %g7
4319 cmp %g1, %g7
4320 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4321 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4322 2: BACKOFF_SPIN(%o2, %o3, 1b)
4323 .size atomic_sub, .-atomic_sub
4324
4325 + .globl atomic_sub_unchecked
4326 + .type atomic_sub_unchecked,#function
4327 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4328 + BACKOFF_SETUP(%o2)
4329 +1: lduw [%o1], %g1
4330 + sub %g1, %o0, %g7
4331 + cas [%o1], %g1, %g7
4332 + cmp %g1, %g7
4333 + bne,pn %icc, 2f
4334 + nop
4335 + retl
4336 + nop
4337 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4338 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4339 +
4340 .globl atomic_add_ret
4341 .type atomic_add_ret,#function
4342 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4343 BACKOFF_SETUP(%o2)
4344 1: lduw [%o1], %g1
4345 - add %g1, %o0, %g7
4346 + addcc %g1, %o0, %g7
4347 +
4348 +#ifdef CONFIG_PAX_REFCOUNT
4349 + tvs %icc, 6
4350 +#endif
4351 +
4352 cas [%o1], %g1, %g7
4353 cmp %g1, %g7
4354 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4355 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4356 2: BACKOFF_SPIN(%o2, %o3, 1b)
4357 .size atomic_add_ret, .-atomic_add_ret
4358
4359 + .globl atomic_add_ret_unchecked
4360 + .type atomic_add_ret_unchecked,#function
4361 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4362 + BACKOFF_SETUP(%o2)
4363 +1: lduw [%o1], %g1
4364 + addcc %g1, %o0, %g7
4365 + cas [%o1], %g1, %g7
4366 + cmp %g1, %g7
4367 + bne,pn %icc, 2f
4368 + add %g7, %o0, %g7
4369 + sra %g7, 0, %o0
4370 + retl
4371 + nop
4372 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4373 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4374 +
4375 .globl atomic_sub_ret
4376 .type atomic_sub_ret,#function
4377 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4378 BACKOFF_SETUP(%o2)
4379 1: lduw [%o1], %g1
4380 - sub %g1, %o0, %g7
4381 + subcc %g1, %o0, %g7
4382 +
4383 +#ifdef CONFIG_PAX_REFCOUNT
4384 + tvs %icc, 6
4385 +#endif
4386 +
4387 cas [%o1], %g1, %g7
4388 cmp %g1, %g7
4389 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
4390 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4391 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4392 BACKOFF_SETUP(%o2)
4393 1: ldx [%o1], %g1
4394 - add %g1, %o0, %g7
4395 + addcc %g1, %o0, %g7
4396 +
4397 +#ifdef CONFIG_PAX_REFCOUNT
4398 + tvs %xcc, 6
4399 +#endif
4400 +
4401 casx [%o1], %g1, %g7
4402 cmp %g1, %g7
4403 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4404 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4405 2: BACKOFF_SPIN(%o2, %o3, 1b)
4406 .size atomic64_add, .-atomic64_add
4407
4408 + .globl atomic64_add_unchecked
4409 + .type atomic64_add_unchecked,#function
4410 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4411 + BACKOFF_SETUP(%o2)
4412 +1: ldx [%o1], %g1
4413 + addcc %g1, %o0, %g7
4414 + casx [%o1], %g1, %g7
4415 + cmp %g1, %g7
4416 + bne,pn %xcc, 2f
4417 + nop
4418 + retl
4419 + nop
4420 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4421 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4422 +
4423 .globl atomic64_sub
4424 .type atomic64_sub,#function
4425 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4426 BACKOFF_SETUP(%o2)
4427 1: ldx [%o1], %g1
4428 - sub %g1, %o0, %g7
4429 + subcc %g1, %o0, %g7
4430 +
4431 +#ifdef CONFIG_PAX_REFCOUNT
4432 + tvs %xcc, 6
4433 +#endif
4434 +
4435 casx [%o1], %g1, %g7
4436 cmp %g1, %g7
4437 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4438 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4439 2: BACKOFF_SPIN(%o2, %o3, 1b)
4440 .size atomic64_sub, .-atomic64_sub
4441
4442 + .globl atomic64_sub_unchecked
4443 + .type atomic64_sub_unchecked,#function
4444 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4445 + BACKOFF_SETUP(%o2)
4446 +1: ldx [%o1], %g1
4447 + subcc %g1, %o0, %g7
4448 + casx [%o1], %g1, %g7
4449 + cmp %g1, %g7
4450 + bne,pn %xcc, 2f
4451 + nop
4452 + retl
4453 + nop
4454 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4455 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4456 +
4457 .globl atomic64_add_ret
4458 .type atomic64_add_ret,#function
4459 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4460 BACKOFF_SETUP(%o2)
4461 1: ldx [%o1], %g1
4462 - add %g1, %o0, %g7
4463 + addcc %g1, %o0, %g7
4464 +
4465 +#ifdef CONFIG_PAX_REFCOUNT
4466 + tvs %xcc, 6
4467 +#endif
4468 +
4469 casx [%o1], %g1, %g7
4470 cmp %g1, %g7
4471 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4472 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4473 2: BACKOFF_SPIN(%o2, %o3, 1b)
4474 .size atomic64_add_ret, .-atomic64_add_ret
4475
4476 + .globl atomic64_add_ret_unchecked
4477 + .type atomic64_add_ret_unchecked,#function
4478 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4479 + BACKOFF_SETUP(%o2)
4480 +1: ldx [%o1], %g1
4481 + addcc %g1, %o0, %g7
4482 + casx [%o1], %g1, %g7
4483 + cmp %g1, %g7
4484 + bne,pn %xcc, 2f
4485 + add %g7, %o0, %g7
4486 + mov %g7, %o0
4487 + retl
4488 + nop
4489 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4490 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4491 +
4492 .globl atomic64_sub_ret
4493 .type atomic64_sub_ret,#function
4494 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4495 BACKOFF_SETUP(%o2)
4496 1: ldx [%o1], %g1
4497 - sub %g1, %o0, %g7
4498 + subcc %g1, %o0, %g7
4499 +
4500 +#ifdef CONFIG_PAX_REFCOUNT
4501 + tvs %xcc, 6
4502 +#endif
4503 +
4504 casx [%o1], %g1, %g7
4505 cmp %g1, %g7
4506 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4507 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
4508 index 1b30bb3..b4a16c7 100644
4509 --- a/arch/sparc/lib/ksyms.c
4510 +++ b/arch/sparc/lib/ksyms.c
4511 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4512
4513 /* Atomic counter implementation. */
4514 EXPORT_SYMBOL(atomic_add);
4515 +EXPORT_SYMBOL(atomic_add_unchecked);
4516 EXPORT_SYMBOL(atomic_add_ret);
4517 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4518 EXPORT_SYMBOL(atomic_sub);
4519 +EXPORT_SYMBOL(atomic_sub_unchecked);
4520 EXPORT_SYMBOL(atomic_sub_ret);
4521 EXPORT_SYMBOL(atomic64_add);
4522 +EXPORT_SYMBOL(atomic64_add_unchecked);
4523 EXPORT_SYMBOL(atomic64_add_ret);
4524 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4525 EXPORT_SYMBOL(atomic64_sub);
4526 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4527 EXPORT_SYMBOL(atomic64_sub_ret);
4528
4529 /* Atomic bit operations. */
4530 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4531 index e3cda21..a68e4cb 100644
4532 --- a/arch/sparc/mm/Makefile
4533 +++ b/arch/sparc/mm/Makefile
4534 @@ -2,7 +2,7 @@
4535 #
4536
4537 asflags-y := -ansi
4538 -ccflags-y := -Werror
4539 +#ccflags-y := -Werror
4540
4541 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
4542 obj-y += fault_$(BITS).o
4543 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
4544 index aa1c1b1..f93e28f 100644
4545 --- a/arch/sparc/mm/fault_32.c
4546 +++ b/arch/sparc/mm/fault_32.c
4547 @@ -22,6 +22,9 @@
4548 #include <linux/interrupt.h>
4549 #include <linux/module.h>
4550 #include <linux/kdebug.h>
4551 +#include <linux/slab.h>
4552 +#include <linux/pagemap.h>
4553 +#include <linux/compiler.h>
4554
4555 #include <asm/system.h>
4556 #include <asm/page.h>
4557 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
4558 return safe_compute_effective_address(regs, insn);
4559 }
4560
4561 +#ifdef CONFIG_PAX_PAGEEXEC
4562 +#ifdef CONFIG_PAX_DLRESOLVE
4563 +static void pax_emuplt_close(struct vm_area_struct *vma)
4564 +{
4565 + vma->vm_mm->call_dl_resolve = 0UL;
4566 +}
4567 +
4568 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4569 +{
4570 + unsigned int *kaddr;
4571 +
4572 + vmf->page = alloc_page(GFP_HIGHUSER);
4573 + if (!vmf->page)
4574 + return VM_FAULT_OOM;
4575 +
4576 + kaddr = kmap(vmf->page);
4577 + memset(kaddr, 0, PAGE_SIZE);
4578 + kaddr[0] = 0x9DE3BFA8U; /* save */
4579 + flush_dcache_page(vmf->page);
4580 + kunmap(vmf->page);
4581 + return VM_FAULT_MAJOR;
4582 +}
4583 +
4584 +static const struct vm_operations_struct pax_vm_ops = {
4585 + .close = pax_emuplt_close,
4586 + .fault = pax_emuplt_fault
4587 +};
4588 +
4589 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4590 +{
4591 + int ret;
4592 +
4593 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4594 + vma->vm_mm = current->mm;
4595 + vma->vm_start = addr;
4596 + vma->vm_end = addr + PAGE_SIZE;
4597 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4598 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4599 + vma->vm_ops = &pax_vm_ops;
4600 +
4601 + ret = insert_vm_struct(current->mm, vma);
4602 + if (ret)
4603 + return ret;
4604 +
4605 + ++current->mm->total_vm;
4606 + return 0;
4607 +}
4608 +#endif
4609 +
4610 +/*
4611 + * PaX: decide what to do with offenders (regs->pc = fault address)
4612 + *
4613 + * returns 1 when task should be killed
4614 + * 2 when patched PLT trampoline was detected
4615 + * 3 when unpatched PLT trampoline was detected
4616 + */
4617 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4618 +{
4619 +
4620 +#ifdef CONFIG_PAX_EMUPLT
4621 + int err;
4622 +
4623 + do { /* PaX: patched PLT emulation #1 */
4624 + unsigned int sethi1, sethi2, jmpl;
4625 +
4626 + err = get_user(sethi1, (unsigned int *)regs->pc);
4627 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4628 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4629 +
4630 + if (err)
4631 + break;
4632 +
4633 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4634 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4635 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4636 + {
4637 + unsigned int addr;
4638 +
4639 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4640 + addr = regs->u_regs[UREG_G1];
4641 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4642 + regs->pc = addr;
4643 + regs->npc = addr+4;
4644 + return 2;
4645 + }
4646 + } while (0);
4647 +
4648 + { /* PaX: patched PLT emulation #2 */
4649 + unsigned int ba;
4650 +
4651 + err = get_user(ba, (unsigned int *)regs->pc);
4652 +
4653 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4654 + unsigned int addr;
4655 +
4656 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4657 + regs->pc = addr;
4658 + regs->npc = addr+4;
4659 + return 2;
4660 + }
4661 + }
4662 +
4663 + do { /* PaX: patched PLT emulation #3 */
4664 + unsigned int sethi, jmpl, nop;
4665 +
4666 + err = get_user(sethi, (unsigned int *)regs->pc);
4667 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4668 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4669 +
4670 + if (err)
4671 + break;
4672 +
4673 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4674 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4675 + nop == 0x01000000U)
4676 + {
4677 + unsigned int addr;
4678 +
4679 + addr = (sethi & 0x003FFFFFU) << 10;
4680 + regs->u_regs[UREG_G1] = addr;
4681 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4682 + regs->pc = addr;
4683 + regs->npc = addr+4;
4684 + return 2;
4685 + }
4686 + } while (0);
4687 +
4688 + do { /* PaX: unpatched PLT emulation step 1 */
4689 + unsigned int sethi, ba, nop;
4690 +
4691 + err = get_user(sethi, (unsigned int *)regs->pc);
4692 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4693 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4694 +
4695 + if (err)
4696 + break;
4697 +
4698 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4699 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4700 + nop == 0x01000000U)
4701 + {
4702 + unsigned int addr, save, call;
4703 +
4704 + if ((ba & 0xFFC00000U) == 0x30800000U)
4705 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4706 + else
4707 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4708 +
4709 + err = get_user(save, (unsigned int *)addr);
4710 + err |= get_user(call, (unsigned int *)(addr+4));
4711 + err |= get_user(nop, (unsigned int *)(addr+8));
4712 + if (err)
4713 + break;
4714 +
4715 +#ifdef CONFIG_PAX_DLRESOLVE
4716 + if (save == 0x9DE3BFA8U &&
4717 + (call & 0xC0000000U) == 0x40000000U &&
4718 + nop == 0x01000000U)
4719 + {
4720 + struct vm_area_struct *vma;
4721 + unsigned long call_dl_resolve;
4722 +
4723 + down_read(&current->mm->mmap_sem);
4724 + call_dl_resolve = current->mm->call_dl_resolve;
4725 + up_read(&current->mm->mmap_sem);
4726 + if (likely(call_dl_resolve))
4727 + goto emulate;
4728 +
4729 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4730 +
4731 + down_write(&current->mm->mmap_sem);
4732 + if (current->mm->call_dl_resolve) {
4733 + call_dl_resolve = current->mm->call_dl_resolve;
4734 + up_write(&current->mm->mmap_sem);
4735 + if (vma)
4736 + kmem_cache_free(vm_area_cachep, vma);
4737 + goto emulate;
4738 + }
4739 +
4740 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4741 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4742 + up_write(&current->mm->mmap_sem);
4743 + if (vma)
4744 + kmem_cache_free(vm_area_cachep, vma);
4745 + return 1;
4746 + }
4747 +
4748 + if (pax_insert_vma(vma, call_dl_resolve)) {
4749 + up_write(&current->mm->mmap_sem);
4750 + kmem_cache_free(vm_area_cachep, vma);
4751 + return 1;
4752 + }
4753 +
4754 + current->mm->call_dl_resolve = call_dl_resolve;
4755 + up_write(&current->mm->mmap_sem);
4756 +
4757 +emulate:
4758 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4759 + regs->pc = call_dl_resolve;
4760 + regs->npc = addr+4;
4761 + return 3;
4762 + }
4763 +#endif
4764 +
4765 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4766 + if ((save & 0xFFC00000U) == 0x05000000U &&
4767 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4768 + nop == 0x01000000U)
4769 + {
4770 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4771 + regs->u_regs[UREG_G2] = addr + 4;
4772 + addr = (save & 0x003FFFFFU) << 10;
4773 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4774 + regs->pc = addr;
4775 + regs->npc = addr+4;
4776 + return 3;
4777 + }
4778 + }
4779 + } while (0);
4780 +
4781 + do { /* PaX: unpatched PLT emulation step 2 */
4782 + unsigned int save, call, nop;
4783 +
4784 + err = get_user(save, (unsigned int *)(regs->pc-4));
4785 + err |= get_user(call, (unsigned int *)regs->pc);
4786 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4787 + if (err)
4788 + break;
4789 +
4790 + if (save == 0x9DE3BFA8U &&
4791 + (call & 0xC0000000U) == 0x40000000U &&
4792 + nop == 0x01000000U)
4793 + {
4794 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4795 +
4796 + regs->u_regs[UREG_RETPC] = regs->pc;
4797 + regs->pc = dl_resolve;
4798 + regs->npc = dl_resolve+4;
4799 + return 3;
4800 + }
4801 + } while (0);
4802 +#endif
4803 +
4804 + return 1;
4805 +}
4806 +
4807 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4808 +{
4809 + unsigned long i;
4810 +
4811 + printk(KERN_ERR "PAX: bytes at PC: ");
4812 + for (i = 0; i < 8; i++) {
4813 + unsigned int c;
4814 + if (get_user(c, (unsigned int *)pc+i))
4815 + printk(KERN_CONT "???????? ");
4816 + else
4817 + printk(KERN_CONT "%08x ", c);
4818 + }
4819 + printk("\n");
4820 +}
4821 +#endif
4822 +
4823 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4824 int text_fault)
4825 {
4826 @@ -281,6 +546,24 @@ good_area:
4827 if(!(vma->vm_flags & VM_WRITE))
4828 goto bad_area;
4829 } else {
4830 +
4831 +#ifdef CONFIG_PAX_PAGEEXEC
4832 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4833 + up_read(&mm->mmap_sem);
4834 + switch (pax_handle_fetch_fault(regs)) {
4835 +
4836 +#ifdef CONFIG_PAX_EMUPLT
4837 + case 2:
4838 + case 3:
4839 + return;
4840 +#endif
4841 +
4842 + }
4843 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4844 + do_group_exit(SIGKILL);
4845 + }
4846 +#endif
4847 +
4848 /* Allow reads even for write-only mappings */
4849 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4850 goto bad_area;
4851 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
4852 index 504c062..6fcb9c6 100644
4853 --- a/arch/sparc/mm/fault_64.c
4854 +++ b/arch/sparc/mm/fault_64.c
4855 @@ -21,6 +21,9 @@
4856 #include <linux/kprobes.h>
4857 #include <linux/kdebug.h>
4858 #include <linux/percpu.h>
4859 +#include <linux/slab.h>
4860 +#include <linux/pagemap.h>
4861 +#include <linux/compiler.h>
4862
4863 #include <asm/page.h>
4864 #include <asm/pgtable.h>
4865 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
4866 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4867 regs->tpc);
4868 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4869 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4870 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4871 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4872 dump_stack();
4873 unhandled_fault(regs->tpc, current, regs);
4874 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
4875 show_regs(regs);
4876 }
4877
4878 +#ifdef CONFIG_PAX_PAGEEXEC
4879 +#ifdef CONFIG_PAX_DLRESOLVE
4880 +static void pax_emuplt_close(struct vm_area_struct *vma)
4881 +{
4882 + vma->vm_mm->call_dl_resolve = 0UL;
4883 +}
4884 +
4885 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4886 +{
4887 + unsigned int *kaddr;
4888 +
4889 + vmf->page = alloc_page(GFP_HIGHUSER);
4890 + if (!vmf->page)
4891 + return VM_FAULT_OOM;
4892 +
4893 + kaddr = kmap(vmf->page);
4894 + memset(kaddr, 0, PAGE_SIZE);
4895 + kaddr[0] = 0x9DE3BFA8U; /* save */
4896 + flush_dcache_page(vmf->page);
4897 + kunmap(vmf->page);
4898 + return VM_FAULT_MAJOR;
4899 +}
4900 +
4901 +static const struct vm_operations_struct pax_vm_ops = {
4902 + .close = pax_emuplt_close,
4903 + .fault = pax_emuplt_fault
4904 +};
4905 +
4906 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4907 +{
4908 + int ret;
4909 +
4910 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4911 + vma->vm_mm = current->mm;
4912 + vma->vm_start = addr;
4913 + vma->vm_end = addr + PAGE_SIZE;
4914 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4915 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4916 + vma->vm_ops = &pax_vm_ops;
4917 +
4918 + ret = insert_vm_struct(current->mm, vma);
4919 + if (ret)
4920 + return ret;
4921 +
4922 + ++current->mm->total_vm;
4923 + return 0;
4924 +}
4925 +#endif
4926 +
4927 +/*
4928 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4929 + *
4930 + * returns 1 when task should be killed
4931 + * 2 when patched PLT trampoline was detected
4932 + * 3 when unpatched PLT trampoline was detected
4933 + */
4934 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4935 +{
4936 +
4937 +#ifdef CONFIG_PAX_EMUPLT
4938 + int err;
4939 +
4940 + do { /* PaX: patched PLT emulation #1 */
4941 + unsigned int sethi1, sethi2, jmpl;
4942 +
4943 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4944 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4945 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4946 +
4947 + if (err)
4948 + break;
4949 +
4950 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4951 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4952 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4953 + {
4954 + unsigned long addr;
4955 +
4956 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4957 + addr = regs->u_regs[UREG_G1];
4958 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4959 +
4960 + if (test_thread_flag(TIF_32BIT))
4961 + addr &= 0xFFFFFFFFUL;
4962 +
4963 + regs->tpc = addr;
4964 + regs->tnpc = addr+4;
4965 + return 2;
4966 + }
4967 + } while (0);
4968 +
4969 + { /* PaX: patched PLT emulation #2 */
4970 + unsigned int ba;
4971 +
4972 + err = get_user(ba, (unsigned int *)regs->tpc);
4973 +
4974 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4975 + unsigned long addr;
4976 +
4977 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4978 +
4979 + if (test_thread_flag(TIF_32BIT))
4980 + addr &= 0xFFFFFFFFUL;
4981 +
4982 + regs->tpc = addr;
4983 + regs->tnpc = addr+4;
4984 + return 2;
4985 + }
4986 + }
4987 +
4988 + do { /* PaX: patched PLT emulation #3 */
4989 + unsigned int sethi, jmpl, nop;
4990 +
4991 + err = get_user(sethi, (unsigned int *)regs->tpc);
4992 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4993 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4994 +
4995 + if (err)
4996 + break;
4997 +
4998 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4999 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5000 + nop == 0x01000000U)
5001 + {
5002 + unsigned long addr;
5003 +
5004 + addr = (sethi & 0x003FFFFFU) << 10;
5005 + regs->u_regs[UREG_G1] = addr;
5006 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5007 +
5008 + if (test_thread_flag(TIF_32BIT))
5009 + addr &= 0xFFFFFFFFUL;
5010 +
5011 + regs->tpc = addr;
5012 + regs->tnpc = addr+4;
5013 + return 2;
5014 + }
5015 + } while (0);
5016 +
5017 + do { /* PaX: patched PLT emulation #4 */
5018 + unsigned int sethi, mov1, call, mov2;
5019 +
5020 + err = get_user(sethi, (unsigned int *)regs->tpc);
5021 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5022 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5023 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5024 +
5025 + if (err)
5026 + break;
5027 +
5028 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5029 + mov1 == 0x8210000FU &&
5030 + (call & 0xC0000000U) == 0x40000000U &&
5031 + mov2 == 0x9E100001U)
5032 + {
5033 + unsigned long addr;
5034 +
5035 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5036 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5037 +
5038 + if (test_thread_flag(TIF_32BIT))
5039 + addr &= 0xFFFFFFFFUL;
5040 +
5041 + regs->tpc = addr;
5042 + regs->tnpc = addr+4;
5043 + return 2;
5044 + }
5045 + } while (0);
5046 +
5047 + do { /* PaX: patched PLT emulation #5 */
5048 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5049 +
5050 + err = get_user(sethi, (unsigned int *)regs->tpc);
5051 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5052 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5053 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5054 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5055 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5056 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5057 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5058 +
5059 + if (err)
5060 + break;
5061 +
5062 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5063 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5064 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5065 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5066 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5067 + sllx == 0x83287020U &&
5068 + jmpl == 0x81C04005U &&
5069 + nop == 0x01000000U)
5070 + {
5071 + unsigned long addr;
5072 +
5073 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5074 + regs->u_regs[UREG_G1] <<= 32;
5075 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5076 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5077 + regs->tpc = addr;
5078 + regs->tnpc = addr+4;
5079 + return 2;
5080 + }
5081 + } while (0);
5082 +
5083 + do { /* PaX: patched PLT emulation #6 */
5084 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5085 +
5086 + err = get_user(sethi, (unsigned int *)regs->tpc);
5087 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5088 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5089 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5090 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5091 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5092 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5093 +
5094 + if (err)
5095 + break;
5096 +
5097 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5098 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5099 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5100 + sllx == 0x83287020U &&
5101 + (or & 0xFFFFE000U) == 0x8A116000U &&
5102 + jmpl == 0x81C04005U &&
5103 + nop == 0x01000000U)
5104 + {
5105 + unsigned long addr;
5106 +
5107 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5108 + regs->u_regs[UREG_G1] <<= 32;
5109 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5110 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5111 + regs->tpc = addr;
5112 + regs->tnpc = addr+4;
5113 + return 2;
5114 + }
5115 + } while (0);
5116 +
5117 + do { /* PaX: unpatched PLT emulation step 1 */
5118 + unsigned int sethi, ba, nop;
5119 +
5120 + err = get_user(sethi, (unsigned int *)regs->tpc);
5121 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5122 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5123 +
5124 + if (err)
5125 + break;
5126 +
5127 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5128 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5129 + nop == 0x01000000U)
5130 + {
5131 + unsigned long addr;
5132 + unsigned int save, call;
5133 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5134 +
5135 + if ((ba & 0xFFC00000U) == 0x30800000U)
5136 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5137 + else
5138 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5139 +
5140 + if (test_thread_flag(TIF_32BIT))
5141 + addr &= 0xFFFFFFFFUL;
5142 +
5143 + err = get_user(save, (unsigned int *)addr);
5144 + err |= get_user(call, (unsigned int *)(addr+4));
5145 + err |= get_user(nop, (unsigned int *)(addr+8));
5146 + if (err)
5147 + break;
5148 +
5149 +#ifdef CONFIG_PAX_DLRESOLVE
5150 + if (save == 0x9DE3BFA8U &&
5151 + (call & 0xC0000000U) == 0x40000000U &&
5152 + nop == 0x01000000U)
5153 + {
5154 + struct vm_area_struct *vma;
5155 + unsigned long call_dl_resolve;
5156 +
5157 + down_read(&current->mm->mmap_sem);
5158 + call_dl_resolve = current->mm->call_dl_resolve;
5159 + up_read(&current->mm->mmap_sem);
5160 + if (likely(call_dl_resolve))
5161 + goto emulate;
5162 +
5163 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5164 +
5165 + down_write(&current->mm->mmap_sem);
5166 + if (current->mm->call_dl_resolve) {
5167 + call_dl_resolve = current->mm->call_dl_resolve;
5168 + up_write(&current->mm->mmap_sem);
5169 + if (vma)
5170 + kmem_cache_free(vm_area_cachep, vma);
5171 + goto emulate;
5172 + }
5173 +
5174 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5175 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5176 + up_write(&current->mm->mmap_sem);
5177 + if (vma)
5178 + kmem_cache_free(vm_area_cachep, vma);
5179 + return 1;
5180 + }
5181 +
5182 + if (pax_insert_vma(vma, call_dl_resolve)) {
5183 + up_write(&current->mm->mmap_sem);
5184 + kmem_cache_free(vm_area_cachep, vma);
5185 + return 1;
5186 + }
5187 +
5188 + current->mm->call_dl_resolve = call_dl_resolve;
5189 + up_write(&current->mm->mmap_sem);
5190 +
5191 +emulate:
5192 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5193 + regs->tpc = call_dl_resolve;
5194 + regs->tnpc = addr+4;
5195 + return 3;
5196 + }
5197 +#endif
5198 +
5199 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5200 + if ((save & 0xFFC00000U) == 0x05000000U &&
5201 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5202 + nop == 0x01000000U)
5203 + {
5204 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5205 + regs->u_regs[UREG_G2] = addr + 4;
5206 + addr = (save & 0x003FFFFFU) << 10;
5207 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5208 +
5209 + if (test_thread_flag(TIF_32BIT))
5210 + addr &= 0xFFFFFFFFUL;
5211 +
5212 + regs->tpc = addr;
5213 + regs->tnpc = addr+4;
5214 + return 3;
5215 + }
5216 +
5217 + /* PaX: 64-bit PLT stub */
5218 + err = get_user(sethi1, (unsigned int *)addr);
5219 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5220 + err |= get_user(or1, (unsigned int *)(addr+8));
5221 + err |= get_user(or2, (unsigned int *)(addr+12));
5222 + err |= get_user(sllx, (unsigned int *)(addr+16));
5223 + err |= get_user(add, (unsigned int *)(addr+20));
5224 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5225 + err |= get_user(nop, (unsigned int *)(addr+28));
5226 + if (err)
5227 + break;
5228 +
5229 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5230 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5231 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5232 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5233 + sllx == 0x89293020U &&
5234 + add == 0x8A010005U &&
5235 + jmpl == 0x89C14000U &&
5236 + nop == 0x01000000U)
5237 + {
5238 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5239 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5240 + regs->u_regs[UREG_G4] <<= 32;
5241 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5242 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5243 + regs->u_regs[UREG_G4] = addr + 24;
5244 + addr = regs->u_regs[UREG_G5];
5245 + regs->tpc = addr;
5246 + regs->tnpc = addr+4;
5247 + return 3;
5248 + }
5249 + }
5250 + } while (0);
5251 +
5252 +#ifdef CONFIG_PAX_DLRESOLVE
5253 + do { /* PaX: unpatched PLT emulation step 2 */
5254 + unsigned int save, call, nop;
5255 +
5256 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5257 + err |= get_user(call, (unsigned int *)regs->tpc);
5258 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5259 + if (err)
5260 + break;
5261 +
5262 + if (save == 0x9DE3BFA8U &&
5263 + (call & 0xC0000000U) == 0x40000000U &&
5264 + nop == 0x01000000U)
5265 + {
5266 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5267 +
5268 + if (test_thread_flag(TIF_32BIT))
5269 + dl_resolve &= 0xFFFFFFFFUL;
5270 +
5271 + regs->u_regs[UREG_RETPC] = regs->tpc;
5272 + regs->tpc = dl_resolve;
5273 + regs->tnpc = dl_resolve+4;
5274 + return 3;
5275 + }
5276 + } while (0);
5277 +#endif
5278 +
5279 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5280 + unsigned int sethi, ba, nop;
5281 +
5282 + err = get_user(sethi, (unsigned int *)regs->tpc);
5283 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5284 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5285 +
5286 + if (err)
5287 + break;
5288 +
5289 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5290 + (ba & 0xFFF00000U) == 0x30600000U &&
5291 + nop == 0x01000000U)
5292 + {
5293 + unsigned long addr;
5294 +
5295 + addr = (sethi & 0x003FFFFFU) << 10;
5296 + regs->u_regs[UREG_G1] = addr;
5297 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5298 +
5299 + if (test_thread_flag(TIF_32BIT))
5300 + addr &= 0xFFFFFFFFUL;
5301 +
5302 + regs->tpc = addr;
5303 + regs->tnpc = addr+4;
5304 + return 2;
5305 + }
5306 + } while (0);
5307 +
5308 +#endif
5309 +
5310 + return 1;
5311 +}
5312 +
5313 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5314 +{
5315 + unsigned long i;
5316 +
5317 + printk(KERN_ERR "PAX: bytes at PC: ");
5318 + for (i = 0; i < 8; i++) {
5319 + unsigned int c;
5320 + if (get_user(c, (unsigned int *)pc+i))
5321 + printk(KERN_CONT "???????? ");
5322 + else
5323 + printk(KERN_CONT "%08x ", c);
5324 + }
5325 + printk("\n");
5326 +}
5327 +#endif
5328 +
5329 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5330 {
5331 struct mm_struct *mm = current->mm;
5332 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5333 if (!vma)
5334 goto bad_area;
5335
5336 +#ifdef CONFIG_PAX_PAGEEXEC
5337 + /* PaX: detect ITLB misses on non-exec pages */
5338 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5339 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5340 + {
5341 + if (address != regs->tpc)
5342 + goto good_area;
5343 +
5344 + up_read(&mm->mmap_sem);
5345 + switch (pax_handle_fetch_fault(regs)) {
5346 +
5347 +#ifdef CONFIG_PAX_EMUPLT
5348 + case 2:
5349 + case 3:
5350 + return;
5351 +#endif
5352 +
5353 + }
5354 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5355 + do_group_exit(SIGKILL);
5356 + }
5357 +#endif
5358 +
5359 /* Pure DTLB misses do not tell us whether the fault causing
5360 * load/store/atomic was a write or not, it only says that there
5361 * was no match. So in such a case we (carefully) read the
5362 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
5363 index f4e9764..5682724 100644
5364 --- a/arch/sparc/mm/hugetlbpage.c
5365 +++ b/arch/sparc/mm/hugetlbpage.c
5366 @@ -68,7 +68,7 @@ full_search:
5367 }
5368 return -ENOMEM;
5369 }
5370 - if (likely(!vma || addr + len <= vma->vm_start)) {
5371 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5372 /*
5373 * Remember the place where we stopped the search:
5374 */
5375 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5376 /* make sure it can fit in the remaining address space */
5377 if (likely(addr > len)) {
5378 vma = find_vma(mm, addr-len);
5379 - if (!vma || addr <= vma->vm_start) {
5380 + if (check_heap_stack_gap(vma, addr - len, len)) {
5381 /* remember the address as a hint for next time */
5382 return (mm->free_area_cache = addr-len);
5383 }
5384 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5385 if (unlikely(mm->mmap_base < len))
5386 goto bottomup;
5387
5388 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5389 + addr = mm->mmap_base - len;
5390
5391 do {
5392 + addr &= HPAGE_MASK;
5393 /*
5394 * Lookup failure means no vma is above this address,
5395 * else if new region fits below vma->vm_start,
5396 * return with success:
5397 */
5398 vma = find_vma(mm, addr);
5399 - if (likely(!vma || addr+len <= vma->vm_start)) {
5400 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5401 /* remember the address as a hint for next time */
5402 return (mm->free_area_cache = addr);
5403 }
5404 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5405 mm->cached_hole_size = vma->vm_start - addr;
5406
5407 /* try just below the current vma->vm_start */
5408 - addr = (vma->vm_start-len) & HPAGE_MASK;
5409 - } while (likely(len < vma->vm_start));
5410 + addr = skip_heap_stack_gap(vma, len);
5411 + } while (!IS_ERR_VALUE(addr));
5412
5413 bottomup:
5414 /*
5415 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
5416 if (addr) {
5417 addr = ALIGN(addr, HPAGE_SIZE);
5418 vma = find_vma(mm, addr);
5419 - if (task_size - len >= addr &&
5420 - (!vma || addr + len <= vma->vm_start))
5421 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5422 return addr;
5423 }
5424 if (mm->get_unmapped_area == arch_get_unmapped_area)
5425 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
5426 index 7b00de6..78239f4 100644
5427 --- a/arch/sparc/mm/init_32.c
5428 +++ b/arch/sparc/mm/init_32.c
5429 @@ -316,6 +316,9 @@ extern void device_scan(void);
5430 pgprot_t PAGE_SHARED __read_mostly;
5431 EXPORT_SYMBOL(PAGE_SHARED);
5432
5433 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5434 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5435 +
5436 void __init paging_init(void)
5437 {
5438 switch(sparc_cpu_model) {
5439 @@ -344,17 +347,17 @@ void __init paging_init(void)
5440
5441 /* Initialize the protection map with non-constant, MMU dependent values. */
5442 protection_map[0] = PAGE_NONE;
5443 - protection_map[1] = PAGE_READONLY;
5444 - protection_map[2] = PAGE_COPY;
5445 - protection_map[3] = PAGE_COPY;
5446 + protection_map[1] = PAGE_READONLY_NOEXEC;
5447 + protection_map[2] = PAGE_COPY_NOEXEC;
5448 + protection_map[3] = PAGE_COPY_NOEXEC;
5449 protection_map[4] = PAGE_READONLY;
5450 protection_map[5] = PAGE_READONLY;
5451 protection_map[6] = PAGE_COPY;
5452 protection_map[7] = PAGE_COPY;
5453 protection_map[8] = PAGE_NONE;
5454 - protection_map[9] = PAGE_READONLY;
5455 - protection_map[10] = PAGE_SHARED;
5456 - protection_map[11] = PAGE_SHARED;
5457 + protection_map[9] = PAGE_READONLY_NOEXEC;
5458 + protection_map[10] = PAGE_SHARED_NOEXEC;
5459 + protection_map[11] = PAGE_SHARED_NOEXEC;
5460 protection_map[12] = PAGE_READONLY;
5461 protection_map[13] = PAGE_READONLY;
5462 protection_map[14] = PAGE_SHARED;
5463 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
5464 index cbef74e..c38fead 100644
5465 --- a/arch/sparc/mm/srmmu.c
5466 +++ b/arch/sparc/mm/srmmu.c
5467 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5468 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5469 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5470 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5471 +
5472 +#ifdef CONFIG_PAX_PAGEEXEC
5473 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5474 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5475 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5476 +#endif
5477 +
5478 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5479 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5480
5481 diff --git a/arch/um/Makefile b/arch/um/Makefile
5482 index c0f712c..3a5c4c9 100644
5483 --- a/arch/um/Makefile
5484 +++ b/arch/um/Makefile
5485 @@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
5486 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
5487 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
5488
5489 +ifdef CONSTIFY_PLUGIN
5490 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5491 +endif
5492 +
5493 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
5494
5495 #This will adjust *FLAGS accordingly to the platform.
5496 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
5497 index 6c03acd..a5e0215 100644
5498 --- a/arch/um/include/asm/kmap_types.h
5499 +++ b/arch/um/include/asm/kmap_types.h
5500 @@ -23,6 +23,7 @@ enum km_type {
5501 KM_IRQ1,
5502 KM_SOFTIRQ0,
5503 KM_SOFTIRQ1,
5504 + KM_CLEARPAGE,
5505 KM_TYPE_NR
5506 };
5507
5508 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
5509 index 4cc9b6c..02e5029 100644
5510 --- a/arch/um/include/asm/page.h
5511 +++ b/arch/um/include/asm/page.h
5512 @@ -14,6 +14,9 @@
5513 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5514 #define PAGE_MASK (~(PAGE_SIZE-1))
5515
5516 +#define ktla_ktva(addr) (addr)
5517 +#define ktva_ktla(addr) (addr)
5518 +
5519 #ifndef __ASSEMBLY__
5520
5521 struct page;
5522 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
5523 index 21c1ae7..4640aaa 100644
5524 --- a/arch/um/kernel/process.c
5525 +++ b/arch/um/kernel/process.c
5526 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5527 return 2;
5528 }
5529
5530 -/*
5531 - * Only x86 and x86_64 have an arch_align_stack().
5532 - * All other arches have "#define arch_align_stack(x) (x)"
5533 - * in their asm/system.h
5534 - * As this is included in UML from asm-um/system-generic.h,
5535 - * we can use it to behave as the subarch does.
5536 - */
5537 -#ifndef arch_align_stack
5538 -unsigned long arch_align_stack(unsigned long sp)
5539 -{
5540 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5541 - sp -= get_random_int() % 8192;
5542 - return sp & ~0xf;
5543 -}
5544 -#endif
5545 -
5546 unsigned long get_wchan(struct task_struct *p)
5547 {
5548 unsigned long stack_page, sp, ip;
5549 diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
5550 index d1b93c4..ae1b7fd 100644
5551 --- a/arch/um/sys-i386/shared/sysdep/system.h
5552 +++ b/arch/um/sys-i386/shared/sysdep/system.h
5553 @@ -17,7 +17,7 @@
5554 # define AT_VECTOR_SIZE_ARCH 1
5555 #endif
5556
5557 -extern unsigned long arch_align_stack(unsigned long sp);
5558 +#define arch_align_stack(x) ((x) & ~0xfUL)
5559
5560 void default_idle(void);
5561
5562 diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
5563 index 70ca357..728d1cc 100644
5564 --- a/arch/um/sys-i386/syscalls.c
5565 +++ b/arch/um/sys-i386/syscalls.c
5566 @@ -11,6 +11,21 @@
5567 #include "asm/uaccess.h"
5568 #include "asm/unistd.h"
5569
5570 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5571 +{
5572 + unsigned long pax_task_size = TASK_SIZE;
5573 +
5574 +#ifdef CONFIG_PAX_SEGMEXEC
5575 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5576 + pax_task_size = SEGMEXEC_TASK_SIZE;
5577 +#endif
5578 +
5579 + if (len > pax_task_size || addr > pax_task_size - len)
5580 + return -EINVAL;
5581 +
5582 + return 0;
5583 +}
5584 +
5585 /*
5586 * The prototype on i386 is:
5587 *
5588 diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
5589 index d1b93c4..ae1b7fd 100644
5590 --- a/arch/um/sys-x86_64/shared/sysdep/system.h
5591 +++ b/arch/um/sys-x86_64/shared/sysdep/system.h
5592 @@ -17,7 +17,7 @@
5593 # define AT_VECTOR_SIZE_ARCH 1
5594 #endif
5595
5596 -extern unsigned long arch_align_stack(unsigned long sp);
5597 +#define arch_align_stack(x) ((x) & ~0xfUL)
5598
5599 void default_idle(void);
5600
5601 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
5602 index 6a47bb2..dc9a868 100644
5603 --- a/arch/x86/Kconfig
5604 +++ b/arch/x86/Kconfig
5605 @@ -236,7 +236,7 @@ config X86_HT
5606
5607 config X86_32_LAZY_GS
5608 def_bool y
5609 - depends on X86_32 && !CC_STACKPROTECTOR
5610 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
5611
5612 config ARCH_HWEIGHT_CFLAGS
5613 string
5614 @@ -1019,7 +1019,7 @@ choice
5615
5616 config NOHIGHMEM
5617 bool "off"
5618 - depends on !X86_NUMAQ
5619 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5620 ---help---
5621 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
5622 However, the address space of 32-bit x86 processors is only 4
5623 @@ -1056,7 +1056,7 @@ config NOHIGHMEM
5624
5625 config HIGHMEM4G
5626 bool "4GB"
5627 - depends on !X86_NUMAQ
5628 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
5629 ---help---
5630 Select this if you have a 32-bit processor and between 1 and 4
5631 gigabytes of physical RAM.
5632 @@ -1110,7 +1110,7 @@ config PAGE_OFFSET
5633 hex
5634 default 0xB0000000 if VMSPLIT_3G_OPT
5635 default 0x80000000 if VMSPLIT_2G
5636 - default 0x78000000 if VMSPLIT_2G_OPT
5637 + default 0x70000000 if VMSPLIT_2G_OPT
5638 default 0x40000000 if VMSPLIT_1G
5639 default 0xC0000000
5640 depends on X86_32
5641 @@ -1484,6 +1484,7 @@ config SECCOMP
5642
5643 config CC_STACKPROTECTOR
5644 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
5645 + depends on X86_64 || !PAX_MEMORY_UDEREF
5646 ---help---
5647 This option turns on the -fstack-protector GCC feature. This
5648 feature puts, at the beginning of functions, a canary value on
5649 @@ -1541,6 +1542,7 @@ config KEXEC_JUMP
5650 config PHYSICAL_START
5651 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
5652 default "0x1000000"
5653 + range 0x400000 0x40000000
5654 ---help---
5655 This gives the physical address where the kernel is loaded.
5656
5657 @@ -1604,6 +1606,7 @@ config X86_NEED_RELOCS
5658 config PHYSICAL_ALIGN
5659 hex "Alignment value to which kernel should be aligned" if X86_32
5660 default "0x1000000"
5661 + range 0x400000 0x1000000 if PAX_KERNEXEC
5662 range 0x2000 0x1000000
5663 ---help---
5664 This value puts the alignment restrictions on physical address
5665 @@ -1635,9 +1638,10 @@ config HOTPLUG_CPU
5666 Say N if you want to disable CPU hotplug.
5667
5668 config COMPAT_VDSO
5669 - def_bool y
5670 + def_bool n
5671 prompt "Compat VDSO support"
5672 depends on X86_32 || IA32_EMULATION
5673 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
5674 ---help---
5675 Map the 32-bit VDSO to the predictable old-style address too.
5676
5677 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
5678 index e3ca7e0..b30b28a 100644
5679 --- a/arch/x86/Kconfig.cpu
5680 +++ b/arch/x86/Kconfig.cpu
5681 @@ -341,7 +341,7 @@ config X86_PPRO_FENCE
5682
5683 config X86_F00F_BUG
5684 def_bool y
5685 - depends on M586MMX || M586TSC || M586 || M486 || M386
5686 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
5687
5688 config X86_INVD_BUG
5689 def_bool y
5690 @@ -365,7 +365,7 @@ config X86_POPAD_OK
5691
5692 config X86_ALIGNMENT_16
5693 def_bool y
5694 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5695 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
5696
5697 config X86_INTEL_USERCOPY
5698 def_bool y
5699 @@ -411,7 +411,7 @@ config X86_CMPXCHG64
5700 # generates cmov.
5701 config X86_CMOV
5702 def_bool y
5703 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5704 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
5705
5706 config X86_MINIMUM_CPU_FAMILY
5707 int
5708 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
5709 index c0f8a5c..6404f61 100644
5710 --- a/arch/x86/Kconfig.debug
5711 +++ b/arch/x86/Kconfig.debug
5712 @@ -81,7 +81,7 @@ config X86_PTDUMP
5713 config DEBUG_RODATA
5714 bool "Write protect kernel read-only data structures"
5715 default y
5716 - depends on DEBUG_KERNEL
5717 + depends on DEBUG_KERNEL && BROKEN
5718 ---help---
5719 Mark the kernel read-only data as write-protected in the pagetables,
5720 in order to catch accidental (and incorrect) writes to such const
5721 @@ -99,7 +99,7 @@ config DEBUG_RODATA_TEST
5722
5723 config DEBUG_SET_MODULE_RONX
5724 bool "Set loadable kernel module data as NX and text as RO"
5725 - depends on MODULES
5726 + depends on MODULES && BROKEN
5727 ---help---
5728 This option helps catch unintended modifications to loadable
5729 kernel module's text and read-only data. It also prevents execution
5730 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
5731 index b02e509..2631e48 100644
5732 --- a/arch/x86/Makefile
5733 +++ b/arch/x86/Makefile
5734 @@ -46,6 +46,7 @@ else
5735 UTS_MACHINE := x86_64
5736 CHECKFLAGS += -D__x86_64__ -m64
5737
5738 + biarch := $(call cc-option,-m64)
5739 KBUILD_AFLAGS += -m64
5740 KBUILD_CFLAGS += -m64
5741
5742 @@ -195,3 +196,12 @@ define archhelp
5743 echo ' FDARGS="..." arguments for the booted kernel'
5744 echo ' FDINITRD=file initrd for the booted kernel'
5745 endef
5746 +
5747 +define OLD_LD
5748 +
5749 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
5750 +*** Please upgrade your binutils to 2.18 or newer
5751 +endef
5752 +
5753 +archprepare:
5754 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
5755 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5756 index 95365a8..52f857b 100644
5757 --- a/arch/x86/boot/Makefile
5758 +++ b/arch/x86/boot/Makefile
5759 @@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
5760 $(call cc-option, -fno-stack-protector) \
5761 $(call cc-option, -mpreferred-stack-boundary=2)
5762 KBUILD_CFLAGS += $(call cc-option, -m32)
5763 +ifdef CONSTIFY_PLUGIN
5764 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5765 +endif
5766 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5767 GCOV_PROFILE := n
5768
5769 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
5770 index 878e4b9..20537ab 100644
5771 --- a/arch/x86/boot/bitops.h
5772 +++ b/arch/x86/boot/bitops.h
5773 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5774 u8 v;
5775 const u32 *p = (const u32 *)addr;
5776
5777 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5778 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5779 return v;
5780 }
5781
5782 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
5783
5784 static inline void set_bit(int nr, void *addr)
5785 {
5786 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5787 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5788 }
5789
5790 #endif /* BOOT_BITOPS_H */
5791 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
5792 index c7093bd..d4247ffe0 100644
5793 --- a/arch/x86/boot/boot.h
5794 +++ b/arch/x86/boot/boot.h
5795 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5796 static inline u16 ds(void)
5797 {
5798 u16 seg;
5799 - asm("movw %%ds,%0" : "=rm" (seg));
5800 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5801 return seg;
5802 }
5803
5804 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
5805 static inline int memcmp(const void *s1, const void *s2, size_t len)
5806 {
5807 u8 diff;
5808 - asm("repe; cmpsb; setnz %0"
5809 + asm volatile("repe; cmpsb; setnz %0"
5810 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5811 return diff;
5812 }
5813 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
5814 index 09664ef..edc5d03 100644
5815 --- a/arch/x86/boot/compressed/Makefile
5816 +++ b/arch/x86/boot/compressed/Makefile
5817 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
5818 KBUILD_CFLAGS += $(cflags-y)
5819 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5820 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5821 +ifdef CONSTIFY_PLUGIN
5822 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5823 +endif
5824
5825 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5826 GCOV_PROFILE := n
5827 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5828 index 67a655a..b924059 100644
5829 --- a/arch/x86/boot/compressed/head_32.S
5830 +++ b/arch/x86/boot/compressed/head_32.S
5831 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5832 notl %eax
5833 andl %eax, %ebx
5834 #else
5835 - movl $LOAD_PHYSICAL_ADDR, %ebx
5836 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5837 #endif
5838
5839 /* Target address to relocate to for decompression */
5840 @@ -162,7 +162,7 @@ relocated:
5841 * and where it was actually loaded.
5842 */
5843 movl %ebp, %ebx
5844 - subl $LOAD_PHYSICAL_ADDR, %ebx
5845 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5846 jz 2f /* Nothing to be done if loaded at compiled addr. */
5847 /*
5848 * Process relocations.
5849 @@ -170,8 +170,7 @@ relocated:
5850
5851 1: subl $4, %edi
5852 movl (%edi), %ecx
5853 - testl %ecx, %ecx
5854 - jz 2f
5855 + jecxz 2f
5856 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5857 jmp 1b
5858 2:
5859 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5860 index 35af09d..99c9676 100644
5861 --- a/arch/x86/boot/compressed/head_64.S
5862 +++ b/arch/x86/boot/compressed/head_64.S
5863 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5864 notl %eax
5865 andl %eax, %ebx
5866 #else
5867 - movl $LOAD_PHYSICAL_ADDR, %ebx
5868 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5869 #endif
5870
5871 /* Target address to relocate to for decompression */
5872 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5873 notq %rax
5874 andq %rax, %rbp
5875 #else
5876 - movq $LOAD_PHYSICAL_ADDR, %rbp
5877 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5878 #endif
5879
5880 /* Target address to relocate to for decompression */
5881 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5882 index 3a19d04..7c1d55a 100644
5883 --- a/arch/x86/boot/compressed/misc.c
5884 +++ b/arch/x86/boot/compressed/misc.c
5885 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5886 case PT_LOAD:
5887 #ifdef CONFIG_RELOCATABLE
5888 dest = output;
5889 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5890 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5891 #else
5892 dest = (void *)(phdr->p_paddr);
5893 #endif
5894 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
5895 error("Destination address too large");
5896 #endif
5897 #ifndef CONFIG_RELOCATABLE
5898 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5899 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5900 error("Wrong destination address");
5901 #endif
5902
5903 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
5904 index 89bbf4e..869908e 100644
5905 --- a/arch/x86/boot/compressed/relocs.c
5906 +++ b/arch/x86/boot/compressed/relocs.c
5907 @@ -13,8 +13,11 @@
5908
5909 static void die(char *fmt, ...);
5910
5911 +#include "../../../../include/generated/autoconf.h"
5912 +
5913 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5914 static Elf32_Ehdr ehdr;
5915 +static Elf32_Phdr *phdr;
5916 static unsigned long reloc_count, reloc_idx;
5917 static unsigned long *relocs;
5918
5919 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5920 }
5921 }
5922
5923 +static void read_phdrs(FILE *fp)
5924 +{
5925 + unsigned int i;
5926 +
5927 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5928 + if (!phdr) {
5929 + die("Unable to allocate %d program headers\n",
5930 + ehdr.e_phnum);
5931 + }
5932 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5933 + die("Seek to %d failed: %s\n",
5934 + ehdr.e_phoff, strerror(errno));
5935 + }
5936 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5937 + die("Cannot read ELF program headers: %s\n",
5938 + strerror(errno));
5939 + }
5940 + for(i = 0; i < ehdr.e_phnum; i++) {
5941 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5942 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5943 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5944 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5945 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5946 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5947 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5948 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5949 + }
5950 +
5951 +}
5952 +
5953 static void read_shdrs(FILE *fp)
5954 {
5955 - int i;
5956 + unsigned int i;
5957 Elf32_Shdr shdr;
5958
5959 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5960 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5961
5962 static void read_strtabs(FILE *fp)
5963 {
5964 - int i;
5965 + unsigned int i;
5966 for (i = 0; i < ehdr.e_shnum; i++) {
5967 struct section *sec = &secs[i];
5968 if (sec->shdr.sh_type != SHT_STRTAB) {
5969 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5970
5971 static void read_symtabs(FILE *fp)
5972 {
5973 - int i,j;
5974 + unsigned int i,j;
5975 for (i = 0; i < ehdr.e_shnum; i++) {
5976 struct section *sec = &secs[i];
5977 if (sec->shdr.sh_type != SHT_SYMTAB) {
5978 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5979
5980 static void read_relocs(FILE *fp)
5981 {
5982 - int i,j;
5983 + unsigned int i,j;
5984 + uint32_t base;
5985 +
5986 for (i = 0; i < ehdr.e_shnum; i++) {
5987 struct section *sec = &secs[i];
5988 if (sec->shdr.sh_type != SHT_REL) {
5989 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5990 die("Cannot read symbol table: %s\n",
5991 strerror(errno));
5992 }
5993 + base = 0;
5994 + for (j = 0; j < ehdr.e_phnum; j++) {
5995 + if (phdr[j].p_type != PT_LOAD )
5996 + continue;
5997 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5998 + continue;
5999 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6000 + break;
6001 + }
6002 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6003 Elf32_Rel *rel = &sec->reltab[j];
6004 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6005 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6006 rel->r_info = elf32_to_cpu(rel->r_info);
6007 }
6008 }
6009 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
6010
6011 static void print_absolute_symbols(void)
6012 {
6013 - int i;
6014 + unsigned int i;
6015 printf("Absolute symbols\n");
6016 printf(" Num: Value Size Type Bind Visibility Name\n");
6017 for (i = 0; i < ehdr.e_shnum; i++) {
6018 struct section *sec = &secs[i];
6019 char *sym_strtab;
6020 Elf32_Sym *sh_symtab;
6021 - int j;
6022 + unsigned int j;
6023
6024 if (sec->shdr.sh_type != SHT_SYMTAB) {
6025 continue;
6026 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
6027
6028 static void print_absolute_relocs(void)
6029 {
6030 - int i, printed = 0;
6031 + unsigned int i, printed = 0;
6032
6033 for (i = 0; i < ehdr.e_shnum; i++) {
6034 struct section *sec = &secs[i];
6035 struct section *sec_applies, *sec_symtab;
6036 char *sym_strtab;
6037 Elf32_Sym *sh_symtab;
6038 - int j;
6039 + unsigned int j;
6040 if (sec->shdr.sh_type != SHT_REL) {
6041 continue;
6042 }
6043 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
6044
6045 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6046 {
6047 - int i;
6048 + unsigned int i;
6049 /* Walk through the relocations */
6050 for (i = 0; i < ehdr.e_shnum; i++) {
6051 char *sym_strtab;
6052 Elf32_Sym *sh_symtab;
6053 struct section *sec_applies, *sec_symtab;
6054 - int j;
6055 + unsigned int j;
6056 struct section *sec = &secs[i];
6057
6058 if (sec->shdr.sh_type != SHT_REL) {
6059 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6060 !is_rel_reloc(sym_name(sym_strtab, sym))) {
6061 continue;
6062 }
6063 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6064 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6065 + continue;
6066 +
6067 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6068 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6069 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6070 + continue;
6071 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6072 + continue;
6073 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6074 + continue;
6075 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6076 + continue;
6077 +#endif
6078 +
6079 switch (r_type) {
6080 case R_386_NONE:
6081 case R_386_PC32:
6082 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
6083
6084 static void emit_relocs(int as_text)
6085 {
6086 - int i;
6087 + unsigned int i;
6088 /* Count how many relocations I have and allocate space for them. */
6089 reloc_count = 0;
6090 walk_relocs(count_reloc);
6091 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
6092 fname, strerror(errno));
6093 }
6094 read_ehdr(fp);
6095 + read_phdrs(fp);
6096 read_shdrs(fp);
6097 read_strtabs(fp);
6098 read_symtabs(fp);
6099 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
6100 index 4d3ff03..e4972ff 100644
6101 --- a/arch/x86/boot/cpucheck.c
6102 +++ b/arch/x86/boot/cpucheck.c
6103 @@ -74,7 +74,7 @@ static int has_fpu(void)
6104 u16 fcw = -1, fsw = -1;
6105 u32 cr0;
6106
6107 - asm("movl %%cr0,%0" : "=r" (cr0));
6108 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6109 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6110 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6111 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6112 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6113 {
6114 u32 f0, f1;
6115
6116 - asm("pushfl ; "
6117 + asm volatile("pushfl ; "
6118 "pushfl ; "
6119 "popl %0 ; "
6120 "movl %0,%1 ; "
6121 @@ -115,7 +115,7 @@ static void get_flags(void)
6122 set_bit(X86_FEATURE_FPU, cpu.flags);
6123
6124 if (has_eflag(X86_EFLAGS_ID)) {
6125 - asm("cpuid"
6126 + asm volatile("cpuid"
6127 : "=a" (max_intel_level),
6128 "=b" (cpu_vendor[0]),
6129 "=d" (cpu_vendor[1]),
6130 @@ -124,7 +124,7 @@ static void get_flags(void)
6131
6132 if (max_intel_level >= 0x00000001 &&
6133 max_intel_level <= 0x0000ffff) {
6134 - asm("cpuid"
6135 + asm volatile("cpuid"
6136 : "=a" (tfms),
6137 "=c" (cpu.flags[4]),
6138 "=d" (cpu.flags[0])
6139 @@ -136,7 +136,7 @@ static void get_flags(void)
6140 cpu.model += ((tfms >> 16) & 0xf) << 4;
6141 }
6142
6143 - asm("cpuid"
6144 + asm volatile("cpuid"
6145 : "=a" (max_amd_level)
6146 : "a" (0x80000000)
6147 : "ebx", "ecx", "edx");
6148 @@ -144,7 +144,7 @@ static void get_flags(void)
6149 if (max_amd_level >= 0x80000001 &&
6150 max_amd_level <= 0x8000ffff) {
6151 u32 eax = 0x80000001;
6152 - asm("cpuid"
6153 + asm volatile("cpuid"
6154 : "+a" (eax),
6155 "=c" (cpu.flags[6]),
6156 "=d" (cpu.flags[1])
6157 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6158 u32 ecx = MSR_K7_HWCR;
6159 u32 eax, edx;
6160
6161 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6162 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6163 eax &= ~(1 << 15);
6164 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6165 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6166
6167 get_flags(); /* Make sure it really did something */
6168 err = check_flags();
6169 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6170 u32 ecx = MSR_VIA_FCR;
6171 u32 eax, edx;
6172
6173 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6174 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6175 eax |= (1<<1)|(1<<7);
6176 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6177 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6178
6179 set_bit(X86_FEATURE_CX8, cpu.flags);
6180 err = check_flags();
6181 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
6182 u32 eax, edx;
6183 u32 level = 1;
6184
6185 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6186 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6187 - asm("cpuid"
6188 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6189 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6190 + asm volatile("cpuid"
6191 : "+a" (level), "=d" (cpu.flags[0])
6192 : : "ecx", "ebx");
6193 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6194 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6195
6196 err = check_flags();
6197 }
6198 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
6199 index 93e689f..504ba09 100644
6200 --- a/arch/x86/boot/header.S
6201 +++ b/arch/x86/boot/header.S
6202 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
6203 # single linked list of
6204 # struct setup_data
6205
6206 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6207 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6208
6209 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6210 #define VO_INIT_SIZE (VO__end - VO__text)
6211 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
6212 index db75d07..8e6d0af 100644
6213 --- a/arch/x86/boot/memory.c
6214 +++ b/arch/x86/boot/memory.c
6215 @@ -19,7 +19,7 @@
6216
6217 static int detect_memory_e820(void)
6218 {
6219 - int count = 0;
6220 + unsigned int count = 0;
6221 struct biosregs ireg, oreg;
6222 struct e820entry *desc = boot_params.e820_map;
6223 static struct e820entry buf; /* static so it is zeroed */
6224 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
6225 index 11e8c6e..fdbb1ed 100644
6226 --- a/arch/x86/boot/video-vesa.c
6227 +++ b/arch/x86/boot/video-vesa.c
6228 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6229
6230 boot_params.screen_info.vesapm_seg = oreg.es;
6231 boot_params.screen_info.vesapm_off = oreg.di;
6232 + boot_params.screen_info.vesapm_size = oreg.cx;
6233 }
6234
6235 /*
6236 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
6237 index 43eda28..5ab5fdb 100644
6238 --- a/arch/x86/boot/video.c
6239 +++ b/arch/x86/boot/video.c
6240 @@ -96,7 +96,7 @@ static void store_mode_params(void)
6241 static unsigned int get_entry(void)
6242 {
6243 char entry_buf[4];
6244 - int i, len = 0;
6245 + unsigned int i, len = 0;
6246 int key;
6247 unsigned int v;
6248
6249 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
6250 index 5b577d5..3c1fed4 100644
6251 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
6252 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
6253 @@ -8,6 +8,8 @@
6254 * including this sentence is retained in full.
6255 */
6256
6257 +#include <asm/alternative-asm.h>
6258 +
6259 .extern crypto_ft_tab
6260 .extern crypto_it_tab
6261 .extern crypto_fl_tab
6262 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
6263 je B192; \
6264 leaq 32(r9),r9;
6265
6266 +#define ret pax_force_retaddr 0, 1; ret
6267 +
6268 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
6269 movq r1,r2; \
6270 movq r3,r4; \
6271 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
6272 index be6d9e3..21fbbca 100644
6273 --- a/arch/x86/crypto/aesni-intel_asm.S
6274 +++ b/arch/x86/crypto/aesni-intel_asm.S
6275 @@ -31,6 +31,7 @@
6276
6277 #include <linux/linkage.h>
6278 #include <asm/inst.h>
6279 +#include <asm/alternative-asm.h>
6280
6281 #ifdef __x86_64__
6282 .data
6283 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
6284 pop %r14
6285 pop %r13
6286 pop %r12
6287 + pax_force_retaddr 0, 1
6288 ret
6289 +ENDPROC(aesni_gcm_dec)
6290
6291
6292 /*****************************************************************************
6293 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
6294 pop %r14
6295 pop %r13
6296 pop %r12
6297 + pax_force_retaddr 0, 1
6298 ret
6299 +ENDPROC(aesni_gcm_enc)
6300
6301 #endif
6302
6303 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
6304 pxor %xmm1, %xmm0
6305 movaps %xmm0, (TKEYP)
6306 add $0x10, TKEYP
6307 + pax_force_retaddr_bts
6308 ret
6309
6310 .align 4
6311 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
6312 shufps $0b01001110, %xmm2, %xmm1
6313 movaps %xmm1, 0x10(TKEYP)
6314 add $0x20, TKEYP
6315 + pax_force_retaddr_bts
6316 ret
6317
6318 .align 4
6319 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
6320
6321 movaps %xmm0, (TKEYP)
6322 add $0x10, TKEYP
6323 + pax_force_retaddr_bts
6324 ret
6325
6326 .align 4
6327 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
6328 pxor %xmm1, %xmm2
6329 movaps %xmm2, (TKEYP)
6330 add $0x10, TKEYP
6331 + pax_force_retaddr_bts
6332 ret
6333
6334 /*
6335 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
6336 #ifndef __x86_64__
6337 popl KEYP
6338 #endif
6339 + pax_force_retaddr 0, 1
6340 ret
6341 +ENDPROC(aesni_set_key)
6342
6343 /*
6344 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
6345 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
6346 popl KLEN
6347 popl KEYP
6348 #endif
6349 + pax_force_retaddr 0, 1
6350 ret
6351 +ENDPROC(aesni_enc)
6352
6353 /*
6354 * _aesni_enc1: internal ABI
6355 @@ -1959,6 +1972,7 @@ _aesni_enc1:
6356 AESENC KEY STATE
6357 movaps 0x70(TKEYP), KEY
6358 AESENCLAST KEY STATE
6359 + pax_force_retaddr_bts
6360 ret
6361
6362 /*
6363 @@ -2067,6 +2081,7 @@ _aesni_enc4:
6364 AESENCLAST KEY STATE2
6365 AESENCLAST KEY STATE3
6366 AESENCLAST KEY STATE4
6367 + pax_force_retaddr_bts
6368 ret
6369
6370 /*
6371 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
6372 popl KLEN
6373 popl KEYP
6374 #endif
6375 + pax_force_retaddr 0, 1
6376 ret
6377 +ENDPROC(aesni_dec)
6378
6379 /*
6380 * _aesni_dec1: internal ABI
6381 @@ -2146,6 +2163,7 @@ _aesni_dec1:
6382 AESDEC KEY STATE
6383 movaps 0x70(TKEYP), KEY
6384 AESDECLAST KEY STATE
6385 + pax_force_retaddr_bts
6386 ret
6387
6388 /*
6389 @@ -2254,6 +2272,7 @@ _aesni_dec4:
6390 AESDECLAST KEY STATE2
6391 AESDECLAST KEY STATE3
6392 AESDECLAST KEY STATE4
6393 + pax_force_retaddr_bts
6394 ret
6395
6396 /*
6397 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
6398 popl KEYP
6399 popl LEN
6400 #endif
6401 + pax_force_retaddr 0, 1
6402 ret
6403 +ENDPROC(aesni_ecb_enc)
6404
6405 /*
6406 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6407 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
6408 popl KEYP
6409 popl LEN
6410 #endif
6411 + pax_force_retaddr 0, 1
6412 ret
6413 +ENDPROC(aesni_ecb_dec)
6414
6415 /*
6416 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6417 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
6418 popl LEN
6419 popl IVP
6420 #endif
6421 + pax_force_retaddr 0, 1
6422 ret
6423 +ENDPROC(aesni_cbc_enc)
6424
6425 /*
6426 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
6427 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
6428 popl LEN
6429 popl IVP
6430 #endif
6431 + pax_force_retaddr 0, 1
6432 ret
6433 +ENDPROC(aesni_cbc_dec)
6434
6435 #ifdef __x86_64__
6436 .align 16
6437 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
6438 mov $1, TCTR_LOW
6439 MOVQ_R64_XMM TCTR_LOW INC
6440 MOVQ_R64_XMM CTR TCTR_LOW
6441 + pax_force_retaddr_bts
6442 ret
6443
6444 /*
6445 @@ -2552,6 +2580,7 @@ _aesni_inc:
6446 .Linc_low:
6447 movaps CTR, IV
6448 PSHUFB_XMM BSWAP_MASK IV
6449 + pax_force_retaddr_bts
6450 ret
6451
6452 /*
6453 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
6454 .Lctr_enc_ret:
6455 movups IV, (IVP)
6456 .Lctr_enc_just_ret:
6457 + pax_force_retaddr 0, 1
6458 ret
6459 +ENDPROC(aesni_ctr_enc)
6460 #endif
6461 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6462 index 6214a9b..1f4fc9a 100644
6463 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
6464 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
6465 @@ -1,3 +1,5 @@
6466 +#include <asm/alternative-asm.h>
6467 +
6468 # enter ECRYPT_encrypt_bytes
6469 .text
6470 .p2align 5
6471 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
6472 add %r11,%rsp
6473 mov %rdi,%rax
6474 mov %rsi,%rdx
6475 + pax_force_retaddr 0, 1
6476 ret
6477 # bytesatleast65:
6478 ._bytesatleast65:
6479 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
6480 add %r11,%rsp
6481 mov %rdi,%rax
6482 mov %rsi,%rdx
6483 + pax_force_retaddr
6484 ret
6485 # enter ECRYPT_ivsetup
6486 .text
6487 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
6488 add %r11,%rsp
6489 mov %rdi,%rax
6490 mov %rsi,%rdx
6491 + pax_force_retaddr
6492 ret
6493 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
6494 index 573aa10..b73ad89 100644
6495 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
6496 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
6497 @@ -21,6 +21,7 @@
6498 .text
6499
6500 #include <asm/asm-offsets.h>
6501 +#include <asm/alternative-asm.h>
6502
6503 #define a_offset 0
6504 #define b_offset 4
6505 @@ -269,6 +270,7 @@ twofish_enc_blk:
6506
6507 popq R1
6508 movq $1,%rax
6509 + pax_force_retaddr 0, 1
6510 ret
6511
6512 twofish_dec_blk:
6513 @@ -321,4 +323,5 @@ twofish_dec_blk:
6514
6515 popq R1
6516 movq $1,%rax
6517 + pax_force_retaddr 0, 1
6518 ret
6519 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
6520 index fd84387..0b4af7d 100644
6521 --- a/arch/x86/ia32/ia32_aout.c
6522 +++ b/arch/x86/ia32/ia32_aout.c
6523 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
6524 unsigned long dump_start, dump_size;
6525 struct user32 dump;
6526
6527 + memset(&dump, 0, sizeof(dump));
6528 +
6529 fs = get_fs();
6530 set_fs(KERNEL_DS);
6531 has_dumped = 1;
6532 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
6533 index 6557769..ef6ae89 100644
6534 --- a/arch/x86/ia32/ia32_signal.c
6535 +++ b/arch/x86/ia32/ia32_signal.c
6536 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
6537 }
6538 seg = get_fs();
6539 set_fs(KERNEL_DS);
6540 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
6541 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
6542 set_fs(seg);
6543 if (ret >= 0 && uoss_ptr) {
6544 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
6545 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
6546 */
6547 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6548 size_t frame_size,
6549 - void **fpstate)
6550 + void __user **fpstate)
6551 {
6552 unsigned long sp;
6553
6554 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6555
6556 if (used_math()) {
6557 sp = sp - sig_xstate_ia32_size;
6558 - *fpstate = (struct _fpstate_ia32 *) sp;
6559 + *fpstate = (struct _fpstate_ia32 __user *) sp;
6560 if (save_i387_xstate_ia32(*fpstate) < 0)
6561 return (void __user *) -1L;
6562 }
6563 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
6564 sp -= frame_size;
6565 /* Align the stack pointer according to the i386 ABI,
6566 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6567 - sp = ((sp + 4) & -16ul) - 4;
6568 + sp = ((sp - 12) & -16ul) - 4;
6569 return (void __user *) sp;
6570 }
6571
6572 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
6573 * These are actually not used anymore, but left because some
6574 * gdb versions depend on them as a marker.
6575 */
6576 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6577 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6578 } put_user_catch(err);
6579
6580 if (err)
6581 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6582 0xb8,
6583 __NR_ia32_rt_sigreturn,
6584 0x80cd,
6585 - 0,
6586 + 0
6587 };
6588
6589 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6590 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
6591
6592 if (ka->sa.sa_flags & SA_RESTORER)
6593 restorer = ka->sa.sa_restorer;
6594 + else if (current->mm->context.vdso)
6595 + /* Return stub is in 32bit vsyscall page */
6596 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6597 else
6598 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6599 - rt_sigreturn);
6600 + restorer = &frame->retcode;
6601 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6602
6603 /*
6604 * Not actually used anymore, but left because some gdb
6605 * versions need it.
6606 */
6607 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6608 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
6609 } put_user_catch(err);
6610
6611 if (err)
6612 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
6613 index 54edb207..9335b5f 100644
6614 --- a/arch/x86/ia32/ia32entry.S
6615 +++ b/arch/x86/ia32/ia32entry.S
6616 @@ -13,7 +13,9 @@
6617 #include <asm/thread_info.h>
6618 #include <asm/segment.h>
6619 #include <asm/irqflags.h>
6620 +#include <asm/pgtable.h>
6621 #include <linux/linkage.h>
6622 +#include <asm/alternative-asm.h>
6623
6624 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6625 #include <linux/elf-em.h>
6626 @@ -95,6 +97,30 @@ ENTRY(native_irq_enable_sysexit)
6627 ENDPROC(native_irq_enable_sysexit)
6628 #endif
6629
6630 + .macro pax_enter_kernel_user
6631 + pax_set_fptr_mask
6632 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6633 + call pax_enter_kernel_user
6634 +#endif
6635 + .endm
6636 +
6637 + .macro pax_exit_kernel_user
6638 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6639 + call pax_exit_kernel_user
6640 +#endif
6641 +#ifdef CONFIG_PAX_RANDKSTACK
6642 + pushq %rax
6643 + call pax_randomize_kstack
6644 + popq %rax
6645 +#endif
6646 + .endm
6647 +
6648 +.macro pax_erase_kstack
6649 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6650 + call pax_erase_kstack
6651 +#endif
6652 +.endm
6653 +
6654 /*
6655 * 32bit SYSENTER instruction entry.
6656 *
6657 @@ -121,12 +147,6 @@ ENTRY(ia32_sysenter_target)
6658 CFI_REGISTER rsp,rbp
6659 SWAPGS_UNSAFE_STACK
6660 movq PER_CPU_VAR(kernel_stack), %rsp
6661 - addq $(KERNEL_STACK_OFFSET),%rsp
6662 - /*
6663 - * No need to follow this irqs on/off section: the syscall
6664 - * disabled irqs, here we enable it straight after entry:
6665 - */
6666 - ENABLE_INTERRUPTS(CLBR_NONE)
6667 movl %ebp,%ebp /* zero extension */
6668 pushq_cfi $__USER32_DS
6669 /*CFI_REL_OFFSET ss,0*/
6670 @@ -134,25 +154,38 @@ ENTRY(ia32_sysenter_target)
6671 CFI_REL_OFFSET rsp,0
6672 pushfq_cfi
6673 /*CFI_REL_OFFSET rflags,0*/
6674 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6675 - CFI_REGISTER rip,r10
6676 + GET_THREAD_INFO(%r11)
6677 + movl TI_sysenter_return(%r11), %r11d
6678 + CFI_REGISTER rip,r11
6679 pushq_cfi $__USER32_CS
6680 /*CFI_REL_OFFSET cs,0*/
6681 movl %eax, %eax
6682 - pushq_cfi %r10
6683 + pushq_cfi %r11
6684 CFI_REL_OFFSET rip,0
6685 pushq_cfi %rax
6686 cld
6687 SAVE_ARGS 0,1,0
6688 + pax_enter_kernel_user
6689 + /*
6690 + * No need to follow this irqs on/off section: the syscall
6691 + * disabled irqs, here we enable it straight after entry:
6692 + */
6693 + ENABLE_INTERRUPTS(CLBR_NONE)
6694 /* no need to do an access_ok check here because rbp has been
6695 32bit zero extended */
6696 +
6697 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6698 + mov $PAX_USER_SHADOW_BASE,%r11
6699 + add %r11,%rbp
6700 +#endif
6701 +
6702 1: movl (%rbp),%ebp
6703 .section __ex_table,"a"
6704 .quad 1b,ia32_badarg
6705 .previous
6706 - GET_THREAD_INFO(%r10)
6707 - orl $TS_COMPAT,TI_status(%r10)
6708 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6709 + GET_THREAD_INFO(%r11)
6710 + orl $TS_COMPAT,TI_status(%r11)
6711 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6712 CFI_REMEMBER_STATE
6713 jnz sysenter_tracesys
6714 cmpq $(IA32_NR_syscalls-1),%rax
6715 @@ -162,13 +195,15 @@ sysenter_do_call:
6716 sysenter_dispatch:
6717 call *ia32_sys_call_table(,%rax,8)
6718 movq %rax,RAX-ARGOFFSET(%rsp)
6719 - GET_THREAD_INFO(%r10)
6720 + GET_THREAD_INFO(%r11)
6721 DISABLE_INTERRUPTS(CLBR_NONE)
6722 TRACE_IRQS_OFF
6723 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6724 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6725 jnz sysexit_audit
6726 sysexit_from_sys_call:
6727 - andl $~TS_COMPAT,TI_status(%r10)
6728 + pax_exit_kernel_user
6729 + pax_erase_kstack
6730 + andl $~TS_COMPAT,TI_status(%r11)
6731 /* clear IF, that popfq doesn't enable interrupts early */
6732 andl $~0x200,EFLAGS-R11(%rsp)
6733 movl RIP-R11(%rsp),%edx /* User %eip */
6734 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
6735 movl %eax,%esi /* 2nd arg: syscall number */
6736 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6737 call audit_syscall_entry
6738 +
6739 + pax_erase_kstack
6740 +
6741 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6742 cmpq $(IA32_NR_syscalls-1),%rax
6743 ja ia32_badsys
6744 @@ -205,7 +243,7 @@ sysexit_from_sys_call:
6745 .endm
6746
6747 .macro auditsys_exit exit
6748 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6749 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6750 jnz ia32_ret_from_sys_call
6751 TRACE_IRQS_ON
6752 sti
6753 @@ -215,12 +253,12 @@ sysexit_from_sys_call:
6754 movzbl %al,%edi /* zero-extend that into %edi */
6755 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
6756 call audit_syscall_exit
6757 - GET_THREAD_INFO(%r10)
6758 + GET_THREAD_INFO(%r11)
6759 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
6760 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
6761 cli
6762 TRACE_IRQS_OFF
6763 - testl %edi,TI_flags(%r10)
6764 + testl %edi,TI_flags(%r11)
6765 jz \exit
6766 CLEAR_RREGS -ARGOFFSET
6767 jmp int_with_check
6768 @@ -238,7 +276,7 @@ sysexit_audit:
6769
6770 sysenter_tracesys:
6771 #ifdef CONFIG_AUDITSYSCALL
6772 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6773 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6774 jz sysenter_auditsys
6775 #endif
6776 SAVE_REST
6777 @@ -246,6 +284,9 @@ sysenter_tracesys:
6778 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6779 movq %rsp,%rdi /* &pt_regs -> arg1 */
6780 call syscall_trace_enter
6781 +
6782 + pax_erase_kstack
6783 +
6784 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6785 RESTORE_REST
6786 cmpq $(IA32_NR_syscalls-1),%rax
6787 @@ -277,19 +318,20 @@ ENDPROC(ia32_sysenter_target)
6788 ENTRY(ia32_cstar_target)
6789 CFI_STARTPROC32 simple
6790 CFI_SIGNAL_FRAME
6791 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6792 + CFI_DEF_CFA rsp,0
6793 CFI_REGISTER rip,rcx
6794 /*CFI_REGISTER rflags,r11*/
6795 SWAPGS_UNSAFE_STACK
6796 movl %esp,%r8d
6797 CFI_REGISTER rsp,r8
6798 movq PER_CPU_VAR(kernel_stack),%rsp
6799 + SAVE_ARGS 8*6,0,0
6800 + pax_enter_kernel_user
6801 /*
6802 * No need to follow this irqs on/off section: the syscall
6803 * disabled irqs and here we enable it straight after entry:
6804 */
6805 ENABLE_INTERRUPTS(CLBR_NONE)
6806 - SAVE_ARGS 8,0,0
6807 movl %eax,%eax /* zero extension */
6808 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6809 movq %rcx,RIP-ARGOFFSET(%rsp)
6810 @@ -305,13 +347,19 @@ ENTRY(ia32_cstar_target)
6811 /* no need to do an access_ok check here because r8 has been
6812 32bit zero extended */
6813 /* hardware stack frame is complete now */
6814 +
6815 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6816 + mov $PAX_USER_SHADOW_BASE,%r11
6817 + add %r11,%r8
6818 +#endif
6819 +
6820 1: movl (%r8),%r9d
6821 .section __ex_table,"a"
6822 .quad 1b,ia32_badarg
6823 .previous
6824 - GET_THREAD_INFO(%r10)
6825 - orl $TS_COMPAT,TI_status(%r10)
6826 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6827 + GET_THREAD_INFO(%r11)
6828 + orl $TS_COMPAT,TI_status(%r11)
6829 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6830 CFI_REMEMBER_STATE
6831 jnz cstar_tracesys
6832 cmpq $IA32_NR_syscalls-1,%rax
6833 @@ -321,13 +369,15 @@ cstar_do_call:
6834 cstar_dispatch:
6835 call *ia32_sys_call_table(,%rax,8)
6836 movq %rax,RAX-ARGOFFSET(%rsp)
6837 - GET_THREAD_INFO(%r10)
6838 + GET_THREAD_INFO(%r11)
6839 DISABLE_INTERRUPTS(CLBR_NONE)
6840 TRACE_IRQS_OFF
6841 - testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6842 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
6843 jnz sysretl_audit
6844 sysretl_from_sys_call:
6845 - andl $~TS_COMPAT,TI_status(%r10)
6846 + pax_exit_kernel_user
6847 + pax_erase_kstack
6848 + andl $~TS_COMPAT,TI_status(%r11)
6849 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
6850 movl RIP-ARGOFFSET(%rsp),%ecx
6851 CFI_REGISTER rip,rcx
6852 @@ -355,7 +405,7 @@ sysretl_audit:
6853
6854 cstar_tracesys:
6855 #ifdef CONFIG_AUDITSYSCALL
6856 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
6857 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
6858 jz cstar_auditsys
6859 #endif
6860 xchgl %r9d,%ebp
6861 @@ -364,6 +414,9 @@ cstar_tracesys:
6862 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6863 movq %rsp,%rdi /* &pt_regs -> arg1 */
6864 call syscall_trace_enter
6865 +
6866 + pax_erase_kstack
6867 +
6868 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6869 RESTORE_REST
6870 xchgl %ebp,%r9d
6871 @@ -409,20 +462,21 @@ ENTRY(ia32_syscall)
6872 CFI_REL_OFFSET rip,RIP-RIP
6873 PARAVIRT_ADJUST_EXCEPTION_FRAME
6874 SWAPGS
6875 - /*
6876 - * No need to follow this irqs on/off section: the syscall
6877 - * disabled irqs and here we enable it straight after entry:
6878 - */
6879 - ENABLE_INTERRUPTS(CLBR_NONE)
6880 movl %eax,%eax
6881 pushq_cfi %rax
6882 cld
6883 /* note the registers are not zero extended to the sf.
6884 this could be a problem. */
6885 SAVE_ARGS 0,1,0
6886 - GET_THREAD_INFO(%r10)
6887 - orl $TS_COMPAT,TI_status(%r10)
6888 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6889 + pax_enter_kernel_user
6890 + /*
6891 + * No need to follow this irqs on/off section: the syscall
6892 + * disabled irqs and here we enable it straight after entry:
6893 + */
6894 + ENABLE_INTERRUPTS(CLBR_NONE)
6895 + GET_THREAD_INFO(%r11)
6896 + orl $TS_COMPAT,TI_status(%r11)
6897 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
6898 jnz ia32_tracesys
6899 cmpq $(IA32_NR_syscalls-1),%rax
6900 ja ia32_badsys
6901 @@ -441,6 +495,9 @@ ia32_tracesys:
6902 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6903 movq %rsp,%rdi /* &pt_regs -> arg1 */
6904 call syscall_trace_enter
6905 +
6906 + pax_erase_kstack
6907 +
6908 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6909 RESTORE_REST
6910 cmpq $(IA32_NR_syscalls-1),%rax
6911 @@ -455,6 +512,7 @@ ia32_badsys:
6912
6913 quiet_ni_syscall:
6914 movq $-ENOSYS,%rax
6915 + pax_force_retaddr
6916 ret
6917 CFI_ENDPROC
6918
6919 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
6920 index f6f5c53..b358b28 100644
6921 --- a/arch/x86/ia32/sys_ia32.c
6922 +++ b/arch/x86/ia32/sys_ia32.c
6923 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
6924 */
6925 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
6926 {
6927 - typeof(ubuf->st_uid) uid = 0;
6928 - typeof(ubuf->st_gid) gid = 0;
6929 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
6930 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
6931 SET_UID(uid, stat->uid);
6932 SET_GID(gid, stat->gid);
6933 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
6934 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
6935 }
6936 set_fs(KERNEL_DS);
6937 ret = sys_rt_sigprocmask(how,
6938 - set ? (sigset_t __user *)&s : NULL,
6939 - oset ? (sigset_t __user *)&s : NULL,
6940 + set ? (sigset_t __force_user *)&s : NULL,
6941 + oset ? (sigset_t __force_user *)&s : NULL,
6942 sigsetsize);
6943 set_fs(old_fs);
6944 if (ret)
6945 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
6946 return alarm_setitimer(seconds);
6947 }
6948
6949 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
6950 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
6951 int options)
6952 {
6953 return compat_sys_wait4(pid, stat_addr, options, NULL);
6954 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
6955 mm_segment_t old_fs = get_fs();
6956
6957 set_fs(KERNEL_DS);
6958 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
6959 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
6960 set_fs(old_fs);
6961 if (put_compat_timespec(&t, interval))
6962 return -EFAULT;
6963 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
6964 mm_segment_t old_fs = get_fs();
6965
6966 set_fs(KERNEL_DS);
6967 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
6968 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
6969 set_fs(old_fs);
6970 if (!ret) {
6971 switch (_NSIG_WORDS) {
6972 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
6973 if (copy_siginfo_from_user32(&info, uinfo))
6974 return -EFAULT;
6975 set_fs(KERNEL_DS);
6976 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
6977 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
6978 set_fs(old_fs);
6979 return ret;
6980 }
6981 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
6982 return -EFAULT;
6983
6984 set_fs(KERNEL_DS);
6985 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
6986 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
6987 count);
6988 set_fs(old_fs);
6989
6990 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
6991 index 091508b..0ee32ec 100644
6992 --- a/arch/x86/include/asm/alternative-asm.h
6993 +++ b/arch/x86/include/asm/alternative-asm.h
6994 @@ -15,6 +15,45 @@
6995 .endm
6996 #endif
6997
6998 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
6999 + .macro pax_force_retaddr_bts rip=0
7000 + btsq $63,\rip(%rsp)
7001 + .endm
7002 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
7003 + .macro pax_force_retaddr rip=0, reload=0
7004 + btsq $63,\rip(%rsp)
7005 + .endm
7006 + .macro pax_force_fptr ptr
7007 + btsq $63,\ptr
7008 + .endm
7009 + .macro pax_set_fptr_mask
7010 + .endm
7011 +#endif
7012 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
7013 + .macro pax_force_retaddr rip=0, reload=0
7014 + .if \reload
7015 + pax_set_fptr_mask
7016 + .endif
7017 + orq %r10,\rip(%rsp)
7018 + .endm
7019 + .macro pax_force_fptr ptr
7020 + orq %r10,\ptr
7021 + .endm
7022 + .macro pax_set_fptr_mask
7023 + movabs $0x8000000000000000,%r10
7024 + .endm
7025 +#endif
7026 +#else
7027 + .macro pax_force_retaddr rip=0, reload=0
7028 + .endm
7029 + .macro pax_force_fptr ptr
7030 + .endm
7031 + .macro pax_force_retaddr_bts rip=0
7032 + .endm
7033 + .macro pax_set_fptr_mask
7034 + .endm
7035 +#endif
7036 +
7037 .macro altinstruction_entry orig alt feature orig_len alt_len
7038 .long \orig - .
7039 .long \alt - .
7040 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
7041 index 37ad100..7d47faa 100644
7042 --- a/arch/x86/include/asm/alternative.h
7043 +++ b/arch/x86/include/asm/alternative.h
7044 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
7045 ".section .discard,\"aw\",@progbits\n" \
7046 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
7047 ".previous\n" \
7048 - ".section .altinstr_replacement, \"ax\"\n" \
7049 + ".section .altinstr_replacement, \"a\"\n" \
7050 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
7051 ".previous"
7052
7053 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
7054 index 9b7273c..e9fcc24 100644
7055 --- a/arch/x86/include/asm/apic.h
7056 +++ b/arch/x86/include/asm/apic.h
7057 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
7058
7059 #ifdef CONFIG_X86_LOCAL_APIC
7060
7061 -extern unsigned int apic_verbosity;
7062 +extern int apic_verbosity;
7063 extern int local_apic_timer_c2_ok;
7064
7065 extern int disable_apic;
7066 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
7067 index 20370c6..a2eb9b0 100644
7068 --- a/arch/x86/include/asm/apm.h
7069 +++ b/arch/x86/include/asm/apm.h
7070 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
7071 __asm__ __volatile__(APM_DO_ZERO_SEGS
7072 "pushl %%edi\n\t"
7073 "pushl %%ebp\n\t"
7074 - "lcall *%%cs:apm_bios_entry\n\t"
7075 + "lcall *%%ss:apm_bios_entry\n\t"
7076 "setc %%al\n\t"
7077 "popl %%ebp\n\t"
7078 "popl %%edi\n\t"
7079 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
7080 __asm__ __volatile__(APM_DO_ZERO_SEGS
7081 "pushl %%edi\n\t"
7082 "pushl %%ebp\n\t"
7083 - "lcall *%%cs:apm_bios_entry\n\t"
7084 + "lcall *%%ss:apm_bios_entry\n\t"
7085 "setc %%bl\n\t"
7086 "popl %%ebp\n\t"
7087 "popl %%edi\n\t"
7088 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
7089 index 10572e3..2618d91 100644
7090 --- a/arch/x86/include/asm/atomic.h
7091 +++ b/arch/x86/include/asm/atomic.h
7092 @@ -22,7 +22,18 @@
7093 */
7094 static inline int atomic_read(const atomic_t *v)
7095 {
7096 - return (*(volatile int *)&(v)->counter);
7097 + return (*(volatile const int *)&(v)->counter);
7098 +}
7099 +
7100 +/**
7101 + * atomic_read_unchecked - read atomic variable
7102 + * @v: pointer of type atomic_unchecked_t
7103 + *
7104 + * Atomically reads the value of @v.
7105 + */
7106 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7107 +{
7108 + return (*(volatile const int *)&(v)->counter);
7109 }
7110
7111 /**
7112 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
7113 }
7114
7115 /**
7116 + * atomic_set_unchecked - set atomic variable
7117 + * @v: pointer of type atomic_unchecked_t
7118 + * @i: required value
7119 + *
7120 + * Atomically sets the value of @v to @i.
7121 + */
7122 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7123 +{
7124 + v->counter = i;
7125 +}
7126 +
7127 +/**
7128 * atomic_add - add integer to atomic variable
7129 * @i: integer value to add
7130 * @v: pointer of type atomic_t
7131 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
7132 */
7133 static inline void atomic_add(int i, atomic_t *v)
7134 {
7135 - asm volatile(LOCK_PREFIX "addl %1,%0"
7136 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7137 +
7138 +#ifdef CONFIG_PAX_REFCOUNT
7139 + "jno 0f\n"
7140 + LOCK_PREFIX "subl %1,%0\n"
7141 + "int $4\n0:\n"
7142 + _ASM_EXTABLE(0b, 0b)
7143 +#endif
7144 +
7145 + : "+m" (v->counter)
7146 + : "ir" (i));
7147 +}
7148 +
7149 +/**
7150 + * atomic_add_unchecked - add integer to atomic variable
7151 + * @i: integer value to add
7152 + * @v: pointer of type atomic_unchecked_t
7153 + *
7154 + * Atomically adds @i to @v.
7155 + */
7156 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7157 +{
7158 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7159 : "+m" (v->counter)
7160 : "ir" (i));
7161 }
7162 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
7163 */
7164 static inline void atomic_sub(int i, atomic_t *v)
7165 {
7166 - asm volatile(LOCK_PREFIX "subl %1,%0"
7167 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7168 +
7169 +#ifdef CONFIG_PAX_REFCOUNT
7170 + "jno 0f\n"
7171 + LOCK_PREFIX "addl %1,%0\n"
7172 + "int $4\n0:\n"
7173 + _ASM_EXTABLE(0b, 0b)
7174 +#endif
7175 +
7176 + : "+m" (v->counter)
7177 + : "ir" (i));
7178 +}
7179 +
7180 +/**
7181 + * atomic_sub_unchecked - subtract integer from atomic variable
7182 + * @i: integer value to subtract
7183 + * @v: pointer of type atomic_unchecked_t
7184 + *
7185 + * Atomically subtracts @i from @v.
7186 + */
7187 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7188 +{
7189 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7190 : "+m" (v->counter)
7191 : "ir" (i));
7192 }
7193 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7194 {
7195 unsigned char c;
7196
7197 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7198 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7199 +
7200 +#ifdef CONFIG_PAX_REFCOUNT
7201 + "jno 0f\n"
7202 + LOCK_PREFIX "addl %2,%0\n"
7203 + "int $4\n0:\n"
7204 + _ASM_EXTABLE(0b, 0b)
7205 +#endif
7206 +
7207 + "sete %1\n"
7208 : "+m" (v->counter), "=qm" (c)
7209 : "ir" (i) : "memory");
7210 return c;
7211 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
7212 */
7213 static inline void atomic_inc(atomic_t *v)
7214 {
7215 - asm volatile(LOCK_PREFIX "incl %0"
7216 + asm volatile(LOCK_PREFIX "incl %0\n"
7217 +
7218 +#ifdef CONFIG_PAX_REFCOUNT
7219 + "jno 0f\n"
7220 + LOCK_PREFIX "decl %0\n"
7221 + "int $4\n0:\n"
7222 + _ASM_EXTABLE(0b, 0b)
7223 +#endif
7224 +
7225 + : "+m" (v->counter));
7226 +}
7227 +
7228 +/**
7229 + * atomic_inc_unchecked - increment atomic variable
7230 + * @v: pointer of type atomic_unchecked_t
7231 + *
7232 + * Atomically increments @v by 1.
7233 + */
7234 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7235 +{
7236 + asm volatile(LOCK_PREFIX "incl %0\n"
7237 : "+m" (v->counter));
7238 }
7239
7240 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
7241 */
7242 static inline void atomic_dec(atomic_t *v)
7243 {
7244 - asm volatile(LOCK_PREFIX "decl %0"
7245 + asm volatile(LOCK_PREFIX "decl %0\n"
7246 +
7247 +#ifdef CONFIG_PAX_REFCOUNT
7248 + "jno 0f\n"
7249 + LOCK_PREFIX "incl %0\n"
7250 + "int $4\n0:\n"
7251 + _ASM_EXTABLE(0b, 0b)
7252 +#endif
7253 +
7254 + : "+m" (v->counter));
7255 +}
7256 +
7257 +/**
7258 + * atomic_dec_unchecked - decrement atomic variable
7259 + * @v: pointer of type atomic_unchecked_t
7260 + *
7261 + * Atomically decrements @v by 1.
7262 + */
7263 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7264 +{
7265 + asm volatile(LOCK_PREFIX "decl %0\n"
7266 : "+m" (v->counter));
7267 }
7268
7269 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
7270 {
7271 unsigned char c;
7272
7273 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7274 + asm volatile(LOCK_PREFIX "decl %0\n"
7275 +
7276 +#ifdef CONFIG_PAX_REFCOUNT
7277 + "jno 0f\n"
7278 + LOCK_PREFIX "incl %0\n"
7279 + "int $4\n0:\n"
7280 + _ASM_EXTABLE(0b, 0b)
7281 +#endif
7282 +
7283 + "sete %1\n"
7284 : "+m" (v->counter), "=qm" (c)
7285 : : "memory");
7286 return c != 0;
7287 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
7288 {
7289 unsigned char c;
7290
7291 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7292 + asm volatile(LOCK_PREFIX "incl %0\n"
7293 +
7294 +#ifdef CONFIG_PAX_REFCOUNT
7295 + "jno 0f\n"
7296 + LOCK_PREFIX "decl %0\n"
7297 + "int $4\n0:\n"
7298 + _ASM_EXTABLE(0b, 0b)
7299 +#endif
7300 +
7301 + "sete %1\n"
7302 + : "+m" (v->counter), "=qm" (c)
7303 + : : "memory");
7304 + return c != 0;
7305 +}
7306 +
7307 +/**
7308 + * atomic_inc_and_test_unchecked - increment and test
7309 + * @v: pointer of type atomic_unchecked_t
7310 + *
7311 + * Atomically increments @v by 1
7312 + * and returns true if the result is zero, or false for all
7313 + * other cases.
7314 + */
7315 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7316 +{
7317 + unsigned char c;
7318 +
7319 + asm volatile(LOCK_PREFIX "incl %0\n"
7320 + "sete %1\n"
7321 : "+m" (v->counter), "=qm" (c)
7322 : : "memory");
7323 return c != 0;
7324 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
7325 {
7326 unsigned char c;
7327
7328 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7329 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7330 +
7331 +#ifdef CONFIG_PAX_REFCOUNT
7332 + "jno 0f\n"
7333 + LOCK_PREFIX "subl %2,%0\n"
7334 + "int $4\n0:\n"
7335 + _ASM_EXTABLE(0b, 0b)
7336 +#endif
7337 +
7338 + "sets %1\n"
7339 : "+m" (v->counter), "=qm" (c)
7340 : "ir" (i) : "memory");
7341 return c;
7342 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int i, atomic_t *v)
7343 #endif
7344 /* Modern 486+ processor */
7345 __i = i;
7346 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7347 +
7348 +#ifdef CONFIG_PAX_REFCOUNT
7349 + "jno 0f\n"
7350 + "movl %0, %1\n"
7351 + "int $4\n0:\n"
7352 + _ASM_EXTABLE(0b, 0b)
7353 +#endif
7354 +
7355 + : "+r" (i), "+m" (v->counter)
7356 + : : "memory");
7357 + return i + __i;
7358 +
7359 +#ifdef CONFIG_M386
7360 +no_xadd: /* Legacy 386 processor */
7361 + local_irq_save(flags);
7362 + __i = atomic_read(v);
7363 + atomic_set(v, i + __i);
7364 + local_irq_restore(flags);
7365 + return i + __i;
7366 +#endif
7367 +}
7368 +
7369 +/**
7370 + * atomic_add_return_unchecked - add integer and return
7371 + * @v: pointer of type atomic_unchecked_t
7372 + * @i: integer value to add
7373 + *
7374 + * Atomically adds @i to @v and returns @i + @v
7375 + */
7376 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7377 +{
7378 + int __i;
7379 +#ifdef CONFIG_M386
7380 + unsigned long flags;
7381 + if (unlikely(boot_cpu_data.x86 <= 3))
7382 + goto no_xadd;
7383 +#endif
7384 + /* Modern 486+ processor */
7385 + __i = i;
7386 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7387 : "+r" (i), "+m" (v->counter)
7388 : : "memory");
7389 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
7390 }
7391
7392 #define atomic_inc_return(v) (atomic_add_return(1, v))
7393 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7394 +{
7395 + return atomic_add_return_unchecked(1, v);
7396 +}
7397 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7398
7399 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7400 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
7401 return cmpxchg(&v->counter, old, new);
7402 }
7403
7404 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7405 +{
7406 + return cmpxchg(&v->counter, old, new);
7407 +}
7408 +
7409 static inline int atomic_xchg(atomic_t *v, int new)
7410 {
7411 return xchg(&v->counter, new);
7412 }
7413
7414 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7415 +{
7416 + return xchg(&v->counter, new);
7417 +}
7418 +
7419 /**
7420 * __atomic_add_unless - add unless the number is already a given value
7421 * @v: pointer of type atomic_t
7422 @@ -231,12 +447,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
7423 */
7424 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7425 {
7426 - int c, old;
7427 + int c, old, new;
7428 c = atomic_read(v);
7429 for (;;) {
7430 - if (unlikely(c == (u)))
7431 + if (unlikely(c == u))
7432 break;
7433 - old = atomic_cmpxchg((v), c, c + (a));
7434 +
7435 + asm volatile("addl %2,%0\n"
7436 +
7437 +#ifdef CONFIG_PAX_REFCOUNT
7438 + "jno 0f\n"
7439 + "subl %2,%0\n"
7440 + "int $4\n0:\n"
7441 + _ASM_EXTABLE(0b, 0b)
7442 +#endif
7443 +
7444 + : "=r" (new)
7445 + : "0" (c), "ir" (a));
7446 +
7447 + old = atomic_cmpxchg(v, c, new);
7448 if (likely(old == c))
7449 break;
7450 c = old;
7451 @@ -244,6 +473,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7452 return c;
7453 }
7454
7455 +/**
7456 + * atomic_inc_not_zero_hint - increment if not null
7457 + * @v: pointer of type atomic_t
7458 + * @hint: probable value of the atomic before the increment
7459 + *
7460 + * This version of atomic_inc_not_zero() gives a hint of probable
7461 + * value of the atomic. This helps processor to not read the memory
7462 + * before doing the atomic read/modify/write cycle, lowering
7463 + * number of bus transactions on some arches.
7464 + *
7465 + * Returns: 0 if increment was not done, 1 otherwise.
7466 + */
7467 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
7468 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
7469 +{
7470 + int val, c = hint, new;
7471 +
7472 + /* sanity test, should be removed by compiler if hint is a constant */
7473 + if (!hint)
7474 + return __atomic_add_unless(v, 1, 0);
7475 +
7476 + do {
7477 + asm volatile("incl %0\n"
7478 +
7479 +#ifdef CONFIG_PAX_REFCOUNT
7480 + "jno 0f\n"
7481 + "decl %0\n"
7482 + "int $4\n0:\n"
7483 + _ASM_EXTABLE(0b, 0b)
7484 +#endif
7485 +
7486 + : "=r" (new)
7487 + : "0" (c));
7488 +
7489 + val = atomic_cmpxchg(v, c, new);
7490 + if (val == c)
7491 + return 1;
7492 + c = val;
7493 + } while (c);
7494 +
7495 + return 0;
7496 +}
7497
7498 /*
7499 * atomic_dec_if_positive - decrement by 1 if old value positive
7500 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
7501 index 24098aa..1e37723 100644
7502 --- a/arch/x86/include/asm/atomic64_32.h
7503 +++ b/arch/x86/include/asm/atomic64_32.h
7504 @@ -12,6 +12,14 @@ typedef struct {
7505 u64 __aligned(8) counter;
7506 } atomic64_t;
7507
7508 +#ifdef CONFIG_PAX_REFCOUNT
7509 +typedef struct {
7510 + u64 __aligned(8) counter;
7511 +} atomic64_unchecked_t;
7512 +#else
7513 +typedef atomic64_t atomic64_unchecked_t;
7514 +#endif
7515 +
7516 #define ATOMIC64_INIT(val) { (val) }
7517
7518 #ifdef CONFIG_X86_CMPXCHG64
7519 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
7520 }
7521
7522 /**
7523 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
7524 + * @p: pointer to type atomic64_unchecked_t
7525 + * @o: expected value
7526 + * @n: new value
7527 + *
7528 + * Atomically sets @v to @n if it was equal to @o and returns
7529 + * the old value.
7530 + */
7531 +
7532 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
7533 +{
7534 + return cmpxchg64(&v->counter, o, n);
7535 +}
7536 +
7537 +/**
7538 * atomic64_xchg - xchg atomic64 variable
7539 * @v: pointer to type atomic64_t
7540 * @n: value to assign
7541 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
7542 }
7543
7544 /**
7545 + * atomic64_set_unchecked - set atomic64 variable
7546 + * @v: pointer to type atomic64_unchecked_t
7547 + * @n: value to assign
7548 + *
7549 + * Atomically sets the value of @v to @n.
7550 + */
7551 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
7552 +{
7553 + unsigned high = (unsigned)(i >> 32);
7554 + unsigned low = (unsigned)i;
7555 + asm volatile(ATOMIC64_ALTERNATIVE(set)
7556 + : "+b" (low), "+c" (high)
7557 + : "S" (v)
7558 + : "eax", "edx", "memory"
7559 + );
7560 +}
7561 +
7562 +/**
7563 * atomic64_read - read atomic64 variable
7564 * @v: pointer to type atomic64_t
7565 *
7566 @@ -93,6 +134,22 @@ static inline long long atomic64_read(atomic64_t *v)
7567 }
7568
7569 /**
7570 + * atomic64_read_unchecked - read atomic64 variable
7571 + * @v: pointer to type atomic64_unchecked_t
7572 + *
7573 + * Atomically reads the value of @v and returns it.
7574 + */
7575 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
7576 +{
7577 + long long r;
7578 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
7579 + : "=A" (r), "+c" (v)
7580 + : : "memory"
7581 + );
7582 + return r;
7583 + }
7584 +
7585 +/**
7586 * atomic64_add_return - add and return
7587 * @i: integer value to add
7588 * @v: pointer to type atomic64_t
7589 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
7590 return i;
7591 }
7592
7593 +/**
7594 + * atomic64_add_return_unchecked - add and return
7595 + * @i: integer value to add
7596 + * @v: pointer to type atomic64_unchecked_t
7597 + *
7598 + * Atomically adds @i to @v and returns @i + *@v
7599 + */
7600 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
7601 +{
7602 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
7603 + : "+A" (i), "+c" (v)
7604 + : : "memory"
7605 + );
7606 + return i;
7607 +}
7608 +
7609 /*
7610 * Other variants with different arithmetic operators:
7611 */
7612 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
7613 return a;
7614 }
7615
7616 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7617 +{
7618 + long long a;
7619 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
7620 + : "=A" (a)
7621 + : "S" (v)
7622 + : "memory", "ecx"
7623 + );
7624 + return a;
7625 +}
7626 +
7627 static inline long long atomic64_dec_return(atomic64_t *v)
7628 {
7629 long long a;
7630 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
7631 }
7632
7633 /**
7634 + * atomic64_add_unchecked - add integer to atomic64 variable
7635 + * @i: integer value to add
7636 + * @v: pointer to type atomic64_unchecked_t
7637 + *
7638 + * Atomically adds @i to @v.
7639 + */
7640 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
7641 +{
7642 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
7643 + : "+A" (i), "+c" (v)
7644 + : : "memory"
7645 + );
7646 + return i;
7647 +}
7648 +
7649 +/**
7650 * atomic64_sub - subtract the atomic64 variable
7651 * @i: integer value to subtract
7652 * @v: pointer to type atomic64_t
7653 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
7654 index 017594d..d3fcf72 100644
7655 --- a/arch/x86/include/asm/atomic64_64.h
7656 +++ b/arch/x86/include/asm/atomic64_64.h
7657 @@ -18,7 +18,19 @@
7658 */
7659 static inline long atomic64_read(const atomic64_t *v)
7660 {
7661 - return (*(volatile long *)&(v)->counter);
7662 + return (*(volatile const long *)&(v)->counter);
7663 +}
7664 +
7665 +/**
7666 + * atomic64_read_unchecked - read atomic64 variable
7667 + * @v: pointer of type atomic64_unchecked_t
7668 + *
7669 + * Atomically reads the value of @v.
7670 + * Doesn't imply a read memory barrier.
7671 + */
7672 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7673 +{
7674 + return (*(volatile const long *)&(v)->counter);
7675 }
7676
7677 /**
7678 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
7679 }
7680
7681 /**
7682 + * atomic64_set_unchecked - set atomic64 variable
7683 + * @v: pointer to type atomic64_unchecked_t
7684 + * @i: required value
7685 + *
7686 + * Atomically sets the value of @v to @i.
7687 + */
7688 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7689 +{
7690 + v->counter = i;
7691 +}
7692 +
7693 +/**
7694 * atomic64_add - add integer to atomic64 variable
7695 * @i: integer value to add
7696 * @v: pointer to type atomic64_t
7697 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
7698 */
7699 static inline void atomic64_add(long i, atomic64_t *v)
7700 {
7701 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7702 +
7703 +#ifdef CONFIG_PAX_REFCOUNT
7704 + "jno 0f\n"
7705 + LOCK_PREFIX "subq %1,%0\n"
7706 + "int $4\n0:\n"
7707 + _ASM_EXTABLE(0b, 0b)
7708 +#endif
7709 +
7710 + : "=m" (v->counter)
7711 + : "er" (i), "m" (v->counter));
7712 +}
7713 +
7714 +/**
7715 + * atomic64_add_unchecked - add integer to atomic64 variable
7716 + * @i: integer value to add
7717 + * @v: pointer to type atomic64_unchecked_t
7718 + *
7719 + * Atomically adds @i to @v.
7720 + */
7721 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7722 +{
7723 asm volatile(LOCK_PREFIX "addq %1,%0"
7724 : "=m" (v->counter)
7725 : "er" (i), "m" (v->counter));
7726 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
7727 */
7728 static inline void atomic64_sub(long i, atomic64_t *v)
7729 {
7730 - asm volatile(LOCK_PREFIX "subq %1,%0"
7731 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7732 +
7733 +#ifdef CONFIG_PAX_REFCOUNT
7734 + "jno 0f\n"
7735 + LOCK_PREFIX "addq %1,%0\n"
7736 + "int $4\n0:\n"
7737 + _ASM_EXTABLE(0b, 0b)
7738 +#endif
7739 +
7740 + : "=m" (v->counter)
7741 + : "er" (i), "m" (v->counter));
7742 +}
7743 +
7744 +/**
7745 + * atomic64_sub_unchecked - subtract the atomic64 variable
7746 + * @i: integer value to subtract
7747 + * @v: pointer to type atomic64_unchecked_t
7748 + *
7749 + * Atomically subtracts @i from @v.
7750 + */
7751 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
7752 +{
7753 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7754 : "=m" (v->counter)
7755 : "er" (i), "m" (v->counter));
7756 }
7757 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7758 {
7759 unsigned char c;
7760
7761 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7762 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7763 +
7764 +#ifdef CONFIG_PAX_REFCOUNT
7765 + "jno 0f\n"
7766 + LOCK_PREFIX "addq %2,%0\n"
7767 + "int $4\n0:\n"
7768 + _ASM_EXTABLE(0b, 0b)
7769 +#endif
7770 +
7771 + "sete %1\n"
7772 : "=m" (v->counter), "=qm" (c)
7773 : "er" (i), "m" (v->counter) : "memory");
7774 return c;
7775 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
7776 */
7777 static inline void atomic64_inc(atomic64_t *v)
7778 {
7779 + asm volatile(LOCK_PREFIX "incq %0\n"
7780 +
7781 +#ifdef CONFIG_PAX_REFCOUNT
7782 + "jno 0f\n"
7783 + LOCK_PREFIX "decq %0\n"
7784 + "int $4\n0:\n"
7785 + _ASM_EXTABLE(0b, 0b)
7786 +#endif
7787 +
7788 + : "=m" (v->counter)
7789 + : "m" (v->counter));
7790 +}
7791 +
7792 +/**
7793 + * atomic64_inc_unchecked - increment atomic64 variable
7794 + * @v: pointer to type atomic64_unchecked_t
7795 + *
7796 + * Atomically increments @v by 1.
7797 + */
7798 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7799 +{
7800 asm volatile(LOCK_PREFIX "incq %0"
7801 : "=m" (v->counter)
7802 : "m" (v->counter));
7803 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
7804 */
7805 static inline void atomic64_dec(atomic64_t *v)
7806 {
7807 - asm volatile(LOCK_PREFIX "decq %0"
7808 + asm volatile(LOCK_PREFIX "decq %0\n"
7809 +
7810 +#ifdef CONFIG_PAX_REFCOUNT
7811 + "jno 0f\n"
7812 + LOCK_PREFIX "incq %0\n"
7813 + "int $4\n0:\n"
7814 + _ASM_EXTABLE(0b, 0b)
7815 +#endif
7816 +
7817 + : "=m" (v->counter)
7818 + : "m" (v->counter));
7819 +}
7820 +
7821 +/**
7822 + * atomic64_dec_unchecked - decrement atomic64 variable
7823 + * @v: pointer to type atomic64_t
7824 + *
7825 + * Atomically decrements @v by 1.
7826 + */
7827 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7828 +{
7829 + asm volatile(LOCK_PREFIX "decq %0\n"
7830 : "=m" (v->counter)
7831 : "m" (v->counter));
7832 }
7833 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
7834 {
7835 unsigned char c;
7836
7837 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7838 + asm volatile(LOCK_PREFIX "decq %0\n"
7839 +
7840 +#ifdef CONFIG_PAX_REFCOUNT
7841 + "jno 0f\n"
7842 + LOCK_PREFIX "incq %0\n"
7843 + "int $4\n0:\n"
7844 + _ASM_EXTABLE(0b, 0b)
7845 +#endif
7846 +
7847 + "sete %1\n"
7848 : "=m" (v->counter), "=qm" (c)
7849 : "m" (v->counter) : "memory");
7850 return c != 0;
7851 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
7852 {
7853 unsigned char c;
7854
7855 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7856 + asm volatile(LOCK_PREFIX "incq %0\n"
7857 +
7858 +#ifdef CONFIG_PAX_REFCOUNT
7859 + "jno 0f\n"
7860 + LOCK_PREFIX "decq %0\n"
7861 + "int $4\n0:\n"
7862 + _ASM_EXTABLE(0b, 0b)
7863 +#endif
7864 +
7865 + "sete %1\n"
7866 : "=m" (v->counter), "=qm" (c)
7867 : "m" (v->counter) : "memory");
7868 return c != 0;
7869 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7870 {
7871 unsigned char c;
7872
7873 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7874 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7875 +
7876 +#ifdef CONFIG_PAX_REFCOUNT
7877 + "jno 0f\n"
7878 + LOCK_PREFIX "subq %2,%0\n"
7879 + "int $4\n0:\n"
7880 + _ASM_EXTABLE(0b, 0b)
7881 +#endif
7882 +
7883 + "sets %1\n"
7884 : "=m" (v->counter), "=qm" (c)
7885 : "er" (i), "m" (v->counter) : "memory");
7886 return c;
7887 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
7888 static inline long atomic64_add_return(long i, atomic64_t *v)
7889 {
7890 long __i = i;
7891 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7892 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7893 +
7894 +#ifdef CONFIG_PAX_REFCOUNT
7895 + "jno 0f\n"
7896 + "movq %0, %1\n"
7897 + "int $4\n0:\n"
7898 + _ASM_EXTABLE(0b, 0b)
7899 +#endif
7900 +
7901 + : "+r" (i), "+m" (v->counter)
7902 + : : "memory");
7903 + return i + __i;
7904 +}
7905 +
7906 +/**
7907 + * atomic64_add_return_unchecked - add and return
7908 + * @i: integer value to add
7909 + * @v: pointer to type atomic64_unchecked_t
7910 + *
7911 + * Atomically adds @i to @v and returns @i + @v
7912 + */
7913 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7914 +{
7915 + long __i = i;
7916 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7917 : "+r" (i), "+m" (v->counter)
7918 : : "memory");
7919 return i + __i;
7920 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
7921 }
7922
7923 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7924 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7925 +{
7926 + return atomic64_add_return_unchecked(1, v);
7927 +}
7928 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7929
7930 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7931 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7932 return cmpxchg(&v->counter, old, new);
7933 }
7934
7935 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7936 +{
7937 + return cmpxchg(&v->counter, old, new);
7938 +}
7939 +
7940 static inline long atomic64_xchg(atomic64_t *v, long new)
7941 {
7942 return xchg(&v->counter, new);
7943 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
7944 */
7945 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7946 {
7947 - long c, old;
7948 + long c, old, new;
7949 c = atomic64_read(v);
7950 for (;;) {
7951 - if (unlikely(c == (u)))
7952 + if (unlikely(c == u))
7953 break;
7954 - old = atomic64_cmpxchg((v), c, c + (a));
7955 +
7956 + asm volatile("add %2,%0\n"
7957 +
7958 +#ifdef CONFIG_PAX_REFCOUNT
7959 + "jno 0f\n"
7960 + "sub %2,%0\n"
7961 + "int $4\n0:\n"
7962 + _ASM_EXTABLE(0b, 0b)
7963 +#endif
7964 +
7965 + : "=r" (new)
7966 + : "0" (c), "ir" (a));
7967 +
7968 + old = atomic64_cmpxchg(v, c, new);
7969 if (likely(old == c))
7970 break;
7971 c = old;
7972 }
7973 - return c != (u);
7974 + return c != u;
7975 }
7976
7977 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7978 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
7979 index 1775d6e..b65017f 100644
7980 --- a/arch/x86/include/asm/bitops.h
7981 +++ b/arch/x86/include/asm/bitops.h
7982 @@ -38,7 +38,7 @@
7983 * a mask operation on a byte.
7984 */
7985 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7986 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7987 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7988 #define CONST_MASK(nr) (1 << ((nr) & 7))
7989
7990 /**
7991 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
7992 index 5e1a2ee..c9f9533 100644
7993 --- a/arch/x86/include/asm/boot.h
7994 +++ b/arch/x86/include/asm/boot.h
7995 @@ -11,10 +11,15 @@
7996 #include <asm/pgtable_types.h>
7997
7998 /* Physical address where kernel should be loaded. */
7999 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8000 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8001 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8002 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8003
8004 +#ifndef __ASSEMBLY__
8005 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
8006 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8007 +#endif
8008 +
8009 /* Minimum kernel alignment, as a power of two */
8010 #ifdef CONFIG_X86_64
8011 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8012 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
8013 index 48f99f1..d78ebf9 100644
8014 --- a/arch/x86/include/asm/cache.h
8015 +++ b/arch/x86/include/asm/cache.h
8016 @@ -5,12 +5,13 @@
8017
8018 /* L1 cache line size */
8019 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8020 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8021 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8022
8023 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8024 +#define __read_only __attribute__((__section__(".data..read_only")))
8025
8026 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8027 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
8028 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8029
8030 #ifdef CONFIG_X86_VSMP
8031 #ifdef CONFIG_SMP
8032 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
8033 index 4e12668..501d239 100644
8034 --- a/arch/x86/include/asm/cacheflush.h
8035 +++ b/arch/x86/include/asm/cacheflush.h
8036 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
8037 unsigned long pg_flags = pg->flags & _PGMT_MASK;
8038
8039 if (pg_flags == _PGMT_DEFAULT)
8040 - return -1;
8041 + return ~0UL;
8042 else if (pg_flags == _PGMT_WC)
8043 return _PAGE_CACHE_WC;
8044 else if (pg_flags == _PGMT_UC_MINUS)
8045 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
8046 index 46fc474..b02b0f9 100644
8047 --- a/arch/x86/include/asm/checksum_32.h
8048 +++ b/arch/x86/include/asm/checksum_32.h
8049 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
8050 int len, __wsum sum,
8051 int *src_err_ptr, int *dst_err_ptr);
8052
8053 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8054 + int len, __wsum sum,
8055 + int *src_err_ptr, int *dst_err_ptr);
8056 +
8057 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8058 + int len, __wsum sum,
8059 + int *src_err_ptr, int *dst_err_ptr);
8060 +
8061 /*
8062 * Note: when you get a NULL pointer exception here this means someone
8063 * passed in an incorrect kernel address to one of these functions.
8064 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
8065 int *err_ptr)
8066 {
8067 might_sleep();
8068 - return csum_partial_copy_generic((__force void *)src, dst,
8069 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
8070 len, sum, err_ptr, NULL);
8071 }
8072
8073 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
8074 {
8075 might_sleep();
8076 if (access_ok(VERIFY_WRITE, dst, len))
8077 - return csum_partial_copy_generic(src, (__force void *)dst,
8078 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8079 len, sum, NULL, err_ptr);
8080
8081 if (len)
8082 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
8083 index 88b23a4..d2e5f9f 100644
8084 --- a/arch/x86/include/asm/cpufeature.h
8085 +++ b/arch/x86/include/asm/cpufeature.h
8086 @@ -358,7 +358,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
8087 ".section .discard,\"aw\",@progbits\n"
8088 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
8089 ".previous\n"
8090 - ".section .altinstr_replacement,\"ax\"\n"
8091 + ".section .altinstr_replacement,\"a\"\n"
8092 "3: movb $1,%0\n"
8093 "4:\n"
8094 ".previous\n"
8095 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
8096 index 41935fa..3b40db8 100644
8097 --- a/arch/x86/include/asm/desc.h
8098 +++ b/arch/x86/include/asm/desc.h
8099 @@ -4,6 +4,7 @@
8100 #include <asm/desc_defs.h>
8101 #include <asm/ldt.h>
8102 #include <asm/mmu.h>
8103 +#include <asm/pgtable.h>
8104
8105 #include <linux/smp.h>
8106
8107 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8108
8109 desc->type = (info->read_exec_only ^ 1) << 1;
8110 desc->type |= info->contents << 2;
8111 + desc->type |= info->seg_not_present ^ 1;
8112
8113 desc->s = 1;
8114 desc->dpl = 0x3;
8115 @@ -34,17 +36,12 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
8116 }
8117
8118 extern struct desc_ptr idt_descr;
8119 -extern gate_desc idt_table[];
8120 -
8121 -struct gdt_page {
8122 - struct desc_struct gdt[GDT_ENTRIES];
8123 -} __attribute__((aligned(PAGE_SIZE)));
8124 -
8125 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8126 +extern gate_desc idt_table[256];
8127
8128 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8129 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8130 {
8131 - return per_cpu(gdt_page, cpu).gdt;
8132 + return cpu_gdt_table[cpu];
8133 }
8134
8135 #ifdef CONFIG_X86_64
8136 @@ -69,8 +66,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
8137 unsigned long base, unsigned dpl, unsigned flags,
8138 unsigned short seg)
8139 {
8140 - gate->a = (seg << 16) | (base & 0xffff);
8141 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8142 + gate->gate.offset_low = base;
8143 + gate->gate.seg = seg;
8144 + gate->gate.reserved = 0;
8145 + gate->gate.type = type;
8146 + gate->gate.s = 0;
8147 + gate->gate.dpl = dpl;
8148 + gate->gate.p = 1;
8149 + gate->gate.offset_high = base >> 16;
8150 }
8151
8152 #endif
8153 @@ -115,12 +118,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
8154
8155 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
8156 {
8157 + pax_open_kernel();
8158 memcpy(&idt[entry], gate, sizeof(*gate));
8159 + pax_close_kernel();
8160 }
8161
8162 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
8163 {
8164 + pax_open_kernel();
8165 memcpy(&ldt[entry], desc, 8);
8166 + pax_close_kernel();
8167 }
8168
8169 static inline void
8170 @@ -134,7 +141,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
8171 default: size = sizeof(*gdt); break;
8172 }
8173
8174 + pax_open_kernel();
8175 memcpy(&gdt[entry], desc, size);
8176 + pax_close_kernel();
8177 }
8178
8179 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8180 @@ -207,7 +216,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
8181
8182 static inline void native_load_tr_desc(void)
8183 {
8184 + pax_open_kernel();
8185 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8186 + pax_close_kernel();
8187 }
8188
8189 static inline void native_load_gdt(const struct desc_ptr *dtr)
8190 @@ -244,8 +255,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
8191 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8192 unsigned int i;
8193
8194 + pax_open_kernel();
8195 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8196 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8197 + pax_close_kernel();
8198 }
8199
8200 #define _LDT_empty(info) \
8201 @@ -307,7 +320,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
8202 desc->limit = (limit >> 16) & 0xf;
8203 }
8204
8205 -static inline void _set_gate(int gate, unsigned type, void *addr,
8206 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8207 unsigned dpl, unsigned ist, unsigned seg)
8208 {
8209 gate_desc s;
8210 @@ -326,7 +339,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
8211 * Pentium F0 0F bugfix can have resulted in the mapped
8212 * IDT being write-protected.
8213 */
8214 -static inline void set_intr_gate(unsigned int n, void *addr)
8215 +static inline void set_intr_gate(unsigned int n, const void *addr)
8216 {
8217 BUG_ON((unsigned)n > 0xFF);
8218 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8219 @@ -356,19 +369,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
8220 /*
8221 * This routine sets up an interrupt gate at directory privilege level 3.
8222 */
8223 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8224 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8225 {
8226 BUG_ON((unsigned)n > 0xFF);
8227 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8228 }
8229
8230 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8231 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8232 {
8233 BUG_ON((unsigned)n > 0xFF);
8234 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8235 }
8236
8237 -static inline void set_trap_gate(unsigned int n, void *addr)
8238 +static inline void set_trap_gate(unsigned int n, const void *addr)
8239 {
8240 BUG_ON((unsigned)n > 0xFF);
8241 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8242 @@ -377,19 +390,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
8243 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8244 {
8245 BUG_ON((unsigned)n > 0xFF);
8246 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8247 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8248 }
8249
8250 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8251 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8252 {
8253 BUG_ON((unsigned)n > 0xFF);
8254 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8255 }
8256
8257 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8258 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8259 {
8260 BUG_ON((unsigned)n > 0xFF);
8261 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8262 }
8263
8264 +#ifdef CONFIG_X86_32
8265 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8266 +{
8267 + struct desc_struct d;
8268 +
8269 + if (likely(limit))
8270 + limit = (limit - 1UL) >> PAGE_SHIFT;
8271 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8272 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8273 +}
8274 +#endif
8275 +
8276 #endif /* _ASM_X86_DESC_H */
8277 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
8278 index 278441f..b95a174 100644
8279 --- a/arch/x86/include/asm/desc_defs.h
8280 +++ b/arch/x86/include/asm/desc_defs.h
8281 @@ -31,6 +31,12 @@ struct desc_struct {
8282 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8283 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8284 };
8285 + struct {
8286 + u16 offset_low;
8287 + u16 seg;
8288 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8289 + unsigned offset_high: 16;
8290 + } gate;
8291 };
8292 } __attribute__((packed));
8293
8294 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
8295 index 908b969..a1f4eb4 100644
8296 --- a/arch/x86/include/asm/e820.h
8297 +++ b/arch/x86/include/asm/e820.h
8298 @@ -69,7 +69,7 @@ struct e820map {
8299 #define ISA_START_ADDRESS 0xa0000
8300 #define ISA_END_ADDRESS 0x100000
8301
8302 -#define BIOS_BEGIN 0x000a0000
8303 +#define BIOS_BEGIN 0x000c0000
8304 #define BIOS_END 0x00100000
8305
8306 #define BIOS_ROM_BASE 0xffe00000
8307 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
8308 index f2ad216..eb24c96 100644
8309 --- a/arch/x86/include/asm/elf.h
8310 +++ b/arch/x86/include/asm/elf.h
8311 @@ -237,7 +237,25 @@ extern int force_personality32;
8312 the loader. We need to make sure that it is out of the way of the program
8313 that it will "exec", and that there is sufficient room for the brk. */
8314
8315 +#ifdef CONFIG_PAX_SEGMEXEC
8316 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8317 +#else
8318 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8319 +#endif
8320 +
8321 +#ifdef CONFIG_PAX_ASLR
8322 +#ifdef CONFIG_X86_32
8323 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8324 +
8325 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8326 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8327 +#else
8328 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8329 +
8330 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8331 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8332 +#endif
8333 +#endif
8334
8335 /* This yields a mask that user programs can use to figure out what
8336 instruction set this CPU supports. This could be done in user space,
8337 @@ -290,9 +308,7 @@ do { \
8338
8339 #define ARCH_DLINFO \
8340 do { \
8341 - if (vdso_enabled) \
8342 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8343 - (unsigned long)current->mm->context.vdso); \
8344 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
8345 } while (0)
8346
8347 #define AT_SYSINFO 32
8348 @@ -303,7 +319,7 @@ do { \
8349
8350 #endif /* !CONFIG_X86_32 */
8351
8352 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8353 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8354
8355 #define VDSO_ENTRY \
8356 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8357 @@ -317,7 +333,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8358 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8359 #define compat_arch_setup_additional_pages syscall32_setup_pages
8360
8361 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8362 -#define arch_randomize_brk arch_randomize_brk
8363 -
8364 #endif /* _ASM_X86_ELF_H */
8365 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
8366 index cc70c1c..d96d011 100644
8367 --- a/arch/x86/include/asm/emergency-restart.h
8368 +++ b/arch/x86/include/asm/emergency-restart.h
8369 @@ -15,6 +15,6 @@ enum reboot_type {
8370
8371 extern enum reboot_type reboot_type;
8372
8373 -extern void machine_emergency_restart(void);
8374 +extern void machine_emergency_restart(void) __noreturn;
8375
8376 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8377 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
8378 index d09bb03..4ea4194 100644
8379 --- a/arch/x86/include/asm/futex.h
8380 +++ b/arch/x86/include/asm/futex.h
8381 @@ -12,16 +12,18 @@
8382 #include <asm/system.h>
8383
8384 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8385 + typecheck(u32 __user *, uaddr); \
8386 asm volatile("1:\t" insn "\n" \
8387 "2:\t.section .fixup,\"ax\"\n" \
8388 "3:\tmov\t%3, %1\n" \
8389 "\tjmp\t2b\n" \
8390 "\t.previous\n" \
8391 _ASM_EXTABLE(1b, 3b) \
8392 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8393 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
8394 : "i" (-EFAULT), "0" (oparg), "1" (0))
8395
8396 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8397 + typecheck(u32 __user *, uaddr); \
8398 asm volatile("1:\tmovl %2, %0\n" \
8399 "\tmovl\t%0, %3\n" \
8400 "\t" insn "\n" \
8401 @@ -34,7 +36,7 @@
8402 _ASM_EXTABLE(1b, 4b) \
8403 _ASM_EXTABLE(2b, 4b) \
8404 : "=&a" (oldval), "=&r" (ret), \
8405 - "+m" (*uaddr), "=&r" (tem) \
8406 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
8407 : "r" (oparg), "i" (-EFAULT), "1" (0))
8408
8409 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8410 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8411
8412 switch (op) {
8413 case FUTEX_OP_SET:
8414 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8415 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8416 break;
8417 case FUTEX_OP_ADD:
8418 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8419 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8420 uaddr, oparg);
8421 break;
8422 case FUTEX_OP_OR:
8423 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
8424 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8425 return -EFAULT;
8426
8427 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
8428 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
8429 "2:\t.section .fixup, \"ax\"\n"
8430 "3:\tmov %3, %0\n"
8431 "\tjmp 2b\n"
8432 "\t.previous\n"
8433 _ASM_EXTABLE(1b, 3b)
8434 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
8435 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
8436 : "i" (-EFAULT), "r" (newval), "1" (oldval)
8437 : "memory"
8438 );
8439 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
8440 index 0919905..2cf38d6 100644
8441 --- a/arch/x86/include/asm/hw_irq.h
8442 +++ b/arch/x86/include/asm/hw_irq.h
8443 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8444 extern void enable_IO_APIC(void);
8445
8446 /* Statistics */
8447 -extern atomic_t irq_err_count;
8448 -extern atomic_t irq_mis_count;
8449 +extern atomic_unchecked_t irq_err_count;
8450 +extern atomic_unchecked_t irq_mis_count;
8451
8452 /* EISA */
8453 extern void eisa_set_level_irq(unsigned int irq);
8454 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
8455 index c9e09ea..73888df 100644
8456 --- a/arch/x86/include/asm/i387.h
8457 +++ b/arch/x86/include/asm/i387.h
8458 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
8459 {
8460 int err;
8461
8462 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8463 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8464 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
8465 +#endif
8466 +
8467 /* See comment in fxsave() below. */
8468 #ifdef CONFIG_AS_FXSAVEQ
8469 asm volatile("1: fxrstorq %[fx]\n\t"
8470 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
8471 {
8472 int err;
8473
8474 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8475 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8476 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8477 +#endif
8478 +
8479 /*
8480 * Clear the bytes not touched by the fxsave and reserved
8481 * for the SW usage.
8482 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu *fpu)
8483 #endif /* CONFIG_X86_64 */
8484
8485 /* We need a safe address that is cheap to find and that is already
8486 - in L1 during context switch. The best choices are unfortunately
8487 - different for UP and SMP */
8488 -#ifdef CONFIG_SMP
8489 -#define safe_address (__per_cpu_offset[0])
8490 -#else
8491 -#define safe_address (kstat_cpu(0).cpustat.user)
8492 -#endif
8493 + in L1 during context switch. */
8494 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8495
8496 /*
8497 * These must be called with preempt disabled
8498 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void)
8499 struct thread_info *me = current_thread_info();
8500 preempt_disable();
8501 if (me->status & TS_USEDFPU)
8502 - __save_init_fpu(me->task);
8503 + __save_init_fpu(current);
8504 else
8505 clts();
8506 }
8507 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
8508 index d8e8eef..99f81ae 100644
8509 --- a/arch/x86/include/asm/io.h
8510 +++ b/arch/x86/include/asm/io.h
8511 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
8512
8513 #include <linux/vmalloc.h>
8514
8515 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8516 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8517 +{
8518 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8519 +}
8520 +
8521 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8522 +{
8523 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8524 +}
8525 +
8526 /*
8527 * Convert a virtual cached pointer to an uncached pointer
8528 */
8529 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
8530 index bba3cf8..06bc8da 100644
8531 --- a/arch/x86/include/asm/irqflags.h
8532 +++ b/arch/x86/include/asm/irqflags.h
8533 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
8534 sti; \
8535 sysexit
8536
8537 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8538 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8539 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8540 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8541 +
8542 #else
8543 #define INTERRUPT_RETURN iret
8544 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8545 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
8546 index 5478825..839e88c 100644
8547 --- a/arch/x86/include/asm/kprobes.h
8548 +++ b/arch/x86/include/asm/kprobes.h
8549 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
8550 #define RELATIVEJUMP_SIZE 5
8551 #define RELATIVECALL_OPCODE 0xe8
8552 #define RELATIVE_ADDR_SIZE 4
8553 -#define MAX_STACK_SIZE 64
8554 -#define MIN_STACK_SIZE(ADDR) \
8555 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8556 - THREAD_SIZE - (unsigned long)(ADDR))) \
8557 - ? (MAX_STACK_SIZE) \
8558 - : (((unsigned long)current_thread_info()) + \
8559 - THREAD_SIZE - (unsigned long)(ADDR)))
8560 +#define MAX_STACK_SIZE 64UL
8561 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8562
8563 #define flush_insn_slot(p) do { } while (0)
8564
8565 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
8566 index dd51c83..66cbfac 100644
8567 --- a/arch/x86/include/asm/kvm_host.h
8568 +++ b/arch/x86/include/asm/kvm_host.h
8569 @@ -456,7 +456,7 @@ struct kvm_arch {
8570 unsigned int n_requested_mmu_pages;
8571 unsigned int n_max_mmu_pages;
8572 unsigned int indirect_shadow_pages;
8573 - atomic_t invlpg_counter;
8574 + atomic_unchecked_t invlpg_counter;
8575 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
8576 /*
8577 * Hash table of struct kvm_mmu_page.
8578 @@ -636,7 +636,7 @@ struct kvm_x86_ops {
8579 enum x86_intercept_stage stage);
8580
8581 const struct trace_print_flags *exit_reasons_str;
8582 -};
8583 +} __do_const;
8584
8585 struct kvm_arch_async_pf {
8586 u32 token;
8587 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
8588 index 9cdae5d..300d20f 100644
8589 --- a/arch/x86/include/asm/local.h
8590 +++ b/arch/x86/include/asm/local.h
8591 @@ -18,26 +18,58 @@ typedef struct {
8592
8593 static inline void local_inc(local_t *l)
8594 {
8595 - asm volatile(_ASM_INC "%0"
8596 + asm volatile(_ASM_INC "%0\n"
8597 +
8598 +#ifdef CONFIG_PAX_REFCOUNT
8599 + "jno 0f\n"
8600 + _ASM_DEC "%0\n"
8601 + "int $4\n0:\n"
8602 + _ASM_EXTABLE(0b, 0b)
8603 +#endif
8604 +
8605 : "+m" (l->a.counter));
8606 }
8607
8608 static inline void local_dec(local_t *l)
8609 {
8610 - asm volatile(_ASM_DEC "%0"
8611 + asm volatile(_ASM_DEC "%0\n"
8612 +
8613 +#ifdef CONFIG_PAX_REFCOUNT
8614 + "jno 0f\n"
8615 + _ASM_INC "%0\n"
8616 + "int $4\n0:\n"
8617 + _ASM_EXTABLE(0b, 0b)
8618 +#endif
8619 +
8620 : "+m" (l->a.counter));
8621 }
8622
8623 static inline void local_add(long i, local_t *l)
8624 {
8625 - asm volatile(_ASM_ADD "%1,%0"
8626 + asm volatile(_ASM_ADD "%1,%0\n"
8627 +
8628 +#ifdef CONFIG_PAX_REFCOUNT
8629 + "jno 0f\n"
8630 + _ASM_SUB "%1,%0\n"
8631 + "int $4\n0:\n"
8632 + _ASM_EXTABLE(0b, 0b)
8633 +#endif
8634 +
8635 : "+m" (l->a.counter)
8636 : "ir" (i));
8637 }
8638
8639 static inline void local_sub(long i, local_t *l)
8640 {
8641 - asm volatile(_ASM_SUB "%1,%0"
8642 + asm volatile(_ASM_SUB "%1,%0\n"
8643 +
8644 +#ifdef CONFIG_PAX_REFCOUNT
8645 + "jno 0f\n"
8646 + _ASM_ADD "%1,%0\n"
8647 + "int $4\n0:\n"
8648 + _ASM_EXTABLE(0b, 0b)
8649 +#endif
8650 +
8651 : "+m" (l->a.counter)
8652 : "ir" (i));
8653 }
8654 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
8655 {
8656 unsigned char c;
8657
8658 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8659 + asm volatile(_ASM_SUB "%2,%0\n"
8660 +
8661 +#ifdef CONFIG_PAX_REFCOUNT
8662 + "jno 0f\n"
8663 + _ASM_ADD "%2,%0\n"
8664 + "int $4\n0:\n"
8665 + _ASM_EXTABLE(0b, 0b)
8666 +#endif
8667 +
8668 + "sete %1\n"
8669 : "+m" (l->a.counter), "=qm" (c)
8670 : "ir" (i) : "memory");
8671 return c;
8672 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
8673 {
8674 unsigned char c;
8675
8676 - asm volatile(_ASM_DEC "%0; sete %1"
8677 + asm volatile(_ASM_DEC "%0\n"
8678 +
8679 +#ifdef CONFIG_PAX_REFCOUNT
8680 + "jno 0f\n"
8681 + _ASM_INC "%0\n"
8682 + "int $4\n0:\n"
8683 + _ASM_EXTABLE(0b, 0b)
8684 +#endif
8685 +
8686 + "sete %1\n"
8687 : "+m" (l->a.counter), "=qm" (c)
8688 : : "memory");
8689 return c != 0;
8690 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
8691 {
8692 unsigned char c;
8693
8694 - asm volatile(_ASM_INC "%0; sete %1"
8695 + asm volatile(_ASM_INC "%0\n"
8696 +
8697 +#ifdef CONFIG_PAX_REFCOUNT
8698 + "jno 0f\n"
8699 + _ASM_DEC "%0\n"
8700 + "int $4\n0:\n"
8701 + _ASM_EXTABLE(0b, 0b)
8702 +#endif
8703 +
8704 + "sete %1\n"
8705 : "+m" (l->a.counter), "=qm" (c)
8706 : : "memory");
8707 return c != 0;
8708 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
8709 {
8710 unsigned char c;
8711
8712 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8713 + asm volatile(_ASM_ADD "%2,%0\n"
8714 +
8715 +#ifdef CONFIG_PAX_REFCOUNT
8716 + "jno 0f\n"
8717 + _ASM_SUB "%2,%0\n"
8718 + "int $4\n0:\n"
8719 + _ASM_EXTABLE(0b, 0b)
8720 +#endif
8721 +
8722 + "sets %1\n"
8723 : "+m" (l->a.counter), "=qm" (c)
8724 : "ir" (i) : "memory");
8725 return c;
8726 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
8727 #endif
8728 /* Modern 486+ processor */
8729 __i = i;
8730 - asm volatile(_ASM_XADD "%0, %1;"
8731 + asm volatile(_ASM_XADD "%0, %1\n"
8732 +
8733 +#ifdef CONFIG_PAX_REFCOUNT
8734 + "jno 0f\n"
8735 + _ASM_MOV "%0,%1\n"
8736 + "int $4\n0:\n"
8737 + _ASM_EXTABLE(0b, 0b)
8738 +#endif
8739 +
8740 : "+r" (i), "+m" (l->a.counter)
8741 : : "memory");
8742 return i + __i;
8743 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
8744 index 593e51d..fa69c9a 100644
8745 --- a/arch/x86/include/asm/mman.h
8746 +++ b/arch/x86/include/asm/mman.h
8747 @@ -5,4 +5,14 @@
8748
8749 #include <asm-generic/mman.h>
8750
8751 +#ifdef __KERNEL__
8752 +#ifndef __ASSEMBLY__
8753 +#ifdef CONFIG_X86_32
8754 +#define arch_mmap_check i386_mmap_check
8755 +int i386_mmap_check(unsigned long addr, unsigned long len,
8756 + unsigned long flags);
8757 +#endif
8758 +#endif
8759 +#endif
8760 +
8761 #endif /* _ASM_X86_MMAN_H */
8762 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
8763 index 5f55e69..e20bfb1 100644
8764 --- a/arch/x86/include/asm/mmu.h
8765 +++ b/arch/x86/include/asm/mmu.h
8766 @@ -9,7 +9,7 @@
8767 * we put the segment information here.
8768 */
8769 typedef struct {
8770 - void *ldt;
8771 + struct desc_struct *ldt;
8772 int size;
8773
8774 #ifdef CONFIG_X86_64
8775 @@ -18,7 +18,19 @@ typedef struct {
8776 #endif
8777
8778 struct mutex lock;
8779 - void *vdso;
8780 + unsigned long vdso;
8781 +
8782 +#ifdef CONFIG_X86_32
8783 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8784 + unsigned long user_cs_base;
8785 + unsigned long user_cs_limit;
8786 +
8787 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8788 + cpumask_t cpu_user_cs_mask;
8789 +#endif
8790 +
8791 +#endif
8792 +#endif
8793 } mm_context_t;
8794
8795 #ifdef CONFIG_SMP
8796 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
8797 index 6902152..399f3a2 100644
8798 --- a/arch/x86/include/asm/mmu_context.h
8799 +++ b/arch/x86/include/asm/mmu_context.h
8800 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
8801
8802 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8803 {
8804 +
8805 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8806 + unsigned int i;
8807 + pgd_t *pgd;
8808 +
8809 + pax_open_kernel();
8810 + pgd = get_cpu_pgd(smp_processor_id());
8811 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8812 + set_pgd_batched(pgd+i, native_make_pgd(0));
8813 + pax_close_kernel();
8814 +#endif
8815 +
8816 #ifdef CONFIG_SMP
8817 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8818 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8819 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8820 struct task_struct *tsk)
8821 {
8822 unsigned cpu = smp_processor_id();
8823 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8824 + int tlbstate = TLBSTATE_OK;
8825 +#endif
8826
8827 if (likely(prev != next)) {
8828 #ifdef CONFIG_SMP
8829 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8830 + tlbstate = percpu_read(cpu_tlbstate.state);
8831 +#endif
8832 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8833 percpu_write(cpu_tlbstate.active_mm, next);
8834 #endif
8835 cpumask_set_cpu(cpu, mm_cpumask(next));
8836
8837 /* Re-load page tables */
8838 +#ifdef CONFIG_PAX_PER_CPU_PGD
8839 + pax_open_kernel();
8840 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8841 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8842 + pax_close_kernel();
8843 + load_cr3(get_cpu_pgd(cpu));
8844 +#else
8845 load_cr3(next->pgd);
8846 +#endif
8847
8848 /* stop flush ipis for the previous mm */
8849 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8850 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8851 */
8852 if (unlikely(prev->context.ldt != next->context.ldt))
8853 load_LDT_nolock(&next->context);
8854 - }
8855 +
8856 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8857 + if (!(__supported_pte_mask & _PAGE_NX)) {
8858 + smp_mb__before_clear_bit();
8859 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8860 + smp_mb__after_clear_bit();
8861 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8862 + }
8863 +#endif
8864 +
8865 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8866 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8867 + prev->context.user_cs_limit != next->context.user_cs_limit))
8868 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8869 #ifdef CONFIG_SMP
8870 + else if (unlikely(tlbstate != TLBSTATE_OK))
8871 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8872 +#endif
8873 +#endif
8874 +
8875 + }
8876 else {
8877 +
8878 +#ifdef CONFIG_PAX_PER_CPU_PGD
8879 + pax_open_kernel();
8880 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8881 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8882 + pax_close_kernel();
8883 + load_cr3(get_cpu_pgd(cpu));
8884 +#endif
8885 +
8886 +#ifdef CONFIG_SMP
8887 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8888 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8889
8890 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
8891 * tlb flush IPI delivery. We must reload CR3
8892 * to make sure to use no freed page tables.
8893 */
8894 +
8895 +#ifndef CONFIG_PAX_PER_CPU_PGD
8896 load_cr3(next->pgd);
8897 +#endif
8898 +
8899 load_LDT_nolock(&next->context);
8900 +
8901 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8902 + if (!(__supported_pte_mask & _PAGE_NX))
8903 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8904 +#endif
8905 +
8906 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8907 +#ifdef CONFIG_PAX_PAGEEXEC
8908 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
8909 +#endif
8910 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8911 +#endif
8912 +
8913 }
8914 - }
8915 #endif
8916 + }
8917 }
8918
8919 #define activate_mm(prev, next) \
8920 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
8921 index 9eae775..c914fea 100644
8922 --- a/arch/x86/include/asm/module.h
8923 +++ b/arch/x86/include/asm/module.h
8924 @@ -5,6 +5,7 @@
8925
8926 #ifdef CONFIG_X86_64
8927 /* X86_64 does not define MODULE_PROC_FAMILY */
8928 +#define MODULE_PROC_FAMILY ""
8929 #elif defined CONFIG_M386
8930 #define MODULE_PROC_FAMILY "386 "
8931 #elif defined CONFIG_M486
8932 @@ -59,8 +60,20 @@
8933 #error unknown processor family
8934 #endif
8935
8936 -#ifdef CONFIG_X86_32
8937 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
8938 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8939 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
8940 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
8941 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
8942 +#else
8943 +#define MODULE_PAX_KERNEXEC ""
8944 #endif
8945
8946 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8947 +#define MODULE_PAX_UDEREF "UDEREF "
8948 +#else
8949 +#define MODULE_PAX_UDEREF ""
8950 +#endif
8951 +
8952 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
8953 +
8954 #endif /* _ASM_X86_MODULE_H */
8955 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
8956 index 7639dbf..e08a58c 100644
8957 --- a/arch/x86/include/asm/page_64_types.h
8958 +++ b/arch/x86/include/asm/page_64_types.h
8959 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8960
8961 /* duplicated to the one in bootmem.h */
8962 extern unsigned long max_pfn;
8963 -extern unsigned long phys_base;
8964 +extern const unsigned long phys_base;
8965
8966 extern unsigned long __phys_addr(unsigned long);
8967 #define __phys_reloc_hide(x) (x)
8968 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
8969 index a7d2db9..edb023e 100644
8970 --- a/arch/x86/include/asm/paravirt.h
8971 +++ b/arch/x86/include/asm/paravirt.h
8972 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
8973 val);
8974 }
8975
8976 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
8977 +{
8978 + pgdval_t val = native_pgd_val(pgd);
8979 +
8980 + if (sizeof(pgdval_t) > sizeof(long))
8981 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
8982 + val, (u64)val >> 32);
8983 + else
8984 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
8985 + val);
8986 +}
8987 +
8988 static inline void pgd_clear(pgd_t *pgdp)
8989 {
8990 set_pgd(pgdp, __pgd(0));
8991 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
8992 pv_mmu_ops.set_fixmap(idx, phys, flags);
8993 }
8994
8995 +#ifdef CONFIG_PAX_KERNEXEC
8996 +static inline unsigned long pax_open_kernel(void)
8997 +{
8998 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8999 +}
9000 +
9001 +static inline unsigned long pax_close_kernel(void)
9002 +{
9003 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9004 +}
9005 +#else
9006 +static inline unsigned long pax_open_kernel(void) { return 0; }
9007 +static inline unsigned long pax_close_kernel(void) { return 0; }
9008 +#endif
9009 +
9010 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9011
9012 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
9013 @@ -964,7 +991,7 @@ extern void default_banner(void);
9014
9015 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9016 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9017 -#define PARA_INDIRECT(addr) *%cs:addr
9018 +#define PARA_INDIRECT(addr) *%ss:addr
9019 #endif
9020
9021 #define INTERRUPT_RETURN \
9022 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
9023 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9024 CLBR_NONE, \
9025 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9026 +
9027 +#define GET_CR0_INTO_RDI \
9028 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9029 + mov %rax,%rdi
9030 +
9031 +#define SET_RDI_INTO_CR0 \
9032 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9033 +
9034 +#define GET_CR3_INTO_RDI \
9035 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9036 + mov %rax,%rdi
9037 +
9038 +#define SET_RDI_INTO_CR3 \
9039 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9040 +
9041 #endif /* CONFIG_X86_32 */
9042
9043 #endif /* __ASSEMBLY__ */
9044 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
9045 index 8e8b9a4..f07d725 100644
9046 --- a/arch/x86/include/asm/paravirt_types.h
9047 +++ b/arch/x86/include/asm/paravirt_types.h
9048 @@ -84,20 +84,20 @@ struct pv_init_ops {
9049 */
9050 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9051 unsigned long addr, unsigned len);
9052 -};
9053 +} __no_const;
9054
9055
9056 struct pv_lazy_ops {
9057 /* Set deferred update mode, used for batching operations. */
9058 void (*enter)(void);
9059 void (*leave)(void);
9060 -};
9061 +} __no_const;
9062
9063 struct pv_time_ops {
9064 unsigned long long (*sched_clock)(void);
9065 unsigned long long (*steal_clock)(int cpu);
9066 unsigned long (*get_tsc_khz)(void);
9067 -};
9068 +} __no_const;
9069
9070 struct pv_cpu_ops {
9071 /* hooks for various privileged instructions */
9072 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
9073
9074 void (*start_context_switch)(struct task_struct *prev);
9075 void (*end_context_switch)(struct task_struct *next);
9076 -};
9077 +} __no_const;
9078
9079 struct pv_irq_ops {
9080 /*
9081 @@ -224,7 +224,7 @@ struct pv_apic_ops {
9082 unsigned long start_eip,
9083 unsigned long start_esp);
9084 #endif
9085 -};
9086 +} __no_const;
9087
9088 struct pv_mmu_ops {
9089 unsigned long (*read_cr2)(void);
9090 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
9091 struct paravirt_callee_save make_pud;
9092
9093 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
9094 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
9095 #endif /* PAGETABLE_LEVELS == 4 */
9096 #endif /* PAGETABLE_LEVELS >= 3 */
9097
9098 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
9099 an mfn. We can tell which is which from the index. */
9100 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9101 phys_addr_t phys, pgprot_t flags);
9102 +
9103 +#ifdef CONFIG_PAX_KERNEXEC
9104 + unsigned long (*pax_open_kernel)(void);
9105 + unsigned long (*pax_close_kernel)(void);
9106 +#endif
9107 +
9108 };
9109
9110 struct arch_spinlock;
9111 @@ -334,7 +341,7 @@ struct pv_lock_ops {
9112 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
9113 int (*spin_trylock)(struct arch_spinlock *lock);
9114 void (*spin_unlock)(struct arch_spinlock *lock);
9115 -};
9116 +} __no_const;
9117
9118 /* This contains all the paravirt structures: we get a convenient
9119 * number for each function using the offset which we use to indicate
9120 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
9121 index b4389a4..b7ff22c 100644
9122 --- a/arch/x86/include/asm/pgalloc.h
9123 +++ b/arch/x86/include/asm/pgalloc.h
9124 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
9125 pmd_t *pmd, pte_t *pte)
9126 {
9127 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9128 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9129 +}
9130 +
9131 +static inline void pmd_populate_user(struct mm_struct *mm,
9132 + pmd_t *pmd, pte_t *pte)
9133 +{
9134 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9135 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9136 }
9137
9138 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
9139 index 98391db..8f6984e 100644
9140 --- a/arch/x86/include/asm/pgtable-2level.h
9141 +++ b/arch/x86/include/asm/pgtable-2level.h
9142 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
9143
9144 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9145 {
9146 + pax_open_kernel();
9147 *pmdp = pmd;
9148 + pax_close_kernel();
9149 }
9150
9151 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9152 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
9153 index effff47..f9e4035 100644
9154 --- a/arch/x86/include/asm/pgtable-3level.h
9155 +++ b/arch/x86/include/asm/pgtable-3level.h
9156 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9157
9158 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9159 {
9160 + pax_open_kernel();
9161 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9162 + pax_close_kernel();
9163 }
9164
9165 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9166 {
9167 + pax_open_kernel();
9168 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9169 + pax_close_kernel();
9170 }
9171
9172 /*
9173 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
9174 index 18601c8..3d716d1 100644
9175 --- a/arch/x86/include/asm/pgtable.h
9176 +++ b/arch/x86/include/asm/pgtable.h
9177 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9178
9179 #ifndef __PAGETABLE_PUD_FOLDED
9180 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
9181 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
9182 #define pgd_clear(pgd) native_pgd_clear(pgd)
9183 #endif
9184
9185 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
9186
9187 #define arch_end_context_switch(prev) do {} while(0)
9188
9189 +#define pax_open_kernel() native_pax_open_kernel()
9190 +#define pax_close_kernel() native_pax_close_kernel()
9191 #endif /* CONFIG_PARAVIRT */
9192
9193 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9194 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9195 +
9196 +#ifdef CONFIG_PAX_KERNEXEC
9197 +static inline unsigned long native_pax_open_kernel(void)
9198 +{
9199 + unsigned long cr0;
9200 +
9201 + preempt_disable();
9202 + barrier();
9203 + cr0 = read_cr0() ^ X86_CR0_WP;
9204 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9205 + write_cr0(cr0);
9206 + return cr0 ^ X86_CR0_WP;
9207 +}
9208 +
9209 +static inline unsigned long native_pax_close_kernel(void)
9210 +{
9211 + unsigned long cr0;
9212 +
9213 + cr0 = read_cr0() ^ X86_CR0_WP;
9214 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9215 + write_cr0(cr0);
9216 + barrier();
9217 + preempt_enable_no_resched();
9218 + return cr0 ^ X86_CR0_WP;
9219 +}
9220 +#else
9221 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9222 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9223 +#endif
9224 +
9225 /*
9226 * The following only work if pte_present() is true.
9227 * Undefined behaviour if not..
9228 */
9229 +static inline int pte_user(pte_t pte)
9230 +{
9231 + return pte_val(pte) & _PAGE_USER;
9232 +}
9233 +
9234 static inline int pte_dirty(pte_t pte)
9235 {
9236 return pte_flags(pte) & _PAGE_DIRTY;
9237 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
9238 return pte_clear_flags(pte, _PAGE_RW);
9239 }
9240
9241 +static inline pte_t pte_mkread(pte_t pte)
9242 +{
9243 + return __pte(pte_val(pte) | _PAGE_USER);
9244 +}
9245 +
9246 static inline pte_t pte_mkexec(pte_t pte)
9247 {
9248 - return pte_clear_flags(pte, _PAGE_NX);
9249 +#ifdef CONFIG_X86_PAE
9250 + if (__supported_pte_mask & _PAGE_NX)
9251 + return pte_clear_flags(pte, _PAGE_NX);
9252 + else
9253 +#endif
9254 + return pte_set_flags(pte, _PAGE_USER);
9255 +}
9256 +
9257 +static inline pte_t pte_exprotect(pte_t pte)
9258 +{
9259 +#ifdef CONFIG_X86_PAE
9260 + if (__supported_pte_mask & _PAGE_NX)
9261 + return pte_set_flags(pte, _PAGE_NX);
9262 + else
9263 +#endif
9264 + return pte_clear_flags(pte, _PAGE_USER);
9265 }
9266
9267 static inline pte_t pte_mkdirty(pte_t pte)
9268 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
9269 #endif
9270
9271 #ifndef __ASSEMBLY__
9272 +
9273 +#ifdef CONFIG_PAX_PER_CPU_PGD
9274 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9275 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9276 +{
9277 + return cpu_pgd[cpu];
9278 +}
9279 +#endif
9280 +
9281 #include <linux/mm_types.h>
9282
9283 static inline int pte_none(pte_t pte)
9284 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
9285
9286 static inline int pgd_bad(pgd_t pgd)
9287 {
9288 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9289 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9290 }
9291
9292 static inline int pgd_none(pgd_t pgd)
9293 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
9294 * pgd_offset() returns a (pgd_t *)
9295 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9296 */
9297 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9298 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9299 +
9300 +#ifdef CONFIG_PAX_PER_CPU_PGD
9301 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9302 +#endif
9303 +
9304 /*
9305 * a shortcut which implies the use of the kernel's pgd, instead
9306 * of a process's
9307 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
9308 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9309 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9310
9311 +#ifdef CONFIG_X86_32
9312 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9313 +#else
9314 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9315 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9316 +
9317 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9318 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9319 +#else
9320 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9321 +#endif
9322 +
9323 +#endif
9324 +
9325 #ifndef __ASSEMBLY__
9326
9327 extern int direct_gbpages;
9328 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
9329 * dst and src can be on the same page, but the range must not overlap,
9330 * and must not cross a page boundary.
9331 */
9332 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9333 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9334 {
9335 - memcpy(dst, src, count * sizeof(pgd_t));
9336 + pax_open_kernel();
9337 + while (count--)
9338 + *dst++ = *src++;
9339 + pax_close_kernel();
9340 }
9341
9342 +#ifdef CONFIG_PAX_PER_CPU_PGD
9343 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9344 +#endif
9345 +
9346 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9347 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9348 +#else
9349 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9350 +#endif
9351
9352 #include <asm-generic/pgtable.h>
9353 #endif /* __ASSEMBLY__ */
9354 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
9355 index 0c92113..34a77c6 100644
9356 --- a/arch/x86/include/asm/pgtable_32.h
9357 +++ b/arch/x86/include/asm/pgtable_32.h
9358 @@ -25,9 +25,6 @@
9359 struct mm_struct;
9360 struct vm_area_struct;
9361
9362 -extern pgd_t swapper_pg_dir[1024];
9363 -extern pgd_t initial_page_table[1024];
9364 -
9365 static inline void pgtable_cache_init(void) { }
9366 static inline void check_pgt_cache(void) { }
9367 void paging_init(void);
9368 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9369 # include <asm/pgtable-2level.h>
9370 #endif
9371
9372 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9373 +extern pgd_t initial_page_table[PTRS_PER_PGD];
9374 +#ifdef CONFIG_X86_PAE
9375 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9376 +#endif
9377 +
9378 #if defined(CONFIG_HIGHPTE)
9379 #define pte_offset_map(dir, address) \
9380 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
9381 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
9382 /* Clear a kernel PTE and flush it from the TLB */
9383 #define kpte_clear_flush(ptep, vaddr) \
9384 do { \
9385 + pax_open_kernel(); \
9386 pte_clear(&init_mm, (vaddr), (ptep)); \
9387 + pax_close_kernel(); \
9388 __flush_tlb_one((vaddr)); \
9389 } while (0)
9390
9391 @@ -74,6 +79,9 @@ do { \
9392
9393 #endif /* !__ASSEMBLY__ */
9394
9395 +#define HAVE_ARCH_UNMAPPED_AREA
9396 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9397 +
9398 /*
9399 * kern_addr_valid() is (1) for FLATMEM and (0) for
9400 * SPARSEMEM and DISCONTIGMEM
9401 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
9402 index ed5903b..c7fe163 100644
9403 --- a/arch/x86/include/asm/pgtable_32_types.h
9404 +++ b/arch/x86/include/asm/pgtable_32_types.h
9405 @@ -8,7 +8,7 @@
9406 */
9407 #ifdef CONFIG_X86_PAE
9408 # include <asm/pgtable-3level_types.h>
9409 -# define PMD_SIZE (1UL << PMD_SHIFT)
9410 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9411 # define PMD_MASK (~(PMD_SIZE - 1))
9412 #else
9413 # include <asm/pgtable-2level_types.h>
9414 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
9415 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9416 #endif
9417
9418 +#ifdef CONFIG_PAX_KERNEXEC
9419 +#ifndef __ASSEMBLY__
9420 +extern unsigned char MODULES_EXEC_VADDR[];
9421 +extern unsigned char MODULES_EXEC_END[];
9422 +#endif
9423 +#include <asm/boot.h>
9424 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9425 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9426 +#else
9427 +#define ktla_ktva(addr) (addr)
9428 +#define ktva_ktla(addr) (addr)
9429 +#endif
9430 +
9431 #define MODULES_VADDR VMALLOC_START
9432 #define MODULES_END VMALLOC_END
9433 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9434 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
9435 index 975f709..3a89693 100644
9436 --- a/arch/x86/include/asm/pgtable_64.h
9437 +++ b/arch/x86/include/asm/pgtable_64.h
9438 @@ -16,10 +16,13 @@
9439
9440 extern pud_t level3_kernel_pgt[512];
9441 extern pud_t level3_ident_pgt[512];
9442 +extern pud_t level3_vmalloc_pgt[512];
9443 +extern pud_t level3_vmemmap_pgt[512];
9444 +extern pud_t level2_vmemmap_pgt[512];
9445 extern pmd_t level2_kernel_pgt[512];
9446 extern pmd_t level2_fixmap_pgt[512];
9447 -extern pmd_t level2_ident_pgt[512];
9448 -extern pgd_t init_level4_pgt[];
9449 +extern pmd_t level2_ident_pgt[512*2];
9450 +extern pgd_t init_level4_pgt[512];
9451
9452 #define swapper_pg_dir init_level4_pgt
9453
9454 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9455
9456 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9457 {
9458 + pax_open_kernel();
9459 *pmdp = pmd;
9460 + pax_close_kernel();
9461 }
9462
9463 static inline void native_pmd_clear(pmd_t *pmd)
9464 @@ -107,6 +112,13 @@ static inline void native_pud_clear(pud_t *pud)
9465
9466 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9467 {
9468 + pax_open_kernel();
9469 + *pgdp = pgd;
9470 + pax_close_kernel();
9471 +}
9472 +
9473 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
9474 +{
9475 *pgdp = pgd;
9476 }
9477
9478 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
9479 index 766ea16..5b96cb3 100644
9480 --- a/arch/x86/include/asm/pgtable_64_types.h
9481 +++ b/arch/x86/include/asm/pgtable_64_types.h
9482 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9483 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9484 #define MODULES_END _AC(0xffffffffff000000, UL)
9485 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9486 +#define MODULES_EXEC_VADDR MODULES_VADDR
9487 +#define MODULES_EXEC_END MODULES_END
9488 +
9489 +#define ktla_ktva(addr) (addr)
9490 +#define ktva_ktla(addr) (addr)
9491
9492 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9493 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
9494 index 013286a..8b42f4f 100644
9495 --- a/arch/x86/include/asm/pgtable_types.h
9496 +++ b/arch/x86/include/asm/pgtable_types.h
9497 @@ -16,13 +16,12 @@
9498 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9499 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9500 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9501 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9502 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9503 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9504 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9505 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9506 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9507 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9508 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
9509 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9510 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
9511 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9512
9513 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9514 @@ -40,7 +39,6 @@
9515 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9516 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9517 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9518 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9519 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9520 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9521 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9522 @@ -57,8 +55,10 @@
9523
9524 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9525 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9526 -#else
9527 +#elif defined(CONFIG_KMEMCHECK)
9528 #define _PAGE_NX (_AT(pteval_t, 0))
9529 +#else
9530 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9531 #endif
9532
9533 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9534 @@ -96,6 +96,9 @@
9535 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9536 _PAGE_ACCESSED)
9537
9538 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9539 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9540 +
9541 #define __PAGE_KERNEL_EXEC \
9542 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9543 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9544 @@ -106,7 +109,7 @@
9545 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9546 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9547 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9548 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9549 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9550 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
9551 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
9552 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9553 @@ -168,8 +171,8 @@
9554 * bits are combined, this will alow user to access the high address mapped
9555 * VDSO in the presence of CONFIG_COMPAT_VDSO
9556 */
9557 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9558 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9559 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9560 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9561 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9562 #endif
9563
9564 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
9565 {
9566 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9567 }
9568 +#endif
9569
9570 +#if PAGETABLE_LEVELS == 3
9571 +#include <asm-generic/pgtable-nopud.h>
9572 +#endif
9573 +
9574 +#if PAGETABLE_LEVELS == 2
9575 +#include <asm-generic/pgtable-nopmd.h>
9576 +#endif
9577 +
9578 +#ifndef __ASSEMBLY__
9579 #if PAGETABLE_LEVELS > 3
9580 typedef struct { pudval_t pud; } pud_t;
9581
9582 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
9583 return pud.pud;
9584 }
9585 #else
9586 -#include <asm-generic/pgtable-nopud.h>
9587 -
9588 static inline pudval_t native_pud_val(pud_t pud)
9589 {
9590 return native_pgd_val(pud.pgd);
9591 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
9592 return pmd.pmd;
9593 }
9594 #else
9595 -#include <asm-generic/pgtable-nopmd.h>
9596 -
9597 static inline pmdval_t native_pmd_val(pmd_t pmd)
9598 {
9599 return native_pgd_val(pmd.pud.pgd);
9600 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
9601
9602 extern pteval_t __supported_pte_mask;
9603 extern void set_nx(void);
9604 -extern int nx_enabled;
9605
9606 #define pgprot_writecombine pgprot_writecombine
9607 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9608 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
9609 index 0d1171c..36571a9 100644
9610 --- a/arch/x86/include/asm/processor.h
9611 +++ b/arch/x86/include/asm/processor.h
9612 @@ -266,7 +266,7 @@ struct tss_struct {
9613
9614 } ____cacheline_aligned;
9615
9616 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9617 +extern struct tss_struct init_tss[NR_CPUS];
9618
9619 /*
9620 * Save the original ist values for checking stack pointers during debugging
9621 @@ -858,11 +858,18 @@ static inline void spin_lock_prefetch(const void *x)
9622 */
9623 #define TASK_SIZE PAGE_OFFSET
9624 #define TASK_SIZE_MAX TASK_SIZE
9625 +
9626 +#ifdef CONFIG_PAX_SEGMEXEC
9627 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9628 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9629 +#else
9630 #define STACK_TOP TASK_SIZE
9631 -#define STACK_TOP_MAX STACK_TOP
9632 +#endif
9633 +
9634 +#define STACK_TOP_MAX TASK_SIZE
9635
9636 #define INIT_THREAD { \
9637 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9638 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9639 .vm86_info = NULL, \
9640 .sysenter_cs = __KERNEL_CS, \
9641 .io_bitmap_ptr = NULL, \
9642 @@ -876,7 +883,7 @@ static inline void spin_lock_prefetch(const void *x)
9643 */
9644 #define INIT_TSS { \
9645 .x86_tss = { \
9646 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9647 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9648 .ss0 = __KERNEL_DS, \
9649 .ss1 = __KERNEL_CS, \
9650 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9651 @@ -887,11 +894,7 @@ static inline void spin_lock_prefetch(const void *x)
9652 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9653
9654 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9655 -#define KSTK_TOP(info) \
9656 -({ \
9657 - unsigned long *__ptr = (unsigned long *)(info); \
9658 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9659 -})
9660 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9661
9662 /*
9663 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9664 @@ -906,7 +909,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9665 #define task_pt_regs(task) \
9666 ({ \
9667 struct pt_regs *__regs__; \
9668 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9669 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9670 __regs__ - 1; \
9671 })
9672
9673 @@ -916,13 +919,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9674 /*
9675 * User space process size. 47bits minus one guard page.
9676 */
9677 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9678 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9679
9680 /* This decides where the kernel will search for a free chunk of vm
9681 * space during mmap's.
9682 */
9683 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9684 - 0xc0000000 : 0xFFFFe000)
9685 + 0xc0000000 : 0xFFFFf000)
9686
9687 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9688 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9689 @@ -933,11 +936,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
9690 #define STACK_TOP_MAX TASK_SIZE_MAX
9691
9692 #define INIT_THREAD { \
9693 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9694 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9695 }
9696
9697 #define INIT_TSS { \
9698 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9699 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9700 }
9701
9702 /*
9703 @@ -959,6 +962,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
9704 */
9705 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9706
9707 +#ifdef CONFIG_PAX_SEGMEXEC
9708 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9709 +#endif
9710 +
9711 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9712
9713 /* Get/set a process' ability to use the timestamp counter instruction */
9714 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
9715 index 3566454..4bdfb8c 100644
9716 --- a/arch/x86/include/asm/ptrace.h
9717 +++ b/arch/x86/include/asm/ptrace.h
9718 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
9719 }
9720
9721 /*
9722 - * user_mode_vm(regs) determines whether a register set came from user mode.
9723 + * user_mode(regs) determines whether a register set came from user mode.
9724 * This is true if V8086 mode was enabled OR if the register set was from
9725 * protected mode with RPL-3 CS value. This tricky test checks that with
9726 * one comparison. Many places in the kernel can bypass this full check
9727 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9728 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9729 + * be used.
9730 */
9731 -static inline int user_mode(struct pt_regs *regs)
9732 +static inline int user_mode_novm(struct pt_regs *regs)
9733 {
9734 #ifdef CONFIG_X86_32
9735 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9736 #else
9737 - return !!(regs->cs & 3);
9738 + return !!(regs->cs & SEGMENT_RPL_MASK);
9739 #endif
9740 }
9741
9742 -static inline int user_mode_vm(struct pt_regs *regs)
9743 +static inline int user_mode(struct pt_regs *regs)
9744 {
9745 #ifdef CONFIG_X86_32
9746 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9747 USER_RPL;
9748 #else
9749 - return user_mode(regs);
9750 + return user_mode_novm(regs);
9751 #endif
9752 }
9753
9754 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
9755 #ifdef CONFIG_X86_64
9756 static inline bool user_64bit_mode(struct pt_regs *regs)
9757 {
9758 + unsigned long cs = regs->cs & 0xffff;
9759 #ifndef CONFIG_PARAVIRT
9760 /*
9761 * On non-paravirt systems, this is the only long mode CPL 3
9762 * selector. We do not allow long mode selectors in the LDT.
9763 */
9764 - return regs->cs == __USER_CS;
9765 + return cs == __USER_CS;
9766 #else
9767 /* Headers are too twisted for this to go in paravirt.h. */
9768 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
9769 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
9770 #endif
9771 }
9772 #endif
9773 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
9774 index 3250e3d..20db631 100644
9775 --- a/arch/x86/include/asm/reboot.h
9776 +++ b/arch/x86/include/asm/reboot.h
9777 @@ -6,19 +6,19 @@
9778 struct pt_regs;
9779
9780 struct machine_ops {
9781 - void (*restart)(char *cmd);
9782 - void (*halt)(void);
9783 - void (*power_off)(void);
9784 + void (* __noreturn restart)(char *cmd);
9785 + void (* __noreturn halt)(void);
9786 + void (* __noreturn power_off)(void);
9787 void (*shutdown)(void);
9788 void (*crash_shutdown)(struct pt_regs *);
9789 - void (*emergency_restart)(void);
9790 -};
9791 + void (* __noreturn emergency_restart)(void);
9792 +} __no_const;
9793
9794 extern struct machine_ops machine_ops;
9795
9796 void native_machine_crash_shutdown(struct pt_regs *regs);
9797 void native_machine_shutdown(void);
9798 -void machine_real_restart(unsigned int type);
9799 +void machine_real_restart(unsigned int type) __noreturn;
9800 /* These must match dispatch_table in reboot_32.S */
9801 #define MRR_BIOS 0
9802 #define MRR_APM 1
9803 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
9804 index df4cd32..27ae072 100644
9805 --- a/arch/x86/include/asm/rwsem.h
9806 +++ b/arch/x86/include/asm/rwsem.h
9807 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
9808 {
9809 asm volatile("# beginning down_read\n\t"
9810 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9811 +
9812 +#ifdef CONFIG_PAX_REFCOUNT
9813 + "jno 0f\n"
9814 + LOCK_PREFIX _ASM_DEC "(%1)\n"
9815 + "int $4\n0:\n"
9816 + _ASM_EXTABLE(0b, 0b)
9817 +#endif
9818 +
9819 /* adds 0x00000001 */
9820 " jns 1f\n"
9821 " call call_rwsem_down_read_failed\n"
9822 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
9823 "1:\n\t"
9824 " mov %1,%2\n\t"
9825 " add %3,%2\n\t"
9826 +
9827 +#ifdef CONFIG_PAX_REFCOUNT
9828 + "jno 0f\n"
9829 + "sub %3,%2\n"
9830 + "int $4\n0:\n"
9831 + _ASM_EXTABLE(0b, 0b)
9832 +#endif
9833 +
9834 " jle 2f\n\t"
9835 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9836 " jnz 1b\n\t"
9837 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
9838 long tmp;
9839 asm volatile("# beginning down_write\n\t"
9840 LOCK_PREFIX " xadd %1,(%2)\n\t"
9841 +
9842 +#ifdef CONFIG_PAX_REFCOUNT
9843 + "jno 0f\n"
9844 + "mov %1,(%2)\n"
9845 + "int $4\n0:\n"
9846 + _ASM_EXTABLE(0b, 0b)
9847 +#endif
9848 +
9849 /* adds 0xffff0001, returns the old value */
9850 " test %1,%1\n\t"
9851 /* was the count 0 before? */
9852 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
9853 long tmp;
9854 asm volatile("# beginning __up_read\n\t"
9855 LOCK_PREFIX " xadd %1,(%2)\n\t"
9856 +
9857 +#ifdef CONFIG_PAX_REFCOUNT
9858 + "jno 0f\n"
9859 + "mov %1,(%2)\n"
9860 + "int $4\n0:\n"
9861 + _ASM_EXTABLE(0b, 0b)
9862 +#endif
9863 +
9864 /* subtracts 1, returns the old value */
9865 " jns 1f\n\t"
9866 " call call_rwsem_wake\n" /* expects old value in %edx */
9867 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
9868 long tmp;
9869 asm volatile("# beginning __up_write\n\t"
9870 LOCK_PREFIX " xadd %1,(%2)\n\t"
9871 +
9872 +#ifdef CONFIG_PAX_REFCOUNT
9873 + "jno 0f\n"
9874 + "mov %1,(%2)\n"
9875 + "int $4\n0:\n"
9876 + _ASM_EXTABLE(0b, 0b)
9877 +#endif
9878 +
9879 /* subtracts 0xffff0001, returns the old value */
9880 " jns 1f\n\t"
9881 " call call_rwsem_wake\n" /* expects old value in %edx */
9882 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9883 {
9884 asm volatile("# beginning __downgrade_write\n\t"
9885 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9886 +
9887 +#ifdef CONFIG_PAX_REFCOUNT
9888 + "jno 0f\n"
9889 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9890 + "int $4\n0:\n"
9891 + _ASM_EXTABLE(0b, 0b)
9892 +#endif
9893 +
9894 /*
9895 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9896 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9897 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
9898 */
9899 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
9900 {
9901 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9902 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9903 +
9904 +#ifdef CONFIG_PAX_REFCOUNT
9905 + "jno 0f\n"
9906 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9907 + "int $4\n0:\n"
9908 + _ASM_EXTABLE(0b, 0b)
9909 +#endif
9910 +
9911 : "+m" (sem->count)
9912 : "er" (delta));
9913 }
9914 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
9915 {
9916 long tmp = delta;
9917
9918 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9919 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9920 +
9921 +#ifdef CONFIG_PAX_REFCOUNT
9922 + "jno 0f\n"
9923 + "mov %0,%1\n"
9924 + "int $4\n0:\n"
9925 + _ASM_EXTABLE(0b, 0b)
9926 +#endif
9927 +
9928 : "+r" (tmp), "+m" (sem->count)
9929 : : "memory");
9930
9931 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
9932 index 5e64171..f58957e 100644
9933 --- a/arch/x86/include/asm/segment.h
9934 +++ b/arch/x86/include/asm/segment.h
9935 @@ -64,10 +64,15 @@
9936 * 26 - ESPFIX small SS
9937 * 27 - per-cpu [ offset to per-cpu data area ]
9938 * 28 - stack_canary-20 [ for stack protector ]
9939 - * 29 - unused
9940 - * 30 - unused
9941 + * 29 - PCI BIOS CS
9942 + * 30 - PCI BIOS DS
9943 * 31 - TSS for double fault handler
9944 */
9945 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
9946 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
9947 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
9948 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
9949 +
9950 #define GDT_ENTRY_TLS_MIN 6
9951 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
9952
9953 @@ -79,6 +84,8 @@
9954
9955 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
9956
9957 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9958 +
9959 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
9960
9961 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
9962 @@ -104,6 +111,12 @@
9963 #define __KERNEL_STACK_CANARY 0
9964 #endif
9965
9966 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
9967 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9968 +
9969 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
9970 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9971 +
9972 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9973
9974 /*
9975 @@ -141,7 +154,7 @@
9976 */
9977
9978 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9979 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9980 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9981
9982
9983 #else
9984 @@ -165,6 +178,8 @@
9985 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
9986 #define __USER32_DS __USER_DS
9987
9988 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9989 +
9990 #define GDT_ENTRY_TSS 8 /* needs two entries */
9991 #define GDT_ENTRY_LDT 10 /* needs two entries */
9992 #define GDT_ENTRY_TLS_MIN 12
9993 @@ -185,6 +200,7 @@
9994 #endif
9995
9996 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
9997 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
9998 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
9999 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
10000 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
10001 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
10002 index 73b11bc..d4a3b63 100644
10003 --- a/arch/x86/include/asm/smp.h
10004 +++ b/arch/x86/include/asm/smp.h
10005 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10006 /* cpus sharing the last level cache: */
10007 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
10008 DECLARE_PER_CPU(u16, cpu_llc_id);
10009 -DECLARE_PER_CPU(int, cpu_number);
10010 +DECLARE_PER_CPU(unsigned int, cpu_number);
10011
10012 static inline struct cpumask *cpu_sibling_mask(int cpu)
10013 {
10014 @@ -77,7 +77,7 @@ struct smp_ops {
10015
10016 void (*send_call_func_ipi)(const struct cpumask *mask);
10017 void (*send_call_func_single_ipi)(int cpu);
10018 -};
10019 +} __no_const;
10020
10021 /* Globals due to paravirt */
10022 extern void set_cpu_sibling_map(int cpu);
10023 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
10024 extern int safe_smp_processor_id(void);
10025
10026 #elif defined(CONFIG_X86_64_SMP)
10027 -#define raw_smp_processor_id() (percpu_read(cpu_number))
10028 -
10029 -#define stack_smp_processor_id() \
10030 -({ \
10031 - struct thread_info *ti; \
10032 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10033 - ti->cpu; \
10034 -})
10035 +#define raw_smp_processor_id() (percpu_read(cpu_number))
10036 +#define stack_smp_processor_id() raw_smp_processor_id()
10037 #define safe_smp_processor_id() smp_processor_id()
10038
10039 #endif
10040 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
10041 index ee67edf..49c796b 100644
10042 --- a/arch/x86/include/asm/spinlock.h
10043 +++ b/arch/x86/include/asm/spinlock.h
10044 @@ -248,6 +248,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
10045 static inline void arch_read_lock(arch_rwlock_t *rw)
10046 {
10047 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
10048 +
10049 +#ifdef CONFIG_PAX_REFCOUNT
10050 + "jno 0f\n"
10051 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
10052 + "int $4\n0:\n"
10053 + _ASM_EXTABLE(0b, 0b)
10054 +#endif
10055 +
10056 "jns 1f\n"
10057 "call __read_lock_failed\n\t"
10058 "1:\n"
10059 @@ -257,6 +265,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
10060 static inline void arch_write_lock(arch_rwlock_t *rw)
10061 {
10062 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
10063 +
10064 +#ifdef CONFIG_PAX_REFCOUNT
10065 + "jno 0f\n"
10066 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
10067 + "int $4\n0:\n"
10068 + _ASM_EXTABLE(0b, 0b)
10069 +#endif
10070 +
10071 "jz 1f\n"
10072 "call __write_lock_failed\n\t"
10073 "1:\n"
10074 @@ -286,13 +302,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
10075
10076 static inline void arch_read_unlock(arch_rwlock_t *rw)
10077 {
10078 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
10079 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
10080 +
10081 +#ifdef CONFIG_PAX_REFCOUNT
10082 + "jno 0f\n"
10083 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
10084 + "int $4\n0:\n"
10085 + _ASM_EXTABLE(0b, 0b)
10086 +#endif
10087 +
10088 :"+m" (rw->lock) : : "memory");
10089 }
10090
10091 static inline void arch_write_unlock(arch_rwlock_t *rw)
10092 {
10093 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
10094 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
10095 +
10096 +#ifdef CONFIG_PAX_REFCOUNT
10097 + "jno 0f\n"
10098 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
10099 + "int $4\n0:\n"
10100 + _ASM_EXTABLE(0b, 0b)
10101 +#endif
10102 +
10103 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
10104 }
10105
10106 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
10107 index 1575177..cb23f52 100644
10108 --- a/arch/x86/include/asm/stackprotector.h
10109 +++ b/arch/x86/include/asm/stackprotector.h
10110 @@ -48,7 +48,7 @@
10111 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10112 */
10113 #define GDT_STACK_CANARY_INIT \
10114 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10115 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10116
10117 /*
10118 * Initialize the stackprotector canary value.
10119 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
10120
10121 static inline void load_stack_canary_segment(void)
10122 {
10123 -#ifdef CONFIG_X86_32
10124 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10125 asm volatile ("mov %0, %%gs" : : "r" (0));
10126 #endif
10127 }
10128 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
10129 index 70bbe39..4ae2bd4 100644
10130 --- a/arch/x86/include/asm/stacktrace.h
10131 +++ b/arch/x86/include/asm/stacktrace.h
10132 @@ -11,28 +11,20 @@
10133
10134 extern int kstack_depth_to_print;
10135
10136 -struct thread_info;
10137 +struct task_struct;
10138 struct stacktrace_ops;
10139
10140 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
10141 - unsigned long *stack,
10142 - unsigned long bp,
10143 - const struct stacktrace_ops *ops,
10144 - void *data,
10145 - unsigned long *end,
10146 - int *graph);
10147 -
10148 -extern unsigned long
10149 -print_context_stack(struct thread_info *tinfo,
10150 - unsigned long *stack, unsigned long bp,
10151 - const struct stacktrace_ops *ops, void *data,
10152 - unsigned long *end, int *graph);
10153 -
10154 -extern unsigned long
10155 -print_context_stack_bp(struct thread_info *tinfo,
10156 - unsigned long *stack, unsigned long bp,
10157 - const struct stacktrace_ops *ops, void *data,
10158 - unsigned long *end, int *graph);
10159 +typedef unsigned long walk_stack_t(struct task_struct *task,
10160 + void *stack_start,
10161 + unsigned long *stack,
10162 + unsigned long bp,
10163 + const struct stacktrace_ops *ops,
10164 + void *data,
10165 + unsigned long *end,
10166 + int *graph);
10167 +
10168 +extern walk_stack_t print_context_stack;
10169 +extern walk_stack_t print_context_stack_bp;
10170
10171 /* Generic stack tracer with callbacks */
10172
10173 @@ -40,7 +32,7 @@ struct stacktrace_ops {
10174 void (*address)(void *data, unsigned long address, int reliable);
10175 /* On negative return stop dumping */
10176 int (*stack)(void *data, char *name);
10177 - walk_stack_t walk_stack;
10178 + walk_stack_t *walk_stack;
10179 };
10180
10181 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
10182 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
10183 index cb23852..2dde194 100644
10184 --- a/arch/x86/include/asm/sys_ia32.h
10185 +++ b/arch/x86/include/asm/sys_ia32.h
10186 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
10187 compat_sigset_t __user *, unsigned int);
10188 asmlinkage long sys32_alarm(unsigned int);
10189
10190 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
10191 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
10192 asmlinkage long sys32_sysfs(int, u32, u32);
10193
10194 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
10195 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
10196 index c2ff2a1..4349184 100644
10197 --- a/arch/x86/include/asm/system.h
10198 +++ b/arch/x86/include/asm/system.h
10199 @@ -129,7 +129,7 @@ do { \
10200 "call __switch_to\n\t" \
10201 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10202 __switch_canary \
10203 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10204 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10205 "movq %%rax,%%rdi\n\t" \
10206 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10207 "jnz ret_from_fork\n\t" \
10208 @@ -140,7 +140,7 @@ do { \
10209 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10210 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10211 [_tif_fork] "i" (_TIF_FORK), \
10212 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10213 + [thread_info] "m" (current_tinfo), \
10214 [current_task] "m" (current_task) \
10215 __switch_canary_iparam \
10216 : "memory", "cc" __EXTRA_CLOBBER)
10217 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
10218 {
10219 unsigned long __limit;
10220 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10221 - return __limit + 1;
10222 + return __limit;
10223 }
10224
10225 static inline void native_clts(void)
10226 @@ -397,12 +397,12 @@ void enable_hlt(void);
10227
10228 void cpu_idle_wait(void);
10229
10230 -extern unsigned long arch_align_stack(unsigned long sp);
10231 +#define arch_align_stack(x) ((x) & ~0xfUL)
10232 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10233
10234 void default_idle(void);
10235
10236 -void stop_this_cpu(void *dummy);
10237 +void stop_this_cpu(void *dummy) __noreturn;
10238
10239 /*
10240 * Force strict CPU ordering.
10241 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
10242 index a1fe5c1..ee326d8 100644
10243 --- a/arch/x86/include/asm/thread_info.h
10244 +++ b/arch/x86/include/asm/thread_info.h
10245 @@ -10,6 +10,7 @@
10246 #include <linux/compiler.h>
10247 #include <asm/page.h>
10248 #include <asm/types.h>
10249 +#include <asm/percpu.h>
10250
10251 /*
10252 * low level task data that entry.S needs immediate access to
10253 @@ -24,7 +25,6 @@ struct exec_domain;
10254 #include <linux/atomic.h>
10255
10256 struct thread_info {
10257 - struct task_struct *task; /* main task structure */
10258 struct exec_domain *exec_domain; /* execution domain */
10259 __u32 flags; /* low level flags */
10260 __u32 status; /* thread synchronous flags */
10261 @@ -34,18 +34,12 @@ struct thread_info {
10262 mm_segment_t addr_limit;
10263 struct restart_block restart_block;
10264 void __user *sysenter_return;
10265 -#ifdef CONFIG_X86_32
10266 - unsigned long previous_esp; /* ESP of the previous stack in
10267 - case of nested (IRQ) stacks
10268 - */
10269 - __u8 supervisor_stack[0];
10270 -#endif
10271 + unsigned long lowest_stack;
10272 int uaccess_err;
10273 };
10274
10275 -#define INIT_THREAD_INFO(tsk) \
10276 +#define INIT_THREAD_INFO \
10277 { \
10278 - .task = &tsk, \
10279 .exec_domain = &default_exec_domain, \
10280 .flags = 0, \
10281 .cpu = 0, \
10282 @@ -56,7 +50,7 @@ struct thread_info {
10283 }, \
10284 }
10285
10286 -#define init_thread_info (init_thread_union.thread_info)
10287 +#define init_thread_info (init_thread_union.stack)
10288 #define init_stack (init_thread_union.stack)
10289
10290 #else /* !__ASSEMBLY__ */
10291 @@ -170,6 +164,23 @@ struct thread_info {
10292 ret; \
10293 })
10294
10295 +#ifdef __ASSEMBLY__
10296 +/* how to get the thread information struct from ASM */
10297 +#define GET_THREAD_INFO(reg) \
10298 + mov PER_CPU_VAR(current_tinfo), reg
10299 +
10300 +/* use this one if reg already contains %esp */
10301 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10302 +#else
10303 +/* how to get the thread information struct from C */
10304 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10305 +
10306 +static __always_inline struct thread_info *current_thread_info(void)
10307 +{
10308 + return percpu_read_stable(current_tinfo);
10309 +}
10310 +#endif
10311 +
10312 #ifdef CONFIG_X86_32
10313
10314 #define STACK_WARN (THREAD_SIZE/8)
10315 @@ -180,35 +191,13 @@ struct thread_info {
10316 */
10317 #ifndef __ASSEMBLY__
10318
10319 -
10320 /* how to get the current stack pointer from C */
10321 register unsigned long current_stack_pointer asm("esp") __used;
10322
10323 -/* how to get the thread information struct from C */
10324 -static inline struct thread_info *current_thread_info(void)
10325 -{
10326 - return (struct thread_info *)
10327 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10328 -}
10329 -
10330 -#else /* !__ASSEMBLY__ */
10331 -
10332 -/* how to get the thread information struct from ASM */
10333 -#define GET_THREAD_INFO(reg) \
10334 - movl $-THREAD_SIZE, reg; \
10335 - andl %esp, reg
10336 -
10337 -/* use this one if reg already contains %esp */
10338 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10339 - andl $-THREAD_SIZE, reg
10340 -
10341 #endif
10342
10343 #else /* X86_32 */
10344
10345 -#include <asm/percpu.h>
10346 -#define KERNEL_STACK_OFFSET (5*8)
10347 -
10348 /*
10349 * macros/functions for gaining access to the thread information structure
10350 * preempt_count needs to be 1 initially, until the scheduler is functional.
10351 @@ -216,21 +205,8 @@ static inline struct thread_info *current_thread_info(void)
10352 #ifndef __ASSEMBLY__
10353 DECLARE_PER_CPU(unsigned long, kernel_stack);
10354
10355 -static inline struct thread_info *current_thread_info(void)
10356 -{
10357 - struct thread_info *ti;
10358 - ti = (void *)(percpu_read_stable(kernel_stack) +
10359 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10360 - return ti;
10361 -}
10362 -
10363 -#else /* !__ASSEMBLY__ */
10364 -
10365 -/* how to get the thread information struct from ASM */
10366 -#define GET_THREAD_INFO(reg) \
10367 - movq PER_CPU_VAR(kernel_stack),reg ; \
10368 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10369 -
10370 +/* how to get the current stack pointer from C */
10371 +register unsigned long current_stack_pointer asm("rsp") __used;
10372 #endif
10373
10374 #endif /* !X86_32 */
10375 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
10376 extern void free_thread_info(struct thread_info *ti);
10377 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10378 #define arch_task_cache_init arch_task_cache_init
10379 +
10380 +#define __HAVE_THREAD_FUNCTIONS
10381 +#define task_thread_info(task) (&(task)->tinfo)
10382 +#define task_stack_page(task) ((task)->stack)
10383 +#define setup_thread_stack(p, org) do {} while (0)
10384 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10385 +
10386 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10387 +extern struct task_struct *alloc_task_struct_node(int node);
10388 +extern void free_task_struct(struct task_struct *);
10389 +
10390 #endif
10391 #endif /* _ASM_X86_THREAD_INFO_H */
10392 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
10393 index 36361bf..324f262 100644
10394 --- a/arch/x86/include/asm/uaccess.h
10395 +++ b/arch/x86/include/asm/uaccess.h
10396 @@ -7,12 +7,15 @@
10397 #include <linux/compiler.h>
10398 #include <linux/thread_info.h>
10399 #include <linux/string.h>
10400 +#include <linux/sched.h>
10401 #include <asm/asm.h>
10402 #include <asm/page.h>
10403
10404 #define VERIFY_READ 0
10405 #define VERIFY_WRITE 1
10406
10407 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10408 +
10409 /*
10410 * The fs value determines whether argument validity checking should be
10411 * performed or not. If get_fs() == USER_DS, checking is performed, with
10412 @@ -28,7 +31,12 @@
10413
10414 #define get_ds() (KERNEL_DS)
10415 #define get_fs() (current_thread_info()->addr_limit)
10416 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10417 +void __set_fs(mm_segment_t x);
10418 +void set_fs(mm_segment_t x);
10419 +#else
10420 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10421 +#endif
10422
10423 #define segment_eq(a, b) ((a).seg == (b).seg)
10424
10425 @@ -76,7 +84,33 @@
10426 * checks that the pointer is in the user space range - after calling
10427 * this function, memory access functions may still return -EFAULT.
10428 */
10429 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10430 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10431 +#define access_ok(type, addr, size) \
10432 +({ \
10433 + long __size = size; \
10434 + unsigned long __addr = (unsigned long)addr; \
10435 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10436 + unsigned long __end_ao = __addr + __size - 1; \
10437 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10438 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10439 + while(__addr_ao <= __end_ao) { \
10440 + char __c_ao; \
10441 + __addr_ao += PAGE_SIZE; \
10442 + if (__size > PAGE_SIZE) \
10443 + cond_resched(); \
10444 + if (__get_user(__c_ao, (char __user *)__addr)) \
10445 + break; \
10446 + if (type != VERIFY_WRITE) { \
10447 + __addr = __addr_ao; \
10448 + continue; \
10449 + } \
10450 + if (__put_user(__c_ao, (char __user *)__addr)) \
10451 + break; \
10452 + __addr = __addr_ao; \
10453 + } \
10454 + } \
10455 + __ret_ao; \
10456 +})
10457
10458 /*
10459 * The exception table consists of pairs of addresses: the first is the
10460 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
10461 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10462 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10463
10464 -
10465 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10466 +#define __copyuser_seg "gs;"
10467 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10468 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10469 +#else
10470 +#define __copyuser_seg
10471 +#define __COPYUSER_SET_ES
10472 +#define __COPYUSER_RESTORE_ES
10473 +#endif
10474
10475 #ifdef CONFIG_X86_32
10476 #define __put_user_asm_u64(x, addr, err, errret) \
10477 - asm volatile("1: movl %%eax,0(%2)\n" \
10478 - "2: movl %%edx,4(%2)\n" \
10479 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10480 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10481 "3:\n" \
10482 ".section .fixup,\"ax\"\n" \
10483 "4: movl %3,%0\n" \
10484 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
10485 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10486
10487 #define __put_user_asm_ex_u64(x, addr) \
10488 - asm volatile("1: movl %%eax,0(%1)\n" \
10489 - "2: movl %%edx,4(%1)\n" \
10490 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10491 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10492 "3:\n" \
10493 _ASM_EXTABLE(1b, 2b - 1b) \
10494 _ASM_EXTABLE(2b, 3b - 2b) \
10495 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
10496 __typeof__(*(ptr)) __pu_val; \
10497 __chk_user_ptr(ptr); \
10498 might_fault(); \
10499 - __pu_val = x; \
10500 + __pu_val = (x); \
10501 switch (sizeof(*(ptr))) { \
10502 case 1: \
10503 __put_user_x(1, __pu_val, ptr, __ret_pu); \
10504 @@ -373,7 +415,7 @@ do { \
10505 } while (0)
10506
10507 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10508 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10509 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10510 "2:\n" \
10511 ".section .fixup,\"ax\"\n" \
10512 "3: mov %3,%0\n" \
10513 @@ -381,7 +423,7 @@ do { \
10514 " jmp 2b\n" \
10515 ".previous\n" \
10516 _ASM_EXTABLE(1b, 3b) \
10517 - : "=r" (err), ltype(x) \
10518 + : "=r" (err), ltype (x) \
10519 : "m" (__m(addr)), "i" (errret), "0" (err))
10520
10521 #define __get_user_size_ex(x, ptr, size) \
10522 @@ -406,7 +448,7 @@ do { \
10523 } while (0)
10524
10525 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10526 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10527 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10528 "2:\n" \
10529 _ASM_EXTABLE(1b, 2b - 1b) \
10530 : ltype(x) : "m" (__m(addr)))
10531 @@ -423,13 +465,24 @@ do { \
10532 int __gu_err; \
10533 unsigned long __gu_val; \
10534 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10535 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10536 + (x) = (__typeof__(*(ptr)))__gu_val; \
10537 __gu_err; \
10538 })
10539
10540 /* FIXME: this hack is definitely wrong -AK */
10541 struct __large_struct { unsigned long buf[100]; };
10542 -#define __m(x) (*(struct __large_struct __user *)(x))
10543 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10544 +#define ____m(x) \
10545 +({ \
10546 + unsigned long ____x = (unsigned long)(x); \
10547 + if (____x < PAX_USER_SHADOW_BASE) \
10548 + ____x += PAX_USER_SHADOW_BASE; \
10549 + (void __user *)____x; \
10550 +})
10551 +#else
10552 +#define ____m(x) (x)
10553 +#endif
10554 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10555
10556 /*
10557 * Tell gcc we read from memory instead of writing: this is because
10558 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
10559 * aliasing issues.
10560 */
10561 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10562 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10563 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10564 "2:\n" \
10565 ".section .fixup,\"ax\"\n" \
10566 "3: mov %3,%0\n" \
10567 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
10568 ".previous\n" \
10569 _ASM_EXTABLE(1b, 3b) \
10570 : "=r"(err) \
10571 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10572 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10573
10574 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10575 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10576 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10577 "2:\n" \
10578 _ASM_EXTABLE(1b, 2b - 1b) \
10579 : : ltype(x), "m" (__m(addr)))
10580 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
10581 * On error, the variable @x is set to zero.
10582 */
10583
10584 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10585 +#define __get_user(x, ptr) get_user((x), (ptr))
10586 +#else
10587 #define __get_user(x, ptr) \
10588 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10589 +#endif
10590
10591 /**
10592 * __put_user: - Write a simple value into user space, with less checking.
10593 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
10594 * Returns zero on success, or -EFAULT on error.
10595 */
10596
10597 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10598 +#define __put_user(x, ptr) put_user((x), (ptr))
10599 +#else
10600 #define __put_user(x, ptr) \
10601 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10602 +#endif
10603
10604 #define __get_user_unaligned __get_user
10605 #define __put_user_unaligned __put_user
10606 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
10607 #define get_user_ex(x, ptr) do { \
10608 unsigned long __gue_val; \
10609 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10610 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10611 + (x) = (__typeof__(*(ptr)))__gue_val; \
10612 } while (0)
10613
10614 #ifdef CONFIG_X86_WP_WORKS_OK
10615 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
10616 index 566e803..89f1e60 100644
10617 --- a/arch/x86/include/asm/uaccess_32.h
10618 +++ b/arch/x86/include/asm/uaccess_32.h
10619 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
10620 static __always_inline unsigned long __must_check
10621 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10622 {
10623 + pax_track_stack();
10624 +
10625 + if ((long)n < 0)
10626 + return n;
10627 +
10628 if (__builtin_constant_p(n)) {
10629 unsigned long ret;
10630
10631 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10632 return ret;
10633 }
10634 }
10635 + if (!__builtin_constant_p(n))
10636 + check_object_size(from, n, true);
10637 return __copy_to_user_ll(to, from, n);
10638 }
10639
10640 @@ -82,12 +89,16 @@ static __always_inline unsigned long __must_check
10641 __copy_to_user(void __user *to, const void *from, unsigned long n)
10642 {
10643 might_fault();
10644 +
10645 return __copy_to_user_inatomic(to, from, n);
10646 }
10647
10648 static __always_inline unsigned long
10649 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10650 {
10651 + if ((long)n < 0)
10652 + return n;
10653 +
10654 /* Avoid zeroing the tail if the copy fails..
10655 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10656 * but as the zeroing behaviour is only significant when n is not
10657 @@ -137,6 +148,12 @@ static __always_inline unsigned long
10658 __copy_from_user(void *to, const void __user *from, unsigned long n)
10659 {
10660 might_fault();
10661 +
10662 + pax_track_stack();
10663 +
10664 + if ((long)n < 0)
10665 + return n;
10666 +
10667 if (__builtin_constant_p(n)) {
10668 unsigned long ret;
10669
10670 @@ -152,6 +169,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
10671 return ret;
10672 }
10673 }
10674 + if (!__builtin_constant_p(n))
10675 + check_object_size(to, n, false);
10676 return __copy_from_user_ll(to, from, n);
10677 }
10678
10679 @@ -159,6 +178,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
10680 const void __user *from, unsigned long n)
10681 {
10682 might_fault();
10683 +
10684 + if ((long)n < 0)
10685 + return n;
10686 +
10687 if (__builtin_constant_p(n)) {
10688 unsigned long ret;
10689
10690 @@ -181,15 +204,19 @@ static __always_inline unsigned long
10691 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10692 unsigned long n)
10693 {
10694 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10695 -}
10696 + if ((long)n < 0)
10697 + return n;
10698
10699 -unsigned long __must_check copy_to_user(void __user *to,
10700 - const void *from, unsigned long n);
10701 -unsigned long __must_check _copy_from_user(void *to,
10702 - const void __user *from,
10703 - unsigned long n);
10704 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10705 +}
10706
10707 +extern void copy_to_user_overflow(void)
10708 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10709 + __compiletime_error("copy_to_user() buffer size is not provably correct")
10710 +#else
10711 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
10712 +#endif
10713 +;
10714
10715 extern void copy_from_user_overflow(void)
10716 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
10717 @@ -199,17 +226,61 @@ extern void copy_from_user_overflow(void)
10718 #endif
10719 ;
10720
10721 -static inline unsigned long __must_check copy_from_user(void *to,
10722 - const void __user *from,
10723 - unsigned long n)
10724 +/**
10725 + * copy_to_user: - Copy a block of data into user space.
10726 + * @to: Destination address, in user space.
10727 + * @from: Source address, in kernel space.
10728 + * @n: Number of bytes to copy.
10729 + *
10730 + * Context: User context only. This function may sleep.
10731 + *
10732 + * Copy data from kernel space to user space.
10733 + *
10734 + * Returns number of bytes that could not be copied.
10735 + * On success, this will be zero.
10736 + */
10737 +static inline unsigned long __must_check
10738 +copy_to_user(void __user *to, const void *from, unsigned long n)
10739 +{
10740 + int sz = __compiletime_object_size(from);
10741 +
10742 + if (unlikely(sz != -1 && sz < n))
10743 + copy_to_user_overflow();
10744 + else if (access_ok(VERIFY_WRITE, to, n))
10745 + n = __copy_to_user(to, from, n);
10746 + return n;
10747 +}
10748 +
10749 +/**
10750 + * copy_from_user: - Copy a block of data from user space.
10751 + * @to: Destination address, in kernel space.
10752 + * @from: Source address, in user space.
10753 + * @n: Number of bytes to copy.
10754 + *
10755 + * Context: User context only. This function may sleep.
10756 + *
10757 + * Copy data from user space to kernel space.
10758 + *
10759 + * Returns number of bytes that could not be copied.
10760 + * On success, this will be zero.
10761 + *
10762 + * If some data could not be copied, this function will pad the copied
10763 + * data to the requested size using zero bytes.
10764 + */
10765 +static inline unsigned long __must_check
10766 +copy_from_user(void *to, const void __user *from, unsigned long n)
10767 {
10768 int sz = __compiletime_object_size(to);
10769
10770 - if (likely(sz == -1 || sz >= n))
10771 - n = _copy_from_user(to, from, n);
10772 - else
10773 + if (unlikely(sz != -1 && sz < n))
10774 copy_from_user_overflow();
10775 -
10776 + else if (access_ok(VERIFY_READ, from, n))
10777 + n = __copy_from_user(to, from, n);
10778 + else if ((long)n > 0) {
10779 + if (!__builtin_constant_p(n))
10780 + check_object_size(to, n, false);
10781 + memset(to, 0, n);
10782 + }
10783 return n;
10784 }
10785
10786 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
10787 index 1c66d30..c299480 100644
10788 --- a/arch/x86/include/asm/uaccess_64.h
10789 +++ b/arch/x86/include/asm/uaccess_64.h
10790 @@ -10,6 +10,9 @@
10791 #include <asm/alternative.h>
10792 #include <asm/cpufeature.h>
10793 #include <asm/page.h>
10794 +#include <asm/pgtable.h>
10795 +
10796 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10797
10798 /*
10799 * Copy To/From Userspace
10800 @@ -36,26 +39,26 @@ copy_user_generic(void *to, const void *from, unsigned len)
10801 return ret;
10802 }
10803
10804 -__must_check unsigned long
10805 -_copy_to_user(void __user *to, const void *from, unsigned len);
10806 -__must_check unsigned long
10807 -_copy_from_user(void *to, const void __user *from, unsigned len);
10808 +static __always_inline __must_check unsigned long
10809 +__copy_to_user(void __user *to, const void *from, unsigned len);
10810 +static __always_inline __must_check unsigned long
10811 +__copy_from_user(void *to, const void __user *from, unsigned len);
10812 __must_check unsigned long
10813 copy_in_user(void __user *to, const void __user *from, unsigned len);
10814
10815 static inline unsigned long __must_check copy_from_user(void *to,
10816 const void __user *from,
10817 - unsigned long n)
10818 + unsigned n)
10819 {
10820 - int sz = __compiletime_object_size(to);
10821 -
10822 might_fault();
10823 - if (likely(sz == -1 || sz >= n))
10824 - n = _copy_from_user(to, from, n);
10825 -#ifdef CONFIG_DEBUG_VM
10826 - else
10827 - WARN(1, "Buffer overflow detected!\n");
10828 -#endif
10829 +
10830 + if (access_ok(VERIFY_READ, from, n))
10831 + n = __copy_from_user(to, from, n);
10832 + else if ((int)n > 0) {
10833 + if (!__builtin_constant_p(n))
10834 + check_object_size(to, n, false);
10835 + memset(to, 0, n);
10836 + }
10837 return n;
10838 }
10839
10840 @@ -64,110 +67,198 @@ int copy_to_user(void __user *dst, const void *src, unsigned size)
10841 {
10842 might_fault();
10843
10844 - return _copy_to_user(dst, src, size);
10845 + if (access_ok(VERIFY_WRITE, dst, size))
10846 + size = __copy_to_user(dst, src, size);
10847 + return size;
10848 }
10849
10850 static __always_inline __must_check
10851 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10852 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10853 {
10854 - int ret = 0;
10855 + int sz = __compiletime_object_size(dst);
10856 + unsigned ret = 0;
10857
10858 might_fault();
10859 - if (!__builtin_constant_p(size))
10860 - return copy_user_generic(dst, (__force void *)src, size);
10861 +
10862 + pax_track_stack();
10863 +
10864 + if ((int)size < 0)
10865 + return size;
10866 +
10867 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10868 + if (!__access_ok(VERIFY_READ, src, size))
10869 + return size;
10870 +#endif
10871 +
10872 + if (unlikely(sz != -1 && sz < size)) {
10873 +#ifdef CONFIG_DEBUG_VM
10874 + WARN(1, "Buffer overflow detected!\n");
10875 +#endif
10876 + return size;
10877 + }
10878 +
10879 + if (!__builtin_constant_p(size)) {
10880 + check_object_size(dst, size, false);
10881 +
10882 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10883 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10884 + src += PAX_USER_SHADOW_BASE;
10885 +#endif
10886 +
10887 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
10888 + }
10889 switch (size) {
10890 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10891 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10892 ret, "b", "b", "=q", 1);
10893 return ret;
10894 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10895 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10896 ret, "w", "w", "=r", 2);
10897 return ret;
10898 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10899 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10900 ret, "l", "k", "=r", 4);
10901 return ret;
10902 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10903 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10904 ret, "q", "", "=r", 8);
10905 return ret;
10906 case 10:
10907 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10908 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10909 ret, "q", "", "=r", 10);
10910 if (unlikely(ret))
10911 return ret;
10912 __get_user_asm(*(u16 *)(8 + (char *)dst),
10913 - (u16 __user *)(8 + (char __user *)src),
10914 + (const u16 __user *)(8 + (const char __user *)src),
10915 ret, "w", "w", "=r", 2);
10916 return ret;
10917 case 16:
10918 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10919 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10920 ret, "q", "", "=r", 16);
10921 if (unlikely(ret))
10922 return ret;
10923 __get_user_asm(*(u64 *)(8 + (char *)dst),
10924 - (u64 __user *)(8 + (char __user *)src),
10925 + (const u64 __user *)(8 + (const char __user *)src),
10926 ret, "q", "", "=r", 8);
10927 return ret;
10928 default:
10929 - return copy_user_generic(dst, (__force void *)src, size);
10930 +
10931 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10932 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10933 + src += PAX_USER_SHADOW_BASE;
10934 +#endif
10935 +
10936 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
10937 }
10938 }
10939
10940 static __always_inline __must_check
10941 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10942 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10943 {
10944 - int ret = 0;
10945 + int sz = __compiletime_object_size(src);
10946 + unsigned ret = 0;
10947
10948 might_fault();
10949 - if (!__builtin_constant_p(size))
10950 - return copy_user_generic((__force void *)dst, src, size);
10951 +
10952 + pax_track_stack();
10953 +
10954 + if ((int)size < 0)
10955 + return size;
10956 +
10957 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10958 + if (!__access_ok(VERIFY_WRITE, dst, size))
10959 + return size;
10960 +#endif
10961 +
10962 + if (unlikely(sz != -1 && sz < size)) {
10963 +#ifdef CONFIG_DEBUG_VM
10964 + WARN(1, "Buffer overflow detected!\n");
10965 +#endif
10966 + return size;
10967 + }
10968 +
10969 + if (!__builtin_constant_p(size)) {
10970 + check_object_size(src, size, true);
10971 +
10972 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10973 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10974 + dst += PAX_USER_SHADOW_BASE;
10975 +#endif
10976 +
10977 + return copy_user_generic((__force_kernel void *)dst, src, size);
10978 + }
10979 switch (size) {
10980 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10981 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10982 ret, "b", "b", "iq", 1);
10983 return ret;
10984 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10985 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10986 ret, "w", "w", "ir", 2);
10987 return ret;
10988 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10989 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10990 ret, "l", "k", "ir", 4);
10991 return ret;
10992 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10993 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10994 ret, "q", "", "er", 8);
10995 return ret;
10996 case 10:
10997 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10998 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10999 ret, "q", "", "er", 10);
11000 if (unlikely(ret))
11001 return ret;
11002 asm("":::"memory");
11003 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
11004 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
11005 ret, "w", "w", "ir", 2);
11006 return ret;
11007 case 16:
11008 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
11009 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
11010 ret, "q", "", "er", 16);
11011 if (unlikely(ret))
11012 return ret;
11013 asm("":::"memory");
11014 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
11015 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
11016 ret, "q", "", "er", 8);
11017 return ret;
11018 default:
11019 - return copy_user_generic((__force void *)dst, src, size);
11020 +
11021 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11022 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11023 + dst += PAX_USER_SHADOW_BASE;
11024 +#endif
11025 +
11026 + return copy_user_generic((__force_kernel void *)dst, src, size);
11027 }
11028 }
11029
11030 static __always_inline __must_check
11031 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11032 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11033 {
11034 - int ret = 0;
11035 + unsigned ret = 0;
11036
11037 might_fault();
11038 - if (!__builtin_constant_p(size))
11039 - return copy_user_generic((__force void *)dst,
11040 - (__force void *)src, size);
11041 +
11042 + if ((int)size < 0)
11043 + return size;
11044 +
11045 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11046 + if (!__access_ok(VERIFY_READ, src, size))
11047 + return size;
11048 + if (!__access_ok(VERIFY_WRITE, dst, size))
11049 + return size;
11050 +#endif
11051 +
11052 + if (!__builtin_constant_p(size)) {
11053 +
11054 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11055 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11056 + src += PAX_USER_SHADOW_BASE;
11057 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11058 + dst += PAX_USER_SHADOW_BASE;
11059 +#endif
11060 +
11061 + return copy_user_generic((__force_kernel void *)dst,
11062 + (__force_kernel const void *)src, size);
11063 + }
11064 switch (size) {
11065 case 1: {
11066 u8 tmp;
11067 - __get_user_asm(tmp, (u8 __user *)src,
11068 + __get_user_asm(tmp, (const u8 __user *)src,
11069 ret, "b", "b", "=q", 1);
11070 if (likely(!ret))
11071 __put_user_asm(tmp, (u8 __user *)dst,
11072 @@ -176,7 +267,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11073 }
11074 case 2: {
11075 u16 tmp;
11076 - __get_user_asm(tmp, (u16 __user *)src,
11077 + __get_user_asm(tmp, (const u16 __user *)src,
11078 ret, "w", "w", "=r", 2);
11079 if (likely(!ret))
11080 __put_user_asm(tmp, (u16 __user *)dst,
11081 @@ -186,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11082
11083 case 4: {
11084 u32 tmp;
11085 - __get_user_asm(tmp, (u32 __user *)src,
11086 + __get_user_asm(tmp, (const u32 __user *)src,
11087 ret, "l", "k", "=r", 4);
11088 if (likely(!ret))
11089 __put_user_asm(tmp, (u32 __user *)dst,
11090 @@ -195,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11091 }
11092 case 8: {
11093 u64 tmp;
11094 - __get_user_asm(tmp, (u64 __user *)src,
11095 + __get_user_asm(tmp, (const u64 __user *)src,
11096 ret, "q", "", "=r", 8);
11097 if (likely(!ret))
11098 __put_user_asm(tmp, (u64 __user *)dst,
11099 @@ -203,8 +294,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
11100 return ret;
11101 }
11102 default:
11103 - return copy_user_generic((__force void *)dst,
11104 - (__force void *)src, size);
11105 +
11106 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11107 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11108 + src += PAX_USER_SHADOW_BASE;
11109 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11110 + dst += PAX_USER_SHADOW_BASE;
11111 +#endif
11112 +
11113 + return copy_user_generic((__force_kernel void *)dst,
11114 + (__force_kernel const void *)src, size);
11115 }
11116 }
11117
11118 @@ -221,33 +320,72 @@ __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
11119 static __must_check __always_inline int
11120 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11121 {
11122 - return copy_user_generic(dst, (__force const void *)src, size);
11123 + pax_track_stack();
11124 +
11125 + if ((int)size < 0)
11126 + return size;
11127 +
11128 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11129 + if (!__access_ok(VERIFY_READ, src, size))
11130 + return size;
11131 +
11132 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11133 + src += PAX_USER_SHADOW_BASE;
11134 +#endif
11135 +
11136 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
11137 }
11138
11139 -static __must_check __always_inline int
11140 +static __must_check __always_inline unsigned long
11141 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11142 {
11143 - return copy_user_generic((__force void *)dst, src, size);
11144 + if ((int)size < 0)
11145 + return size;
11146 +
11147 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11148 + if (!__access_ok(VERIFY_WRITE, dst, size))
11149 + return size;
11150 +
11151 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11152 + dst += PAX_USER_SHADOW_BASE;
11153 +#endif
11154 +
11155 + return copy_user_generic((__force_kernel void *)dst, src, size);
11156 }
11157
11158 -extern long __copy_user_nocache(void *dst, const void __user *src,
11159 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11160 unsigned size, int zerorest);
11161
11162 -static inline int
11163 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11164 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11165 {
11166 might_sleep();
11167 +
11168 + if ((int)size < 0)
11169 + return size;
11170 +
11171 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11172 + if (!__access_ok(VERIFY_READ, src, size))
11173 + return size;
11174 +#endif
11175 +
11176 return __copy_user_nocache(dst, src, size, 1);
11177 }
11178
11179 -static inline int
11180 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11181 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11182 unsigned size)
11183 {
11184 + if ((int)size < 0)
11185 + return size;
11186 +
11187 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11188 + if (!__access_ok(VERIFY_READ, src, size))
11189 + return size;
11190 +#endif
11191 +
11192 return __copy_user_nocache(dst, src, size, 0);
11193 }
11194
11195 -unsigned long
11196 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11197 +extern unsigned long
11198 +copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest);
11199
11200 #endif /* _ASM_X86_UACCESS_64_H */
11201 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
11202 index bb05228..d763d5b 100644
11203 --- a/arch/x86/include/asm/vdso.h
11204 +++ b/arch/x86/include/asm/vdso.h
11205 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
11206 #define VDSO32_SYMBOL(base, name) \
11207 ({ \
11208 extern const char VDSO32_##name[]; \
11209 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11210 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
11211 })
11212 #endif
11213
11214 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
11215 index d3d8590..d296b5f 100644
11216 --- a/arch/x86/include/asm/x86_init.h
11217 +++ b/arch/x86/include/asm/x86_init.h
11218 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
11219 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11220 void (*find_smp_config)(void);
11221 void (*get_smp_config)(unsigned int early);
11222 -};
11223 +} __no_const;
11224
11225 /**
11226 * struct x86_init_resources - platform specific resource related ops
11227 @@ -42,7 +42,7 @@ struct x86_init_resources {
11228 void (*probe_roms)(void);
11229 void (*reserve_resources)(void);
11230 char *(*memory_setup)(void);
11231 -};
11232 +} __no_const;
11233
11234 /**
11235 * struct x86_init_irqs - platform specific interrupt setup
11236 @@ -55,7 +55,7 @@ struct x86_init_irqs {
11237 void (*pre_vector_init)(void);
11238 void (*intr_init)(void);
11239 void (*trap_init)(void);
11240 -};
11241 +} __no_const;
11242
11243 /**
11244 * struct x86_init_oem - oem platform specific customizing functions
11245 @@ -65,7 +65,7 @@ struct x86_init_irqs {
11246 struct x86_init_oem {
11247 void (*arch_setup)(void);
11248 void (*banner)(void);
11249 -};
11250 +} __no_const;
11251
11252 /**
11253 * struct x86_init_mapping - platform specific initial kernel pagetable setup
11254 @@ -76,7 +76,7 @@ struct x86_init_oem {
11255 */
11256 struct x86_init_mapping {
11257 void (*pagetable_reserve)(u64 start, u64 end);
11258 -};
11259 +} __no_const;
11260
11261 /**
11262 * struct x86_init_paging - platform specific paging functions
11263 @@ -86,7 +86,7 @@ struct x86_init_mapping {
11264 struct x86_init_paging {
11265 void (*pagetable_setup_start)(pgd_t *base);
11266 void (*pagetable_setup_done)(pgd_t *base);
11267 -};
11268 +} __no_const;
11269
11270 /**
11271 * struct x86_init_timers - platform specific timer setup
11272 @@ -101,7 +101,7 @@ struct x86_init_timers {
11273 void (*tsc_pre_init)(void);
11274 void (*timer_init)(void);
11275 void (*wallclock_init)(void);
11276 -};
11277 +} __no_const;
11278
11279 /**
11280 * struct x86_init_iommu - platform specific iommu setup
11281 @@ -109,7 +109,7 @@ struct x86_init_timers {
11282 */
11283 struct x86_init_iommu {
11284 int (*iommu_init)(void);
11285 -};
11286 +} __no_const;
11287
11288 /**
11289 * struct x86_init_pci - platform specific pci init functions
11290 @@ -123,7 +123,7 @@ struct x86_init_pci {
11291 int (*init)(void);
11292 void (*init_irq)(void);
11293 void (*fixup_irqs)(void);
11294 -};
11295 +} __no_const;
11296
11297 /**
11298 * struct x86_init_ops - functions for platform specific setup
11299 @@ -139,7 +139,7 @@ struct x86_init_ops {
11300 struct x86_init_timers timers;
11301 struct x86_init_iommu iommu;
11302 struct x86_init_pci pci;
11303 -};
11304 +} __no_const;
11305
11306 /**
11307 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11308 @@ -147,7 +147,7 @@ struct x86_init_ops {
11309 */
11310 struct x86_cpuinit_ops {
11311 void (*setup_percpu_clockev)(void);
11312 -};
11313 +} __no_const;
11314
11315 /**
11316 * struct x86_platform_ops - platform specific runtime functions
11317 @@ -166,7 +166,7 @@ struct x86_platform_ops {
11318 bool (*is_untracked_pat_range)(u64 start, u64 end);
11319 void (*nmi_init)(void);
11320 int (*i8042_detect)(void);
11321 -};
11322 +} __no_const;
11323
11324 struct pci_dev;
11325
11326 @@ -174,7 +174,7 @@ struct x86_msi_ops {
11327 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
11328 void (*teardown_msi_irq)(unsigned int irq);
11329 void (*teardown_msi_irqs)(struct pci_dev *dev);
11330 -};
11331 +} __no_const;
11332
11333 extern struct x86_init_ops x86_init;
11334 extern struct x86_cpuinit_ops x86_cpuinit;
11335 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
11336 index c6ce245..ffbdab7 100644
11337 --- a/arch/x86/include/asm/xsave.h
11338 +++ b/arch/x86/include/asm/xsave.h
11339 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11340 {
11341 int err;
11342
11343 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11344 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11345 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11346 +#endif
11347 +
11348 /*
11349 * Clear the xsave header first, so that reserved fields are
11350 * initialized to zero.
11351 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
11352 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
11353 {
11354 int err;
11355 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
11356 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
11357 u32 lmask = mask;
11358 u32 hmask = mask >> 32;
11359
11360 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11361 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11362 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11363 +#endif
11364 +
11365 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11366 "2:\n"
11367 ".section .fixup,\"ax\"\n"
11368 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
11369 index 6a564ac..9b1340c 100644
11370 --- a/arch/x86/kernel/acpi/realmode/Makefile
11371 +++ b/arch/x86/kernel/acpi/realmode/Makefile
11372 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
11373 $(call cc-option, -fno-stack-protector) \
11374 $(call cc-option, -mpreferred-stack-boundary=2)
11375 KBUILD_CFLAGS += $(call cc-option, -m32)
11376 +ifdef CONSTIFY_PLUGIN
11377 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11378 +endif
11379 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11380 GCOV_PROFILE := n
11381
11382 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
11383 index b4fd836..4358fe3 100644
11384 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
11385 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
11386 @@ -108,6 +108,9 @@ wakeup_code:
11387 /* Do any other stuff... */
11388
11389 #ifndef CONFIG_64BIT
11390 + /* Recheck NX bit overrides (64bit path does this in trampoline */
11391 + call verify_cpu
11392 +
11393 /* This could also be done in C code... */
11394 movl pmode_cr3, %eax
11395 movl %eax, %cr3
11396 @@ -131,6 +134,7 @@ wakeup_code:
11397 movl pmode_cr0, %eax
11398 movl %eax, %cr0
11399 jmp pmode_return
11400 +# include "../../verify_cpu.S"
11401 #else
11402 pushw $0
11403 pushw trampoline_segment
11404 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
11405 index 103b6ab..2004d0a 100644
11406 --- a/arch/x86/kernel/acpi/sleep.c
11407 +++ b/arch/x86/kernel/acpi/sleep.c
11408 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
11409 header->trampoline_segment = trampoline_address() >> 4;
11410 #ifdef CONFIG_SMP
11411 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11412 +
11413 + pax_open_kernel();
11414 early_gdt_descr.address =
11415 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11416 + pax_close_kernel();
11417 +
11418 initial_gs = per_cpu_offset(smp_processor_id());
11419 #endif
11420 initial_code = (unsigned long)wakeup_long64;
11421 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
11422 index 13ab720..95d5442 100644
11423 --- a/arch/x86/kernel/acpi/wakeup_32.S
11424 +++ b/arch/x86/kernel/acpi/wakeup_32.S
11425 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11426 # and restore the stack ... but you need gdt for this to work
11427 movl saved_context_esp, %esp
11428
11429 - movl %cs:saved_magic, %eax
11430 - cmpl $0x12345678, %eax
11431 + cmpl $0x12345678, saved_magic
11432 jne bogus_magic
11433
11434 # jump to place where we left off
11435 - movl saved_eip, %eax
11436 - jmp *%eax
11437 + jmp *(saved_eip)
11438
11439 bogus_magic:
11440 jmp bogus_magic
11441 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
11442 index c638228..16dfa8d 100644
11443 --- a/arch/x86/kernel/alternative.c
11444 +++ b/arch/x86/kernel/alternative.c
11445 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
11446 */
11447 for (a = start; a < end; a++) {
11448 instr = (u8 *)&a->instr_offset + a->instr_offset;
11449 +
11450 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11451 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11452 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
11453 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11454 +#endif
11455 +
11456 replacement = (u8 *)&a->repl_offset + a->repl_offset;
11457 BUG_ON(a->replacementlen > a->instrlen);
11458 BUG_ON(a->instrlen > sizeof(insnbuf));
11459 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
11460 for (poff = start; poff < end; poff++) {
11461 u8 *ptr = (u8 *)poff + *poff;
11462
11463 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11464 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11465 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11466 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11467 +#endif
11468 +
11469 if (!*poff || ptr < text || ptr >= text_end)
11470 continue;
11471 /* turn DS segment override prefix into lock prefix */
11472 - if (*ptr == 0x3e)
11473 + if (*ktla_ktva(ptr) == 0x3e)
11474 text_poke(ptr, ((unsigned char []){0xf0}), 1);
11475 };
11476 mutex_unlock(&text_mutex);
11477 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
11478 for (poff = start; poff < end; poff++) {
11479 u8 *ptr = (u8 *)poff + *poff;
11480
11481 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
11482 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11483 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
11484 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
11485 +#endif
11486 +
11487 if (!*poff || ptr < text || ptr >= text_end)
11488 continue;
11489 /* turn lock prefix into DS segment override prefix */
11490 - if (*ptr == 0xf0)
11491 + if (*ktla_ktva(ptr) == 0xf0)
11492 text_poke(ptr, ((unsigned char []){0x3E}), 1);
11493 };
11494 mutex_unlock(&text_mutex);
11495 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
11496
11497 BUG_ON(p->len > MAX_PATCH_LEN);
11498 /* prep the buffer with the original instructions */
11499 - memcpy(insnbuf, p->instr, p->len);
11500 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11501 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11502 (unsigned long)p->instr, p->len);
11503
11504 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
11505 if (smp_alt_once)
11506 free_init_pages("SMP alternatives",
11507 (unsigned long)__smp_locks,
11508 - (unsigned long)__smp_locks_end);
11509 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11510
11511 restart_nmi();
11512 }
11513 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
11514 * instructions. And on the local CPU you need to be protected again NMI or MCE
11515 * handlers seeing an inconsistent instruction while you patch.
11516 */
11517 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
11518 +void *__kprobes text_poke_early(void *addr, const void *opcode,
11519 size_t len)
11520 {
11521 unsigned long flags;
11522 local_irq_save(flags);
11523 - memcpy(addr, opcode, len);
11524 +
11525 + pax_open_kernel();
11526 + memcpy(ktla_ktva(addr), opcode, len);
11527 sync_core();
11528 + pax_close_kernel();
11529 +
11530 local_irq_restore(flags);
11531 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11532 that causes hangs on some VIA CPUs. */
11533 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
11534 */
11535 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11536 {
11537 - unsigned long flags;
11538 - char *vaddr;
11539 + unsigned char *vaddr = ktla_ktva(addr);
11540 struct page *pages[2];
11541 - int i;
11542 + size_t i;
11543
11544 if (!core_kernel_text((unsigned long)addr)) {
11545 - pages[0] = vmalloc_to_page(addr);
11546 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11547 + pages[0] = vmalloc_to_page(vaddr);
11548 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11549 } else {
11550 - pages[0] = virt_to_page(addr);
11551 + pages[0] = virt_to_page(vaddr);
11552 WARN_ON(!PageReserved(pages[0]));
11553 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11554 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11555 }
11556 BUG_ON(!pages[0]);
11557 - local_irq_save(flags);
11558 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11559 - if (pages[1])
11560 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11561 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11562 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11563 - clear_fixmap(FIX_TEXT_POKE0);
11564 - if (pages[1])
11565 - clear_fixmap(FIX_TEXT_POKE1);
11566 - local_flush_tlb();
11567 - sync_core();
11568 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11569 - that causes hangs on some VIA CPUs. */
11570 + text_poke_early(addr, opcode, len);
11571 for (i = 0; i < len; i++)
11572 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11573 - local_irq_restore(flags);
11574 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11575 return addr;
11576 }
11577
11578 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
11579 index 52fa563..5de9d9c 100644
11580 --- a/arch/x86/kernel/apic/apic.c
11581 +++ b/arch/x86/kernel/apic/apic.c
11582 @@ -174,7 +174,7 @@ int first_system_vector = 0xfe;
11583 /*
11584 * Debug level, exported for io_apic.c
11585 */
11586 -unsigned int apic_verbosity;
11587 +int apic_verbosity;
11588
11589 int pic_mode;
11590
11591 @@ -1835,7 +1835,7 @@ void smp_error_interrupt(struct pt_regs *regs)
11592 apic_write(APIC_ESR, 0);
11593 v1 = apic_read(APIC_ESR);
11594 ack_APIC_irq();
11595 - atomic_inc(&irq_err_count);
11596 + atomic_inc_unchecked(&irq_err_count);
11597
11598 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
11599 smp_processor_id(), v0 , v1);
11600 @@ -2209,6 +2209,8 @@ static int __cpuinit apic_cluster_num(void)
11601 u16 *bios_cpu_apicid;
11602 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11603
11604 + pax_track_stack();
11605 +
11606 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11607 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11608
11609 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
11610 index 8eb863e..32e6934 100644
11611 --- a/arch/x86/kernel/apic/io_apic.c
11612 +++ b/arch/x86/kernel/apic/io_apic.c
11613 @@ -1028,7 +1028,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
11614 }
11615 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11616
11617 -void lock_vector_lock(void)
11618 +void lock_vector_lock(void) __acquires(vector_lock)
11619 {
11620 /* Used to the online set of cpus does not change
11621 * during assign_irq_vector.
11622 @@ -1036,7 +1036,7 @@ void lock_vector_lock(void)
11623 raw_spin_lock(&vector_lock);
11624 }
11625
11626 -void unlock_vector_lock(void)
11627 +void unlock_vector_lock(void) __releases(vector_lock)
11628 {
11629 raw_spin_unlock(&vector_lock);
11630 }
11631 @@ -2405,7 +2405,7 @@ static void ack_apic_edge(struct irq_data *data)
11632 ack_APIC_irq();
11633 }
11634
11635 -atomic_t irq_mis_count;
11636 +atomic_unchecked_t irq_mis_count;
11637
11638 /*
11639 * IO-APIC versions below 0x20 don't support EOI register.
11640 @@ -2513,7 +2513,7 @@ static void ack_apic_level(struct irq_data *data)
11641 * at the cpu.
11642 */
11643 if (!(v & (1 << (i & 0x1f)))) {
11644 - atomic_inc(&irq_mis_count);
11645 + atomic_inc_unchecked(&irq_mis_count);
11646
11647 eoi_ioapic_irq(irq, cfg);
11648 }
11649 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
11650 index 0371c48..54cdf63 100644
11651 --- a/arch/x86/kernel/apm_32.c
11652 +++ b/arch/x86/kernel/apm_32.c
11653 @@ -413,7 +413,7 @@ static DEFINE_MUTEX(apm_mutex);
11654 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11655 * even though they are called in protected mode.
11656 */
11657 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11658 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11659 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11660
11661 static const char driver_version[] = "1.16ac"; /* no spaces */
11662 @@ -591,7 +591,10 @@ static long __apm_bios_call(void *_call)
11663 BUG_ON(cpu != 0);
11664 gdt = get_cpu_gdt_table(cpu);
11665 save_desc_40 = gdt[0x40 / 8];
11666 +
11667 + pax_open_kernel();
11668 gdt[0x40 / 8] = bad_bios_desc;
11669 + pax_close_kernel();
11670
11671 apm_irq_save(flags);
11672 APM_DO_SAVE_SEGS;
11673 @@ -600,7 +603,11 @@ static long __apm_bios_call(void *_call)
11674 &call->esi);
11675 APM_DO_RESTORE_SEGS;
11676 apm_irq_restore(flags);
11677 +
11678 + pax_open_kernel();
11679 gdt[0x40 / 8] = save_desc_40;
11680 + pax_close_kernel();
11681 +
11682 put_cpu();
11683
11684 return call->eax & 0xff;
11685 @@ -667,7 +674,10 @@ static long __apm_bios_call_simple(void *_call)
11686 BUG_ON(cpu != 0);
11687 gdt = get_cpu_gdt_table(cpu);
11688 save_desc_40 = gdt[0x40 / 8];
11689 +
11690 + pax_open_kernel();
11691 gdt[0x40 / 8] = bad_bios_desc;
11692 + pax_close_kernel();
11693
11694 apm_irq_save(flags);
11695 APM_DO_SAVE_SEGS;
11696 @@ -675,7 +685,11 @@ static long __apm_bios_call_simple(void *_call)
11697 &call->eax);
11698 APM_DO_RESTORE_SEGS;
11699 apm_irq_restore(flags);
11700 +
11701 + pax_open_kernel();
11702 gdt[0x40 / 8] = save_desc_40;
11703 + pax_close_kernel();
11704 +
11705 put_cpu();
11706 return error;
11707 }
11708 @@ -2349,12 +2363,15 @@ static int __init apm_init(void)
11709 * code to that CPU.
11710 */
11711 gdt = get_cpu_gdt_table(0);
11712 +
11713 + pax_open_kernel();
11714 set_desc_base(&gdt[APM_CS >> 3],
11715 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11716 set_desc_base(&gdt[APM_CS_16 >> 3],
11717 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11718 set_desc_base(&gdt[APM_DS >> 3],
11719 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11720 + pax_close_kernel();
11721
11722 proc_create("apm", 0, NULL, &apm_file_ops);
11723
11724 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
11725 index 4f13faf..87db5d2 100644
11726 --- a/arch/x86/kernel/asm-offsets.c
11727 +++ b/arch/x86/kernel/asm-offsets.c
11728 @@ -33,6 +33,8 @@ void common(void) {
11729 OFFSET(TI_status, thread_info, status);
11730 OFFSET(TI_addr_limit, thread_info, addr_limit);
11731 OFFSET(TI_preempt_count, thread_info, preempt_count);
11732 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11733 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11734
11735 BLANK();
11736 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
11737 @@ -53,8 +55,26 @@ void common(void) {
11738 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11739 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11740 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11741 +
11742 +#ifdef CONFIG_PAX_KERNEXEC
11743 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11744 +#endif
11745 +
11746 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11747 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11748 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11749 +#ifdef CONFIG_X86_64
11750 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
11751 +#endif
11752 #endif
11753
11754 +#endif
11755 +
11756 + BLANK();
11757 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11758 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11759 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11760 +
11761 #ifdef CONFIG_XEN
11762 BLANK();
11763 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11764 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
11765 index e72a119..6e2955d 100644
11766 --- a/arch/x86/kernel/asm-offsets_64.c
11767 +++ b/arch/x86/kernel/asm-offsets_64.c
11768 @@ -69,6 +69,7 @@ int main(void)
11769 BLANK();
11770 #undef ENTRY
11771
11772 + DEFINE(TSS_size, sizeof(struct tss_struct));
11773 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
11774 BLANK();
11775
11776 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
11777 index 6042981..e638266 100644
11778 --- a/arch/x86/kernel/cpu/Makefile
11779 +++ b/arch/x86/kernel/cpu/Makefile
11780 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
11781 CFLAGS_REMOVE_perf_event.o = -pg
11782 endif
11783
11784 -# Make sure load_percpu_segment has no stackprotector
11785 -nostackp := $(call cc-option, -fno-stack-protector)
11786 -CFLAGS_common.o := $(nostackp)
11787 -
11788 obj-y := intel_cacheinfo.o scattered.o topology.o
11789 obj-y += proc.o capflags.o powerflags.o common.o
11790 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
11791 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
11792 index b13ed39..603286c 100644
11793 --- a/arch/x86/kernel/cpu/amd.c
11794 +++ b/arch/x86/kernel/cpu/amd.c
11795 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
11796 unsigned int size)
11797 {
11798 /* AMD errata T13 (order #21922) */
11799 - if ((c->x86 == 6)) {
11800 + if (c->x86 == 6) {
11801 /* Duron Rev A0 */
11802 if (c->x86_model == 3 && c->x86_mask == 0)
11803 size = 64;
11804 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
11805 index 6218439..0f1addc 100644
11806 --- a/arch/x86/kernel/cpu/common.c
11807 +++ b/arch/x86/kernel/cpu/common.c
11808 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
11809
11810 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11811
11812 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11813 -#ifdef CONFIG_X86_64
11814 - /*
11815 - * We need valid kernel segments for data and code in long mode too
11816 - * IRET will check the segment types kkeil 2000/10/28
11817 - * Also sysret mandates a special GDT layout
11818 - *
11819 - * TLS descriptors are currently at a different place compared to i386.
11820 - * Hopefully nobody expects them at a fixed place (Wine?)
11821 - */
11822 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11823 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11824 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11825 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11826 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11827 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11828 -#else
11829 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11830 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11831 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11832 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11833 - /*
11834 - * Segments used for calling PnP BIOS have byte granularity.
11835 - * They code segments and data segments have fixed 64k limits,
11836 - * the transfer segment sizes are set at run time.
11837 - */
11838 - /* 32-bit code */
11839 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11840 - /* 16-bit code */
11841 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11842 - /* 16-bit data */
11843 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11844 - /* 16-bit data */
11845 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11846 - /* 16-bit data */
11847 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11848 - /*
11849 - * The APM segments have byte granularity and their bases
11850 - * are set at run time. All have 64k limits.
11851 - */
11852 - /* 32-bit code */
11853 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11854 - /* 16-bit code */
11855 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11856 - /* data */
11857 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11858 -
11859 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11860 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11861 - GDT_STACK_CANARY_INIT
11862 -#endif
11863 -} };
11864 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11865 -
11866 static int __init x86_xsave_setup(char *s)
11867 {
11868 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11869 @@ -371,7 +317,7 @@ void switch_to_new_gdt(int cpu)
11870 {
11871 struct desc_ptr gdt_descr;
11872
11873 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11874 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11875 gdt_descr.size = GDT_SIZE - 1;
11876 load_gdt(&gdt_descr);
11877 /* Reload the per-cpu base */
11878 @@ -840,6 +786,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
11879 /* Filter out anything that depends on CPUID levels we don't have */
11880 filter_cpuid_features(c, true);
11881
11882 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11883 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11884 +#endif
11885 +
11886 /* If the model name is still unset, do table lookup. */
11887 if (!c->x86_model_id[0]) {
11888 const char *p;
11889 @@ -1019,6 +969,9 @@ static __init int setup_disablecpuid(char *arg)
11890 }
11891 __setup("clearcpuid=", setup_disablecpuid);
11892
11893 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11894 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11895 +
11896 #ifdef CONFIG_X86_64
11897 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11898
11899 @@ -1034,7 +987,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
11900 EXPORT_PER_CPU_SYMBOL(current_task);
11901
11902 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11903 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11904 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11905 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11906
11907 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11908 @@ -1099,7 +1052,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
11909 {
11910 memset(regs, 0, sizeof(struct pt_regs));
11911 regs->fs = __KERNEL_PERCPU;
11912 - regs->gs = __KERNEL_STACK_CANARY;
11913 + savesegment(gs, regs->gs);
11914
11915 return regs;
11916 }
11917 @@ -1154,7 +1107,7 @@ void __cpuinit cpu_init(void)
11918 int i;
11919
11920 cpu = stack_smp_processor_id();
11921 - t = &per_cpu(init_tss, cpu);
11922 + t = init_tss + cpu;
11923 oist = &per_cpu(orig_ist, cpu);
11924
11925 #ifdef CONFIG_NUMA
11926 @@ -1180,7 +1133,7 @@ void __cpuinit cpu_init(void)
11927 switch_to_new_gdt(cpu);
11928 loadsegment(fs, 0);
11929
11930 - load_idt((const struct desc_ptr *)&idt_descr);
11931 + load_idt(&idt_descr);
11932
11933 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11934 syscall_init();
11935 @@ -1189,7 +1142,6 @@ void __cpuinit cpu_init(void)
11936 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11937 barrier();
11938
11939 - x86_configure_nx();
11940 if (cpu != 0)
11941 enable_x2apic();
11942
11943 @@ -1243,7 +1195,7 @@ void __cpuinit cpu_init(void)
11944 {
11945 int cpu = smp_processor_id();
11946 struct task_struct *curr = current;
11947 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11948 + struct tss_struct *t = init_tss + cpu;
11949 struct thread_struct *thread = &curr->thread;
11950
11951 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11952 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
11953 index ed6086e..a1dcf29 100644
11954 --- a/arch/x86/kernel/cpu/intel.c
11955 +++ b/arch/x86/kernel/cpu/intel.c
11956 @@ -172,7 +172,7 @@ static void __cpuinit trap_init_f00f_bug(void)
11957 * Update the IDT descriptor and reload the IDT so that
11958 * it uses the read-only mapped virtual address.
11959 */
11960 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11961 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11962 load_idt(&idt_descr);
11963 }
11964 #endif
11965 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
11966 index 0ed633c..82cef2a 100644
11967 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
11968 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
11969 @@ -215,7 +215,9 @@ static int inject_init(void)
11970 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
11971 return -ENOMEM;
11972 printk(KERN_INFO "Machine check injector initialized\n");
11973 - mce_chrdev_ops.write = mce_write;
11974 + pax_open_kernel();
11975 + *(void **)&mce_chrdev_ops.write = mce_write;
11976 + pax_close_kernel();
11977 register_die_notifier(&mce_raise_nb);
11978 return 0;
11979 }
11980 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
11981 index 08363b0..ee26113 100644
11982 --- a/arch/x86/kernel/cpu/mcheck/mce.c
11983 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
11984 @@ -42,6 +42,7 @@
11985 #include <asm/processor.h>
11986 #include <asm/mce.h>
11987 #include <asm/msr.h>
11988 +#include <asm/local.h>
11989
11990 #include "mce-internal.h"
11991
11992 @@ -205,7 +206,7 @@ static void print_mce(struct mce *m)
11993 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11994 m->cs, m->ip);
11995
11996 - if (m->cs == __KERNEL_CS)
11997 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11998 print_symbol("{%s}", m->ip);
11999 pr_cont("\n");
12000 }
12001 @@ -233,10 +234,10 @@ static void print_mce(struct mce *m)
12002
12003 #define PANIC_TIMEOUT 5 /* 5 seconds */
12004
12005 -static atomic_t mce_paniced;
12006 +static atomic_unchecked_t mce_paniced;
12007
12008 static int fake_panic;
12009 -static atomic_t mce_fake_paniced;
12010 +static atomic_unchecked_t mce_fake_paniced;
12011
12012 /* Panic in progress. Enable interrupts and wait for final IPI */
12013 static void wait_for_panic(void)
12014 @@ -260,7 +261,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12015 /*
12016 * Make sure only one CPU runs in machine check panic
12017 */
12018 - if (atomic_inc_return(&mce_paniced) > 1)
12019 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12020 wait_for_panic();
12021 barrier();
12022
12023 @@ -268,7 +269,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
12024 console_verbose();
12025 } else {
12026 /* Don't log too much for fake panic */
12027 - if (atomic_inc_return(&mce_fake_paniced) > 1)
12028 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12029 return;
12030 }
12031 /* First print corrected ones that are still unlogged */
12032 @@ -610,7 +611,7 @@ static int mce_timed_out(u64 *t)
12033 * might have been modified by someone else.
12034 */
12035 rmb();
12036 - if (atomic_read(&mce_paniced))
12037 + if (atomic_read_unchecked(&mce_paniced))
12038 wait_for_panic();
12039 if (!monarch_timeout)
12040 goto out;
12041 @@ -1392,7 +1393,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
12042 }
12043
12044 /* Call the installed machine check handler for this CPU setup. */
12045 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
12046 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
12047 unexpected_machine_check;
12048
12049 /*
12050 @@ -1415,7 +1416,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12051 return;
12052 }
12053
12054 + pax_open_kernel();
12055 machine_check_vector = do_machine_check;
12056 + pax_close_kernel();
12057
12058 __mcheck_cpu_init_generic();
12059 __mcheck_cpu_init_vendor(c);
12060 @@ -1429,7 +1432,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
12061 */
12062
12063 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
12064 -static int mce_chrdev_open_count; /* #times opened */
12065 +static local_t mce_chrdev_open_count; /* #times opened */
12066 static int mce_chrdev_open_exclu; /* already open exclusive? */
12067
12068 static int mce_chrdev_open(struct inode *inode, struct file *file)
12069 @@ -1437,7 +1440,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12070 spin_lock(&mce_chrdev_state_lock);
12071
12072 if (mce_chrdev_open_exclu ||
12073 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
12074 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
12075 spin_unlock(&mce_chrdev_state_lock);
12076
12077 return -EBUSY;
12078 @@ -1445,7 +1448,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
12079
12080 if (file->f_flags & O_EXCL)
12081 mce_chrdev_open_exclu = 1;
12082 - mce_chrdev_open_count++;
12083 + local_inc(&mce_chrdev_open_count);
12084
12085 spin_unlock(&mce_chrdev_state_lock);
12086
12087 @@ -1456,7 +1459,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
12088 {
12089 spin_lock(&mce_chrdev_state_lock);
12090
12091 - mce_chrdev_open_count--;
12092 + local_dec(&mce_chrdev_open_count);
12093 mce_chrdev_open_exclu = 0;
12094
12095 spin_unlock(&mce_chrdev_state_lock);
12096 @@ -2147,7 +2150,7 @@ struct dentry *mce_get_debugfs_dir(void)
12097 static void mce_reset(void)
12098 {
12099 cpu_missing = 0;
12100 - atomic_set(&mce_fake_paniced, 0);
12101 + atomic_set_unchecked(&mce_fake_paniced, 0);
12102 atomic_set(&mce_executing, 0);
12103 atomic_set(&mce_callin, 0);
12104 atomic_set(&global_nwo, 0);
12105 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
12106 index 5c0e653..1e82c7c 100644
12107 --- a/arch/x86/kernel/cpu/mcheck/p5.c
12108 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
12109 @@ -50,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
12110 if (!cpu_has(c, X86_FEATURE_MCE))
12111 return;
12112
12113 + pax_open_kernel();
12114 machine_check_vector = pentium_machine_check;
12115 + pax_close_kernel();
12116 /* Make sure the vector pointer is visible before we enable MCEs: */
12117 wmb();
12118
12119 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
12120 index 54060f5..e6ba93d 100644
12121 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
12122 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
12123 @@ -24,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
12124 {
12125 u32 lo, hi;
12126
12127 + pax_open_kernel();
12128 machine_check_vector = winchip_machine_check;
12129 + pax_close_kernel();
12130 /* Make sure the vector pointer is visible before we enable MCEs: */
12131 wmb();
12132
12133 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
12134 index 6b96110..0da73eb 100644
12135 --- a/arch/x86/kernel/cpu/mtrr/main.c
12136 +++ b/arch/x86/kernel/cpu/mtrr/main.c
12137 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
12138 u64 size_or_mask, size_and_mask;
12139 static bool mtrr_aps_delayed_init;
12140
12141 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12142 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12143
12144 const struct mtrr_ops *mtrr_if;
12145
12146 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
12147 index df5e41f..816c719 100644
12148 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
12149 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
12150 @@ -25,7 +25,7 @@ struct mtrr_ops {
12151 int (*validate_add_page)(unsigned long base, unsigned long size,
12152 unsigned int type);
12153 int (*have_wrcomb)(void);
12154 -};
12155 +} __do_const;
12156
12157 extern int generic_get_free_region(unsigned long base, unsigned long size,
12158 int replace_reg);
12159 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
12160 index cfa62ec..9250dd7 100644
12161 --- a/arch/x86/kernel/cpu/perf_event.c
12162 +++ b/arch/x86/kernel/cpu/perf_event.c
12163 @@ -795,6 +795,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
12164 int i, j, w, wmax, num = 0;
12165 struct hw_perf_event *hwc;
12166
12167 + pax_track_stack();
12168 +
12169 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
12170
12171 for (i = 0; i < n; i++) {
12172 @@ -1919,7 +1921,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
12173 break;
12174
12175 perf_callchain_store(entry, frame.return_address);
12176 - fp = frame.next_frame;
12177 + fp = (const void __force_user *)frame.next_frame;
12178 }
12179 }
12180
12181 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
12182 index 764c7c2..c5d9c7b 100644
12183 --- a/arch/x86/kernel/crash.c
12184 +++ b/arch/x86/kernel/crash.c
12185 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
12186 regs = args->regs;
12187
12188 #ifdef CONFIG_X86_32
12189 - if (!user_mode_vm(regs)) {
12190 + if (!user_mode(regs)) {
12191 crash_fixup_ss_esp(&fixed_regs, regs);
12192 regs = &fixed_regs;
12193 }
12194 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
12195 index 37250fe..bf2ec74 100644
12196 --- a/arch/x86/kernel/doublefault_32.c
12197 +++ b/arch/x86/kernel/doublefault_32.c
12198 @@ -11,7 +11,7 @@
12199
12200 #define DOUBLEFAULT_STACKSIZE (1024)
12201 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12202 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12203 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12204
12205 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12206
12207 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12208 unsigned long gdt, tss;
12209
12210 store_gdt(&gdt_desc);
12211 - gdt = gdt_desc.address;
12212 + gdt = (unsigned long)gdt_desc.address;
12213
12214 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12215
12216 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
12217 /* 0x2 bit is always set */
12218 .flags = X86_EFLAGS_SF | 0x2,
12219 .sp = STACK_START,
12220 - .es = __USER_DS,
12221 + .es = __KERNEL_DS,
12222 .cs = __KERNEL_CS,
12223 .ss = __KERNEL_DS,
12224 - .ds = __USER_DS,
12225 + .ds = __KERNEL_DS,
12226 .fs = __KERNEL_PERCPU,
12227
12228 .__cr3 = __pa_nodebug(swapper_pg_dir),
12229 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
12230 index 1aae78f..aab3a3d 100644
12231 --- a/arch/x86/kernel/dumpstack.c
12232 +++ b/arch/x86/kernel/dumpstack.c
12233 @@ -2,6 +2,9 @@
12234 * Copyright (C) 1991, 1992 Linus Torvalds
12235 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12236 */
12237 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12238 +#define __INCLUDED_BY_HIDESYM 1
12239 +#endif
12240 #include <linux/kallsyms.h>
12241 #include <linux/kprobes.h>
12242 #include <linux/uaccess.h>
12243 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
12244 static void
12245 print_ftrace_graph_addr(unsigned long addr, void *data,
12246 const struct stacktrace_ops *ops,
12247 - struct thread_info *tinfo, int *graph)
12248 + struct task_struct *task, int *graph)
12249 {
12250 - struct task_struct *task = tinfo->task;
12251 unsigned long ret_addr;
12252 int index = task->curr_ret_stack;
12253
12254 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12255 static inline void
12256 print_ftrace_graph_addr(unsigned long addr, void *data,
12257 const struct stacktrace_ops *ops,
12258 - struct thread_info *tinfo, int *graph)
12259 + struct task_struct *task, int *graph)
12260 { }
12261 #endif
12262
12263 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
12264 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12265 */
12266
12267 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12268 - void *p, unsigned int size, void *end)
12269 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12270 {
12271 - void *t = tinfo;
12272 if (end) {
12273 if (p < end && p >= (end-THREAD_SIZE))
12274 return 1;
12275 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
12276 }
12277
12278 unsigned long
12279 -print_context_stack(struct thread_info *tinfo,
12280 +print_context_stack(struct task_struct *task, void *stack_start,
12281 unsigned long *stack, unsigned long bp,
12282 const struct stacktrace_ops *ops, void *data,
12283 unsigned long *end, int *graph)
12284 {
12285 struct stack_frame *frame = (struct stack_frame *)bp;
12286
12287 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12288 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12289 unsigned long addr;
12290
12291 addr = *stack;
12292 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
12293 } else {
12294 ops->address(data, addr, 0);
12295 }
12296 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12297 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12298 }
12299 stack++;
12300 }
12301 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
12302 EXPORT_SYMBOL_GPL(print_context_stack);
12303
12304 unsigned long
12305 -print_context_stack_bp(struct thread_info *tinfo,
12306 +print_context_stack_bp(struct task_struct *task, void *stack_start,
12307 unsigned long *stack, unsigned long bp,
12308 const struct stacktrace_ops *ops, void *data,
12309 unsigned long *end, int *graph)
12310 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12311 struct stack_frame *frame = (struct stack_frame *)bp;
12312 unsigned long *ret_addr = &frame->return_address;
12313
12314 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
12315 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
12316 unsigned long addr = *ret_addr;
12317
12318 if (!__kernel_text_address(addr))
12319 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
12320 ops->address(data, addr, 1);
12321 frame = frame->next_frame;
12322 ret_addr = &frame->return_address;
12323 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12324 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12325 }
12326
12327 return (unsigned long)frame;
12328 @@ -186,7 +186,7 @@ void dump_stack(void)
12329
12330 bp = stack_frame(current, NULL);
12331 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12332 - current->pid, current->comm, print_tainted(),
12333 + task_pid_nr(current), current->comm, print_tainted(),
12334 init_utsname()->release,
12335 (int)strcspn(init_utsname()->version, " "),
12336 init_utsname()->version);
12337 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
12338 }
12339 EXPORT_SYMBOL_GPL(oops_begin);
12340
12341 +extern void gr_handle_kernel_exploit(void);
12342 +
12343 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12344 {
12345 if (regs && kexec_should_crash(current))
12346 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12347 panic("Fatal exception in interrupt");
12348 if (panic_on_oops)
12349 panic("Fatal exception");
12350 - do_exit(signr);
12351 +
12352 + gr_handle_kernel_exploit();
12353 +
12354 + do_group_exit(signr);
12355 }
12356
12357 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12358 @@ -269,7 +274,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12359
12360 show_registers(regs);
12361 #ifdef CONFIG_X86_32
12362 - if (user_mode_vm(regs)) {
12363 + if (user_mode(regs)) {
12364 sp = regs->sp;
12365 ss = regs->ss & 0xffff;
12366 } else {
12367 @@ -297,7 +302,7 @@ void die(const char *str, struct pt_regs *regs, long err)
12368 unsigned long flags = oops_begin();
12369 int sig = SIGSEGV;
12370
12371 - if (!user_mode_vm(regs))
12372 + if (!user_mode(regs))
12373 report_bug(regs->ip, regs);
12374
12375 if (__die(str, regs, err))
12376 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
12377 index 3b97a80..667ce7a 100644
12378 --- a/arch/x86/kernel/dumpstack_32.c
12379 +++ b/arch/x86/kernel/dumpstack_32.c
12380 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12381 bp = stack_frame(task, regs);
12382
12383 for (;;) {
12384 - struct thread_info *context;
12385 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12386
12387 - context = (struct thread_info *)
12388 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12389 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
12390 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12391
12392 - stack = (unsigned long *)context->previous_esp;
12393 - if (!stack)
12394 + if (stack_start == task_stack_page(task))
12395 break;
12396 + stack = *(unsigned long **)stack_start;
12397 if (ops->stack(data, "IRQ") < 0)
12398 break;
12399 touch_nmi_watchdog();
12400 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
12401 * When in-kernel, we also print out the stack and code at the
12402 * time of the fault..
12403 */
12404 - if (!user_mode_vm(regs)) {
12405 + if (!user_mode(regs)) {
12406 unsigned int code_prologue = code_bytes * 43 / 64;
12407 unsigned int code_len = code_bytes;
12408 unsigned char c;
12409 u8 *ip;
12410 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12411
12412 printk(KERN_EMERG "Stack:\n");
12413 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
12414
12415 printk(KERN_EMERG "Code: ");
12416
12417 - ip = (u8 *)regs->ip - code_prologue;
12418 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12419 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12420 /* try starting at IP */
12421 - ip = (u8 *)regs->ip;
12422 + ip = (u8 *)regs->ip + cs_base;
12423 code_len = code_len - code_prologue + 1;
12424 }
12425 for (i = 0; i < code_len; i++, ip++) {
12426 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
12427 printk(" Bad EIP value.");
12428 break;
12429 }
12430 - if (ip == (u8 *)regs->ip)
12431 + if (ip == (u8 *)regs->ip + cs_base)
12432 printk("<%02x> ", c);
12433 else
12434 printk("%02x ", c);
12435 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
12436 {
12437 unsigned short ud2;
12438
12439 + ip = ktla_ktva(ip);
12440 if (ip < PAGE_OFFSET)
12441 return 0;
12442 if (probe_kernel_address((unsigned short *)ip, ud2))
12443 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
12444
12445 return ud2 == 0x0b0f;
12446 }
12447 +
12448 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12449 +void pax_check_alloca(unsigned long size)
12450 +{
12451 + unsigned long sp = (unsigned long)&sp, stack_left;
12452 +
12453 + /* all kernel stacks are of the same size */
12454 + stack_left = sp & (THREAD_SIZE - 1);
12455 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12456 +}
12457 +EXPORT_SYMBOL(pax_check_alloca);
12458 +#endif
12459 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
12460 index 19853ad..508ca79 100644
12461 --- a/arch/x86/kernel/dumpstack_64.c
12462 +++ b/arch/x86/kernel/dumpstack_64.c
12463 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12464 unsigned long *irq_stack_end =
12465 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12466 unsigned used = 0;
12467 - struct thread_info *tinfo;
12468 int graph = 0;
12469 unsigned long dummy;
12470 + void *stack_start;
12471
12472 if (!task)
12473 task = current;
12474 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12475 * current stack address. If the stacks consist of nested
12476 * exceptions
12477 */
12478 - tinfo = task_thread_info(task);
12479 for (;;) {
12480 char *id;
12481 unsigned long *estack_end;
12482 +
12483 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12484 &used, &id);
12485
12486 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12487 if (ops->stack(data, id) < 0)
12488 break;
12489
12490 - bp = ops->walk_stack(tinfo, stack, bp, ops,
12491 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12492 data, estack_end, &graph);
12493 ops->stack(data, "<EOE>");
12494 /*
12495 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12496 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
12497 if (ops->stack(data, "IRQ") < 0)
12498 break;
12499 - bp = ops->walk_stack(tinfo, stack, bp,
12500 + bp = ops->walk_stack(task, irq_stack, stack, bp,
12501 ops, data, irq_stack_end, &graph);
12502 /*
12503 * We link to the next stack (which would be
12504 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
12505 /*
12506 * This handles the process stack:
12507 */
12508 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12509 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12510 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12511 put_cpu();
12512 }
12513 EXPORT_SYMBOL(dump_trace);
12514 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
12515
12516 return ud2 == 0x0b0f;
12517 }
12518 +
12519 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12520 +void pax_check_alloca(unsigned long size)
12521 +{
12522 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
12523 + unsigned cpu, used;
12524 + char *id;
12525 +
12526 + /* check the process stack first */
12527 + stack_start = (unsigned long)task_stack_page(current);
12528 + stack_end = stack_start + THREAD_SIZE;
12529 + if (likely(stack_start <= sp && sp < stack_end)) {
12530 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
12531 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12532 + return;
12533 + }
12534 +
12535 + cpu = get_cpu();
12536 +
12537 + /* check the irq stacks */
12538 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
12539 + stack_start = stack_end - IRQ_STACK_SIZE;
12540 + if (stack_start <= sp && sp < stack_end) {
12541 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
12542 + put_cpu();
12543 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12544 + return;
12545 + }
12546 +
12547 + /* check the exception stacks */
12548 + used = 0;
12549 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
12550 + stack_start = stack_end - EXCEPTION_STKSZ;
12551 + if (stack_end && stack_start <= sp && sp < stack_end) {
12552 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
12553 + put_cpu();
12554 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
12555 + return;
12556 + }
12557 +
12558 + put_cpu();
12559 +
12560 + /* unknown stack */
12561 + BUG();
12562 +}
12563 +EXPORT_SYMBOL(pax_check_alloca);
12564 +#endif
12565 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
12566 index cd28a35..2601699 100644
12567 --- a/arch/x86/kernel/early_printk.c
12568 +++ b/arch/x86/kernel/early_printk.c
12569 @@ -7,6 +7,7 @@
12570 #include <linux/pci_regs.h>
12571 #include <linux/pci_ids.h>
12572 #include <linux/errno.h>
12573 +#include <linux/sched.h>
12574 #include <asm/io.h>
12575 #include <asm/processor.h>
12576 #include <asm/fcntl.h>
12577 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char *fmt, ...)
12578 int n;
12579 va_list ap;
12580
12581 + pax_track_stack();
12582 +
12583 va_start(ap, fmt);
12584 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12585 early_console->write(early_console, buf, n);
12586 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
12587 index f3f6f53..0841b66 100644
12588 --- a/arch/x86/kernel/entry_32.S
12589 +++ b/arch/x86/kernel/entry_32.S
12590 @@ -186,13 +186,146 @@
12591 /*CFI_REL_OFFSET gs, PT_GS*/
12592 .endm
12593 .macro SET_KERNEL_GS reg
12594 +
12595 +#ifdef CONFIG_CC_STACKPROTECTOR
12596 movl $(__KERNEL_STACK_CANARY), \reg
12597 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12598 + movl $(__USER_DS), \reg
12599 +#else
12600 + xorl \reg, \reg
12601 +#endif
12602 +
12603 movl \reg, %gs
12604 .endm
12605
12606 #endif /* CONFIG_X86_32_LAZY_GS */
12607
12608 -.macro SAVE_ALL
12609 +.macro pax_enter_kernel
12610 +#ifdef CONFIG_PAX_KERNEXEC
12611 + call pax_enter_kernel
12612 +#endif
12613 +.endm
12614 +
12615 +.macro pax_exit_kernel
12616 +#ifdef CONFIG_PAX_KERNEXEC
12617 + call pax_exit_kernel
12618 +#endif
12619 +.endm
12620 +
12621 +#ifdef CONFIG_PAX_KERNEXEC
12622 +ENTRY(pax_enter_kernel)
12623 +#ifdef CONFIG_PARAVIRT
12624 + pushl %eax
12625 + pushl %ecx
12626 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12627 + mov %eax, %esi
12628 +#else
12629 + mov %cr0, %esi
12630 +#endif
12631 + bts $16, %esi
12632 + jnc 1f
12633 + mov %cs, %esi
12634 + cmp $__KERNEL_CS, %esi
12635 + jz 3f
12636 + ljmp $__KERNEL_CS, $3f
12637 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12638 +2:
12639 +#ifdef CONFIG_PARAVIRT
12640 + mov %esi, %eax
12641 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12642 +#else
12643 + mov %esi, %cr0
12644 +#endif
12645 +3:
12646 +#ifdef CONFIG_PARAVIRT
12647 + popl %ecx
12648 + popl %eax
12649 +#endif
12650 + ret
12651 +ENDPROC(pax_enter_kernel)
12652 +
12653 +ENTRY(pax_exit_kernel)
12654 +#ifdef CONFIG_PARAVIRT
12655 + pushl %eax
12656 + pushl %ecx
12657 +#endif
12658 + mov %cs, %esi
12659 + cmp $__KERNEXEC_KERNEL_CS, %esi
12660 + jnz 2f
12661 +#ifdef CONFIG_PARAVIRT
12662 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12663 + mov %eax, %esi
12664 +#else
12665 + mov %cr0, %esi
12666 +#endif
12667 + btr $16, %esi
12668 + ljmp $__KERNEL_CS, $1f
12669 +1:
12670 +#ifdef CONFIG_PARAVIRT
12671 + mov %esi, %eax
12672 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12673 +#else
12674 + mov %esi, %cr0
12675 +#endif
12676 +2:
12677 +#ifdef CONFIG_PARAVIRT
12678 + popl %ecx
12679 + popl %eax
12680 +#endif
12681 + ret
12682 +ENDPROC(pax_exit_kernel)
12683 +#endif
12684 +
12685 +.macro pax_erase_kstack
12686 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12687 + call pax_erase_kstack
12688 +#endif
12689 +.endm
12690 +
12691 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12692 +/*
12693 + * ebp: thread_info
12694 + * ecx, edx: can be clobbered
12695 + */
12696 +ENTRY(pax_erase_kstack)
12697 + pushl %edi
12698 + pushl %eax
12699 +
12700 + mov TI_lowest_stack(%ebp), %edi
12701 + mov $-0xBEEF, %eax
12702 + std
12703 +
12704 +1: mov %edi, %ecx
12705 + and $THREAD_SIZE_asm - 1, %ecx
12706 + shr $2, %ecx
12707 + repne scasl
12708 + jecxz 2f
12709 +
12710 + cmp $2*16, %ecx
12711 + jc 2f
12712 +
12713 + mov $2*16, %ecx
12714 + repe scasl
12715 + jecxz 2f
12716 + jne 1b
12717 +
12718 +2: cld
12719 + mov %esp, %ecx
12720 + sub %edi, %ecx
12721 + shr $2, %ecx
12722 + rep stosl
12723 +
12724 + mov TI_task_thread_sp0(%ebp), %edi
12725 + sub $128, %edi
12726 + mov %edi, TI_lowest_stack(%ebp)
12727 +
12728 + popl %eax
12729 + popl %edi
12730 + ret
12731 +ENDPROC(pax_erase_kstack)
12732 +#endif
12733 +
12734 +.macro __SAVE_ALL _DS
12735 cld
12736 PUSH_GS
12737 pushl_cfi %fs
12738 @@ -215,7 +348,7 @@
12739 CFI_REL_OFFSET ecx, 0
12740 pushl_cfi %ebx
12741 CFI_REL_OFFSET ebx, 0
12742 - movl $(__USER_DS), %edx
12743 + movl $\_DS, %edx
12744 movl %edx, %ds
12745 movl %edx, %es
12746 movl $(__KERNEL_PERCPU), %edx
12747 @@ -223,6 +356,15 @@
12748 SET_KERNEL_GS %edx
12749 .endm
12750
12751 +.macro SAVE_ALL
12752 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12753 + __SAVE_ALL __KERNEL_DS
12754 + pax_enter_kernel
12755 +#else
12756 + __SAVE_ALL __USER_DS
12757 +#endif
12758 +.endm
12759 +
12760 .macro RESTORE_INT_REGS
12761 popl_cfi %ebx
12762 CFI_RESTORE ebx
12763 @@ -308,7 +450,7 @@ ENTRY(ret_from_fork)
12764 popfl_cfi
12765 jmp syscall_exit
12766 CFI_ENDPROC
12767 -END(ret_from_fork)
12768 +ENDPROC(ret_from_fork)
12769
12770 /*
12771 * Interrupt exit functions should be protected against kprobes
12772 @@ -333,7 +475,15 @@ check_userspace:
12773 movb PT_CS(%esp), %al
12774 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12775 cmpl $USER_RPL, %eax
12776 +
12777 +#ifdef CONFIG_PAX_KERNEXEC
12778 + jae resume_userspace
12779 +
12780 + PAX_EXIT_KERNEL
12781 + jmp resume_kernel
12782 +#else
12783 jb resume_kernel # not returning to v8086 or userspace
12784 +#endif
12785
12786 ENTRY(resume_userspace)
12787 LOCKDEP_SYS_EXIT
12788 @@ -345,8 +495,8 @@ ENTRY(resume_userspace)
12789 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12790 # int/exception return?
12791 jne work_pending
12792 - jmp restore_all
12793 -END(ret_from_exception)
12794 + jmp restore_all_pax
12795 +ENDPROC(ret_from_exception)
12796
12797 #ifdef CONFIG_PREEMPT
12798 ENTRY(resume_kernel)
12799 @@ -361,7 +511,7 @@ need_resched:
12800 jz restore_all
12801 call preempt_schedule_irq
12802 jmp need_resched
12803 -END(resume_kernel)
12804 +ENDPROC(resume_kernel)
12805 #endif
12806 CFI_ENDPROC
12807 /*
12808 @@ -395,23 +545,34 @@ sysenter_past_esp:
12809 /*CFI_REL_OFFSET cs, 0*/
12810 /*
12811 * Push current_thread_info()->sysenter_return to the stack.
12812 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12813 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12814 */
12815 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
12816 + pushl_cfi $0
12817 CFI_REL_OFFSET eip, 0
12818
12819 pushl_cfi %eax
12820 SAVE_ALL
12821 + GET_THREAD_INFO(%ebp)
12822 + movl TI_sysenter_return(%ebp),%ebp
12823 + movl %ebp,PT_EIP(%esp)
12824 ENABLE_INTERRUPTS(CLBR_NONE)
12825
12826 /*
12827 * Load the potential sixth argument from user stack.
12828 * Careful about security.
12829 */
12830 + movl PT_OLDESP(%esp),%ebp
12831 +
12832 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12833 + mov PT_OLDSS(%esp),%ds
12834 +1: movl %ds:(%ebp),%ebp
12835 + push %ss
12836 + pop %ds
12837 +#else
12838 cmpl $__PAGE_OFFSET-3,%ebp
12839 jae syscall_fault
12840 1: movl (%ebp),%ebp
12841 +#endif
12842 +
12843 movl %ebp,PT_EBP(%esp)
12844 .section __ex_table,"a"
12845 .align 4
12846 @@ -434,12 +595,24 @@ sysenter_do_call:
12847 testl $_TIF_ALLWORK_MASK, %ecx
12848 jne sysexit_audit
12849 sysenter_exit:
12850 +
12851 +#ifdef CONFIG_PAX_RANDKSTACK
12852 + pushl_cfi %eax
12853 + movl %esp, %eax
12854 + call pax_randomize_kstack
12855 + popl_cfi %eax
12856 +#endif
12857 +
12858 + pax_erase_kstack
12859 +
12860 /* if something modifies registers it must also disable sysexit */
12861 movl PT_EIP(%esp), %edx
12862 movl PT_OLDESP(%esp), %ecx
12863 xorl %ebp,%ebp
12864 TRACE_IRQS_ON
12865 1: mov PT_FS(%esp), %fs
12866 +2: mov PT_DS(%esp), %ds
12867 +3: mov PT_ES(%esp), %es
12868 PTGS_TO_GS
12869 ENABLE_INTERRUPTS_SYSEXIT
12870
12871 @@ -456,6 +629,9 @@ sysenter_audit:
12872 movl %eax,%edx /* 2nd arg: syscall number */
12873 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12874 call audit_syscall_entry
12875 +
12876 + pax_erase_kstack
12877 +
12878 pushl_cfi %ebx
12879 movl PT_EAX(%esp),%eax /* reload syscall number */
12880 jmp sysenter_do_call
12881 @@ -482,11 +658,17 @@ sysexit_audit:
12882
12883 CFI_ENDPROC
12884 .pushsection .fixup,"ax"
12885 -2: movl $0,PT_FS(%esp)
12886 +4: movl $0,PT_FS(%esp)
12887 + jmp 1b
12888 +5: movl $0,PT_DS(%esp)
12889 + jmp 1b
12890 +6: movl $0,PT_ES(%esp)
12891 jmp 1b
12892 .section __ex_table,"a"
12893 .align 4
12894 - .long 1b,2b
12895 + .long 1b,4b
12896 + .long 2b,5b
12897 + .long 3b,6b
12898 .popsection
12899 PTGS_TO_GS_EX
12900 ENDPROC(ia32_sysenter_target)
12901 @@ -519,6 +701,15 @@ syscall_exit:
12902 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12903 jne syscall_exit_work
12904
12905 +restore_all_pax:
12906 +
12907 +#ifdef CONFIG_PAX_RANDKSTACK
12908 + movl %esp, %eax
12909 + call pax_randomize_kstack
12910 +#endif
12911 +
12912 + pax_erase_kstack
12913 +
12914 restore_all:
12915 TRACE_IRQS_IRET
12916 restore_all_notrace:
12917 @@ -578,14 +769,34 @@ ldt_ss:
12918 * compensating for the offset by changing to the ESPFIX segment with
12919 * a base address that matches for the difference.
12920 */
12921 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
12922 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
12923 mov %esp, %edx /* load kernel esp */
12924 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12925 mov %dx, %ax /* eax: new kernel esp */
12926 sub %eax, %edx /* offset (low word is 0) */
12927 +#ifdef CONFIG_SMP
12928 + movl PER_CPU_VAR(cpu_number), %ebx
12929 + shll $PAGE_SHIFT_asm, %ebx
12930 + addl $cpu_gdt_table, %ebx
12931 +#else
12932 + movl $cpu_gdt_table, %ebx
12933 +#endif
12934 shr $16, %edx
12935 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
12936 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
12937 +
12938 +#ifdef CONFIG_PAX_KERNEXEC
12939 + mov %cr0, %esi
12940 + btr $16, %esi
12941 + mov %esi, %cr0
12942 +#endif
12943 +
12944 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
12945 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
12946 +
12947 +#ifdef CONFIG_PAX_KERNEXEC
12948 + bts $16, %esi
12949 + mov %esi, %cr0
12950 +#endif
12951 +
12952 pushl_cfi $__ESPFIX_SS
12953 pushl_cfi %eax /* new kernel esp */
12954 /* Disable interrupts, but do not irqtrace this section: we
12955 @@ -614,34 +825,28 @@ work_resched:
12956 movl TI_flags(%ebp), %ecx
12957 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12958 # than syscall tracing?
12959 - jz restore_all
12960 + jz restore_all_pax
12961 testb $_TIF_NEED_RESCHED, %cl
12962 jnz work_resched
12963
12964 work_notifysig: # deal with pending signals and
12965 # notify-resume requests
12966 + movl %esp, %eax
12967 #ifdef CONFIG_VM86
12968 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12969 - movl %esp, %eax
12970 - jne work_notifysig_v86 # returning to kernel-space or
12971 + jz 1f # returning to kernel-space or
12972 # vm86-space
12973 - xorl %edx, %edx
12974 - call do_notify_resume
12975 - jmp resume_userspace_sig
12976
12977 - ALIGN
12978 -work_notifysig_v86:
12979 pushl_cfi %ecx # save ti_flags for do_notify_resume
12980 call save_v86_state # %eax contains pt_regs pointer
12981 popl_cfi %ecx
12982 movl %eax, %esp
12983 -#else
12984 - movl %esp, %eax
12985 +1:
12986 #endif
12987 xorl %edx, %edx
12988 call do_notify_resume
12989 jmp resume_userspace_sig
12990 -END(work_pending)
12991 +ENDPROC(work_pending)
12992
12993 # perform syscall exit tracing
12994 ALIGN
12995 @@ -649,11 +854,14 @@ syscall_trace_entry:
12996 movl $-ENOSYS,PT_EAX(%esp)
12997 movl %esp, %eax
12998 call syscall_trace_enter
12999 +
13000 + pax_erase_kstack
13001 +
13002 /* What it returned is what we'll actually use. */
13003 cmpl $(nr_syscalls), %eax
13004 jnae syscall_call
13005 jmp syscall_exit
13006 -END(syscall_trace_entry)
13007 +ENDPROC(syscall_trace_entry)
13008
13009 # perform syscall exit tracing
13010 ALIGN
13011 @@ -666,20 +874,24 @@ syscall_exit_work:
13012 movl %esp, %eax
13013 call syscall_trace_leave
13014 jmp resume_userspace
13015 -END(syscall_exit_work)
13016 +ENDPROC(syscall_exit_work)
13017 CFI_ENDPROC
13018
13019 RING0_INT_FRAME # can't unwind into user space anyway
13020 syscall_fault:
13021 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13022 + push %ss
13023 + pop %ds
13024 +#endif
13025 GET_THREAD_INFO(%ebp)
13026 movl $-EFAULT,PT_EAX(%esp)
13027 jmp resume_userspace
13028 -END(syscall_fault)
13029 +ENDPROC(syscall_fault)
13030
13031 syscall_badsys:
13032 movl $-ENOSYS,PT_EAX(%esp)
13033 jmp resume_userspace
13034 -END(syscall_badsys)
13035 +ENDPROC(syscall_badsys)
13036 CFI_ENDPROC
13037 /*
13038 * End of kprobes section
13039 @@ -753,6 +965,36 @@ ptregs_clone:
13040 CFI_ENDPROC
13041 ENDPROC(ptregs_clone)
13042
13043 + ALIGN;
13044 +ENTRY(kernel_execve)
13045 + CFI_STARTPROC
13046 + pushl_cfi %ebp
13047 + sub $PT_OLDSS+4,%esp
13048 + pushl_cfi %edi
13049 + pushl_cfi %ecx
13050 + pushl_cfi %eax
13051 + lea 3*4(%esp),%edi
13052 + mov $PT_OLDSS/4+1,%ecx
13053 + xorl %eax,%eax
13054 + rep stosl
13055 + popl_cfi %eax
13056 + popl_cfi %ecx
13057 + popl_cfi %edi
13058 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13059 + pushl_cfi %esp
13060 + call sys_execve
13061 + add $4,%esp
13062 + CFI_ADJUST_CFA_OFFSET -4
13063 + GET_THREAD_INFO(%ebp)
13064 + test %eax,%eax
13065 + jz syscall_exit
13066 + add $PT_OLDSS+4,%esp
13067 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
13068 + popl_cfi %ebp
13069 + ret
13070 + CFI_ENDPROC
13071 +ENDPROC(kernel_execve)
13072 +
13073 .macro FIXUP_ESPFIX_STACK
13074 /*
13075 * Switch back for ESPFIX stack to the normal zerobased stack
13076 @@ -762,8 +1004,15 @@ ENDPROC(ptregs_clone)
13077 * normal stack and adjusts ESP with the matching offset.
13078 */
13079 /* fixup the stack */
13080 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
13081 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
13082 +#ifdef CONFIG_SMP
13083 + movl PER_CPU_VAR(cpu_number), %ebx
13084 + shll $PAGE_SHIFT_asm, %ebx
13085 + addl $cpu_gdt_table, %ebx
13086 +#else
13087 + movl $cpu_gdt_table, %ebx
13088 +#endif
13089 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
13090 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
13091 shl $16, %eax
13092 addl %esp, %eax /* the adjusted stack pointer */
13093 pushl_cfi $__KERNEL_DS
13094 @@ -816,7 +1065,7 @@ vector=vector+1
13095 .endr
13096 2: jmp common_interrupt
13097 .endr
13098 -END(irq_entries_start)
13099 +ENDPROC(irq_entries_start)
13100
13101 .previous
13102 END(interrupt)
13103 @@ -864,7 +1113,7 @@ ENTRY(coprocessor_error)
13104 pushl_cfi $do_coprocessor_error
13105 jmp error_code
13106 CFI_ENDPROC
13107 -END(coprocessor_error)
13108 +ENDPROC(coprocessor_error)
13109
13110 ENTRY(simd_coprocessor_error)
13111 RING0_INT_FRAME
13112 @@ -885,7 +1134,7 @@ ENTRY(simd_coprocessor_error)
13113 #endif
13114 jmp error_code
13115 CFI_ENDPROC
13116 -END(simd_coprocessor_error)
13117 +ENDPROC(simd_coprocessor_error)
13118
13119 ENTRY(device_not_available)
13120 RING0_INT_FRAME
13121 @@ -893,7 +1142,7 @@ ENTRY(device_not_available)
13122 pushl_cfi $do_device_not_available
13123 jmp error_code
13124 CFI_ENDPROC
13125 -END(device_not_available)
13126 +ENDPROC(device_not_available)
13127
13128 #ifdef CONFIG_PARAVIRT
13129 ENTRY(native_iret)
13130 @@ -902,12 +1151,12 @@ ENTRY(native_iret)
13131 .align 4
13132 .long native_iret, iret_exc
13133 .previous
13134 -END(native_iret)
13135 +ENDPROC(native_iret)
13136
13137 ENTRY(native_irq_enable_sysexit)
13138 sti
13139 sysexit
13140 -END(native_irq_enable_sysexit)
13141 +ENDPROC(native_irq_enable_sysexit)
13142 #endif
13143
13144 ENTRY(overflow)
13145 @@ -916,7 +1165,7 @@ ENTRY(overflow)
13146 pushl_cfi $do_overflow
13147 jmp error_code
13148 CFI_ENDPROC
13149 -END(overflow)
13150 +ENDPROC(overflow)
13151
13152 ENTRY(bounds)
13153 RING0_INT_FRAME
13154 @@ -924,7 +1173,7 @@ ENTRY(bounds)
13155 pushl_cfi $do_bounds
13156 jmp error_code
13157 CFI_ENDPROC
13158 -END(bounds)
13159 +ENDPROC(bounds)
13160
13161 ENTRY(invalid_op)
13162 RING0_INT_FRAME
13163 @@ -932,7 +1181,7 @@ ENTRY(invalid_op)
13164 pushl_cfi $do_invalid_op
13165 jmp error_code
13166 CFI_ENDPROC
13167 -END(invalid_op)
13168 +ENDPROC(invalid_op)
13169
13170 ENTRY(coprocessor_segment_overrun)
13171 RING0_INT_FRAME
13172 @@ -940,35 +1189,35 @@ ENTRY(coprocessor_segment_overrun)
13173 pushl_cfi $do_coprocessor_segment_overrun
13174 jmp error_code
13175 CFI_ENDPROC
13176 -END(coprocessor_segment_overrun)
13177 +ENDPROC(coprocessor_segment_overrun)
13178
13179 ENTRY(invalid_TSS)
13180 RING0_EC_FRAME
13181 pushl_cfi $do_invalid_TSS
13182 jmp error_code
13183 CFI_ENDPROC
13184 -END(invalid_TSS)
13185 +ENDPROC(invalid_TSS)
13186
13187 ENTRY(segment_not_present)
13188 RING0_EC_FRAME
13189 pushl_cfi $do_segment_not_present
13190 jmp error_code
13191 CFI_ENDPROC
13192 -END(segment_not_present)
13193 +ENDPROC(segment_not_present)
13194
13195 ENTRY(stack_segment)
13196 RING0_EC_FRAME
13197 pushl_cfi $do_stack_segment
13198 jmp error_code
13199 CFI_ENDPROC
13200 -END(stack_segment)
13201 +ENDPROC(stack_segment)
13202
13203 ENTRY(alignment_check)
13204 RING0_EC_FRAME
13205 pushl_cfi $do_alignment_check
13206 jmp error_code
13207 CFI_ENDPROC
13208 -END(alignment_check)
13209 +ENDPROC(alignment_check)
13210
13211 ENTRY(divide_error)
13212 RING0_INT_FRAME
13213 @@ -976,7 +1225,7 @@ ENTRY(divide_error)
13214 pushl_cfi $do_divide_error
13215 jmp error_code
13216 CFI_ENDPROC
13217 -END(divide_error)
13218 +ENDPROC(divide_error)
13219
13220 #ifdef CONFIG_X86_MCE
13221 ENTRY(machine_check)
13222 @@ -985,7 +1234,7 @@ ENTRY(machine_check)
13223 pushl_cfi machine_check_vector
13224 jmp error_code
13225 CFI_ENDPROC
13226 -END(machine_check)
13227 +ENDPROC(machine_check)
13228 #endif
13229
13230 ENTRY(spurious_interrupt_bug)
13231 @@ -994,7 +1243,7 @@ ENTRY(spurious_interrupt_bug)
13232 pushl_cfi $do_spurious_interrupt_bug
13233 jmp error_code
13234 CFI_ENDPROC
13235 -END(spurious_interrupt_bug)
13236 +ENDPROC(spurious_interrupt_bug)
13237 /*
13238 * End of kprobes section
13239 */
13240 @@ -1109,7 +1358,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
13241
13242 ENTRY(mcount)
13243 ret
13244 -END(mcount)
13245 +ENDPROC(mcount)
13246
13247 ENTRY(ftrace_caller)
13248 cmpl $0, function_trace_stop
13249 @@ -1138,7 +1387,7 @@ ftrace_graph_call:
13250 .globl ftrace_stub
13251 ftrace_stub:
13252 ret
13253 -END(ftrace_caller)
13254 +ENDPROC(ftrace_caller)
13255
13256 #else /* ! CONFIG_DYNAMIC_FTRACE */
13257
13258 @@ -1174,7 +1423,7 @@ trace:
13259 popl %ecx
13260 popl %eax
13261 jmp ftrace_stub
13262 -END(mcount)
13263 +ENDPROC(mcount)
13264 #endif /* CONFIG_DYNAMIC_FTRACE */
13265 #endif /* CONFIG_FUNCTION_TRACER */
13266
13267 @@ -1195,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
13268 popl %ecx
13269 popl %eax
13270 ret
13271 -END(ftrace_graph_caller)
13272 +ENDPROC(ftrace_graph_caller)
13273
13274 .globl return_to_handler
13275 return_to_handler:
13276 @@ -1209,7 +1458,6 @@ return_to_handler:
13277 jmp *%ecx
13278 #endif
13279
13280 -.section .rodata,"a"
13281 #include "syscall_table_32.S"
13282
13283 syscall_table_size=(.-sys_call_table)
13284 @@ -1255,15 +1503,18 @@ error_code:
13285 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13286 REG_TO_PTGS %ecx
13287 SET_KERNEL_GS %ecx
13288 - movl $(__USER_DS), %ecx
13289 + movl $(__KERNEL_DS), %ecx
13290 movl %ecx, %ds
13291 movl %ecx, %es
13292 +
13293 + pax_enter_kernel
13294 +
13295 TRACE_IRQS_OFF
13296 movl %esp,%eax # pt_regs pointer
13297 call *%edi
13298 jmp ret_from_exception
13299 CFI_ENDPROC
13300 -END(page_fault)
13301 +ENDPROC(page_fault)
13302
13303 /*
13304 * Debug traps and NMI can happen at the one SYSENTER instruction
13305 @@ -1305,7 +1556,7 @@ debug_stack_correct:
13306 call do_debug
13307 jmp ret_from_exception
13308 CFI_ENDPROC
13309 -END(debug)
13310 +ENDPROC(debug)
13311
13312 /*
13313 * NMI is doubly nasty. It can happen _while_ we're handling
13314 @@ -1342,6 +1593,9 @@ nmi_stack_correct:
13315 xorl %edx,%edx # zero error code
13316 movl %esp,%eax # pt_regs pointer
13317 call do_nmi
13318 +
13319 + pax_exit_kernel
13320 +
13321 jmp restore_all_notrace
13322 CFI_ENDPROC
13323
13324 @@ -1378,12 +1632,15 @@ nmi_espfix_stack:
13325 FIXUP_ESPFIX_STACK # %eax == %esp
13326 xorl %edx,%edx # zero error code
13327 call do_nmi
13328 +
13329 + pax_exit_kernel
13330 +
13331 RESTORE_REGS
13332 lss 12+4(%esp), %esp # back to espfix stack
13333 CFI_ADJUST_CFA_OFFSET -24
13334 jmp irq_return
13335 CFI_ENDPROC
13336 -END(nmi)
13337 +ENDPROC(nmi)
13338
13339 ENTRY(int3)
13340 RING0_INT_FRAME
13341 @@ -1395,14 +1652,14 @@ ENTRY(int3)
13342 call do_int3
13343 jmp ret_from_exception
13344 CFI_ENDPROC
13345 -END(int3)
13346 +ENDPROC(int3)
13347
13348 ENTRY(general_protection)
13349 RING0_EC_FRAME
13350 pushl_cfi $do_general_protection
13351 jmp error_code
13352 CFI_ENDPROC
13353 -END(general_protection)
13354 +ENDPROC(general_protection)
13355
13356 #ifdef CONFIG_KVM_GUEST
13357 ENTRY(async_page_fault)
13358 @@ -1410,7 +1667,7 @@ ENTRY(async_page_fault)
13359 pushl_cfi $do_async_page_fault
13360 jmp error_code
13361 CFI_ENDPROC
13362 -END(async_page_fault)
13363 +ENDPROC(async_page_fault)
13364 #endif
13365
13366 /*
13367 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
13368 index 6419bb0..4f4cf2b 100644
13369 --- a/arch/x86/kernel/entry_64.S
13370 +++ b/arch/x86/kernel/entry_64.S
13371 @@ -55,6 +55,8 @@
13372 #include <asm/paravirt.h>
13373 #include <asm/ftrace.h>
13374 #include <asm/percpu.h>
13375 +#include <asm/pgtable.h>
13376 +#include <asm/alternative-asm.h>
13377
13378 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13379 #include <linux/elf-em.h>
13380 @@ -68,8 +70,9 @@
13381 #ifdef CONFIG_FUNCTION_TRACER
13382 #ifdef CONFIG_DYNAMIC_FTRACE
13383 ENTRY(mcount)
13384 + pax_force_retaddr
13385 retq
13386 -END(mcount)
13387 +ENDPROC(mcount)
13388
13389 ENTRY(ftrace_caller)
13390 cmpl $0, function_trace_stop
13391 @@ -92,8 +95,9 @@ GLOBAL(ftrace_graph_call)
13392 #endif
13393
13394 GLOBAL(ftrace_stub)
13395 + pax_force_retaddr
13396 retq
13397 -END(ftrace_caller)
13398 +ENDPROC(ftrace_caller)
13399
13400 #else /* ! CONFIG_DYNAMIC_FTRACE */
13401 ENTRY(mcount)
13402 @@ -112,6 +116,7 @@ ENTRY(mcount)
13403 #endif
13404
13405 GLOBAL(ftrace_stub)
13406 + pax_force_retaddr
13407 retq
13408
13409 trace:
13410 @@ -121,12 +126,13 @@ trace:
13411 movq 8(%rbp), %rsi
13412 subq $MCOUNT_INSN_SIZE, %rdi
13413
13414 + pax_force_fptr ftrace_trace_function
13415 call *ftrace_trace_function
13416
13417 MCOUNT_RESTORE_FRAME
13418
13419 jmp ftrace_stub
13420 -END(mcount)
13421 +ENDPROC(mcount)
13422 #endif /* CONFIG_DYNAMIC_FTRACE */
13423 #endif /* CONFIG_FUNCTION_TRACER */
13424
13425 @@ -146,8 +152,9 @@ ENTRY(ftrace_graph_caller)
13426
13427 MCOUNT_RESTORE_FRAME
13428
13429 + pax_force_retaddr
13430 retq
13431 -END(ftrace_graph_caller)
13432 +ENDPROC(ftrace_graph_caller)
13433
13434 GLOBAL(return_to_handler)
13435 subq $24, %rsp
13436 @@ -163,6 +170,7 @@ GLOBAL(return_to_handler)
13437 movq 8(%rsp), %rdx
13438 movq (%rsp), %rax
13439 addq $24, %rsp
13440 + pax_force_fptr %rdi
13441 jmp *%rdi
13442 #endif
13443
13444 @@ -178,6 +186,282 @@ ENTRY(native_usergs_sysret64)
13445 ENDPROC(native_usergs_sysret64)
13446 #endif /* CONFIG_PARAVIRT */
13447
13448 + .macro ljmpq sel, off
13449 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13450 + .byte 0x48; ljmp *1234f(%rip)
13451 + .pushsection .rodata
13452 + .align 16
13453 + 1234: .quad \off; .word \sel
13454 + .popsection
13455 +#else
13456 + pushq $\sel
13457 + pushq $\off
13458 + lretq
13459 +#endif
13460 + .endm
13461 +
13462 + .macro pax_enter_kernel
13463 + pax_set_fptr_mask
13464 +#ifdef CONFIG_PAX_KERNEXEC
13465 + call pax_enter_kernel
13466 +#endif
13467 + .endm
13468 +
13469 + .macro pax_exit_kernel
13470 +#ifdef CONFIG_PAX_KERNEXEC
13471 + call pax_exit_kernel
13472 +#endif
13473 + .endm
13474 +
13475 +#ifdef CONFIG_PAX_KERNEXEC
13476 +ENTRY(pax_enter_kernel)
13477 + pushq %rdi
13478 +
13479 +#ifdef CONFIG_PARAVIRT
13480 + PV_SAVE_REGS(CLBR_RDI)
13481 +#endif
13482 +
13483 + GET_CR0_INTO_RDI
13484 + bts $16,%rdi
13485 + jnc 3f
13486 + mov %cs,%edi
13487 + cmp $__KERNEL_CS,%edi
13488 + jnz 2f
13489 +1:
13490 +
13491 +#ifdef CONFIG_PARAVIRT
13492 + PV_RESTORE_REGS(CLBR_RDI)
13493 +#endif
13494 +
13495 + popq %rdi
13496 + pax_force_retaddr
13497 + retq
13498 +
13499 +2: ljmpq __KERNEL_CS,1f
13500 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
13501 +4: SET_RDI_INTO_CR0
13502 + jmp 1b
13503 +ENDPROC(pax_enter_kernel)
13504 +
13505 +ENTRY(pax_exit_kernel)
13506 + pushq %rdi
13507 +
13508 +#ifdef CONFIG_PARAVIRT
13509 + PV_SAVE_REGS(CLBR_RDI)
13510 +#endif
13511 +
13512 + mov %cs,%rdi
13513 + cmp $__KERNEXEC_KERNEL_CS,%edi
13514 + jz 2f
13515 +1:
13516 +
13517 +#ifdef CONFIG_PARAVIRT
13518 + PV_RESTORE_REGS(CLBR_RDI);
13519 +#endif
13520 +
13521 + popq %rdi
13522 + pax_force_retaddr
13523 + retq
13524 +
13525 +2: GET_CR0_INTO_RDI
13526 + btr $16,%rdi
13527 + ljmpq __KERNEL_CS,3f
13528 +3: SET_RDI_INTO_CR0
13529 + jmp 1b
13530 +#ifdef CONFIG_PARAVIRT
13531 + PV_RESTORE_REGS(CLBR_RDI);
13532 +#endif
13533 +
13534 + popq %rdi
13535 + pax_force_retaddr
13536 + retq
13537 +ENDPROC(pax_exit_kernel)
13538 +#endif
13539 +
13540 + .macro pax_enter_kernel_user
13541 + pax_set_fptr_mask
13542 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13543 + call pax_enter_kernel_user
13544 +#endif
13545 + .endm
13546 +
13547 + .macro pax_exit_kernel_user
13548 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13549 + call pax_exit_kernel_user
13550 +#endif
13551 +#ifdef CONFIG_PAX_RANDKSTACK
13552 + push %rax
13553 + call pax_randomize_kstack
13554 + pop %rax
13555 +#endif
13556 + .endm
13557 +
13558 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13559 +ENTRY(pax_enter_kernel_user)
13560 + pushq %rdi
13561 + pushq %rbx
13562 +
13563 +#ifdef CONFIG_PARAVIRT
13564 + PV_SAVE_REGS(CLBR_RDI)
13565 +#endif
13566 +
13567 + GET_CR3_INTO_RDI
13568 + mov %rdi,%rbx
13569 + add $__START_KERNEL_map,%rbx
13570 + sub phys_base(%rip),%rbx
13571 +
13572 +#ifdef CONFIG_PARAVIRT
13573 + pushq %rdi
13574 + cmpl $0, pv_info+PARAVIRT_enabled
13575 + jz 1f
13576 + i = 0
13577 + .rept USER_PGD_PTRS
13578 + mov i*8(%rbx),%rsi
13579 + mov $0,%sil
13580 + lea i*8(%rbx),%rdi
13581 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13582 + i = i + 1
13583 + .endr
13584 + jmp 2f
13585 +1:
13586 +#endif
13587 +
13588 + i = 0
13589 + .rept USER_PGD_PTRS
13590 + movb $0,i*8(%rbx)
13591 + i = i + 1
13592 + .endr
13593 +
13594 +#ifdef CONFIG_PARAVIRT
13595 +2: popq %rdi
13596 +#endif
13597 + SET_RDI_INTO_CR3
13598 +
13599 +#ifdef CONFIG_PAX_KERNEXEC
13600 + GET_CR0_INTO_RDI
13601 + bts $16,%rdi
13602 + SET_RDI_INTO_CR0
13603 +#endif
13604 +
13605 +#ifdef CONFIG_PARAVIRT
13606 + PV_RESTORE_REGS(CLBR_RDI)
13607 +#endif
13608 +
13609 + popq %rbx
13610 + popq %rdi
13611 + pax_force_retaddr
13612 + retq
13613 +ENDPROC(pax_enter_kernel_user)
13614 +
13615 +ENTRY(pax_exit_kernel_user)
13616 + push %rdi
13617 +
13618 +#ifdef CONFIG_PARAVIRT
13619 + pushq %rbx
13620 + PV_SAVE_REGS(CLBR_RDI)
13621 +#endif
13622 +
13623 +#ifdef CONFIG_PAX_KERNEXEC
13624 + GET_CR0_INTO_RDI
13625 + btr $16,%rdi
13626 + SET_RDI_INTO_CR0
13627 +#endif
13628 +
13629 + GET_CR3_INTO_RDI
13630 + add $__START_KERNEL_map,%rdi
13631 + sub phys_base(%rip),%rdi
13632 +
13633 +#ifdef CONFIG_PARAVIRT
13634 + cmpl $0, pv_info+PARAVIRT_enabled
13635 + jz 1f
13636 + mov %rdi,%rbx
13637 + i = 0
13638 + .rept USER_PGD_PTRS
13639 + mov i*8(%rbx),%rsi
13640 + mov $0x67,%sil
13641 + lea i*8(%rbx),%rdi
13642 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
13643 + i = i + 1
13644 + .endr
13645 + jmp 2f
13646 +1:
13647 +#endif
13648 +
13649 + i = 0
13650 + .rept USER_PGD_PTRS
13651 + movb $0x67,i*8(%rdi)
13652 + i = i + 1
13653 + .endr
13654 +
13655 +#ifdef CONFIG_PARAVIRT
13656 +2: PV_RESTORE_REGS(CLBR_RDI)
13657 + popq %rbx
13658 +#endif
13659 +
13660 + popq %rdi
13661 + pax_force_retaddr
13662 + retq
13663 +ENDPROC(pax_exit_kernel_user)
13664 +#endif
13665 +
13666 +.macro pax_erase_kstack
13667 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13668 + call pax_erase_kstack
13669 +#endif
13670 +.endm
13671 +
13672 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13673 +/*
13674 + * r11: thread_info
13675 + * rcx, rdx: can be clobbered
13676 + */
13677 +ENTRY(pax_erase_kstack)
13678 + pushq %rdi
13679 + pushq %rax
13680 + pushq %r11
13681 +
13682 + GET_THREAD_INFO(%r11)
13683 + mov TI_lowest_stack(%r11), %rdi
13684 + mov $-0xBEEF, %rax
13685 + std
13686 +
13687 +1: mov %edi, %ecx
13688 + and $THREAD_SIZE_asm - 1, %ecx
13689 + shr $3, %ecx
13690 + repne scasq
13691 + jecxz 2f
13692 +
13693 + cmp $2*8, %ecx
13694 + jc 2f
13695 +
13696 + mov $2*8, %ecx
13697 + repe scasq
13698 + jecxz 2f
13699 + jne 1b
13700 +
13701 +2: cld
13702 + mov %esp, %ecx
13703 + sub %edi, %ecx
13704 +
13705 + cmp $THREAD_SIZE_asm, %rcx
13706 + jb 3f
13707 + ud2
13708 +3:
13709 +
13710 + shr $3, %ecx
13711 + rep stosq
13712 +
13713 + mov TI_task_thread_sp0(%r11), %rdi
13714 + sub $256, %rdi
13715 + mov %rdi, TI_lowest_stack(%r11)
13716 +
13717 + popq %r11
13718 + popq %rax
13719 + popq %rdi
13720 + pax_force_retaddr
13721 + ret
13722 +ENDPROC(pax_erase_kstack)
13723 +#endif
13724
13725 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13726 #ifdef CONFIG_TRACE_IRQFLAGS
13727 @@ -319,7 +603,7 @@ ENDPROC(native_usergs_sysret64)
13728 movq %rsp, %rsi
13729
13730 leaq -RBP(%rsp),%rdi /* arg1 for handler */
13731 - testl $3, CS(%rdi)
13732 + testb $3, CS(%rdi)
13733 je 1f
13734 SWAPGS
13735 /*
13736 @@ -350,9 +634,10 @@ ENTRY(save_rest)
13737 movq_cfi r15, R15+16
13738 movq %r11, 8(%rsp) /* return address */
13739 FIXUP_TOP_OF_STACK %r11, 16
13740 + pax_force_retaddr
13741 ret
13742 CFI_ENDPROC
13743 -END(save_rest)
13744 +ENDPROC(save_rest)
13745
13746 /* save complete stack frame */
13747 .pushsection .kprobes.text, "ax"
13748 @@ -381,9 +666,10 @@ ENTRY(save_paranoid)
13749 js 1f /* negative -> in kernel */
13750 SWAPGS
13751 xorl %ebx,%ebx
13752 -1: ret
13753 +1: pax_force_retaddr_bts
13754 + ret
13755 CFI_ENDPROC
13756 -END(save_paranoid)
13757 +ENDPROC(save_paranoid)
13758 .popsection
13759
13760 /*
13761 @@ -405,7 +691,7 @@ ENTRY(ret_from_fork)
13762
13763 RESTORE_REST
13764
13765 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13766 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13767 je int_ret_from_sys_call
13768
13769 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13770 @@ -415,7 +701,7 @@ ENTRY(ret_from_fork)
13771 jmp ret_from_sys_call # go to the SYSRET fastpath
13772
13773 CFI_ENDPROC
13774 -END(ret_from_fork)
13775 +ENDPROC(ret_from_fork)
13776
13777 /*
13778 * System call entry. Up to 6 arguments in registers are supported.
13779 @@ -451,7 +737,7 @@ END(ret_from_fork)
13780 ENTRY(system_call)
13781 CFI_STARTPROC simple
13782 CFI_SIGNAL_FRAME
13783 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13784 + CFI_DEF_CFA rsp,0
13785 CFI_REGISTER rip,rcx
13786 /*CFI_REGISTER rflags,r11*/
13787 SWAPGS_UNSAFE_STACK
13788 @@ -464,12 +750,13 @@ ENTRY(system_call_after_swapgs)
13789
13790 movq %rsp,PER_CPU_VAR(old_rsp)
13791 movq PER_CPU_VAR(kernel_stack),%rsp
13792 + SAVE_ARGS 8*6,0
13793 + pax_enter_kernel_user
13794 /*
13795 * No need to follow this irqs off/on section - it's straight
13796 * and short:
13797 */
13798 ENABLE_INTERRUPTS(CLBR_NONE)
13799 - SAVE_ARGS 8,0
13800 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13801 movq %rcx,RIP-ARGOFFSET(%rsp)
13802 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13803 @@ -479,7 +766,7 @@ ENTRY(system_call_after_swapgs)
13804 system_call_fastpath:
13805 cmpq $__NR_syscall_max,%rax
13806 ja badsys
13807 - movq %r10,%rcx
13808 + movq R10-ARGOFFSET(%rsp),%rcx
13809 call *sys_call_table(,%rax,8) # XXX: rip relative
13810 movq %rax,RAX-ARGOFFSET(%rsp)
13811 /*
13812 @@ -498,6 +785,8 @@ sysret_check:
13813 andl %edi,%edx
13814 jnz sysret_careful
13815 CFI_REMEMBER_STATE
13816 + pax_exit_kernel_user
13817 + pax_erase_kstack
13818 /*
13819 * sysretq will re-enable interrupts:
13820 */
13821 @@ -549,14 +838,18 @@ badsys:
13822 * jump back to the normal fast path.
13823 */
13824 auditsys:
13825 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
13826 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
13827 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
13828 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
13829 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
13830 movq %rax,%rsi /* 2nd arg: syscall number */
13831 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13832 call audit_syscall_entry
13833 +
13834 + pax_erase_kstack
13835 +
13836 LOAD_ARGS 0 /* reload call-clobbered registers */
13837 + pax_set_fptr_mask
13838 jmp system_call_fastpath
13839
13840 /*
13841 @@ -586,16 +879,20 @@ tracesys:
13842 FIXUP_TOP_OF_STACK %rdi
13843 movq %rsp,%rdi
13844 call syscall_trace_enter
13845 +
13846 + pax_erase_kstack
13847 +
13848 /*
13849 * Reload arg registers from stack in case ptrace changed them.
13850 * We don't reload %rax because syscall_trace_enter() returned
13851 * the value it wants us to use in the table lookup.
13852 */
13853 LOAD_ARGS ARGOFFSET, 1
13854 + pax_set_fptr_mask
13855 RESTORE_REST
13856 cmpq $__NR_syscall_max,%rax
13857 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
13858 - movq %r10,%rcx /* fixup for C */
13859 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
13860 call *sys_call_table(,%rax,8)
13861 movq %rax,RAX-ARGOFFSET(%rsp)
13862 /* Use IRET because user could have changed frame */
13863 @@ -607,7 +904,7 @@ tracesys:
13864 GLOBAL(int_ret_from_sys_call)
13865 DISABLE_INTERRUPTS(CLBR_NONE)
13866 TRACE_IRQS_OFF
13867 - testl $3,CS-ARGOFFSET(%rsp)
13868 + testb $3,CS-ARGOFFSET(%rsp)
13869 je retint_restore_args
13870 movl $_TIF_ALLWORK_MASK,%edi
13871 /* edi: mask to check */
13872 @@ -664,7 +961,7 @@ int_restore_rest:
13873 TRACE_IRQS_OFF
13874 jmp int_with_check
13875 CFI_ENDPROC
13876 -END(system_call)
13877 +ENDPROC(system_call)
13878
13879 /*
13880 * Certain special system calls that need to save a complete full stack frame.
13881 @@ -680,7 +977,7 @@ ENTRY(\label)
13882 call \func
13883 jmp ptregscall_common
13884 CFI_ENDPROC
13885 -END(\label)
13886 +ENDPROC(\label)
13887 .endm
13888
13889 PTREGSCALL stub_clone, sys_clone, %r8
13890 @@ -698,9 +995,10 @@ ENTRY(ptregscall_common)
13891 movq_cfi_restore R12+8, r12
13892 movq_cfi_restore RBP+8, rbp
13893 movq_cfi_restore RBX+8, rbx
13894 + pax_force_retaddr
13895 ret $REST_SKIP /* pop extended registers */
13896 CFI_ENDPROC
13897 -END(ptregscall_common)
13898 +ENDPROC(ptregscall_common)
13899
13900 ENTRY(stub_execve)
13901 CFI_STARTPROC
13902 @@ -715,7 +1013,7 @@ ENTRY(stub_execve)
13903 RESTORE_REST
13904 jmp int_ret_from_sys_call
13905 CFI_ENDPROC
13906 -END(stub_execve)
13907 +ENDPROC(stub_execve)
13908
13909 /*
13910 * sigreturn is special because it needs to restore all registers on return.
13911 @@ -733,7 +1031,7 @@ ENTRY(stub_rt_sigreturn)
13912 RESTORE_REST
13913 jmp int_ret_from_sys_call
13914 CFI_ENDPROC
13915 -END(stub_rt_sigreturn)
13916 +ENDPROC(stub_rt_sigreturn)
13917
13918 /*
13919 * Build the entry stubs and pointer table with some assembler magic.
13920 @@ -768,7 +1066,7 @@ vector=vector+1
13921 2: jmp common_interrupt
13922 .endr
13923 CFI_ENDPROC
13924 -END(irq_entries_start)
13925 +ENDPROC(irq_entries_start)
13926
13927 .previous
13928 END(interrupt)
13929 @@ -789,6 +1087,16 @@ END(interrupt)
13930 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
13931 SAVE_ARGS_IRQ
13932 PARTIAL_FRAME 0
13933 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13934 + testb $3, CS(%rdi)
13935 + jnz 1f
13936 + pax_enter_kernel
13937 + jmp 2f
13938 +1: pax_enter_kernel_user
13939 +2:
13940 +#else
13941 + pax_enter_kernel
13942 +#endif
13943 call \func
13944 .endm
13945
13946 @@ -820,7 +1128,7 @@ ret_from_intr:
13947
13948 exit_intr:
13949 GET_THREAD_INFO(%rcx)
13950 - testl $3,CS-ARGOFFSET(%rsp)
13951 + testb $3,CS-ARGOFFSET(%rsp)
13952 je retint_kernel
13953
13954 /* Interrupt came from user space */
13955 @@ -842,12 +1150,16 @@ retint_swapgs: /* return to user-space */
13956 * The iretq could re-enable interrupts:
13957 */
13958 DISABLE_INTERRUPTS(CLBR_ANY)
13959 + pax_exit_kernel_user
13960 + pax_erase_kstack
13961 TRACE_IRQS_IRETQ
13962 SWAPGS
13963 jmp restore_args
13964
13965 retint_restore_args: /* return to kernel space */
13966 DISABLE_INTERRUPTS(CLBR_ANY)
13967 + pax_exit_kernel
13968 + pax_force_retaddr RIP-ARGOFFSET
13969 /*
13970 * The iretq could re-enable interrupts:
13971 */
13972 @@ -936,7 +1248,7 @@ ENTRY(retint_kernel)
13973 #endif
13974
13975 CFI_ENDPROC
13976 -END(common_interrupt)
13977 +ENDPROC(common_interrupt)
13978 /*
13979 * End of kprobes section
13980 */
13981 @@ -952,7 +1264,7 @@ ENTRY(\sym)
13982 interrupt \do_sym
13983 jmp ret_from_intr
13984 CFI_ENDPROC
13985 -END(\sym)
13986 +ENDPROC(\sym)
13987 .endm
13988
13989 #ifdef CONFIG_SMP
13990 @@ -1017,12 +1329,22 @@ ENTRY(\sym)
13991 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
13992 call error_entry
13993 DEFAULT_FRAME 0
13994 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13995 + testb $3, CS(%rsp)
13996 + jnz 1f
13997 + pax_enter_kernel
13998 + jmp 2f
13999 +1: pax_enter_kernel_user
14000 +2:
14001 +#else
14002 + pax_enter_kernel
14003 +#endif
14004 movq %rsp,%rdi /* pt_regs pointer */
14005 xorl %esi,%esi /* no error code */
14006 call \do_sym
14007 jmp error_exit /* %ebx: no swapgs flag */
14008 CFI_ENDPROC
14009 -END(\sym)
14010 +ENDPROC(\sym)
14011 .endm
14012
14013 .macro paranoidzeroentry sym do_sym
14014 @@ -1034,15 +1356,25 @@ ENTRY(\sym)
14015 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14016 call save_paranoid
14017 TRACE_IRQS_OFF
14018 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14019 + testb $3, CS(%rsp)
14020 + jnz 1f
14021 + pax_enter_kernel
14022 + jmp 2f
14023 +1: pax_enter_kernel_user
14024 +2:
14025 +#else
14026 + pax_enter_kernel
14027 +#endif
14028 movq %rsp,%rdi /* pt_regs pointer */
14029 xorl %esi,%esi /* no error code */
14030 call \do_sym
14031 jmp paranoid_exit /* %ebx: no swapgs flag */
14032 CFI_ENDPROC
14033 -END(\sym)
14034 +ENDPROC(\sym)
14035 .endm
14036
14037 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
14038 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
14039 .macro paranoidzeroentry_ist sym do_sym ist
14040 ENTRY(\sym)
14041 INTR_FRAME
14042 @@ -1052,14 +1384,30 @@ ENTRY(\sym)
14043 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14044 call save_paranoid
14045 TRACE_IRQS_OFF
14046 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14047 + testb $3, CS(%rsp)
14048 + jnz 1f
14049 + pax_enter_kernel
14050 + jmp 2f
14051 +1: pax_enter_kernel_user
14052 +2:
14053 +#else
14054 + pax_enter_kernel
14055 +#endif
14056 movq %rsp,%rdi /* pt_regs pointer */
14057 xorl %esi,%esi /* no error code */
14058 +#ifdef CONFIG_SMP
14059 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
14060 + lea init_tss(%r12), %r12
14061 +#else
14062 + lea init_tss(%rip), %r12
14063 +#endif
14064 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14065 call \do_sym
14066 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
14067 jmp paranoid_exit /* %ebx: no swapgs flag */
14068 CFI_ENDPROC
14069 -END(\sym)
14070 +ENDPROC(\sym)
14071 .endm
14072
14073 .macro errorentry sym do_sym
14074 @@ -1070,13 +1418,23 @@ ENTRY(\sym)
14075 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14076 call error_entry
14077 DEFAULT_FRAME 0
14078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14079 + testb $3, CS(%rsp)
14080 + jnz 1f
14081 + pax_enter_kernel
14082 + jmp 2f
14083 +1: pax_enter_kernel_user
14084 +2:
14085 +#else
14086 + pax_enter_kernel
14087 +#endif
14088 movq %rsp,%rdi /* pt_regs pointer */
14089 movq ORIG_RAX(%rsp),%rsi /* get error code */
14090 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14091 call \do_sym
14092 jmp error_exit /* %ebx: no swapgs flag */
14093 CFI_ENDPROC
14094 -END(\sym)
14095 +ENDPROC(\sym)
14096 .endm
14097
14098 /* error code is on the stack already */
14099 @@ -1089,13 +1447,23 @@ ENTRY(\sym)
14100 call save_paranoid
14101 DEFAULT_FRAME 0
14102 TRACE_IRQS_OFF
14103 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14104 + testb $3, CS(%rsp)
14105 + jnz 1f
14106 + pax_enter_kernel
14107 + jmp 2f
14108 +1: pax_enter_kernel_user
14109 +2:
14110 +#else
14111 + pax_enter_kernel
14112 +#endif
14113 movq %rsp,%rdi /* pt_regs pointer */
14114 movq ORIG_RAX(%rsp),%rsi /* get error code */
14115 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14116 call \do_sym
14117 jmp paranoid_exit /* %ebx: no swapgs flag */
14118 CFI_ENDPROC
14119 -END(\sym)
14120 +ENDPROC(\sym)
14121 .endm
14122
14123 zeroentry divide_error do_divide_error
14124 @@ -1125,9 +1493,10 @@ gs_change:
14125 2: mfence /* workaround */
14126 SWAPGS
14127 popfq_cfi
14128 + pax_force_retaddr
14129 ret
14130 CFI_ENDPROC
14131 -END(native_load_gs_index)
14132 +ENDPROC(native_load_gs_index)
14133
14134 .section __ex_table,"a"
14135 .align 8
14136 @@ -1149,13 +1518,14 @@ ENTRY(kernel_thread_helper)
14137 * Here we are in the child and the registers are set as they were
14138 * at kernel_thread() invocation in the parent.
14139 */
14140 + pax_force_fptr %rsi
14141 call *%rsi
14142 # exit
14143 mov %eax, %edi
14144 call do_exit
14145 ud2 # padding for call trace
14146 CFI_ENDPROC
14147 -END(kernel_thread_helper)
14148 +ENDPROC(kernel_thread_helper)
14149
14150 /*
14151 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
14152 @@ -1184,9 +1554,10 @@ ENTRY(kernel_execve)
14153 je int_ret_from_sys_call
14154 RESTORE_ARGS
14155 UNFAKE_STACK_FRAME
14156 + pax_force_retaddr
14157 ret
14158 CFI_ENDPROC
14159 -END(kernel_execve)
14160 +ENDPROC(kernel_execve)
14161
14162 /* Call softirq on interrupt stack. Interrupts are off. */
14163 ENTRY(call_softirq)
14164 @@ -1204,9 +1575,10 @@ ENTRY(call_softirq)
14165 CFI_DEF_CFA_REGISTER rsp
14166 CFI_ADJUST_CFA_OFFSET -8
14167 decl PER_CPU_VAR(irq_count)
14168 + pax_force_retaddr
14169 ret
14170 CFI_ENDPROC
14171 -END(call_softirq)
14172 +ENDPROC(call_softirq)
14173
14174 #ifdef CONFIG_XEN
14175 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
14176 @@ -1244,7 +1616,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
14177 decl PER_CPU_VAR(irq_count)
14178 jmp error_exit
14179 CFI_ENDPROC
14180 -END(xen_do_hypervisor_callback)
14181 +ENDPROC(xen_do_hypervisor_callback)
14182
14183 /*
14184 * Hypervisor uses this for application faults while it executes.
14185 @@ -1303,7 +1675,7 @@ ENTRY(xen_failsafe_callback)
14186 SAVE_ALL
14187 jmp error_exit
14188 CFI_ENDPROC
14189 -END(xen_failsafe_callback)
14190 +ENDPROC(xen_failsafe_callback)
14191
14192 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
14193 xen_hvm_callback_vector xen_evtchn_do_upcall
14194 @@ -1352,16 +1724,31 @@ ENTRY(paranoid_exit)
14195 TRACE_IRQS_OFF
14196 testl %ebx,%ebx /* swapgs needed? */
14197 jnz paranoid_restore
14198 - testl $3,CS(%rsp)
14199 + testb $3,CS(%rsp)
14200 jnz paranoid_userspace
14201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14202 + pax_exit_kernel
14203 + TRACE_IRQS_IRETQ 0
14204 + SWAPGS_UNSAFE_STACK
14205 + RESTORE_ALL 8
14206 + pax_force_retaddr_bts
14207 + jmp irq_return
14208 +#endif
14209 paranoid_swapgs:
14210 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14211 + pax_exit_kernel_user
14212 +#else
14213 + pax_exit_kernel
14214 +#endif
14215 TRACE_IRQS_IRETQ 0
14216 SWAPGS_UNSAFE_STACK
14217 RESTORE_ALL 8
14218 jmp irq_return
14219 paranoid_restore:
14220 + pax_exit_kernel
14221 TRACE_IRQS_IRETQ 0
14222 RESTORE_ALL 8
14223 + pax_force_retaddr_bts
14224 jmp irq_return
14225 paranoid_userspace:
14226 GET_THREAD_INFO(%rcx)
14227 @@ -1390,7 +1777,7 @@ paranoid_schedule:
14228 TRACE_IRQS_OFF
14229 jmp paranoid_userspace
14230 CFI_ENDPROC
14231 -END(paranoid_exit)
14232 +ENDPROC(paranoid_exit)
14233
14234 /*
14235 * Exception entry point. This expects an error code/orig_rax on the stack.
14236 @@ -1417,12 +1804,13 @@ ENTRY(error_entry)
14237 movq_cfi r14, R14+8
14238 movq_cfi r15, R15+8
14239 xorl %ebx,%ebx
14240 - testl $3,CS+8(%rsp)
14241 + testb $3,CS+8(%rsp)
14242 je error_kernelspace
14243 error_swapgs:
14244 SWAPGS
14245 error_sti:
14246 TRACE_IRQS_OFF
14247 + pax_force_retaddr_bts
14248 ret
14249
14250 /*
14251 @@ -1449,7 +1837,7 @@ bstep_iret:
14252 movq %rcx,RIP+8(%rsp)
14253 jmp error_swapgs
14254 CFI_ENDPROC
14255 -END(error_entry)
14256 +ENDPROC(error_entry)
14257
14258
14259 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
14260 @@ -1469,7 +1857,7 @@ ENTRY(error_exit)
14261 jnz retint_careful
14262 jmp retint_swapgs
14263 CFI_ENDPROC
14264 -END(error_exit)
14265 +ENDPROC(error_exit)
14266
14267
14268 /* runs on exception stack */
14269 @@ -1481,6 +1869,16 @@ ENTRY(nmi)
14270 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
14271 call save_paranoid
14272 DEFAULT_FRAME 0
14273 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14274 + testb $3, CS(%rsp)
14275 + jnz 1f
14276 + pax_enter_kernel
14277 + jmp 2f
14278 +1: pax_enter_kernel_user
14279 +2:
14280 +#else
14281 + pax_enter_kernel
14282 +#endif
14283 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14284 movq %rsp,%rdi
14285 movq $-1,%rsi
14286 @@ -1491,12 +1889,28 @@ ENTRY(nmi)
14287 DISABLE_INTERRUPTS(CLBR_NONE)
14288 testl %ebx,%ebx /* swapgs needed? */
14289 jnz nmi_restore
14290 - testl $3,CS(%rsp)
14291 + testb $3,CS(%rsp)
14292 jnz nmi_userspace
14293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14294 + pax_exit_kernel
14295 + SWAPGS_UNSAFE_STACK
14296 + RESTORE_ALL 8
14297 + pax_force_retaddr_bts
14298 + jmp irq_return
14299 +#endif
14300 nmi_swapgs:
14301 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14302 + pax_exit_kernel_user
14303 +#else
14304 + pax_exit_kernel
14305 +#endif
14306 SWAPGS_UNSAFE_STACK
14307 + RESTORE_ALL 8
14308 + jmp irq_return
14309 nmi_restore:
14310 + pax_exit_kernel
14311 RESTORE_ALL 8
14312 + pax_force_retaddr_bts
14313 jmp irq_return
14314 nmi_userspace:
14315 GET_THREAD_INFO(%rcx)
14316 @@ -1525,14 +1939,14 @@ nmi_schedule:
14317 jmp paranoid_exit
14318 CFI_ENDPROC
14319 #endif
14320 -END(nmi)
14321 +ENDPROC(nmi)
14322
14323 ENTRY(ignore_sysret)
14324 CFI_STARTPROC
14325 mov $-ENOSYS,%eax
14326 sysret
14327 CFI_ENDPROC
14328 -END(ignore_sysret)
14329 +ENDPROC(ignore_sysret)
14330
14331 /*
14332 * End of kprobes section
14333 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
14334 index c9a281f..ce2f317 100644
14335 --- a/arch/x86/kernel/ftrace.c
14336 +++ b/arch/x86/kernel/ftrace.c
14337 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
14338 static const void *mod_code_newcode; /* holds the text to write to the IP */
14339
14340 static unsigned nmi_wait_count;
14341 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
14342 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14343
14344 int ftrace_arch_read_dyn_info(char *buf, int size)
14345 {
14346 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
14347
14348 r = snprintf(buf, size, "%u %u",
14349 nmi_wait_count,
14350 - atomic_read(&nmi_update_count));
14351 + atomic_read_unchecked(&nmi_update_count));
14352 return r;
14353 }
14354
14355 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
14356
14357 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14358 smp_rmb();
14359 + pax_open_kernel();
14360 ftrace_mod_code();
14361 - atomic_inc(&nmi_update_count);
14362 + pax_close_kernel();
14363 + atomic_inc_unchecked(&nmi_update_count);
14364 }
14365 /* Must have previous changes seen before executions */
14366 smp_mb();
14367 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
14368 {
14369 unsigned char replaced[MCOUNT_INSN_SIZE];
14370
14371 + ip = ktla_ktva(ip);
14372 +
14373 /*
14374 * Note: Due to modules and __init, code can
14375 * disappear and change, we need to protect against faulting
14376 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
14377 unsigned char old[MCOUNT_INSN_SIZE], *new;
14378 int ret;
14379
14380 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14381 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14382 new = ftrace_call_replace(ip, (unsigned long)func);
14383 ret = ftrace_modify_code(ip, old, new);
14384
14385 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
14386 {
14387 unsigned char code[MCOUNT_INSN_SIZE];
14388
14389 + ip = ktla_ktva(ip);
14390 +
14391 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14392 return -EFAULT;
14393
14394 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
14395 index 3bb0850..55a56f4 100644
14396 --- a/arch/x86/kernel/head32.c
14397 +++ b/arch/x86/kernel/head32.c
14398 @@ -19,6 +19,7 @@
14399 #include <asm/io_apic.h>
14400 #include <asm/bios_ebda.h>
14401 #include <asm/tlbflush.h>
14402 +#include <asm/boot.h>
14403
14404 static void __init i386_default_early_setup(void)
14405 {
14406 @@ -33,7 +34,7 @@ void __init i386_start_kernel(void)
14407 {
14408 memblock_init();
14409
14410 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14411 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14412
14413 #ifdef CONFIG_BLK_DEV_INITRD
14414 /* Reserve INITRD */
14415 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
14416 index ce0be7c..c41476e 100644
14417 --- a/arch/x86/kernel/head_32.S
14418 +++ b/arch/x86/kernel/head_32.S
14419 @@ -25,6 +25,12 @@
14420 /* Physical address */
14421 #define pa(X) ((X) - __PAGE_OFFSET)
14422
14423 +#ifdef CONFIG_PAX_KERNEXEC
14424 +#define ta(X) (X)
14425 +#else
14426 +#define ta(X) ((X) - __PAGE_OFFSET)
14427 +#endif
14428 +
14429 /*
14430 * References to members of the new_cpu_data structure.
14431 */
14432 @@ -54,11 +60,7 @@
14433 * and small than max_low_pfn, otherwise will waste some page table entries
14434 */
14435
14436 -#if PTRS_PER_PMD > 1
14437 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14438 -#else
14439 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14440 -#endif
14441 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14442
14443 /* Number of possible pages in the lowmem region */
14444 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
14445 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
14446 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14447
14448 /*
14449 + * Real beginning of normal "text" segment
14450 + */
14451 +ENTRY(stext)
14452 +ENTRY(_stext)
14453 +
14454 +/*
14455 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14456 * %esi points to the real-mode code as a 32-bit pointer.
14457 * CS and DS must be 4 GB flat segments, but we don't depend on
14458 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14459 * can.
14460 */
14461 __HEAD
14462 +
14463 +#ifdef CONFIG_PAX_KERNEXEC
14464 + jmp startup_32
14465 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14466 +.fill PAGE_SIZE-5,1,0xcc
14467 +#endif
14468 +
14469 ENTRY(startup_32)
14470 movl pa(stack_start),%ecx
14471
14472 @@ -105,6 +120,57 @@ ENTRY(startup_32)
14473 2:
14474 leal -__PAGE_OFFSET(%ecx),%esp
14475
14476 +#ifdef CONFIG_SMP
14477 + movl $pa(cpu_gdt_table),%edi
14478 + movl $__per_cpu_load,%eax
14479 + movw %ax,__KERNEL_PERCPU + 2(%edi)
14480 + rorl $16,%eax
14481 + movb %al,__KERNEL_PERCPU + 4(%edi)
14482 + movb %ah,__KERNEL_PERCPU + 7(%edi)
14483 + movl $__per_cpu_end - 1,%eax
14484 + subl $__per_cpu_start,%eax
14485 + movw %ax,__KERNEL_PERCPU + 0(%edi)
14486 +#endif
14487 +
14488 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14489 + movl $NR_CPUS,%ecx
14490 + movl $pa(cpu_gdt_table),%edi
14491 +1:
14492 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14493 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14494 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14495 + addl $PAGE_SIZE_asm,%edi
14496 + loop 1b
14497 +#endif
14498 +
14499 +#ifdef CONFIG_PAX_KERNEXEC
14500 + movl $pa(boot_gdt),%edi
14501 + movl $__LOAD_PHYSICAL_ADDR,%eax
14502 + movw %ax,__BOOT_CS + 2(%edi)
14503 + rorl $16,%eax
14504 + movb %al,__BOOT_CS + 4(%edi)
14505 + movb %ah,__BOOT_CS + 7(%edi)
14506 + rorl $16,%eax
14507 +
14508 + ljmp $(__BOOT_CS),$1f
14509 +1:
14510 +
14511 + movl $NR_CPUS,%ecx
14512 + movl $pa(cpu_gdt_table),%edi
14513 + addl $__PAGE_OFFSET,%eax
14514 +1:
14515 + movw %ax,__KERNEL_CS + 2(%edi)
14516 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14517 + rorl $16,%eax
14518 + movb %al,__KERNEL_CS + 4(%edi)
14519 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14520 + movb %ah,__KERNEL_CS + 7(%edi)
14521 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14522 + rorl $16,%eax
14523 + addl $PAGE_SIZE_asm,%edi
14524 + loop 1b
14525 +#endif
14526 +
14527 /*
14528 * Clear BSS first so that there are no surprises...
14529 */
14530 @@ -195,8 +261,11 @@ ENTRY(startup_32)
14531 movl %eax, pa(max_pfn_mapped)
14532
14533 /* Do early initialization of the fixmap area */
14534 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14535 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
14536 +#ifdef CONFIG_COMPAT_VDSO
14537 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
14538 +#else
14539 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
14540 +#endif
14541 #else /* Not PAE */
14542
14543 page_pde_offset = (__PAGE_OFFSET >> 20);
14544 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14545 movl %eax, pa(max_pfn_mapped)
14546
14547 /* Do early initialization of the fixmap area */
14548 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
14549 - movl %eax,pa(initial_page_table+0xffc)
14550 +#ifdef CONFIG_COMPAT_VDSO
14551 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
14552 +#else
14553 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
14554 +#endif
14555 #endif
14556
14557 #ifdef CONFIG_PARAVIRT
14558 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14559 cmpl $num_subarch_entries, %eax
14560 jae bad_subarch
14561
14562 - movl pa(subarch_entries)(,%eax,4), %eax
14563 - subl $__PAGE_OFFSET, %eax
14564 - jmp *%eax
14565 + jmp *pa(subarch_entries)(,%eax,4)
14566
14567 bad_subarch:
14568 WEAK(lguest_entry)
14569 @@ -255,10 +325,10 @@ WEAK(xen_entry)
14570 __INITDATA
14571
14572 subarch_entries:
14573 - .long default_entry /* normal x86/PC */
14574 - .long lguest_entry /* lguest hypervisor */
14575 - .long xen_entry /* Xen hypervisor */
14576 - .long default_entry /* Moorestown MID */
14577 + .long ta(default_entry) /* normal x86/PC */
14578 + .long ta(lguest_entry) /* lguest hypervisor */
14579 + .long ta(xen_entry) /* Xen hypervisor */
14580 + .long ta(default_entry) /* Moorestown MID */
14581 num_subarch_entries = (. - subarch_entries) / 4
14582 .previous
14583 #else
14584 @@ -312,6 +382,7 @@ default_entry:
14585 orl %edx,%eax
14586 movl %eax,%cr4
14587
14588 +#ifdef CONFIG_X86_PAE
14589 testb $X86_CR4_PAE, %al # check if PAE is enabled
14590 jz 6f
14591
14592 @@ -340,6 +411,9 @@ default_entry:
14593 /* Make changes effective */
14594 wrmsr
14595
14596 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14597 +#endif
14598 +
14599 6:
14600
14601 /*
14602 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
14603 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14604 movl %eax,%ss # after changing gdt.
14605
14606 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14607 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14608 movl %eax,%ds
14609 movl %eax,%es
14610
14611 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
14612 */
14613 cmpb $0,ready
14614 jne 1f
14615 - movl $gdt_page,%eax
14616 + movl $cpu_gdt_table,%eax
14617 movl $stack_canary,%ecx
14618 +#ifdef CONFIG_SMP
14619 + addl $__per_cpu_load,%ecx
14620 +#endif
14621 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14622 shrl $16, %ecx
14623 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14624 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14625 1:
14626 -#endif
14627 movl $(__KERNEL_STACK_CANARY),%eax
14628 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14629 + movl $(__USER_DS),%eax
14630 +#else
14631 + xorl %eax,%eax
14632 +#endif
14633 movl %eax,%gs
14634
14635 xorl %eax,%eax # Clear LDT
14636 @@ -558,22 +639,22 @@ early_page_fault:
14637 jmp early_fault
14638
14639 early_fault:
14640 - cld
14641 #ifdef CONFIG_PRINTK
14642 + cmpl $1,%ss:early_recursion_flag
14643 + je hlt_loop
14644 + incl %ss:early_recursion_flag
14645 + cld
14646 pusha
14647 movl $(__KERNEL_DS),%eax
14648 movl %eax,%ds
14649 movl %eax,%es
14650 - cmpl $2,early_recursion_flag
14651 - je hlt_loop
14652 - incl early_recursion_flag
14653 movl %cr2,%eax
14654 pushl %eax
14655 pushl %edx /* trapno */
14656 pushl $fault_msg
14657 call printk
14658 +; call dump_stack
14659 #endif
14660 - call dump_stack
14661 hlt_loop:
14662 hlt
14663 jmp hlt_loop
14664 @@ -581,8 +662,11 @@ hlt_loop:
14665 /* This is the default interrupt "handler" :-) */
14666 ALIGN
14667 ignore_int:
14668 - cld
14669 #ifdef CONFIG_PRINTK
14670 + cmpl $2,%ss:early_recursion_flag
14671 + je hlt_loop
14672 + incl %ss:early_recursion_flag
14673 + cld
14674 pushl %eax
14675 pushl %ecx
14676 pushl %edx
14677 @@ -591,9 +675,6 @@ ignore_int:
14678 movl $(__KERNEL_DS),%eax
14679 movl %eax,%ds
14680 movl %eax,%es
14681 - cmpl $2,early_recursion_flag
14682 - je hlt_loop
14683 - incl early_recursion_flag
14684 pushl 16(%esp)
14685 pushl 24(%esp)
14686 pushl 32(%esp)
14687 @@ -622,29 +703,43 @@ ENTRY(initial_code)
14688 /*
14689 * BSS section
14690 */
14691 -__PAGE_ALIGNED_BSS
14692 - .align PAGE_SIZE
14693 #ifdef CONFIG_X86_PAE
14694 +.section .initial_pg_pmd,"a",@progbits
14695 initial_pg_pmd:
14696 .fill 1024*KPMDS,4,0
14697 #else
14698 +.section .initial_page_table,"a",@progbits
14699 ENTRY(initial_page_table)
14700 .fill 1024,4,0
14701 #endif
14702 +.section .initial_pg_fixmap,"a",@progbits
14703 initial_pg_fixmap:
14704 .fill 1024,4,0
14705 +.section .empty_zero_page,"a",@progbits
14706 ENTRY(empty_zero_page)
14707 .fill 4096,1,0
14708 +.section .swapper_pg_dir,"a",@progbits
14709 ENTRY(swapper_pg_dir)
14710 +#ifdef CONFIG_X86_PAE
14711 + .fill 4,8,0
14712 +#else
14713 .fill 1024,4,0
14714 +#endif
14715 +
14716 +/*
14717 + * The IDT has to be page-aligned to simplify the Pentium
14718 + * F0 0F bug workaround.. We have a special link segment
14719 + * for this.
14720 + */
14721 +.section .idt,"a",@progbits
14722 +ENTRY(idt_table)
14723 + .fill 256,8,0
14724
14725 /*
14726 * This starts the data section.
14727 */
14728 #ifdef CONFIG_X86_PAE
14729 -__PAGE_ALIGNED_DATA
14730 - /* Page-aligned for the benefit of paravirt? */
14731 - .align PAGE_SIZE
14732 +.section .initial_page_table,"a",@progbits
14733 ENTRY(initial_page_table)
14734 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14735 # if KPMDS == 3
14736 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
14737 # error "Kernel PMDs should be 1, 2 or 3"
14738 # endif
14739 .align PAGE_SIZE /* needs to be page-sized too */
14740 +
14741 +#ifdef CONFIG_PAX_PER_CPU_PGD
14742 +ENTRY(cpu_pgd)
14743 + .rept NR_CPUS
14744 + .fill 4,8,0
14745 + .endr
14746 +#endif
14747 +
14748 #endif
14749
14750 .data
14751 .balign 4
14752 ENTRY(stack_start)
14753 - .long init_thread_union+THREAD_SIZE
14754 + .long init_thread_union+THREAD_SIZE-8
14755
14756 +ready: .byte 0
14757 +
14758 +.section .rodata,"a",@progbits
14759 early_recursion_flag:
14760 .long 0
14761
14762 -ready: .byte 0
14763 -
14764 int_msg:
14765 .asciz "Unknown interrupt or fault at: %p %p %p\n"
14766
14767 @@ -707,7 +811,7 @@ fault_msg:
14768 .word 0 # 32 bit align gdt_desc.address
14769 boot_gdt_descr:
14770 .word __BOOT_DS+7
14771 - .long boot_gdt - __PAGE_OFFSET
14772 + .long pa(boot_gdt)
14773
14774 .word 0 # 32-bit align idt_desc.address
14775 idt_descr:
14776 @@ -718,7 +822,7 @@ idt_descr:
14777 .word 0 # 32 bit align gdt_desc.address
14778 ENTRY(early_gdt_descr)
14779 .word GDT_ENTRIES*8-1
14780 - .long gdt_page /* Overwritten for secondary CPUs */
14781 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14782
14783 /*
14784 * The boot_gdt must mirror the equivalent in setup.S and is
14785 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
14786 .align L1_CACHE_BYTES
14787 ENTRY(boot_gdt)
14788 .fill GDT_ENTRY_BOOT_CS,8,0
14789 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14790 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14791 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14792 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14793 +
14794 + .align PAGE_SIZE_asm
14795 +ENTRY(cpu_gdt_table)
14796 + .rept NR_CPUS
14797 + .quad 0x0000000000000000 /* NULL descriptor */
14798 + .quad 0x0000000000000000 /* 0x0b reserved */
14799 + .quad 0x0000000000000000 /* 0x13 reserved */
14800 + .quad 0x0000000000000000 /* 0x1b reserved */
14801 +
14802 +#ifdef CONFIG_PAX_KERNEXEC
14803 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14804 +#else
14805 + .quad 0x0000000000000000 /* 0x20 unused */
14806 +#endif
14807 +
14808 + .quad 0x0000000000000000 /* 0x28 unused */
14809 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14810 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14811 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14812 + .quad 0x0000000000000000 /* 0x4b reserved */
14813 + .quad 0x0000000000000000 /* 0x53 reserved */
14814 + .quad 0x0000000000000000 /* 0x5b reserved */
14815 +
14816 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14817 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14818 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14819 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14820 +
14821 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14822 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14823 +
14824 + /*
14825 + * Segments used for calling PnP BIOS have byte granularity.
14826 + * The code segments and data segments have fixed 64k limits,
14827 + * the transfer segment sizes are set at run time.
14828 + */
14829 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14830 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14831 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14832 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14833 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14834 +
14835 + /*
14836 + * The APM segments have byte granularity and their bases
14837 + * are set at run time. All have 64k limits.
14838 + */
14839 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14840 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14841 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14842 +
14843 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14844 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14845 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14846 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14847 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14848 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14849 +
14850 + /* Be sure this is zeroed to avoid false validations in Xen */
14851 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14852 + .endr
14853 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
14854 index e11e394..3d66dfe 100644
14855 --- a/arch/x86/kernel/head_64.S
14856 +++ b/arch/x86/kernel/head_64.S
14857 @@ -19,6 +19,8 @@
14858 #include <asm/cache.h>
14859 #include <asm/processor-flags.h>
14860 #include <asm/percpu.h>
14861 +#include <asm/cpufeature.h>
14862 +#include <asm/alternative-asm.h>
14863
14864 #ifdef CONFIG_PARAVIRT
14865 #include <asm/asm-offsets.h>
14866 @@ -38,6 +40,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
14867 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14868 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14869 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14870 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14871 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14872 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14873 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14874
14875 .text
14876 __HEAD
14877 @@ -85,35 +91,22 @@ startup_64:
14878 */
14879 addq %rbp, init_level4_pgt + 0(%rip)
14880 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14881 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14882 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14883 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14884
14885 addq %rbp, level3_ident_pgt + 0(%rip)
14886 +#ifndef CONFIG_XEN
14887 + addq %rbp, level3_ident_pgt + 8(%rip)
14888 +#endif
14889
14890 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14891 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14892 -
14893 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14894 -
14895 - /* Add an Identity mapping if I am above 1G */
14896 - leaq _text(%rip), %rdi
14897 - andq $PMD_PAGE_MASK, %rdi
14898 -
14899 - movq %rdi, %rax
14900 - shrq $PUD_SHIFT, %rax
14901 - andq $(PTRS_PER_PUD - 1), %rax
14902 - jz ident_complete
14903 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14904
14905 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14906 - leaq level3_ident_pgt(%rip), %rbx
14907 - movq %rdx, 0(%rbx, %rax, 8)
14908 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14909 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14910
14911 - movq %rdi, %rax
14912 - shrq $PMD_SHIFT, %rax
14913 - andq $(PTRS_PER_PMD - 1), %rax
14914 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14915 - leaq level2_spare_pgt(%rip), %rbx
14916 - movq %rdx, 0(%rbx, %rax, 8)
14917 -ident_complete:
14918 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14919 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14920
14921 /*
14922 * Fixup the kernel text+data virtual addresses. Note that
14923 @@ -160,8 +153,8 @@ ENTRY(secondary_startup_64)
14924 * after the boot processor executes this code.
14925 */
14926
14927 - /* Enable PAE mode and PGE */
14928 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14929 + /* Enable PAE mode and PSE/PGE */
14930 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14931 movq %rax, %cr4
14932
14933 /* Setup early boot stage 4 level pagetables. */
14934 @@ -183,9 +176,16 @@ ENTRY(secondary_startup_64)
14935 movl $MSR_EFER, %ecx
14936 rdmsr
14937 btsl $_EFER_SCE, %eax /* Enable System Call */
14938 - btl $20,%edi /* No Execute supported? */
14939 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14940 jnc 1f
14941 btsl $_EFER_NX, %eax
14942 + leaq init_level4_pgt(%rip), %rdi
14943 +#ifndef CONFIG_EFI
14944 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14945 +#endif
14946 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14947 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14948 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
14949 1: wrmsr /* Make changes effective */
14950
14951 /* Setup cr0 */
14952 @@ -247,6 +247,7 @@ ENTRY(secondary_startup_64)
14953 * jump. In addition we need to ensure %cs is set so we make this
14954 * a far return.
14955 */
14956 + pax_set_fptr_mask
14957 movq initial_code(%rip),%rax
14958 pushq $0 # fake return address to stop unwinder
14959 pushq $__KERNEL_CS # set correct cs
14960 @@ -269,7 +270,7 @@ ENTRY(secondary_startup_64)
14961 bad_address:
14962 jmp bad_address
14963
14964 - .section ".init.text","ax"
14965 + __INIT
14966 #ifdef CONFIG_EARLY_PRINTK
14967 .globl early_idt_handlers
14968 early_idt_handlers:
14969 @@ -314,18 +315,23 @@ ENTRY(early_idt_handler)
14970 #endif /* EARLY_PRINTK */
14971 1: hlt
14972 jmp 1b
14973 + .previous
14974
14975 #ifdef CONFIG_EARLY_PRINTK
14976 + __INITDATA
14977 early_recursion_flag:
14978 .long 0
14979 + .previous
14980
14981 + .section .rodata,"a",@progbits
14982 early_idt_msg:
14983 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14984 early_idt_ripmsg:
14985 .asciz "RIP %s\n"
14986 -#endif /* CONFIG_EARLY_PRINTK */
14987 .previous
14988 +#endif /* CONFIG_EARLY_PRINTK */
14989
14990 + .section .rodata,"a",@progbits
14991 #define NEXT_PAGE(name) \
14992 .balign PAGE_SIZE; \
14993 ENTRY(name)
14994 @@ -338,7 +344,6 @@ ENTRY(name)
14995 i = i + 1 ; \
14996 .endr
14997
14998 - .data
14999 /*
15000 * This default setting generates an ident mapping at address 0x100000
15001 * and a mapping for the kernel that precisely maps virtual address
15002 @@ -349,13 +354,36 @@ NEXT_PAGE(init_level4_pgt)
15003 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15004 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
15005 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15006 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
15007 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
15008 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
15009 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15010 .org init_level4_pgt + L4_START_KERNEL*8, 0
15011 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
15012 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
15013
15014 +#ifdef CONFIG_PAX_PER_CPU_PGD
15015 +NEXT_PAGE(cpu_pgd)
15016 + .rept NR_CPUS
15017 + .fill 512,8,0
15018 + .endr
15019 +#endif
15020 +
15021 NEXT_PAGE(level3_ident_pgt)
15022 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
15023 +#ifdef CONFIG_XEN
15024 .fill 511,8,0
15025 +#else
15026 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
15027 + .fill 510,8,0
15028 +#endif
15029 +
15030 +NEXT_PAGE(level3_vmalloc_pgt)
15031 + .fill 512,8,0
15032 +
15033 +NEXT_PAGE(level3_vmemmap_pgt)
15034 + .fill L3_VMEMMAP_START,8,0
15035 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
15036
15037 NEXT_PAGE(level3_kernel_pgt)
15038 .fill L3_START_KERNEL,8,0
15039 @@ -363,20 +391,23 @@ NEXT_PAGE(level3_kernel_pgt)
15040 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
15041 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15042
15043 +NEXT_PAGE(level2_vmemmap_pgt)
15044 + .fill 512,8,0
15045 +
15046 NEXT_PAGE(level2_fixmap_pgt)
15047 - .fill 506,8,0
15048 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
15049 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
15050 - .fill 5,8,0
15051 + .fill 507,8,0
15052 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
15053 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
15054 + .fill 4,8,0
15055
15056 -NEXT_PAGE(level1_fixmap_pgt)
15057 +NEXT_PAGE(level1_vsyscall_pgt)
15058 .fill 512,8,0
15059
15060 -NEXT_PAGE(level2_ident_pgt)
15061 - /* Since I easily can, map the first 1G.
15062 + /* Since I easily can, map the first 2G.
15063 * Don't set NX because code runs from these pages.
15064 */
15065 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
15066 +NEXT_PAGE(level2_ident_pgt)
15067 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
15068
15069 NEXT_PAGE(level2_kernel_pgt)
15070 /*
15071 @@ -389,33 +420,55 @@ NEXT_PAGE(level2_kernel_pgt)
15072 * If you want to increase this then increase MODULES_VADDR
15073 * too.)
15074 */
15075 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
15076 - KERNEL_IMAGE_SIZE/PMD_SIZE)
15077 -
15078 -NEXT_PAGE(level2_spare_pgt)
15079 - .fill 512, 8, 0
15080 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
15081
15082 #undef PMDS
15083 #undef NEXT_PAGE
15084
15085 - .data
15086 + .align PAGE_SIZE
15087 +ENTRY(cpu_gdt_table)
15088 + .rept NR_CPUS
15089 + .quad 0x0000000000000000 /* NULL descriptor */
15090 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15091 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
15092 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
15093 + .quad 0x00cffb000000ffff /* __USER32_CS */
15094 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15095 + .quad 0x00affb000000ffff /* __USER_CS */
15096 +
15097 +#ifdef CONFIG_PAX_KERNEXEC
15098 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15099 +#else
15100 + .quad 0x0 /* unused */
15101 +#endif
15102 +
15103 + .quad 0,0 /* TSS */
15104 + .quad 0,0 /* LDT */
15105 + .quad 0,0,0 /* three TLS descriptors */
15106 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
15107 + /* asm/segment.h:GDT_ENTRIES must match this */
15108 +
15109 + /* zero the remaining page */
15110 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15111 + .endr
15112 +
15113 .align 16
15114 .globl early_gdt_descr
15115 early_gdt_descr:
15116 .word GDT_ENTRIES*8-1
15117 early_gdt_descr_base:
15118 - .quad INIT_PER_CPU_VAR(gdt_page)
15119 + .quad cpu_gdt_table
15120
15121 ENTRY(phys_base)
15122 /* This must match the first entry in level2_kernel_pgt */
15123 .quad 0x0000000000000000
15124
15125 #include "../../x86/xen/xen-head.S"
15126 -
15127 - .section .bss, "aw", @nobits
15128 +
15129 + .section .rodata,"a",@progbits
15130 .align L1_CACHE_BYTES
15131 ENTRY(idt_table)
15132 - .skip IDT_ENTRIES * 16
15133 + .fill 512,8,0
15134
15135 __PAGE_ALIGNED_BSS
15136 .align PAGE_SIZE
15137 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
15138 index 9c3bd4a..e1d9b35 100644
15139 --- a/arch/x86/kernel/i386_ksyms_32.c
15140 +++ b/arch/x86/kernel/i386_ksyms_32.c
15141 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15142 EXPORT_SYMBOL(cmpxchg8b_emu);
15143 #endif
15144
15145 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
15146 +
15147 /* Networking helper routines. */
15148 EXPORT_SYMBOL(csum_partial_copy_generic);
15149 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15150 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15151
15152 EXPORT_SYMBOL(__get_user_1);
15153 EXPORT_SYMBOL(__get_user_2);
15154 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15155
15156 EXPORT_SYMBOL(csum_partial);
15157 EXPORT_SYMBOL(empty_zero_page);
15158 +
15159 +#ifdef CONFIG_PAX_KERNEXEC
15160 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15161 +#endif
15162 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
15163 index 6104852..6114160 100644
15164 --- a/arch/x86/kernel/i8259.c
15165 +++ b/arch/x86/kernel/i8259.c
15166 @@ -210,7 +210,7 @@ spurious_8259A_irq:
15167 "spurious 8259A interrupt: IRQ%d.\n", irq);
15168 spurious_irq_mask |= irqmask;
15169 }
15170 - atomic_inc(&irq_err_count);
15171 + atomic_inc_unchecked(&irq_err_count);
15172 /*
15173 * Theoretically we do not have to handle this IRQ,
15174 * but in Linux this does not cause problems and is
15175 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
15176 index 43e9ccf..44ccf6f 100644
15177 --- a/arch/x86/kernel/init_task.c
15178 +++ b/arch/x86/kernel/init_task.c
15179 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
15180 * way process stacks are handled. This is done by having a special
15181 * "init_task" linker map entry..
15182 */
15183 -union thread_union init_thread_union __init_task_data =
15184 - { INIT_THREAD_INFO(init_task) };
15185 +union thread_union init_thread_union __init_task_data;
15186
15187 /*
15188 * Initial task structure.
15189 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15190 * section. Since TSS's are completely CPU-local, we want them
15191 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15192 */
15193 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15194 -
15195 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15196 +EXPORT_SYMBOL(init_tss);
15197 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
15198 index 8c96897..be66bfa 100644
15199 --- a/arch/x86/kernel/ioport.c
15200 +++ b/arch/x86/kernel/ioport.c
15201 @@ -6,6 +6,7 @@
15202 #include <linux/sched.h>
15203 #include <linux/kernel.h>
15204 #include <linux/capability.h>
15205 +#include <linux/security.h>
15206 #include <linux/errno.h>
15207 #include <linux/types.h>
15208 #include <linux/ioport.h>
15209 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15210
15211 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15212 return -EINVAL;
15213 +#ifdef CONFIG_GRKERNSEC_IO
15214 + if (turn_on && grsec_disable_privio) {
15215 + gr_handle_ioperm();
15216 + return -EPERM;
15217 + }
15218 +#endif
15219 if (turn_on && !capable(CAP_SYS_RAWIO))
15220 return -EPERM;
15221
15222 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
15223 * because the ->io_bitmap_max value must match the bitmap
15224 * contents:
15225 */
15226 - tss = &per_cpu(init_tss, get_cpu());
15227 + tss = init_tss + get_cpu();
15228
15229 if (turn_on)
15230 bitmap_clear(t->io_bitmap_ptr, from, num);
15231 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
15232 return -EINVAL;
15233 /* Trying to gain more privileges? */
15234 if (level > old) {
15235 +#ifdef CONFIG_GRKERNSEC_IO
15236 + if (grsec_disable_privio) {
15237 + gr_handle_iopl();
15238 + return -EPERM;
15239 + }
15240 +#endif
15241 if (!capable(CAP_SYS_RAWIO))
15242 return -EPERM;
15243 }
15244 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
15245 index 6c0802e..bea25ae 100644
15246 --- a/arch/x86/kernel/irq.c
15247 +++ b/arch/x86/kernel/irq.c
15248 @@ -17,7 +17,7 @@
15249 #include <asm/mce.h>
15250 #include <asm/hw_irq.h>
15251
15252 -atomic_t irq_err_count;
15253 +atomic_unchecked_t irq_err_count;
15254
15255 /* Function pointer for generic interrupt vector handling */
15256 void (*x86_platform_ipi_callback)(void) = NULL;
15257 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
15258 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15259 seq_printf(p, " Machine check polls\n");
15260 #endif
15261 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15262 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15263 #if defined(CONFIG_X86_IO_APIC)
15264 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15265 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15266 #endif
15267 return 0;
15268 }
15269 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15270
15271 u64 arch_irq_stat(void)
15272 {
15273 - u64 sum = atomic_read(&irq_err_count);
15274 + u64 sum = atomic_read_unchecked(&irq_err_count);
15275
15276 #ifdef CONFIG_X86_IO_APIC
15277 - sum += atomic_read(&irq_mis_count);
15278 + sum += atomic_read_unchecked(&irq_mis_count);
15279 #endif
15280 return sum;
15281 }
15282 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
15283 index 7209070..cbcd71a 100644
15284 --- a/arch/x86/kernel/irq_32.c
15285 +++ b/arch/x86/kernel/irq_32.c
15286 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
15287 __asm__ __volatile__("andl %%esp,%0" :
15288 "=r" (sp) : "0" (THREAD_SIZE - 1));
15289
15290 - return sp < (sizeof(struct thread_info) + STACK_WARN);
15291 + return sp < STACK_WARN;
15292 }
15293
15294 static void print_stack_overflow(void)
15295 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(void) { }
15296 * per-CPU IRQ handling contexts (thread information and stack)
15297 */
15298 union irq_ctx {
15299 - struct thread_info tinfo;
15300 - u32 stack[THREAD_SIZE/sizeof(u32)];
15301 + unsigned long previous_esp;
15302 + u32 stack[THREAD_SIZE/sizeof(u32)];
15303 } __attribute__((aligned(THREAD_SIZE)));
15304
15305 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15306 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, void *stack)
15307 static inline int
15308 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15309 {
15310 - union irq_ctx *curctx, *irqctx;
15311 + union irq_ctx *irqctx;
15312 u32 *isp, arg1, arg2;
15313
15314 - curctx = (union irq_ctx *) current_thread_info();
15315 irqctx = __this_cpu_read(hardirq_ctx);
15316
15317 /*
15318 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15319 * handler) we can't do that and just have to keep using the
15320 * current stack (which is the irq stack already after all)
15321 */
15322 - if (unlikely(curctx == irqctx))
15323 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15324 return 0;
15325
15326 /* build the stack frame on the IRQ stack */
15327 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15328 - irqctx->tinfo.task = curctx->tinfo.task;
15329 - irqctx->tinfo.previous_esp = current_stack_pointer;
15330 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15331 + irqctx->previous_esp = current_stack_pointer;
15332
15333 - /*
15334 - * Copy the softirq bits in preempt_count so that the
15335 - * softirq checks work in the hardirq context.
15336 - */
15337 - irqctx->tinfo.preempt_count =
15338 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15339 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15341 + __set_fs(MAKE_MM_SEG(0));
15342 +#endif
15343
15344 if (unlikely(overflow))
15345 call_on_stack(print_stack_overflow, isp);
15346 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15347 : "0" (irq), "1" (desc), "2" (isp),
15348 "D" (desc->handle_irq)
15349 : "memory", "cc", "ecx");
15350 +
15351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15352 + __set_fs(current_thread_info()->addr_limit);
15353 +#endif
15354 +
15355 return 1;
15356 }
15357
15358 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15359 */
15360 void __cpuinit irq_ctx_init(int cpu)
15361 {
15362 - union irq_ctx *irqctx;
15363 -
15364 if (per_cpu(hardirq_ctx, cpu))
15365 return;
15366
15367 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15368 - THREAD_FLAGS,
15369 - THREAD_ORDER));
15370 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15371 - irqctx->tinfo.cpu = cpu;
15372 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15373 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15374 -
15375 - per_cpu(hardirq_ctx, cpu) = irqctx;
15376 -
15377 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
15378 - THREAD_FLAGS,
15379 - THREAD_ORDER));
15380 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
15381 - irqctx->tinfo.cpu = cpu;
15382 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15383 -
15384 - per_cpu(softirq_ctx, cpu) = irqctx;
15385 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15386 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
15387
15388 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15389 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15390 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
15391 asmlinkage void do_softirq(void)
15392 {
15393 unsigned long flags;
15394 - struct thread_info *curctx;
15395 union irq_ctx *irqctx;
15396 u32 *isp;
15397
15398 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
15399 local_irq_save(flags);
15400
15401 if (local_softirq_pending()) {
15402 - curctx = current_thread_info();
15403 irqctx = __this_cpu_read(softirq_ctx);
15404 - irqctx->tinfo.task = curctx->task;
15405 - irqctx->tinfo.previous_esp = current_stack_pointer;
15406 + irqctx->previous_esp = current_stack_pointer;
15407
15408 /* build the stack frame on the softirq stack */
15409 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15410 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15411 +
15412 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15413 + __set_fs(MAKE_MM_SEG(0));
15414 +#endif
15415
15416 call_on_stack(__do_softirq, isp);
15417 +
15418 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15419 + __set_fs(current_thread_info()->addr_limit);
15420 +#endif
15421 +
15422 /*
15423 * Shouldn't happen, we returned above if in_interrupt():
15424 */
15425 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
15426 index 00354d4..187ae44 100644
15427 --- a/arch/x86/kernel/kgdb.c
15428 +++ b/arch/x86/kernel/kgdb.c
15429 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
15430 #ifdef CONFIG_X86_32
15431 switch (regno) {
15432 case GDB_SS:
15433 - if (!user_mode_vm(regs))
15434 + if (!user_mode(regs))
15435 *(unsigned long *)mem = __KERNEL_DS;
15436 break;
15437 case GDB_SP:
15438 - if (!user_mode_vm(regs))
15439 + if (!user_mode(regs))
15440 *(unsigned long *)mem = kernel_stack_pointer(regs);
15441 break;
15442 case GDB_GS:
15443 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
15444 case 'k':
15445 /* clear the trace bit */
15446 linux_regs->flags &= ~X86_EFLAGS_TF;
15447 - atomic_set(&kgdb_cpu_doing_single_step, -1);
15448 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15449
15450 /* set the trace bit if we're stepping */
15451 if (remcomInBuffer[0] == 's') {
15452 linux_regs->flags |= X86_EFLAGS_TF;
15453 - atomic_set(&kgdb_cpu_doing_single_step,
15454 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15455 raw_smp_processor_id());
15456 }
15457
15458 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
15459 return NOTIFY_DONE;
15460
15461 case DIE_DEBUG:
15462 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
15463 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
15464 if (user_mode(regs))
15465 return single_step_cont(regs, args);
15466 break;
15467 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
15468 index 794bc95..c6e29e9 100644
15469 --- a/arch/x86/kernel/kprobes.c
15470 +++ b/arch/x86/kernel/kprobes.c
15471 @@ -117,8 +117,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
15472 } __attribute__((packed)) *insn;
15473
15474 insn = (struct __arch_relative_insn *)from;
15475 +
15476 + pax_open_kernel();
15477 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
15478 insn->op = op;
15479 + pax_close_kernel();
15480 }
15481
15482 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
15483 @@ -155,7 +158,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
15484 kprobe_opcode_t opcode;
15485 kprobe_opcode_t *orig_opcodes = opcodes;
15486
15487 - if (search_exception_tables((unsigned long)opcodes))
15488 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15489 return 0; /* Page fault may occur on this address. */
15490
15491 retry:
15492 @@ -316,7 +319,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15493 }
15494 }
15495 insn_get_length(&insn);
15496 + pax_open_kernel();
15497 memcpy(dest, insn.kaddr, insn.length);
15498 + pax_close_kernel();
15499
15500 #ifdef CONFIG_X86_64
15501 if (insn_rip_relative(&insn)) {
15502 @@ -340,7 +345,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
15503 (u8 *) dest;
15504 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
15505 disp = (u8 *) dest + insn_offset_displacement(&insn);
15506 + pax_open_kernel();
15507 *(s32 *) disp = (s32) newdisp;
15508 + pax_close_kernel();
15509 }
15510 #endif
15511 return insn.length;
15512 @@ -354,12 +361,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
15513 */
15514 __copy_instruction(p->ainsn.insn, p->addr, 0);
15515
15516 - if (can_boost(p->addr))
15517 + if (can_boost(ktla_ktva(p->addr)))
15518 p->ainsn.boostable = 0;
15519 else
15520 p->ainsn.boostable = -1;
15521
15522 - p->opcode = *p->addr;
15523 + p->opcode = *(ktla_ktva(p->addr));
15524 }
15525
15526 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15527 @@ -476,7 +483,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15528 * nor set current_kprobe, because it doesn't use single
15529 * stepping.
15530 */
15531 - regs->ip = (unsigned long)p->ainsn.insn;
15532 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15533 preempt_enable_no_resched();
15534 return;
15535 }
15536 @@ -495,7 +502,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
15537 if (p->opcode == BREAKPOINT_INSTRUCTION)
15538 regs->ip = (unsigned long)p->addr;
15539 else
15540 - regs->ip = (unsigned long)p->ainsn.insn;
15541 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15542 }
15543
15544 /*
15545 @@ -574,7 +581,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
15546 setup_singlestep(p, regs, kcb, 0);
15547 return 1;
15548 }
15549 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
15550 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15551 /*
15552 * The breakpoint instruction was removed right
15553 * after we hit it. Another cpu has removed
15554 @@ -682,6 +689,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
15555 " movq %rax, 152(%rsp)\n"
15556 RESTORE_REGS_STRING
15557 " popfq\n"
15558 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
15559 + " btsq $63,(%rsp)\n"
15560 +#endif
15561 #else
15562 " pushf\n"
15563 SAVE_REGS_STRING
15564 @@ -819,7 +829,7 @@ static void __kprobes resume_execution(struct kprobe *p,
15565 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15566 {
15567 unsigned long *tos = stack_addr(regs);
15568 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15569 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15570 unsigned long orig_ip = (unsigned long)p->addr;
15571 kprobe_opcode_t *insn = p->ainsn.insn;
15572
15573 @@ -1001,7 +1011,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
15574 struct die_args *args = data;
15575 int ret = NOTIFY_DONE;
15576
15577 - if (args->regs && user_mode_vm(args->regs))
15578 + if (args->regs && user_mode(args->regs))
15579 return ret;
15580
15581 switch (val) {
15582 @@ -1383,7 +1393,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15583 * Verify if the address gap is in 2GB range, because this uses
15584 * a relative jump.
15585 */
15586 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
15587 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
15588 if (abs(rel) > 0x7fffffff)
15589 return -ERANGE;
15590
15591 @@ -1404,11 +1414,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
15592 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
15593
15594 /* Set probe function call */
15595 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
15596 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
15597
15598 /* Set returning jmp instruction at the tail of out-of-line buffer */
15599 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
15600 - (u8 *)op->kp.addr + op->optinsn.size);
15601 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
15602
15603 flush_icache_range((unsigned long) buf,
15604 (unsigned long) buf + TMPL_END_IDX +
15605 @@ -1430,7 +1440,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
15606 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
15607
15608 /* Backup instructions which will be replaced by jump address */
15609 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
15610 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
15611 RELATIVE_ADDR_SIZE);
15612
15613 insn_buf[0] = RELATIVEJUMP_OPCODE;
15614 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
15615 index a9c2116..a52d4fc 100644
15616 --- a/arch/x86/kernel/kvm.c
15617 +++ b/arch/x86/kernel/kvm.c
15618 @@ -437,6 +437,7 @@ static void __init paravirt_ops_setup(void)
15619 pv_mmu_ops.set_pud = kvm_set_pud;
15620 #if PAGETABLE_LEVELS == 4
15621 pv_mmu_ops.set_pgd = kvm_set_pgd;
15622 + pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
15623 #endif
15624 #endif
15625 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
15626 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
15627 index ea69726..604d066 100644
15628 --- a/arch/x86/kernel/ldt.c
15629 +++ b/arch/x86/kernel/ldt.c
15630 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
15631 if (reload) {
15632 #ifdef CONFIG_SMP
15633 preempt_disable();
15634 - load_LDT(pc);
15635 + load_LDT_nolock(pc);
15636 if (!cpumask_equal(mm_cpumask(current->mm),
15637 cpumask_of(smp_processor_id())))
15638 smp_call_function(flush_ldt, current->mm, 1);
15639 preempt_enable();
15640 #else
15641 - load_LDT(pc);
15642 + load_LDT_nolock(pc);
15643 #endif
15644 }
15645 if (oldsize) {
15646 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
15647 return err;
15648
15649 for (i = 0; i < old->size; i++)
15650 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15651 + write_ldt_entry(new->ldt, i, old->ldt + i);
15652 return 0;
15653 }
15654
15655 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
15656 retval = copy_ldt(&mm->context, &old_mm->context);
15657 mutex_unlock(&old_mm->context.lock);
15658 }
15659 +
15660 + if (tsk == current) {
15661 + mm->context.vdso = 0;
15662 +
15663 +#ifdef CONFIG_X86_32
15664 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15665 + mm->context.user_cs_base = 0UL;
15666 + mm->context.user_cs_limit = ~0UL;
15667 +
15668 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15669 + cpus_clear(mm->context.cpu_user_cs_mask);
15670 +#endif
15671 +
15672 +#endif
15673 +#endif
15674 +
15675 + }
15676 +
15677 return retval;
15678 }
15679
15680 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
15681 }
15682 }
15683
15684 +#ifdef CONFIG_PAX_SEGMEXEC
15685 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15686 + error = -EINVAL;
15687 + goto out_unlock;
15688 + }
15689 +#endif
15690 +
15691 fill_ldt(&ldt, &ldt_info);
15692 if (oldmode)
15693 ldt.avl = 0;
15694 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
15695 index a3fa43b..8966f4c 100644
15696 --- a/arch/x86/kernel/machine_kexec_32.c
15697 +++ b/arch/x86/kernel/machine_kexec_32.c
15698 @@ -27,7 +27,7 @@
15699 #include <asm/cacheflush.h>
15700 #include <asm/debugreg.h>
15701
15702 -static void set_idt(void *newidt, __u16 limit)
15703 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15704 {
15705 struct desc_ptr curidt;
15706
15707 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
15708 }
15709
15710
15711 -static void set_gdt(void *newgdt, __u16 limit)
15712 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15713 {
15714 struct desc_ptr curgdt;
15715
15716 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15717 }
15718
15719 control_page = page_address(image->control_code_page);
15720 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15721 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15722
15723 relocate_kernel_ptr = control_page;
15724 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15725 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
15726 index 1a1b606..5c89b55 100644
15727 --- a/arch/x86/kernel/microcode_intel.c
15728 +++ b/arch/x86/kernel/microcode_intel.c
15729 @@ -440,13 +440,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
15730
15731 static int get_ucode_user(void *to, const void *from, size_t n)
15732 {
15733 - return copy_from_user(to, from, n);
15734 + return copy_from_user(to, (const void __force_user *)from, n);
15735 }
15736
15737 static enum ucode_state
15738 request_microcode_user(int cpu, const void __user *buf, size_t size)
15739 {
15740 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15741 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
15742 }
15743
15744 static void microcode_fini_cpu(int cpu)
15745 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
15746 index 925179f..85bec6c 100644
15747 --- a/arch/x86/kernel/module.c
15748 +++ b/arch/x86/kernel/module.c
15749 @@ -36,15 +36,60 @@
15750 #define DEBUGP(fmt...)
15751 #endif
15752
15753 -void *module_alloc(unsigned long size)
15754 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
15755 {
15756 if (PAGE_ALIGN(size) > MODULES_LEN)
15757 return NULL;
15758 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
15759 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
15760 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
15761 -1, __builtin_return_address(0));
15762 }
15763
15764 +void *module_alloc(unsigned long size)
15765 +{
15766 +
15767 +#ifdef CONFIG_PAX_KERNEXEC
15768 + return __module_alloc(size, PAGE_KERNEL);
15769 +#else
15770 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15771 +#endif
15772 +
15773 +}
15774 +
15775 +#ifdef CONFIG_PAX_KERNEXEC
15776 +#ifdef CONFIG_X86_32
15777 +void *module_alloc_exec(unsigned long size)
15778 +{
15779 + struct vm_struct *area;
15780 +
15781 + if (size == 0)
15782 + return NULL;
15783 +
15784 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15785 + return area ? area->addr : NULL;
15786 +}
15787 +EXPORT_SYMBOL(module_alloc_exec);
15788 +
15789 +void module_free_exec(struct module *mod, void *module_region)
15790 +{
15791 + vunmap(module_region);
15792 +}
15793 +EXPORT_SYMBOL(module_free_exec);
15794 +#else
15795 +void module_free_exec(struct module *mod, void *module_region)
15796 +{
15797 + module_free(mod, module_region);
15798 +}
15799 +EXPORT_SYMBOL(module_free_exec);
15800 +
15801 +void *module_alloc_exec(unsigned long size)
15802 +{
15803 + return __module_alloc(size, PAGE_KERNEL_RX);
15804 +}
15805 +EXPORT_SYMBOL(module_alloc_exec);
15806 +#endif
15807 +#endif
15808 +
15809 #ifdef CONFIG_X86_32
15810 int apply_relocate(Elf32_Shdr *sechdrs,
15811 const char *strtab,
15812 @@ -55,14 +100,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15813 unsigned int i;
15814 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15815 Elf32_Sym *sym;
15816 - uint32_t *location;
15817 + uint32_t *plocation, location;
15818
15819 DEBUGP("Applying relocate section %u to %u\n", relsec,
15820 sechdrs[relsec].sh_info);
15821 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15822 /* This is where to make the change */
15823 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15824 - + rel[i].r_offset;
15825 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15826 + location = (uint32_t)plocation;
15827 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15828 + plocation = ktla_ktva((void *)plocation);
15829 /* This is the symbol it is referring to. Note that all
15830 undefined symbols have been resolved. */
15831 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15832 @@ -71,11 +118,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15833 switch (ELF32_R_TYPE(rel[i].r_info)) {
15834 case R_386_32:
15835 /* We add the value into the location given */
15836 - *location += sym->st_value;
15837 + pax_open_kernel();
15838 + *plocation += sym->st_value;
15839 + pax_close_kernel();
15840 break;
15841 case R_386_PC32:
15842 /* Add the value, subtract its postition */
15843 - *location += sym->st_value - (uint32_t)location;
15844 + pax_open_kernel();
15845 + *plocation += sym->st_value - location;
15846 + pax_close_kernel();
15847 break;
15848 default:
15849 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15850 @@ -120,21 +171,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
15851 case R_X86_64_NONE:
15852 break;
15853 case R_X86_64_64:
15854 + pax_open_kernel();
15855 *(u64 *)loc = val;
15856 + pax_close_kernel();
15857 break;
15858 case R_X86_64_32:
15859 + pax_open_kernel();
15860 *(u32 *)loc = val;
15861 + pax_close_kernel();
15862 if (val != *(u32 *)loc)
15863 goto overflow;
15864 break;
15865 case R_X86_64_32S:
15866 + pax_open_kernel();
15867 *(s32 *)loc = val;
15868 + pax_close_kernel();
15869 if ((s64)val != *(s32 *)loc)
15870 goto overflow;
15871 break;
15872 case R_X86_64_PC32:
15873 val -= (u64)loc;
15874 + pax_open_kernel();
15875 *(u32 *)loc = val;
15876 + pax_close_kernel();
15877 +
15878 #if 0
15879 if ((s64)val != *(s32 *)loc)
15880 goto overflow;
15881 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
15882 index 676b8c7..870ba04 100644
15883 --- a/arch/x86/kernel/paravirt-spinlocks.c
15884 +++ b/arch/x86/kernel/paravirt-spinlocks.c
15885 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
15886 arch_spin_lock(lock);
15887 }
15888
15889 -struct pv_lock_ops pv_lock_ops = {
15890 +struct pv_lock_ops pv_lock_ops __read_only = {
15891 #ifdef CONFIG_SMP
15892 .spin_is_locked = __ticket_spin_is_locked,
15893 .spin_is_contended = __ticket_spin_is_contended,
15894 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
15895 index d90272e..2d54e8e 100644
15896 --- a/arch/x86/kernel/paravirt.c
15897 +++ b/arch/x86/kernel/paravirt.c
15898 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15899 {
15900 return x;
15901 }
15902 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15903 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15904 +#endif
15905
15906 void __init default_banner(void)
15907 {
15908 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 type)
15909 .pv_lock_ops = pv_lock_ops,
15910 #endif
15911 };
15912 +
15913 + pax_track_stack();
15914 +
15915 return *((void **)&tmpl + type);
15916 }
15917
15918 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
15919 if (opfunc == NULL)
15920 /* If there's no function, patch it with a ud2a (BUG) */
15921 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15922 - else if (opfunc == _paravirt_nop)
15923 + else if (opfunc == (void *)_paravirt_nop)
15924 /* If the operation is a nop, then nop the callsite */
15925 ret = paravirt_patch_nop();
15926
15927 /* identity functions just return their single argument */
15928 - else if (opfunc == _paravirt_ident_32)
15929 + else if (opfunc == (void *)_paravirt_ident_32)
15930 ret = paravirt_patch_ident_32(insnbuf, len);
15931 - else if (opfunc == _paravirt_ident_64)
15932 + else if (opfunc == (void *)_paravirt_ident_64)
15933 ret = paravirt_patch_ident_64(insnbuf, len);
15934 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15935 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15936 + ret = paravirt_patch_ident_64(insnbuf, len);
15937 +#endif
15938
15939 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15940 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15941 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
15942 if (insn_len > len || start == NULL)
15943 insn_len = len;
15944 else
15945 - memcpy(insnbuf, start, insn_len);
15946 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15947
15948 return insn_len;
15949 }
15950 @@ -302,7 +312,7 @@ void arch_flush_lazy_mmu_mode(void)
15951 preempt_enable();
15952 }
15953
15954 -struct pv_info pv_info = {
15955 +struct pv_info pv_info __read_only = {
15956 .name = "bare hardware",
15957 .paravirt_enabled = 0,
15958 .kernel_rpl = 0,
15959 @@ -313,16 +323,16 @@ struct pv_info pv_info = {
15960 #endif
15961 };
15962
15963 -struct pv_init_ops pv_init_ops = {
15964 +struct pv_init_ops pv_init_ops __read_only = {
15965 .patch = native_patch,
15966 };
15967
15968 -struct pv_time_ops pv_time_ops = {
15969 +struct pv_time_ops pv_time_ops __read_only = {
15970 .sched_clock = native_sched_clock,
15971 .steal_clock = native_steal_clock,
15972 };
15973
15974 -struct pv_irq_ops pv_irq_ops = {
15975 +struct pv_irq_ops pv_irq_ops __read_only = {
15976 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15977 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15978 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15979 @@ -334,7 +344,7 @@ struct pv_irq_ops pv_irq_ops = {
15980 #endif
15981 };
15982
15983 -struct pv_cpu_ops pv_cpu_ops = {
15984 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15985 .cpuid = native_cpuid,
15986 .get_debugreg = native_get_debugreg,
15987 .set_debugreg = native_set_debugreg,
15988 @@ -395,21 +405,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15989 .end_context_switch = paravirt_nop,
15990 };
15991
15992 -struct pv_apic_ops pv_apic_ops = {
15993 +struct pv_apic_ops pv_apic_ops __read_only = {
15994 #ifdef CONFIG_X86_LOCAL_APIC
15995 .startup_ipi_hook = paravirt_nop,
15996 #endif
15997 };
15998
15999 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
16000 +#ifdef CONFIG_X86_32
16001 +#ifdef CONFIG_X86_PAE
16002 +/* 64-bit pagetable entries */
16003 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
16004 +#else
16005 /* 32-bit pagetable entries */
16006 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
16007 +#endif
16008 #else
16009 /* 64-bit pagetable entries */
16010 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
16011 #endif
16012
16013 -struct pv_mmu_ops pv_mmu_ops = {
16014 +struct pv_mmu_ops pv_mmu_ops __read_only = {
16015
16016 .read_cr2 = native_read_cr2,
16017 .write_cr2 = native_write_cr2,
16018 @@ -459,6 +474,7 @@ struct pv_mmu_ops pv_mmu_ops = {
16019 .make_pud = PTE_IDENT,
16020
16021 .set_pgd = native_set_pgd,
16022 + .set_pgd_batched = native_set_pgd_batched,
16023 #endif
16024 #endif /* PAGETABLE_LEVELS >= 3 */
16025
16026 @@ -478,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
16027 },
16028
16029 .set_fixmap = native_set_fixmap,
16030 +
16031 +#ifdef CONFIG_PAX_KERNEXEC
16032 + .pax_open_kernel = native_pax_open_kernel,
16033 + .pax_close_kernel = native_pax_close_kernel,
16034 +#endif
16035 +
16036 };
16037
16038 EXPORT_SYMBOL_GPL(pv_time_ops);
16039 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
16040 index 35ccf75..67e7d4d 100644
16041 --- a/arch/x86/kernel/pci-iommu_table.c
16042 +++ b/arch/x86/kernel/pci-iommu_table.c
16043 @@ -2,7 +2,7 @@
16044 #include <asm/iommu_table.h>
16045 #include <linux/string.h>
16046 #include <linux/kallsyms.h>
16047 -
16048 +#include <linux/sched.h>
16049
16050 #define DEBUG 1
16051
16052 @@ -51,6 +51,8 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
16053 {
16054 struct iommu_table_entry *p, *q, *x;
16055
16056 + pax_track_stack();
16057 +
16058 /* Simple cyclic dependency checker. */
16059 for (p = start; p < finish; p++) {
16060 q = find_dependents_of(start, finish, p);
16061 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
16062 index e7e3b01..43c5af3 100644
16063 --- a/arch/x86/kernel/process.c
16064 +++ b/arch/x86/kernel/process.c
16065 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
16066
16067 void free_thread_info(struct thread_info *ti)
16068 {
16069 - free_thread_xstate(ti->task);
16070 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16071 }
16072
16073 +static struct kmem_cache *task_struct_cachep;
16074 +
16075 void arch_task_cache_init(void)
16076 {
16077 - task_xstate_cachep =
16078 - kmem_cache_create("task_xstate", xstate_size,
16079 + /* create a slab on which task_structs can be allocated */
16080 + task_struct_cachep =
16081 + kmem_cache_create("task_struct", sizeof(struct task_struct),
16082 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16083 +
16084 + task_xstate_cachep =
16085 + kmem_cache_create("task_xstate", xstate_size,
16086 __alignof__(union thread_xstate),
16087 - SLAB_PANIC | SLAB_NOTRACK, NULL);
16088 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16089 +}
16090 +
16091 +struct task_struct *alloc_task_struct_node(int node)
16092 +{
16093 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
16094 +}
16095 +
16096 +void free_task_struct(struct task_struct *task)
16097 +{
16098 + free_thread_xstate(task);
16099 + kmem_cache_free(task_struct_cachep, task);
16100 }
16101
16102 /*
16103 @@ -70,7 +87,7 @@ void exit_thread(void)
16104 unsigned long *bp = t->io_bitmap_ptr;
16105
16106 if (bp) {
16107 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16108 + struct tss_struct *tss = init_tss + get_cpu();
16109
16110 t->io_bitmap_ptr = NULL;
16111 clear_thread_flag(TIF_IO_BITMAP);
16112 @@ -106,7 +123,7 @@ void show_regs_common(void)
16113
16114 printk(KERN_CONT "\n");
16115 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
16116 - current->pid, current->comm, print_tainted(),
16117 + task_pid_nr(current), current->comm, print_tainted(),
16118 init_utsname()->release,
16119 (int)strcspn(init_utsname()->version, " "),
16120 init_utsname()->version);
16121 @@ -120,6 +137,9 @@ void flush_thread(void)
16122 {
16123 struct task_struct *tsk = current;
16124
16125 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16126 + loadsegment(gs, 0);
16127 +#endif
16128 flush_ptrace_hw_breakpoint(tsk);
16129 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
16130 /*
16131 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
16132 regs.di = (unsigned long) arg;
16133
16134 #ifdef CONFIG_X86_32
16135 - regs.ds = __USER_DS;
16136 - regs.es = __USER_DS;
16137 + regs.ds = __KERNEL_DS;
16138 + regs.es = __KERNEL_DS;
16139 regs.fs = __KERNEL_PERCPU;
16140 - regs.gs = __KERNEL_STACK_CANARY;
16141 + savesegment(gs, regs.gs);
16142 #else
16143 regs.ss = __KERNEL_DS;
16144 #endif
16145 @@ -403,7 +423,7 @@ void default_idle(void)
16146 EXPORT_SYMBOL(default_idle);
16147 #endif
16148
16149 -void stop_this_cpu(void *dummy)
16150 +__noreturn void stop_this_cpu(void *dummy)
16151 {
16152 local_irq_disable();
16153 /*
16154 @@ -645,16 +665,37 @@ static int __init idle_setup(char *str)
16155 }
16156 early_param("idle", idle_setup);
16157
16158 -unsigned long arch_align_stack(unsigned long sp)
16159 +#ifdef CONFIG_PAX_RANDKSTACK
16160 +void pax_randomize_kstack(struct pt_regs *regs)
16161 {
16162 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16163 - sp -= get_random_int() % 8192;
16164 - return sp & ~0xf;
16165 -}
16166 + struct thread_struct *thread = &current->thread;
16167 + unsigned long time;
16168
16169 -unsigned long arch_randomize_brk(struct mm_struct *mm)
16170 -{
16171 - unsigned long range_end = mm->brk + 0x02000000;
16172 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16173 -}
16174 + if (!randomize_va_space)
16175 + return;
16176 +
16177 + if (v8086_mode(regs))
16178 + return;
16179
16180 + rdtscl(time);
16181 +
16182 + /* P4 seems to return a 0 LSB, ignore it */
16183 +#ifdef CONFIG_MPENTIUM4
16184 + time &= 0x3EUL;
16185 + time <<= 2;
16186 +#elif defined(CONFIG_X86_64)
16187 + time &= 0xFUL;
16188 + time <<= 4;
16189 +#else
16190 + time &= 0x1FUL;
16191 + time <<= 3;
16192 +#endif
16193 +
16194 + thread->sp0 ^= time;
16195 + load_sp0(init_tss + smp_processor_id(), thread);
16196 +
16197 +#ifdef CONFIG_X86_64
16198 + percpu_write(kernel_stack, thread->sp0);
16199 +#endif
16200 +}
16201 +#endif
16202 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
16203 index 7a3b651..5a946f6 100644
16204 --- a/arch/x86/kernel/process_32.c
16205 +++ b/arch/x86/kernel/process_32.c
16206 @@ -66,6 +66,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
16207 unsigned long thread_saved_pc(struct task_struct *tsk)
16208 {
16209 return ((unsigned long *)tsk->thread.sp)[3];
16210 +//XXX return tsk->thread.eip;
16211 }
16212
16213 #ifndef CONFIG_SMP
16214 @@ -128,15 +129,14 @@ void __show_regs(struct pt_regs *regs, int all)
16215 unsigned long sp;
16216 unsigned short ss, gs;
16217
16218 - if (user_mode_vm(regs)) {
16219 + if (user_mode(regs)) {
16220 sp = regs->sp;
16221 ss = regs->ss & 0xffff;
16222 - gs = get_user_gs(regs);
16223 } else {
16224 sp = kernel_stack_pointer(regs);
16225 savesegment(ss, ss);
16226 - savesegment(gs, gs);
16227 }
16228 + gs = get_user_gs(regs);
16229
16230 show_regs_common();
16231
16232 @@ -198,13 +198,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16233 struct task_struct *tsk;
16234 int err;
16235
16236 - childregs = task_pt_regs(p);
16237 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16238 *childregs = *regs;
16239 childregs->ax = 0;
16240 childregs->sp = sp;
16241
16242 p->thread.sp = (unsigned long) childregs;
16243 p->thread.sp0 = (unsigned long) (childregs+1);
16244 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16245
16246 p->thread.ip = (unsigned long) ret_from_fork;
16247
16248 @@ -294,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16249 struct thread_struct *prev = &prev_p->thread,
16250 *next = &next_p->thread;
16251 int cpu = smp_processor_id();
16252 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16253 + struct tss_struct *tss = init_tss + cpu;
16254 bool preload_fpu;
16255
16256 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16257 @@ -329,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16258 */
16259 lazy_save_gs(prev->gs);
16260
16261 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16262 + __set_fs(task_thread_info(next_p)->addr_limit);
16263 +#endif
16264 +
16265 /*
16266 * Load the per-thread Thread-Local Storage descriptor.
16267 */
16268 @@ -364,6 +369,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16269 */
16270 arch_end_context_switch(next_p);
16271
16272 + percpu_write(current_task, next_p);
16273 + percpu_write(current_tinfo, &next_p->tinfo);
16274 +
16275 if (preload_fpu)
16276 __math_state_restore();
16277
16278 @@ -373,8 +381,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16279 if (prev->gs | next->gs)
16280 lazy_load_gs(next->gs);
16281
16282 - percpu_write(current_task, next_p);
16283 -
16284 return prev_p;
16285 }
16286
16287 @@ -404,4 +410,3 @@ unsigned long get_wchan(struct task_struct *p)
16288 } while (count++ < 16);
16289 return 0;
16290 }
16291 -
16292 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
16293 index f693e44..3c979b2 100644
16294 --- a/arch/x86/kernel/process_64.c
16295 +++ b/arch/x86/kernel/process_64.c
16296 @@ -88,7 +88,7 @@ static void __exit_idle(void)
16297 void exit_idle(void)
16298 {
16299 /* idle loop has pid 0 */
16300 - if (current->pid)
16301 + if (task_pid_nr(current))
16302 return;
16303 __exit_idle();
16304 }
16305 @@ -262,8 +262,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16306 struct pt_regs *childregs;
16307 struct task_struct *me = current;
16308
16309 - childregs = ((struct pt_regs *)
16310 - (THREAD_SIZE + task_stack_page(p))) - 1;
16311 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16312 *childregs = *regs;
16313
16314 childregs->ax = 0;
16315 @@ -275,6 +274,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
16316 p->thread.sp = (unsigned long) childregs;
16317 p->thread.sp0 = (unsigned long) (childregs+1);
16318 p->thread.usersp = me->thread.usersp;
16319 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16320
16321 set_tsk_thread_flag(p, TIF_FORK);
16322
16323 @@ -377,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16324 struct thread_struct *prev = &prev_p->thread;
16325 struct thread_struct *next = &next_p->thread;
16326 int cpu = smp_processor_id();
16327 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
16328 + struct tss_struct *tss = init_tss + cpu;
16329 unsigned fsindex, gsindex;
16330 bool preload_fpu;
16331
16332 @@ -473,10 +473,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
16333 prev->usersp = percpu_read(old_rsp);
16334 percpu_write(old_rsp, next->usersp);
16335 percpu_write(current_task, next_p);
16336 + percpu_write(current_tinfo, &next_p->tinfo);
16337
16338 - percpu_write(kernel_stack,
16339 - (unsigned long)task_stack_page(next_p) +
16340 - THREAD_SIZE - KERNEL_STACK_OFFSET);
16341 + percpu_write(kernel_stack, next->sp0);
16342
16343 /*
16344 * Now maybe reload the debug registers and handle I/O bitmaps
16345 @@ -538,12 +537,11 @@ unsigned long get_wchan(struct task_struct *p)
16346 if (!p || p == current || p->state == TASK_RUNNING)
16347 return 0;
16348 stack = (unsigned long)task_stack_page(p);
16349 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16350 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16351 return 0;
16352 fp = *(u64 *)(p->thread.sp);
16353 do {
16354 - if (fp < (unsigned long)stack ||
16355 - fp >= (unsigned long)stack+THREAD_SIZE)
16356 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16357 return 0;
16358 ip = *(u64 *)(fp+8);
16359 if (!in_sched_functions(ip))
16360 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
16361 index 8252879..d3219e0 100644
16362 --- a/arch/x86/kernel/ptrace.c
16363 +++ b/arch/x86/kernel/ptrace.c
16364 @@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request,
16365 unsigned long addr, unsigned long data)
16366 {
16367 int ret;
16368 - unsigned long __user *datap = (unsigned long __user *)data;
16369 + unsigned long __user *datap = (__force unsigned long __user *)data;
16370
16371 switch (request) {
16372 /* read the word at location addr in the USER area. */
16373 @@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request,
16374 if ((int) addr < 0)
16375 return -EIO;
16376 ret = do_get_thread_area(child, addr,
16377 - (struct user_desc __user *)data);
16378 + (__force struct user_desc __user *) data);
16379 break;
16380
16381 case PTRACE_SET_THREAD_AREA:
16382 if ((int) addr < 0)
16383 return -EIO;
16384 ret = do_set_thread_area(child, addr,
16385 - (struct user_desc __user *)data, 0);
16386 + (__force struct user_desc __user *) data, 0);
16387 break;
16388 #endif
16389
16390 @@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
16391 memset(info, 0, sizeof(*info));
16392 info->si_signo = SIGTRAP;
16393 info->si_code = si_code;
16394 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
16395 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
16396 }
16397
16398 void user_single_step_siginfo(struct task_struct *tsk,
16399 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
16400 index 42eb330..139955c 100644
16401 --- a/arch/x86/kernel/pvclock.c
16402 +++ b/arch/x86/kernel/pvclock.c
16403 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
16404 return pv_tsc_khz;
16405 }
16406
16407 -static atomic64_t last_value = ATOMIC64_INIT(0);
16408 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
16409
16410 void pvclock_resume(void)
16411 {
16412 - atomic64_set(&last_value, 0);
16413 + atomic64_set_unchecked(&last_value, 0);
16414 }
16415
16416 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16417 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
16418 * updating at the same time, and one of them could be slightly behind,
16419 * making the assumption that last_value always go forward fail to hold.
16420 */
16421 - last = atomic64_read(&last_value);
16422 + last = atomic64_read_unchecked(&last_value);
16423 do {
16424 if (ret < last)
16425 return last;
16426 - last = atomic64_cmpxchg(&last_value, last, ret);
16427 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
16428 } while (unlikely(last != ret));
16429
16430 return ret;
16431 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
16432 index 9242436..753954d 100644
16433 --- a/arch/x86/kernel/reboot.c
16434 +++ b/arch/x86/kernel/reboot.c
16435 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
16436 EXPORT_SYMBOL(pm_power_off);
16437
16438 static const struct desc_ptr no_idt = {};
16439 -static int reboot_mode;
16440 +static unsigned short reboot_mode;
16441 enum reboot_type reboot_type = BOOT_ACPI;
16442 int reboot_force;
16443
16444 @@ -315,13 +315,17 @@ core_initcall(reboot_init);
16445 extern const unsigned char machine_real_restart_asm[];
16446 extern const u64 machine_real_restart_gdt[3];
16447
16448 -void machine_real_restart(unsigned int type)
16449 +__noreturn void machine_real_restart(unsigned int type)
16450 {
16451 void *restart_va;
16452 unsigned long restart_pa;
16453 - void (*restart_lowmem)(unsigned int);
16454 + void (* __noreturn restart_lowmem)(unsigned int);
16455 u64 *lowmem_gdt;
16456
16457 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16458 + struct desc_struct *gdt;
16459 +#endif
16460 +
16461 local_irq_disable();
16462
16463 /* Write zero to CMOS register number 0x0f, which the BIOS POST
16464 @@ -347,14 +351,14 @@ void machine_real_restart(unsigned int type)
16465 boot)". This seems like a fairly standard thing that gets set by
16466 REBOOT.COM programs, and the previous reset routine did this
16467 too. */
16468 - *((unsigned short *)0x472) = reboot_mode;
16469 + *(unsigned short *)(__va(0x472)) = reboot_mode;
16470
16471 /* Patch the GDT in the low memory trampoline */
16472 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
16473
16474 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
16475 restart_pa = virt_to_phys(restart_va);
16476 - restart_lowmem = (void (*)(unsigned int))restart_pa;
16477 + restart_lowmem = (void *)restart_pa;
16478
16479 /* GDT[0]: GDT self-pointer */
16480 lowmem_gdt[0] =
16481 @@ -365,7 +369,33 @@ void machine_real_restart(unsigned int type)
16482 GDT_ENTRY(0x009b, restart_pa, 0xffff);
16483
16484 /* Jump to the identity-mapped low memory code */
16485 +
16486 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16487 + gdt = get_cpu_gdt_table(smp_processor_id());
16488 + pax_open_kernel();
16489 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16490 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
16491 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
16492 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
16493 +#endif
16494 +#ifdef CONFIG_PAX_KERNEXEC
16495 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
16496 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
16497 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
16498 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
16499 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
16500 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
16501 +#endif
16502 + pax_close_kernel();
16503 +#endif
16504 +
16505 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16506 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
16507 + unreachable();
16508 +#else
16509 restart_lowmem(type);
16510 +#endif
16511 +
16512 }
16513 #ifdef CONFIG_APM_MODULE
16514 EXPORT_SYMBOL(machine_real_restart);
16515 @@ -523,7 +553,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
16516 * try to force a triple fault and then cycle between hitting the keyboard
16517 * controller and doing that
16518 */
16519 -static void native_machine_emergency_restart(void)
16520 +__noreturn static void native_machine_emergency_restart(void)
16521 {
16522 int i;
16523 int attempt = 0;
16524 @@ -647,13 +677,13 @@ void native_machine_shutdown(void)
16525 #endif
16526 }
16527
16528 -static void __machine_emergency_restart(int emergency)
16529 +static __noreturn void __machine_emergency_restart(int emergency)
16530 {
16531 reboot_emergency = emergency;
16532 machine_ops.emergency_restart();
16533 }
16534
16535 -static void native_machine_restart(char *__unused)
16536 +static __noreturn void native_machine_restart(char *__unused)
16537 {
16538 printk("machine restart\n");
16539
16540 @@ -662,7 +692,7 @@ static void native_machine_restart(char *__unused)
16541 __machine_emergency_restart(0);
16542 }
16543
16544 -static void native_machine_halt(void)
16545 +static __noreturn void native_machine_halt(void)
16546 {
16547 /* stop other cpus and apics */
16548 machine_shutdown();
16549 @@ -673,7 +703,7 @@ static void native_machine_halt(void)
16550 stop_this_cpu(NULL);
16551 }
16552
16553 -static void native_machine_power_off(void)
16554 +__noreturn static void native_machine_power_off(void)
16555 {
16556 if (pm_power_off) {
16557 if (!reboot_force)
16558 @@ -682,6 +712,7 @@ static void native_machine_power_off(void)
16559 }
16560 /* a fallback in case there is no PM info available */
16561 tboot_shutdown(TB_SHUTDOWN_HALT);
16562 + unreachable();
16563 }
16564
16565 struct machine_ops machine_ops = {
16566 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
16567 index 7a6f3b3..bed145d7 100644
16568 --- a/arch/x86/kernel/relocate_kernel_64.S
16569 +++ b/arch/x86/kernel/relocate_kernel_64.S
16570 @@ -11,6 +11,7 @@
16571 #include <asm/kexec.h>
16572 #include <asm/processor-flags.h>
16573 #include <asm/pgtable_types.h>
16574 +#include <asm/alternative-asm.h>
16575
16576 /*
16577 * Must be relocatable PIC code callable as a C function
16578 @@ -160,13 +161,14 @@ identity_mapped:
16579 xorq %rbp, %rbp
16580 xorq %r8, %r8
16581 xorq %r9, %r9
16582 - xorq %r10, %r9
16583 + xorq %r10, %r10
16584 xorq %r11, %r11
16585 xorq %r12, %r12
16586 xorq %r13, %r13
16587 xorq %r14, %r14
16588 xorq %r15, %r15
16589
16590 + pax_force_retaddr 0, 1
16591 ret
16592
16593 1:
16594 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
16595 index afaf384..1a101fe 100644
16596 --- a/arch/x86/kernel/setup.c
16597 +++ b/arch/x86/kernel/setup.c
16598 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
16599
16600 switch (data->type) {
16601 case SETUP_E820_EXT:
16602 - parse_e820_ext(data);
16603 + parse_e820_ext((struct setup_data __force_kernel *)data);
16604 break;
16605 case SETUP_DTB:
16606 add_dtb(pa_data);
16607 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
16608 * area (640->1Mb) as ram even though it is not.
16609 * take them out.
16610 */
16611 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
16612 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
16613 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
16614 }
16615
16616 @@ -773,14 +773,14 @@ void __init setup_arch(char **cmdline_p)
16617
16618 if (!boot_params.hdr.root_flags)
16619 root_mountflags &= ~MS_RDONLY;
16620 - init_mm.start_code = (unsigned long) _text;
16621 - init_mm.end_code = (unsigned long) _etext;
16622 + init_mm.start_code = ktla_ktva((unsigned long) _text);
16623 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
16624 init_mm.end_data = (unsigned long) _edata;
16625 init_mm.brk = _brk_end;
16626
16627 - code_resource.start = virt_to_phys(_text);
16628 - code_resource.end = virt_to_phys(_etext)-1;
16629 - data_resource.start = virt_to_phys(_etext);
16630 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16631 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16632 + data_resource.start = virt_to_phys(_sdata);
16633 data_resource.end = virt_to_phys(_edata)-1;
16634 bss_resource.start = virt_to_phys(&__bss_start);
16635 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16636 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
16637 index 71f4727..16dc9f7 100644
16638 --- a/arch/x86/kernel/setup_percpu.c
16639 +++ b/arch/x86/kernel/setup_percpu.c
16640 @@ -21,19 +21,17 @@
16641 #include <asm/cpu.h>
16642 #include <asm/stackprotector.h>
16643
16644 -DEFINE_PER_CPU(int, cpu_number);
16645 +#ifdef CONFIG_SMP
16646 +DEFINE_PER_CPU(unsigned int, cpu_number);
16647 EXPORT_PER_CPU_SYMBOL(cpu_number);
16648 +#endif
16649
16650 -#ifdef CONFIG_X86_64
16651 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16652 -#else
16653 -#define BOOT_PERCPU_OFFSET 0
16654 -#endif
16655
16656 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16657 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16658
16659 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16660 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16661 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16662 };
16663 EXPORT_SYMBOL(__per_cpu_offset);
16664 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
16665 {
16666 #ifdef CONFIG_X86_32
16667 struct desc_struct gdt;
16668 + unsigned long base = per_cpu_offset(cpu);
16669
16670 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16671 - 0x2 | DESCTYPE_S, 0x8);
16672 - gdt.s = 1;
16673 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16674 + 0x83 | DESCTYPE_S, 0xC);
16675 write_gdt_entry(get_cpu_gdt_table(cpu),
16676 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16677 #endif
16678 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
16679 /* alrighty, percpu areas up and running */
16680 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16681 for_each_possible_cpu(cpu) {
16682 +#ifdef CONFIG_CC_STACKPROTECTOR
16683 +#ifdef CONFIG_X86_32
16684 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16685 +#endif
16686 +#endif
16687 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16688 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16689 per_cpu(cpu_number, cpu) = cpu;
16690 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
16691 */
16692 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
16693 #endif
16694 +#ifdef CONFIG_CC_STACKPROTECTOR
16695 +#ifdef CONFIG_X86_32
16696 + if (!cpu)
16697 + per_cpu(stack_canary.canary, cpu) = canary;
16698 +#endif
16699 +#endif
16700 /*
16701 * Up to this point, the boot CPU has been using .init.data
16702 * area. Reload any changed state for the boot CPU.
16703 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
16704 index 54ddaeb2..a6aa4d2 100644
16705 --- a/arch/x86/kernel/signal.c
16706 +++ b/arch/x86/kernel/signal.c
16707 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
16708 * Align the stack pointer according to the i386 ABI,
16709 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16710 */
16711 - sp = ((sp + 4) & -16ul) - 4;
16712 + sp = ((sp - 12) & -16ul) - 4;
16713 #else /* !CONFIG_X86_32 */
16714 sp = round_down(sp, 16) - 8;
16715 #endif
16716 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
16717 * Return an always-bogus address instead so we will die with SIGSEGV.
16718 */
16719 if (onsigstack && !likely(on_sig_stack(sp)))
16720 - return (void __user *)-1L;
16721 + return (__force void __user *)-1L;
16722
16723 /* save i387 state */
16724 if (used_math() && save_i387_xstate(*fpstate) < 0)
16725 - return (void __user *)-1L;
16726 + return (__force void __user *)-1L;
16727
16728 return (void __user *)sp;
16729 }
16730 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16731 }
16732
16733 if (current->mm->context.vdso)
16734 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16735 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16736 else
16737 - restorer = &frame->retcode;
16738 + restorer = (void __user *)&frame->retcode;
16739 if (ka->sa.sa_flags & SA_RESTORER)
16740 restorer = ka->sa.sa_restorer;
16741
16742 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
16743 * reasons and because gdb uses it as a signature to notice
16744 * signal handler stack frames.
16745 */
16746 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16747 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16748
16749 if (err)
16750 return -EFAULT;
16751 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16752 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16753
16754 /* Set up to return from userspace. */
16755 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16756 + if (current->mm->context.vdso)
16757 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16758 + else
16759 + restorer = (void __user *)&frame->retcode;
16760 if (ka->sa.sa_flags & SA_RESTORER)
16761 restorer = ka->sa.sa_restorer;
16762 put_user_ex(restorer, &frame->pretcode);
16763 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
16764 * reasons and because gdb uses it as a signature to notice
16765 * signal handler stack frames.
16766 */
16767 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16768 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16769 } put_user_catch(err);
16770
16771 if (err)
16772 @@ -762,6 +765,8 @@ static void do_signal(struct pt_regs *regs)
16773 siginfo_t info;
16774 int signr;
16775
16776 + pax_track_stack();
16777 +
16778 /*
16779 * We want the common case to go fast, which is why we may in certain
16780 * cases get here from kernel mode. Just return without doing anything
16781 @@ -769,7 +774,7 @@ static void do_signal(struct pt_regs *regs)
16782 * X86_32: vm86 regs switched out by assembly code before reaching
16783 * here, so testing against kernel CS suffices.
16784 */
16785 - if (!user_mode(regs))
16786 + if (!user_mode_novm(regs))
16787 return;
16788
16789 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
16790 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
16791 index 9f548cb..caf76f7 100644
16792 --- a/arch/x86/kernel/smpboot.c
16793 +++ b/arch/x86/kernel/smpboot.c
16794 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
16795 set_idle_for_cpu(cpu, c_idle.idle);
16796 do_rest:
16797 per_cpu(current_task, cpu) = c_idle.idle;
16798 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16799 #ifdef CONFIG_X86_32
16800 /* Stack for startup_32 can be just as for start_secondary onwards */
16801 irq_ctx_init(cpu);
16802 #else
16803 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16804 initial_gs = per_cpu_offset(cpu);
16805 - per_cpu(kernel_stack, cpu) =
16806 - (unsigned long)task_stack_page(c_idle.idle) -
16807 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16808 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16809 #endif
16810 +
16811 + pax_open_kernel();
16812 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16813 + pax_close_kernel();
16814 +
16815 initial_code = (unsigned long)start_secondary;
16816 stack_start = c_idle.idle->thread.sp;
16817
16818 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
16819
16820 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16821
16822 +#ifdef CONFIG_PAX_PER_CPU_PGD
16823 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16824 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16825 + KERNEL_PGD_PTRS);
16826 +#endif
16827 +
16828 err = do_boot_cpu(apicid, cpu);
16829 if (err) {
16830 pr_debug("do_boot_cpu failed %d\n", err);
16831 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
16832 index c346d11..d43b163 100644
16833 --- a/arch/x86/kernel/step.c
16834 +++ b/arch/x86/kernel/step.c
16835 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16836 struct desc_struct *desc;
16837 unsigned long base;
16838
16839 - seg &= ~7UL;
16840 + seg >>= 3;
16841
16842 mutex_lock(&child->mm->context.lock);
16843 - if (unlikely((seg >> 3) >= child->mm->context.size))
16844 + if (unlikely(seg >= child->mm->context.size))
16845 addr = -1L; /* bogus selector, access would fault */
16846 else {
16847 desc = child->mm->context.ldt + seg;
16848 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
16849 addr += base;
16850 }
16851 mutex_unlock(&child->mm->context.lock);
16852 - }
16853 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16854 + addr = ktla_ktva(addr);
16855
16856 return addr;
16857 }
16858 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
16859 unsigned char opcode[15];
16860 unsigned long addr = convert_ip_to_linear(child, regs);
16861
16862 + if (addr == -EINVAL)
16863 + return 0;
16864 +
16865 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16866 for (i = 0; i < copied; i++) {
16867 switch (opcode[i]) {
16868 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
16869 index 0b0cb5f..db6b9ed 100644
16870 --- a/arch/x86/kernel/sys_i386_32.c
16871 +++ b/arch/x86/kernel/sys_i386_32.c
16872 @@ -24,17 +24,224 @@
16873
16874 #include <asm/syscalls.h>
16875
16876 -/*
16877 - * Do a system call from kernel instead of calling sys_execve so we
16878 - * end up with proper pt_regs.
16879 - */
16880 -int kernel_execve(const char *filename,
16881 - const char *const argv[],
16882 - const char *const envp[])
16883 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16884 +{
16885 + unsigned long pax_task_size = TASK_SIZE;
16886 +
16887 +#ifdef CONFIG_PAX_SEGMEXEC
16888 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16889 + pax_task_size = SEGMEXEC_TASK_SIZE;
16890 +#endif
16891 +
16892 + if (len > pax_task_size || addr > pax_task_size - len)
16893 + return -EINVAL;
16894 +
16895 + return 0;
16896 +}
16897 +
16898 +unsigned long
16899 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16900 + unsigned long len, unsigned long pgoff, unsigned long flags)
16901 +{
16902 + struct mm_struct *mm = current->mm;
16903 + struct vm_area_struct *vma;
16904 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16905 +
16906 +#ifdef CONFIG_PAX_SEGMEXEC
16907 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16908 + pax_task_size = SEGMEXEC_TASK_SIZE;
16909 +#endif
16910 +
16911 + pax_task_size -= PAGE_SIZE;
16912 +
16913 + if (len > pax_task_size)
16914 + return -ENOMEM;
16915 +
16916 + if (flags & MAP_FIXED)
16917 + return addr;
16918 +
16919 +#ifdef CONFIG_PAX_RANDMMAP
16920 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16921 +#endif
16922 +
16923 + if (addr) {
16924 + addr = PAGE_ALIGN(addr);
16925 + if (pax_task_size - len >= addr) {
16926 + vma = find_vma(mm, addr);
16927 + if (check_heap_stack_gap(vma, addr, len))
16928 + return addr;
16929 + }
16930 + }
16931 + if (len > mm->cached_hole_size) {
16932 + start_addr = addr = mm->free_area_cache;
16933 + } else {
16934 + start_addr = addr = mm->mmap_base;
16935 + mm->cached_hole_size = 0;
16936 + }
16937 +
16938 +#ifdef CONFIG_PAX_PAGEEXEC
16939 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16940 + start_addr = 0x00110000UL;
16941 +
16942 +#ifdef CONFIG_PAX_RANDMMAP
16943 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16944 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16945 +#endif
16946 +
16947 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16948 + start_addr = addr = mm->mmap_base;
16949 + else
16950 + addr = start_addr;
16951 + }
16952 +#endif
16953 +
16954 +full_search:
16955 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16956 + /* At this point: (!vma || addr < vma->vm_end). */
16957 + if (pax_task_size - len < addr) {
16958 + /*
16959 + * Start a new search - just in case we missed
16960 + * some holes.
16961 + */
16962 + if (start_addr != mm->mmap_base) {
16963 + start_addr = addr = mm->mmap_base;
16964 + mm->cached_hole_size = 0;
16965 + goto full_search;
16966 + }
16967 + return -ENOMEM;
16968 + }
16969 + if (check_heap_stack_gap(vma, addr, len))
16970 + break;
16971 + if (addr + mm->cached_hole_size < vma->vm_start)
16972 + mm->cached_hole_size = vma->vm_start - addr;
16973 + addr = vma->vm_end;
16974 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16975 + start_addr = addr = mm->mmap_base;
16976 + mm->cached_hole_size = 0;
16977 + goto full_search;
16978 + }
16979 + }
16980 +
16981 + /*
16982 + * Remember the place where we stopped the search:
16983 + */
16984 + mm->free_area_cache = addr + len;
16985 + return addr;
16986 +}
16987 +
16988 +unsigned long
16989 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16990 + const unsigned long len, const unsigned long pgoff,
16991 + const unsigned long flags)
16992 {
16993 - long __res;
16994 - asm volatile ("int $0x80"
16995 - : "=a" (__res)
16996 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
16997 - return __res;
16998 + struct vm_area_struct *vma;
16999 + struct mm_struct *mm = current->mm;
17000 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
17001 +
17002 +#ifdef CONFIG_PAX_SEGMEXEC
17003 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17004 + pax_task_size = SEGMEXEC_TASK_SIZE;
17005 +#endif
17006 +
17007 + pax_task_size -= PAGE_SIZE;
17008 +
17009 + /* requested length too big for entire address space */
17010 + if (len > pax_task_size)
17011 + return -ENOMEM;
17012 +
17013 + if (flags & MAP_FIXED)
17014 + return addr;
17015 +
17016 +#ifdef CONFIG_PAX_PAGEEXEC
17017 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
17018 + goto bottomup;
17019 +#endif
17020 +
17021 +#ifdef CONFIG_PAX_RANDMMAP
17022 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17023 +#endif
17024 +
17025 + /* requesting a specific address */
17026 + if (addr) {
17027 + addr = PAGE_ALIGN(addr);
17028 + if (pax_task_size - len >= addr) {
17029 + vma = find_vma(mm, addr);
17030 + if (check_heap_stack_gap(vma, addr, len))
17031 + return addr;
17032 + }
17033 + }
17034 +
17035 + /* check if free_area_cache is useful for us */
17036 + if (len <= mm->cached_hole_size) {
17037 + mm->cached_hole_size = 0;
17038 + mm->free_area_cache = mm->mmap_base;
17039 + }
17040 +
17041 + /* either no address requested or can't fit in requested address hole */
17042 + addr = mm->free_area_cache;
17043 +
17044 + /* make sure it can fit in the remaining address space */
17045 + if (addr > len) {
17046 + vma = find_vma(mm, addr-len);
17047 + if (check_heap_stack_gap(vma, addr - len, len))
17048 + /* remember the address as a hint for next time */
17049 + return (mm->free_area_cache = addr-len);
17050 + }
17051 +
17052 + if (mm->mmap_base < len)
17053 + goto bottomup;
17054 +
17055 + addr = mm->mmap_base-len;
17056 +
17057 + do {
17058 + /*
17059 + * Lookup failure means no vma is above this address,
17060 + * else if new region fits below vma->vm_start,
17061 + * return with success:
17062 + */
17063 + vma = find_vma(mm, addr);
17064 + if (check_heap_stack_gap(vma, addr, len))
17065 + /* remember the address as a hint for next time */
17066 + return (mm->free_area_cache = addr);
17067 +
17068 + /* remember the largest hole we saw so far */
17069 + if (addr + mm->cached_hole_size < vma->vm_start)
17070 + mm->cached_hole_size = vma->vm_start - addr;
17071 +
17072 + /* try just below the current vma->vm_start */
17073 + addr = skip_heap_stack_gap(vma, len);
17074 + } while (!IS_ERR_VALUE(addr));
17075 +
17076 +bottomup:
17077 + /*
17078 + * A failed mmap() very likely causes application failure,
17079 + * so fall back to the bottom-up function here. This scenario
17080 + * can happen with large stack limits and large mmap()
17081 + * allocations.
17082 + */
17083 +
17084 +#ifdef CONFIG_PAX_SEGMEXEC
17085 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
17086 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17087 + else
17088 +#endif
17089 +
17090 + mm->mmap_base = TASK_UNMAPPED_BASE;
17091 +
17092 +#ifdef CONFIG_PAX_RANDMMAP
17093 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17094 + mm->mmap_base += mm->delta_mmap;
17095 +#endif
17096 +
17097 + mm->free_area_cache = mm->mmap_base;
17098 + mm->cached_hole_size = ~0UL;
17099 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17100 + /*
17101 + * Restore the topdown base:
17102 + */
17103 + mm->mmap_base = base;
17104 + mm->free_area_cache = base;
17105 + mm->cached_hole_size = ~0UL;
17106 +
17107 + return addr;
17108 }
17109 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
17110 index ff14a50..35626c3 100644
17111 --- a/arch/x86/kernel/sys_x86_64.c
17112 +++ b/arch/x86/kernel/sys_x86_64.c
17113 @@ -32,8 +32,8 @@ out:
17114 return error;
17115 }
17116
17117 -static void find_start_end(unsigned long flags, unsigned long *begin,
17118 - unsigned long *end)
17119 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
17120 + unsigned long *begin, unsigned long *end)
17121 {
17122 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17123 unsigned long new_begin;
17124 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
17125 *begin = new_begin;
17126 }
17127 } else {
17128 - *begin = TASK_UNMAPPED_BASE;
17129 + *begin = mm->mmap_base;
17130 *end = TASK_SIZE;
17131 }
17132 }
17133 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
17134 if (flags & MAP_FIXED)
17135 return addr;
17136
17137 - find_start_end(flags, &begin, &end);
17138 + find_start_end(mm, flags, &begin, &end);
17139
17140 if (len > end)
17141 return -ENOMEM;
17142
17143 +#ifdef CONFIG_PAX_RANDMMAP
17144 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17145 +#endif
17146 +
17147 if (addr) {
17148 addr = PAGE_ALIGN(addr);
17149 vma = find_vma(mm, addr);
17150 - if (end - len >= addr &&
17151 - (!vma || addr + len <= vma->vm_start))
17152 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17153 return addr;
17154 }
17155 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17156 @@ -106,7 +109,7 @@ full_search:
17157 }
17158 return -ENOMEM;
17159 }
17160 - if (!vma || addr + len <= vma->vm_start) {
17161 + if (check_heap_stack_gap(vma, addr, len)) {
17162 /*
17163 * Remember the place where we stopped the search:
17164 */
17165 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17166 {
17167 struct vm_area_struct *vma;
17168 struct mm_struct *mm = current->mm;
17169 - unsigned long addr = addr0;
17170 + unsigned long base = mm->mmap_base, addr = addr0;
17171
17172 /* requested length too big for entire address space */
17173 if (len > TASK_SIZE)
17174 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17175 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17176 goto bottomup;
17177
17178 +#ifdef CONFIG_PAX_RANDMMAP
17179 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17180 +#endif
17181 +
17182 /* requesting a specific address */
17183 if (addr) {
17184 addr = PAGE_ALIGN(addr);
17185 - vma = find_vma(mm, addr);
17186 - if (TASK_SIZE - len >= addr &&
17187 - (!vma || addr + len <= vma->vm_start))
17188 - return addr;
17189 + if (TASK_SIZE - len >= addr) {
17190 + vma = find_vma(mm, addr);
17191 + if (check_heap_stack_gap(vma, addr, len))
17192 + return addr;
17193 + }
17194 }
17195
17196 /* check if free_area_cache is useful for us */
17197 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17198 /* make sure it can fit in the remaining address space */
17199 if (addr > len) {
17200 vma = find_vma(mm, addr-len);
17201 - if (!vma || addr <= vma->vm_start)
17202 + if (check_heap_stack_gap(vma, addr - len, len))
17203 /* remember the address as a hint for next time */
17204 return mm->free_area_cache = addr-len;
17205 }
17206 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17207 * return with success:
17208 */
17209 vma = find_vma(mm, addr);
17210 - if (!vma || addr+len <= vma->vm_start)
17211 + if (check_heap_stack_gap(vma, addr, len))
17212 /* remember the address as a hint for next time */
17213 return mm->free_area_cache = addr;
17214
17215 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
17216 mm->cached_hole_size = vma->vm_start - addr;
17217
17218 /* try just below the current vma->vm_start */
17219 - addr = vma->vm_start-len;
17220 - } while (len < vma->vm_start);
17221 + addr = skip_heap_stack_gap(vma, len);
17222 + } while (!IS_ERR_VALUE(addr));
17223
17224 bottomup:
17225 /*
17226 @@ -198,13 +206,21 @@ bottomup:
17227 * can happen with large stack limits and large mmap()
17228 * allocations.
17229 */
17230 + mm->mmap_base = TASK_UNMAPPED_BASE;
17231 +
17232 +#ifdef CONFIG_PAX_RANDMMAP
17233 + if (mm->pax_flags & MF_PAX_RANDMMAP)
17234 + mm->mmap_base += mm->delta_mmap;
17235 +#endif
17236 +
17237 + mm->free_area_cache = mm->mmap_base;
17238 mm->cached_hole_size = ~0UL;
17239 - mm->free_area_cache = TASK_UNMAPPED_BASE;
17240 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17241 /*
17242 * Restore the topdown base:
17243 */
17244 - mm->free_area_cache = mm->mmap_base;
17245 + mm->mmap_base = base;
17246 + mm->free_area_cache = base;
17247 mm->cached_hole_size = ~0UL;
17248
17249 return addr;
17250 diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
17251 index bc19be3..0f5fbf7 100644
17252 --- a/arch/x86/kernel/syscall_table_32.S
17253 +++ b/arch/x86/kernel/syscall_table_32.S
17254 @@ -1,3 +1,4 @@
17255 +.section .rodata,"a",@progbits
17256 ENTRY(sys_call_table)
17257 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
17258 .long sys_exit
17259 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
17260 index e07a2fc..db0369d 100644
17261 --- a/arch/x86/kernel/tboot.c
17262 +++ b/arch/x86/kernel/tboot.c
17263 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
17264
17265 void tboot_shutdown(u32 shutdown_type)
17266 {
17267 - void (*shutdown)(void);
17268 + void (* __noreturn shutdown)(void);
17269
17270 if (!tboot_enabled())
17271 return;
17272 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
17273
17274 switch_to_tboot_pt();
17275
17276 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17277 + shutdown = (void *)tboot->shutdown_entry;
17278 shutdown();
17279
17280 /* should not reach here */
17281 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
17282 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17283 }
17284
17285 -static atomic_t ap_wfs_count;
17286 +static atomic_unchecked_t ap_wfs_count;
17287
17288 static int tboot_wait_for_aps(int num_aps)
17289 {
17290 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
17291 {
17292 switch (action) {
17293 case CPU_DYING:
17294 - atomic_inc(&ap_wfs_count);
17295 + atomic_inc_unchecked(&ap_wfs_count);
17296 if (num_online_cpus() == 1)
17297 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17298 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17299 return NOTIFY_BAD;
17300 break;
17301 }
17302 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
17303
17304 tboot_create_trampoline();
17305
17306 - atomic_set(&ap_wfs_count, 0);
17307 + atomic_set_unchecked(&ap_wfs_count, 0);
17308 register_hotcpu_notifier(&tboot_cpu_notifier);
17309 return 0;
17310 }
17311 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
17312 index 5a64d05..804587b 100644
17313 --- a/arch/x86/kernel/time.c
17314 +++ b/arch/x86/kernel/time.c
17315 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
17316 {
17317 unsigned long pc = instruction_pointer(regs);
17318
17319 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17320 + if (!user_mode(regs) && in_lock_functions(pc)) {
17321 #ifdef CONFIG_FRAME_POINTER
17322 - return *(unsigned long *)(regs->bp + sizeof(long));
17323 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17324 #else
17325 unsigned long *sp =
17326 (unsigned long *)kernel_stack_pointer(regs);
17327 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
17328 * or above a saved flags. Eflags has bits 22-31 zero,
17329 * kernel addresses don't.
17330 */
17331 +
17332 +#ifdef CONFIG_PAX_KERNEXEC
17333 + return ktla_ktva(sp[0]);
17334 +#else
17335 if (sp[0] >> 22)
17336 return sp[0];
17337 if (sp[1] >> 22)
17338 return sp[1];
17339 #endif
17340 +
17341 +#endif
17342 }
17343 return pc;
17344 }
17345 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
17346 index 6bb7b85..dd853e1 100644
17347 --- a/arch/x86/kernel/tls.c
17348 +++ b/arch/x86/kernel/tls.c
17349 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
17350 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17351 return -EINVAL;
17352
17353 +#ifdef CONFIG_PAX_SEGMEXEC
17354 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17355 + return -EINVAL;
17356 +#endif
17357 +
17358 set_tls_desc(p, idx, &info, 1);
17359
17360 return 0;
17361 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
17362 index 451c0a7..e57f551 100644
17363 --- a/arch/x86/kernel/trampoline_32.S
17364 +++ b/arch/x86/kernel/trampoline_32.S
17365 @@ -32,6 +32,12 @@
17366 #include <asm/segment.h>
17367 #include <asm/page_types.h>
17368
17369 +#ifdef CONFIG_PAX_KERNEXEC
17370 +#define ta(X) (X)
17371 +#else
17372 +#define ta(X) ((X) - __PAGE_OFFSET)
17373 +#endif
17374 +
17375 #ifdef CONFIG_SMP
17376
17377 .section ".x86_trampoline","a"
17378 @@ -62,7 +68,7 @@ r_base = .
17379 inc %ax # protected mode (PE) bit
17380 lmsw %ax # into protected mode
17381 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17382 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17383 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
17384
17385 # These need to be in the same 64K segment as the above;
17386 # hence we don't use the boot_gdt_descr defined in head.S
17387 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
17388 index 09ff517..df19fbff 100644
17389 --- a/arch/x86/kernel/trampoline_64.S
17390 +++ b/arch/x86/kernel/trampoline_64.S
17391 @@ -90,7 +90,7 @@ startup_32:
17392 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17393 movl %eax, %ds
17394
17395 - movl $X86_CR4_PAE, %eax
17396 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17397 movl %eax, %cr4 # Enable PAE mode
17398
17399 # Setup trampoline 4 level pagetables
17400 @@ -138,7 +138,7 @@ tidt:
17401 # so the kernel can live anywhere
17402 .balign 4
17403 tgdt:
17404 - .short tgdt_end - tgdt # gdt limit
17405 + .short tgdt_end - tgdt - 1 # gdt limit
17406 .long tgdt - r_base
17407 .short 0
17408 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17409 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
17410 index 6913369..7e7dff6 100644
17411 --- a/arch/x86/kernel/traps.c
17412 +++ b/arch/x86/kernel/traps.c
17413 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
17414
17415 /* Do we ignore FPU interrupts ? */
17416 char ignore_fpu_irq;
17417 -
17418 -/*
17419 - * The IDT has to be page-aligned to simplify the Pentium
17420 - * F0 0F bug workaround.
17421 - */
17422 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17423 #endif
17424
17425 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17426 @@ -117,13 +111,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
17427 }
17428
17429 static void __kprobes
17430 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17431 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17432 long error_code, siginfo_t *info)
17433 {
17434 struct task_struct *tsk = current;
17435
17436 #ifdef CONFIG_X86_32
17437 - if (regs->flags & X86_VM_MASK) {
17438 + if (v8086_mode(regs)) {
17439 /*
17440 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17441 * On nmi (interrupt 2), do_trap should not be called.
17442 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17443 }
17444 #endif
17445
17446 - if (!user_mode(regs))
17447 + if (!user_mode_novm(regs))
17448 goto kernel_trap;
17449
17450 #ifdef CONFIG_X86_32
17451 @@ -157,7 +151,7 @@ trap_signal:
17452 printk_ratelimit()) {
17453 printk(KERN_INFO
17454 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17455 - tsk->comm, tsk->pid, str,
17456 + tsk->comm, task_pid_nr(tsk), str,
17457 regs->ip, regs->sp, error_code);
17458 print_vma_addr(" in ", regs->ip);
17459 printk("\n");
17460 @@ -174,8 +168,20 @@ kernel_trap:
17461 if (!fixup_exception(regs)) {
17462 tsk->thread.error_code = error_code;
17463 tsk->thread.trap_no = trapnr;
17464 +
17465 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17466 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17467 + str = "PAX: suspicious stack segment fault";
17468 +#endif
17469 +
17470 die(str, regs, error_code);
17471 }
17472 +
17473 +#ifdef CONFIG_PAX_REFCOUNT
17474 + if (trapnr == 4)
17475 + pax_report_refcount_overflow(regs);
17476 +#endif
17477 +
17478 return;
17479
17480 #ifdef CONFIG_X86_32
17481 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
17482 conditional_sti(regs);
17483
17484 #ifdef CONFIG_X86_32
17485 - if (regs->flags & X86_VM_MASK)
17486 + if (v8086_mode(regs))
17487 goto gp_in_vm86;
17488 #endif
17489
17490 tsk = current;
17491 - if (!user_mode(regs))
17492 + if (!user_mode_novm(regs))
17493 goto gp_in_kernel;
17494
17495 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17496 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17497 + struct mm_struct *mm = tsk->mm;
17498 + unsigned long limit;
17499 +
17500 + down_write(&mm->mmap_sem);
17501 + limit = mm->context.user_cs_limit;
17502 + if (limit < TASK_SIZE) {
17503 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17504 + up_write(&mm->mmap_sem);
17505 + return;
17506 + }
17507 + up_write(&mm->mmap_sem);
17508 + }
17509 +#endif
17510 +
17511 tsk->thread.error_code = error_code;
17512 tsk->thread.trap_no = 13;
17513
17514 @@ -304,6 +326,13 @@ gp_in_kernel:
17515 if (notify_die(DIE_GPF, "general protection fault", regs,
17516 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17517 return;
17518 +
17519 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17520 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17521 + die("PAX: suspicious general protection fault", regs, error_code);
17522 + else
17523 +#endif
17524 +
17525 die("general protection fault", regs, error_code);
17526 }
17527
17528 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
17529 dotraplinkage notrace __kprobes void
17530 do_nmi(struct pt_regs *regs, long error_code)
17531 {
17532 +
17533 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17534 + if (!user_mode(regs)) {
17535 + unsigned long cs = regs->cs & 0xFFFF;
17536 + unsigned long ip = ktva_ktla(regs->ip);
17537 +
17538 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17539 + regs->ip = ip;
17540 + }
17541 +#endif
17542 +
17543 nmi_enter();
17544
17545 inc_irq_stat(__nmi_count);
17546 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17547 /* It's safe to allow irq's after DR6 has been saved */
17548 preempt_conditional_sti(regs);
17549
17550 - if (regs->flags & X86_VM_MASK) {
17551 + if (v8086_mode(regs)) {
17552 handle_vm86_trap((struct kernel_vm86_regs *) regs,
17553 error_code, 1);
17554 preempt_conditional_cli(regs);
17555 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
17556 * We already checked v86 mode above, so we can check for kernel mode
17557 * by just checking the CPL of CS.
17558 */
17559 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
17560 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
17561 tsk->thread.debugreg6 &= ~DR_STEP;
17562 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
17563 regs->flags &= ~X86_EFLAGS_TF;
17564 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
17565 return;
17566 conditional_sti(regs);
17567
17568 - if (!user_mode_vm(regs))
17569 + if (!user_mode(regs))
17570 {
17571 if (!fixup_exception(regs)) {
17572 task->thread.error_code = error_code;
17573 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
17574 void __math_state_restore(void)
17575 {
17576 struct thread_info *thread = current_thread_info();
17577 - struct task_struct *tsk = thread->task;
17578 + struct task_struct *tsk = current;
17579
17580 /*
17581 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17582 @@ -750,8 +790,7 @@ void __math_state_restore(void)
17583 */
17584 asmlinkage void math_state_restore(void)
17585 {
17586 - struct thread_info *thread = current_thread_info();
17587 - struct task_struct *tsk = thread->task;
17588 + struct task_struct *tsk = current;
17589
17590 if (!tsk_used_math(tsk)) {
17591 local_irq_enable();
17592 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
17593 index b9242ba..50c5edd 100644
17594 --- a/arch/x86/kernel/verify_cpu.S
17595 +++ b/arch/x86/kernel/verify_cpu.S
17596 @@ -20,6 +20,7 @@
17597 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17598 * arch/x86/kernel/trampoline_64.S: secondary processor verification
17599 * arch/x86/kernel/head_32.S: processor startup
17600 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17601 *
17602 * verify_cpu, returns the status of longmode and SSE in register %eax.
17603 * 0: Success 1: Failure
17604 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
17605 index 863f875..4307295 100644
17606 --- a/arch/x86/kernel/vm86_32.c
17607 +++ b/arch/x86/kernel/vm86_32.c
17608 @@ -41,6 +41,7 @@
17609 #include <linux/ptrace.h>
17610 #include <linux/audit.h>
17611 #include <linux/stddef.h>
17612 +#include <linux/grsecurity.h>
17613
17614 #include <asm/uaccess.h>
17615 #include <asm/io.h>
17616 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
17617 do_exit(SIGSEGV);
17618 }
17619
17620 - tss = &per_cpu(init_tss, get_cpu());
17621 + tss = init_tss + get_cpu();
17622 current->thread.sp0 = current->thread.saved_sp0;
17623 current->thread.sysenter_cs = __KERNEL_CS;
17624 load_sp0(tss, &current->thread);
17625 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
17626 struct task_struct *tsk;
17627 int tmp, ret = -EPERM;
17628
17629 +#ifdef CONFIG_GRKERNSEC_VM86
17630 + if (!capable(CAP_SYS_RAWIO)) {
17631 + gr_handle_vm86();
17632 + goto out;
17633 + }
17634 +#endif
17635 +
17636 tsk = current;
17637 if (tsk->thread.saved_sp0)
17638 goto out;
17639 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
17640 int tmp, ret;
17641 struct vm86plus_struct __user *v86;
17642
17643 +#ifdef CONFIG_GRKERNSEC_VM86
17644 + if (!capable(CAP_SYS_RAWIO)) {
17645 + gr_handle_vm86();
17646 + ret = -EPERM;
17647 + goto out;
17648 + }
17649 +#endif
17650 +
17651 tsk = current;
17652 switch (cmd) {
17653 case VM86_REQUEST_IRQ:
17654 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
17655 tsk->thread.saved_fs = info->regs32->fs;
17656 tsk->thread.saved_gs = get_user_gs(info->regs32);
17657
17658 - tss = &per_cpu(init_tss, get_cpu());
17659 + tss = init_tss + get_cpu();
17660 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17661 if (cpu_has_sep)
17662 tsk->thread.sysenter_cs = 0;
17663 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
17664 goto cannot_handle;
17665 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17666 goto cannot_handle;
17667 - intr_ptr = (unsigned long __user *) (i << 2);
17668 + intr_ptr = (__force unsigned long __user *) (i << 2);
17669 if (get_user(segoffs, intr_ptr))
17670 goto cannot_handle;
17671 if ((segoffs >> 16) == BIOSSEG)
17672 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
17673 index 0f703f1..9e15f64 100644
17674 --- a/arch/x86/kernel/vmlinux.lds.S
17675 +++ b/arch/x86/kernel/vmlinux.lds.S
17676 @@ -26,6 +26,13 @@
17677 #include <asm/page_types.h>
17678 #include <asm/cache.h>
17679 #include <asm/boot.h>
17680 +#include <asm/segment.h>
17681 +
17682 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17683 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17684 +#else
17685 +#define __KERNEL_TEXT_OFFSET 0
17686 +#endif
17687
17688 #undef i386 /* in case the preprocessor is a 32bit one */
17689
17690 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
17691
17692 PHDRS {
17693 text PT_LOAD FLAGS(5); /* R_E */
17694 +#ifdef CONFIG_X86_32
17695 + module PT_LOAD FLAGS(5); /* R_E */
17696 +#endif
17697 +#ifdef CONFIG_XEN
17698 + rodata PT_LOAD FLAGS(5); /* R_E */
17699 +#else
17700 + rodata PT_LOAD FLAGS(4); /* R__ */
17701 +#endif
17702 data PT_LOAD FLAGS(6); /* RW_ */
17703 -#ifdef CONFIG_X86_64
17704 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17705 #ifdef CONFIG_SMP
17706 percpu PT_LOAD FLAGS(6); /* RW_ */
17707 #endif
17708 + text.init PT_LOAD FLAGS(5); /* R_E */
17709 + text.exit PT_LOAD FLAGS(5); /* R_E */
17710 init PT_LOAD FLAGS(7); /* RWE */
17711 -#endif
17712 note PT_NOTE FLAGS(0); /* ___ */
17713 }
17714
17715 SECTIONS
17716 {
17717 #ifdef CONFIG_X86_32
17718 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17719 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17720 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17721 #else
17722 - . = __START_KERNEL;
17723 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17724 + . = __START_KERNEL;
17725 #endif
17726
17727 /* Text and read-only data */
17728 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17729 - _text = .;
17730 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17731 /* bootstrapping code */
17732 +#ifdef CONFIG_X86_32
17733 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17734 +#else
17735 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17736 +#endif
17737 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17738 + _text = .;
17739 HEAD_TEXT
17740 #ifdef CONFIG_X86_32
17741 . = ALIGN(PAGE_SIZE);
17742 @@ -108,13 +128,47 @@ SECTIONS
17743 IRQENTRY_TEXT
17744 *(.fixup)
17745 *(.gnu.warning)
17746 - /* End of text section */
17747 - _etext = .;
17748 } :text = 0x9090
17749
17750 - NOTES :text :note
17751 + . += __KERNEL_TEXT_OFFSET;
17752
17753 - EXCEPTION_TABLE(16) :text = 0x9090
17754 +#ifdef CONFIG_X86_32
17755 + . = ALIGN(PAGE_SIZE);
17756 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17757 +
17758 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17759 + MODULES_EXEC_VADDR = .;
17760 + BYTE(0)
17761 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17762 + . = ALIGN(HPAGE_SIZE);
17763 + MODULES_EXEC_END = . - 1;
17764 +#endif
17765 +
17766 + } :module
17767 +#endif
17768 +
17769 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17770 + /* End of text section */
17771 + _etext = . - __KERNEL_TEXT_OFFSET;
17772 + }
17773 +
17774 +#ifdef CONFIG_X86_32
17775 + . = ALIGN(PAGE_SIZE);
17776 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17777 + *(.idt)
17778 + . = ALIGN(PAGE_SIZE);
17779 + *(.empty_zero_page)
17780 + *(.initial_pg_fixmap)
17781 + *(.initial_pg_pmd)
17782 + *(.initial_page_table)
17783 + *(.swapper_pg_dir)
17784 + } :rodata
17785 +#endif
17786 +
17787 + . = ALIGN(PAGE_SIZE);
17788 + NOTES :rodata :note
17789 +
17790 + EXCEPTION_TABLE(16) :rodata
17791
17792 #if defined(CONFIG_DEBUG_RODATA)
17793 /* .text should occupy whole number of pages */
17794 @@ -126,16 +180,20 @@ SECTIONS
17795
17796 /* Data */
17797 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17798 +
17799 +#ifdef CONFIG_PAX_KERNEXEC
17800 + . = ALIGN(HPAGE_SIZE);
17801 +#else
17802 + . = ALIGN(PAGE_SIZE);
17803 +#endif
17804 +
17805 /* Start of data section */
17806 _sdata = .;
17807
17808 /* init_task */
17809 INIT_TASK_DATA(THREAD_SIZE)
17810
17811 -#ifdef CONFIG_X86_32
17812 - /* 32 bit has nosave before _edata */
17813 NOSAVE_DATA
17814 -#endif
17815
17816 PAGE_ALIGNED_DATA(PAGE_SIZE)
17817
17818 @@ -176,12 +234,19 @@ SECTIONS
17819 #endif /* CONFIG_X86_64 */
17820
17821 /* Init code and data - will be freed after init */
17822 - . = ALIGN(PAGE_SIZE);
17823 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17824 + BYTE(0)
17825 +
17826 +#ifdef CONFIG_PAX_KERNEXEC
17827 + . = ALIGN(HPAGE_SIZE);
17828 +#else
17829 + . = ALIGN(PAGE_SIZE);
17830 +#endif
17831 +
17832 __init_begin = .; /* paired with __init_end */
17833 - }
17834 + } :init.begin
17835
17836 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17837 +#ifdef CONFIG_SMP
17838 /*
17839 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17840 * output PHDR, so the next output section - .init.text - should
17841 @@ -190,12 +255,27 @@ SECTIONS
17842 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
17843 #endif
17844
17845 - INIT_TEXT_SECTION(PAGE_SIZE)
17846 -#ifdef CONFIG_X86_64
17847 - :init
17848 -#endif
17849 + . = ALIGN(PAGE_SIZE);
17850 + init_begin = .;
17851 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17852 + VMLINUX_SYMBOL(_sinittext) = .;
17853 + INIT_TEXT
17854 + VMLINUX_SYMBOL(_einittext) = .;
17855 + . = ALIGN(PAGE_SIZE);
17856 + } :text.init
17857
17858 - INIT_DATA_SECTION(16)
17859 + /*
17860 + * .exit.text is discard at runtime, not link time, to deal with
17861 + * references from .altinstructions and .eh_frame
17862 + */
17863 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17864 + EXIT_TEXT
17865 + . = ALIGN(16);
17866 + } :text.exit
17867 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17868 +
17869 + . = ALIGN(PAGE_SIZE);
17870 + INIT_DATA_SECTION(16) :init
17871
17872 /*
17873 * Code and data for a variety of lowlevel trampolines, to be
17874 @@ -269,19 +349,12 @@ SECTIONS
17875 }
17876
17877 . = ALIGN(8);
17878 - /*
17879 - * .exit.text is discard at runtime, not link time, to deal with
17880 - * references from .altinstructions and .eh_frame
17881 - */
17882 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17883 - EXIT_TEXT
17884 - }
17885
17886 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17887 EXIT_DATA
17888 }
17889
17890 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17891 +#ifndef CONFIG_SMP
17892 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
17893 #endif
17894
17895 @@ -300,16 +373,10 @@ SECTIONS
17896 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
17897 __smp_locks = .;
17898 *(.smp_locks)
17899 - . = ALIGN(PAGE_SIZE);
17900 __smp_locks_end = .;
17901 + . = ALIGN(PAGE_SIZE);
17902 }
17903
17904 -#ifdef CONFIG_X86_64
17905 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17906 - NOSAVE_DATA
17907 - }
17908 -#endif
17909 -
17910 /* BSS */
17911 . = ALIGN(PAGE_SIZE);
17912 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17913 @@ -325,6 +392,7 @@ SECTIONS
17914 __brk_base = .;
17915 . += 64 * 1024; /* 64k alignment slop space */
17916 *(.brk_reservation) /* areas brk users have reserved */
17917 + . = ALIGN(HPAGE_SIZE);
17918 __brk_limit = .;
17919 }
17920
17921 @@ -351,13 +419,12 @@ SECTIONS
17922 * for the boot processor.
17923 */
17924 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
17925 -INIT_PER_CPU(gdt_page);
17926 INIT_PER_CPU(irq_stack_union);
17927
17928 /*
17929 * Build-time check on the image size:
17930 */
17931 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17932 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17933 "kernel image bigger than KERNEL_IMAGE_SIZE");
17934
17935 #ifdef CONFIG_SMP
17936 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
17937 index b56c65de..561a55b 100644
17938 --- a/arch/x86/kernel/vsyscall_64.c
17939 +++ b/arch/x86/kernel/vsyscall_64.c
17940 @@ -56,15 +56,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
17941 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
17942 };
17943
17944 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
17945 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
17946
17947 static int __init vsyscall_setup(char *str)
17948 {
17949 if (str) {
17950 if (!strcmp("emulate", str))
17951 vsyscall_mode = EMULATE;
17952 - else if (!strcmp("native", str))
17953 - vsyscall_mode = NATIVE;
17954 else if (!strcmp("none", str))
17955 vsyscall_mode = NONE;
17956 else
17957 @@ -177,7 +175,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
17958
17959 tsk = current;
17960 if (seccomp_mode(&tsk->seccomp))
17961 - do_exit(SIGKILL);
17962 + do_group_exit(SIGKILL);
17963
17964 switch (vsyscall_nr) {
17965 case 0:
17966 @@ -219,8 +217,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
17967 return true;
17968
17969 sigsegv:
17970 - force_sig(SIGSEGV, current);
17971 - return true;
17972 + do_group_exit(SIGKILL);
17973 }
17974
17975 /*
17976 @@ -273,10 +270,7 @@ void __init map_vsyscall(void)
17977 extern char __vvar_page;
17978 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
17979
17980 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
17981 - vsyscall_mode == NATIVE
17982 - ? PAGE_KERNEL_VSYSCALL
17983 - : PAGE_KERNEL_VVAR);
17984 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
17985 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
17986 (unsigned long)VSYSCALL_START);
17987
17988 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
17989 index 9796c2f..f686fbf 100644
17990 --- a/arch/x86/kernel/x8664_ksyms_64.c
17991 +++ b/arch/x86/kernel/x8664_ksyms_64.c
17992 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
17993 EXPORT_SYMBOL(copy_user_generic_string);
17994 EXPORT_SYMBOL(copy_user_generic_unrolled);
17995 EXPORT_SYMBOL(__copy_user_nocache);
17996 -EXPORT_SYMBOL(_copy_from_user);
17997 -EXPORT_SYMBOL(_copy_to_user);
17998
17999 EXPORT_SYMBOL(copy_page);
18000 EXPORT_SYMBOL(clear_page);
18001 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
18002 index a391134..d0b63b6e 100644
18003 --- a/arch/x86/kernel/xsave.c
18004 +++ b/arch/x86/kernel/xsave.c
18005 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
18006 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18007 return -EINVAL;
18008
18009 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18010 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18011 fx_sw_user->extended_size -
18012 FP_XSTATE_MAGIC2_SIZE));
18013 if (err)
18014 @@ -267,7 +267,7 @@ fx_only:
18015 * the other extended state.
18016 */
18017 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18018 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18019 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
18020 }
18021
18022 /*
18023 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf)
18024 if (use_xsave())
18025 err = restore_user_xstate(buf);
18026 else
18027 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
18028 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
18029 buf);
18030 if (unlikely(err)) {
18031 /*
18032 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18033 index 8b4cc5f..f086b5b 100644
18034 --- a/arch/x86/kvm/emulate.c
18035 +++ b/arch/x86/kvm/emulate.c
18036 @@ -96,7 +96,7 @@
18037 #define Src2ImmByte (2<<29)
18038 #define Src2One (3<<29)
18039 #define Src2Imm (4<<29)
18040 -#define Src2Mask (7<<29)
18041 +#define Src2Mask (7U<<29)
18042
18043 #define X2(x...) x, x
18044 #define X3(x...) X2(x), x
18045 @@ -207,6 +207,7 @@ struct gprefix {
18046
18047 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
18048 do { \
18049 + unsigned long _tmp; \
18050 __asm__ __volatile__ ( \
18051 _PRE_EFLAGS("0", "4", "2") \
18052 _op _suffix " %"_x"3,%1; " \
18053 @@ -220,8 +221,6 @@ struct gprefix {
18054 /* Raw emulation: instruction has two explicit operands. */
18055 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18056 do { \
18057 - unsigned long _tmp; \
18058 - \
18059 switch ((_dst).bytes) { \
18060 case 2: \
18061 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
18062 @@ -237,7 +236,6 @@ struct gprefix {
18063
18064 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18065 do { \
18066 - unsigned long _tmp; \
18067 switch ((_dst).bytes) { \
18068 case 1: \
18069 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
18070 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
18071 index 57dcbd4..79aba9b 100644
18072 --- a/arch/x86/kvm/lapic.c
18073 +++ b/arch/x86/kvm/lapic.c
18074 @@ -53,7 +53,7 @@
18075 #define APIC_BUS_CYCLE_NS 1
18076
18077 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18078 -#define apic_debug(fmt, arg...)
18079 +#define apic_debug(fmt, arg...) do {} while (0)
18080
18081 #define APIC_LVT_NUM 6
18082 /* 14 is the version for Xeon and Pentium 8.4.8*/
18083 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
18084 index 8e8da79..13bc641 100644
18085 --- a/arch/x86/kvm/mmu.c
18086 +++ b/arch/x86/kvm/mmu.c
18087 @@ -3552,7 +3552,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18088
18089 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
18090
18091 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
18092 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
18093
18094 /*
18095 * Assume that the pte write on a page table of the same type
18096 @@ -3584,7 +3584,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
18097 }
18098
18099 spin_lock(&vcpu->kvm->mmu_lock);
18100 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18101 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
18102 gentry = 0;
18103 kvm_mmu_free_some_pages(vcpu);
18104 ++vcpu->kvm->stat.mmu_pte_write;
18105 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
18106 index 507e2b8..fc55f89 100644
18107 --- a/arch/x86/kvm/paging_tmpl.h
18108 +++ b/arch/x86/kvm/paging_tmpl.h
18109 @@ -197,7 +197,7 @@ retry_walk:
18110 if (unlikely(kvm_is_error_hva(host_addr)))
18111 goto error;
18112
18113 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
18114 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
18115 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
18116 goto error;
18117
18118 @@ -575,6 +575,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
18119 unsigned long mmu_seq;
18120 bool map_writable;
18121
18122 + pax_track_stack();
18123 +
18124 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18125
18126 if (unlikely(error_code & PFERR_RSVD_MASK))
18127 @@ -701,7 +703,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
18128 if (need_flush)
18129 kvm_flush_remote_tlbs(vcpu->kvm);
18130
18131 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
18132 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
18133
18134 spin_unlock(&vcpu->kvm->mmu_lock);
18135
18136 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
18137 index 475d1c9..33658ff 100644
18138 --- a/arch/x86/kvm/svm.c
18139 +++ b/arch/x86/kvm/svm.c
18140 @@ -3381,7 +3381,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
18141 int cpu = raw_smp_processor_id();
18142
18143 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
18144 +
18145 + pax_open_kernel();
18146 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
18147 + pax_close_kernel();
18148 +
18149 load_TR_desc();
18150 }
18151
18152 @@ -3759,6 +3763,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
18153 #endif
18154 #endif
18155
18156 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18157 + __set_fs(current_thread_info()->addr_limit);
18158 +#endif
18159 +
18160 reload_tss(vcpu);
18161
18162 local_irq_disable();
18163 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
18164 index e65a158..656dc24 100644
18165 --- a/arch/x86/kvm/vmx.c
18166 +++ b/arch/x86/kvm/vmx.c
18167 @@ -1251,7 +1251,11 @@ static void reload_tss(void)
18168 struct desc_struct *descs;
18169
18170 descs = (void *)gdt->address;
18171 +
18172 + pax_open_kernel();
18173 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18174 + pax_close_kernel();
18175 +
18176 load_TR_desc();
18177 }
18178
18179 @@ -2520,8 +2524,11 @@ static __init int hardware_setup(void)
18180 if (!cpu_has_vmx_flexpriority())
18181 flexpriority_enabled = 0;
18182
18183 - if (!cpu_has_vmx_tpr_shadow())
18184 - kvm_x86_ops->update_cr8_intercept = NULL;
18185 + if (!cpu_has_vmx_tpr_shadow()) {
18186 + pax_open_kernel();
18187 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18188 + pax_close_kernel();
18189 + }
18190
18191 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18192 kvm_disable_largepages();
18193 @@ -3535,7 +3542,7 @@ static void vmx_set_constant_host_state(void)
18194 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
18195
18196 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
18197 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
18198 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
18199
18200 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
18201 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
18202 @@ -6021,6 +6028,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18203 "jmp .Lkvm_vmx_return \n\t"
18204 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18205 ".Lkvm_vmx_return: "
18206 +
18207 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18208 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18209 + ".Lkvm_vmx_return2: "
18210 +#endif
18211 +
18212 /* Save guest registers, load host registers, keep flags */
18213 "mov %0, %c[wordsize](%%"R"sp) \n\t"
18214 "pop %0 \n\t"
18215 @@ -6069,6 +6082,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18216 #endif
18217 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
18218 [wordsize]"i"(sizeof(ulong))
18219 +
18220 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18221 + ,[cs]"i"(__KERNEL_CS)
18222 +#endif
18223 +
18224 : "cc", "memory"
18225 , R"ax", R"bx", R"di", R"si"
18226 #ifdef CONFIG_X86_64
18227 @@ -6097,7 +6115,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
18228 }
18229 }
18230
18231 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18232 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18233 +
18234 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18235 + loadsegment(fs, __KERNEL_PERCPU);
18236 +#endif
18237 +
18238 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18239 + __set_fs(current_thread_info()->addr_limit);
18240 +#endif
18241 +
18242 vmx->loaded_vmcs->launched = 1;
18243
18244 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
18245 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
18246 index 84a28ea..9326501 100644
18247 --- a/arch/x86/kvm/x86.c
18248 +++ b/arch/x86/kvm/x86.c
18249 @@ -1334,8 +1334,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
18250 {
18251 struct kvm *kvm = vcpu->kvm;
18252 int lm = is_long_mode(vcpu);
18253 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18254 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18255 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
18256 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
18257 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
18258 : kvm->arch.xen_hvm_config.blob_size_32;
18259 u32 page_num = data & ~PAGE_MASK;
18260 @@ -2137,6 +2137,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
18261 if (n < msr_list.nmsrs)
18262 goto out;
18263 r = -EFAULT;
18264 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
18265 + goto out;
18266 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
18267 num_msrs_to_save * sizeof(u32)))
18268 goto out;
18269 @@ -2312,15 +2314,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
18270 struct kvm_cpuid2 *cpuid,
18271 struct kvm_cpuid_entry2 __user *entries)
18272 {
18273 - int r;
18274 + int r, i;
18275
18276 r = -E2BIG;
18277 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18278 goto out;
18279 r = -EFAULT;
18280 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18281 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18282 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18283 goto out;
18284 + for (i = 0; i < cpuid->nent; ++i) {
18285 + struct kvm_cpuid_entry2 cpuid_entry;
18286 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18287 + goto out;
18288 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
18289 + }
18290 vcpu->arch.cpuid_nent = cpuid->nent;
18291 kvm_apic_set_version(vcpu);
18292 kvm_x86_ops->cpuid_update(vcpu);
18293 @@ -2335,15 +2342,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
18294 struct kvm_cpuid2 *cpuid,
18295 struct kvm_cpuid_entry2 __user *entries)
18296 {
18297 - int r;
18298 + int r, i;
18299
18300 r = -E2BIG;
18301 if (cpuid->nent < vcpu->arch.cpuid_nent)
18302 goto out;
18303 r = -EFAULT;
18304 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18305 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18306 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18307 goto out;
18308 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18309 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18310 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18311 + goto out;
18312 + }
18313 return 0;
18314
18315 out:
18316 @@ -2718,7 +2729,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
18317 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18318 struct kvm_interrupt *irq)
18319 {
18320 - if (irq->irq < 0 || irq->irq >= 256)
18321 + if (irq->irq >= 256)
18322 return -EINVAL;
18323 if (irqchip_in_kernel(vcpu->kvm))
18324 return -ENXIO;
18325 @@ -5089,7 +5100,7 @@ static void kvm_set_mmio_spte_mask(void)
18326 kvm_mmu_set_mmio_spte_mask(mask);
18327 }
18328
18329 -int kvm_arch_init(void *opaque)
18330 +int kvm_arch_init(const void *opaque)
18331 {
18332 int r;
18333 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18334 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
18335 index 13ee258..b9632f6 100644
18336 --- a/arch/x86/lguest/boot.c
18337 +++ b/arch/x86/lguest/boot.c
18338 @@ -1184,9 +1184,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
18339 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18340 * Launcher to reboot us.
18341 */
18342 -static void lguest_restart(char *reason)
18343 +static __noreturn void lguest_restart(char *reason)
18344 {
18345 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
18346 + BUG();
18347 }
18348
18349 /*G:050
18350 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
18351 index 042f682..c92afb6 100644
18352 --- a/arch/x86/lib/atomic64_32.c
18353 +++ b/arch/x86/lib/atomic64_32.c
18354 @@ -8,18 +8,30 @@
18355
18356 long long atomic64_read_cx8(long long, const atomic64_t *v);
18357 EXPORT_SYMBOL(atomic64_read_cx8);
18358 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18359 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
18360 long long atomic64_set_cx8(long long, const atomic64_t *v);
18361 EXPORT_SYMBOL(atomic64_set_cx8);
18362 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
18363 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
18364 long long atomic64_xchg_cx8(long long, unsigned high);
18365 EXPORT_SYMBOL(atomic64_xchg_cx8);
18366 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
18367 EXPORT_SYMBOL(atomic64_add_return_cx8);
18368 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18369 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
18370 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
18371 EXPORT_SYMBOL(atomic64_sub_return_cx8);
18372 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18373 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
18374 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
18375 EXPORT_SYMBOL(atomic64_inc_return_cx8);
18376 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18377 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
18378 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
18379 EXPORT_SYMBOL(atomic64_dec_return_cx8);
18380 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
18381 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
18382 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
18383 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
18384 int atomic64_inc_not_zero_cx8(atomic64_t *v);
18385 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
18386 #ifndef CONFIG_X86_CMPXCHG64
18387 long long atomic64_read_386(long long, const atomic64_t *v);
18388 EXPORT_SYMBOL(atomic64_read_386);
18389 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
18390 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
18391 long long atomic64_set_386(long long, const atomic64_t *v);
18392 EXPORT_SYMBOL(atomic64_set_386);
18393 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
18394 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
18395 long long atomic64_xchg_386(long long, unsigned high);
18396 EXPORT_SYMBOL(atomic64_xchg_386);
18397 long long atomic64_add_return_386(long long a, atomic64_t *v);
18398 EXPORT_SYMBOL(atomic64_add_return_386);
18399 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18400 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
18401 long long atomic64_sub_return_386(long long a, atomic64_t *v);
18402 EXPORT_SYMBOL(atomic64_sub_return_386);
18403 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18404 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
18405 long long atomic64_inc_return_386(long long a, atomic64_t *v);
18406 EXPORT_SYMBOL(atomic64_inc_return_386);
18407 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18408 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
18409 long long atomic64_dec_return_386(long long a, atomic64_t *v);
18410 EXPORT_SYMBOL(atomic64_dec_return_386);
18411 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
18412 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
18413 long long atomic64_add_386(long long a, atomic64_t *v);
18414 EXPORT_SYMBOL(atomic64_add_386);
18415 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
18416 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
18417 long long atomic64_sub_386(long long a, atomic64_t *v);
18418 EXPORT_SYMBOL(atomic64_sub_386);
18419 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
18420 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
18421 long long atomic64_inc_386(long long a, atomic64_t *v);
18422 EXPORT_SYMBOL(atomic64_inc_386);
18423 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
18424 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
18425 long long atomic64_dec_386(long long a, atomic64_t *v);
18426 EXPORT_SYMBOL(atomic64_dec_386);
18427 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
18428 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
18429 long long atomic64_dec_if_positive_386(atomic64_t *v);
18430 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
18431 int atomic64_inc_not_zero_386(atomic64_t *v);
18432 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
18433 index e8e7e0d..56fd1b0 100644
18434 --- a/arch/x86/lib/atomic64_386_32.S
18435 +++ b/arch/x86/lib/atomic64_386_32.S
18436 @@ -48,6 +48,10 @@ BEGIN(read)
18437 movl (v), %eax
18438 movl 4(v), %edx
18439 RET_ENDP
18440 +BEGIN(read_unchecked)
18441 + movl (v), %eax
18442 + movl 4(v), %edx
18443 +RET_ENDP
18444 #undef v
18445
18446 #define v %esi
18447 @@ -55,6 +59,10 @@ BEGIN(set)
18448 movl %ebx, (v)
18449 movl %ecx, 4(v)
18450 RET_ENDP
18451 +BEGIN(set_unchecked)
18452 + movl %ebx, (v)
18453 + movl %ecx, 4(v)
18454 +RET_ENDP
18455 #undef v
18456
18457 #define v %esi
18458 @@ -70,6 +78,20 @@ RET_ENDP
18459 BEGIN(add)
18460 addl %eax, (v)
18461 adcl %edx, 4(v)
18462 +
18463 +#ifdef CONFIG_PAX_REFCOUNT
18464 + jno 0f
18465 + subl %eax, (v)
18466 + sbbl %edx, 4(v)
18467 + int $4
18468 +0:
18469 + _ASM_EXTABLE(0b, 0b)
18470 +#endif
18471 +
18472 +RET_ENDP
18473 +BEGIN(add_unchecked)
18474 + addl %eax, (v)
18475 + adcl %edx, 4(v)
18476 RET_ENDP
18477 #undef v
18478
18479 @@ -77,6 +99,24 @@ RET_ENDP
18480 BEGIN(add_return)
18481 addl (v), %eax
18482 adcl 4(v), %edx
18483 +
18484 +#ifdef CONFIG_PAX_REFCOUNT
18485 + into
18486 +1234:
18487 + _ASM_EXTABLE(1234b, 2f)
18488 +#endif
18489 +
18490 + movl %eax, (v)
18491 + movl %edx, 4(v)
18492 +
18493 +#ifdef CONFIG_PAX_REFCOUNT
18494 +2:
18495 +#endif
18496 +
18497 +RET_ENDP
18498 +BEGIN(add_return_unchecked)
18499 + addl (v), %eax
18500 + adcl 4(v), %edx
18501 movl %eax, (v)
18502 movl %edx, 4(v)
18503 RET_ENDP
18504 @@ -86,6 +126,20 @@ RET_ENDP
18505 BEGIN(sub)
18506 subl %eax, (v)
18507 sbbl %edx, 4(v)
18508 +
18509 +#ifdef CONFIG_PAX_REFCOUNT
18510 + jno 0f
18511 + addl %eax, (v)
18512 + adcl %edx, 4(v)
18513 + int $4
18514 +0:
18515 + _ASM_EXTABLE(0b, 0b)
18516 +#endif
18517 +
18518 +RET_ENDP
18519 +BEGIN(sub_unchecked)
18520 + subl %eax, (v)
18521 + sbbl %edx, 4(v)
18522 RET_ENDP
18523 #undef v
18524
18525 @@ -96,6 +150,27 @@ BEGIN(sub_return)
18526 sbbl $0, %edx
18527 addl (v), %eax
18528 adcl 4(v), %edx
18529 +
18530 +#ifdef CONFIG_PAX_REFCOUNT
18531 + into
18532 +1234:
18533 + _ASM_EXTABLE(1234b, 2f)
18534 +#endif
18535 +
18536 + movl %eax, (v)
18537 + movl %edx, 4(v)
18538 +
18539 +#ifdef CONFIG_PAX_REFCOUNT
18540 +2:
18541 +#endif
18542 +
18543 +RET_ENDP
18544 +BEGIN(sub_return_unchecked)
18545 + negl %edx
18546 + negl %eax
18547 + sbbl $0, %edx
18548 + addl (v), %eax
18549 + adcl 4(v), %edx
18550 movl %eax, (v)
18551 movl %edx, 4(v)
18552 RET_ENDP
18553 @@ -105,6 +180,20 @@ RET_ENDP
18554 BEGIN(inc)
18555 addl $1, (v)
18556 adcl $0, 4(v)
18557 +
18558 +#ifdef CONFIG_PAX_REFCOUNT
18559 + jno 0f
18560 + subl $1, (v)
18561 + sbbl $0, 4(v)
18562 + int $4
18563 +0:
18564 + _ASM_EXTABLE(0b, 0b)
18565 +#endif
18566 +
18567 +RET_ENDP
18568 +BEGIN(inc_unchecked)
18569 + addl $1, (v)
18570 + adcl $0, 4(v)
18571 RET_ENDP
18572 #undef v
18573
18574 @@ -114,6 +203,26 @@ BEGIN(inc_return)
18575 movl 4(v), %edx
18576 addl $1, %eax
18577 adcl $0, %edx
18578 +
18579 +#ifdef CONFIG_PAX_REFCOUNT
18580 + into
18581 +1234:
18582 + _ASM_EXTABLE(1234b, 2f)
18583 +#endif
18584 +
18585 + movl %eax, (v)
18586 + movl %edx, 4(v)
18587 +
18588 +#ifdef CONFIG_PAX_REFCOUNT
18589 +2:
18590 +#endif
18591 +
18592 +RET_ENDP
18593 +BEGIN(inc_return_unchecked)
18594 + movl (v), %eax
18595 + movl 4(v), %edx
18596 + addl $1, %eax
18597 + adcl $0, %edx
18598 movl %eax, (v)
18599 movl %edx, 4(v)
18600 RET_ENDP
18601 @@ -123,6 +232,20 @@ RET_ENDP
18602 BEGIN(dec)
18603 subl $1, (v)
18604 sbbl $0, 4(v)
18605 +
18606 +#ifdef CONFIG_PAX_REFCOUNT
18607 + jno 0f
18608 + addl $1, (v)
18609 + adcl $0, 4(v)
18610 + int $4
18611 +0:
18612 + _ASM_EXTABLE(0b, 0b)
18613 +#endif
18614 +
18615 +RET_ENDP
18616 +BEGIN(dec_unchecked)
18617 + subl $1, (v)
18618 + sbbl $0, 4(v)
18619 RET_ENDP
18620 #undef v
18621
18622 @@ -132,6 +255,26 @@ BEGIN(dec_return)
18623 movl 4(v), %edx
18624 subl $1, %eax
18625 sbbl $0, %edx
18626 +
18627 +#ifdef CONFIG_PAX_REFCOUNT
18628 + into
18629 +1234:
18630 + _ASM_EXTABLE(1234b, 2f)
18631 +#endif
18632 +
18633 + movl %eax, (v)
18634 + movl %edx, 4(v)
18635 +
18636 +#ifdef CONFIG_PAX_REFCOUNT
18637 +2:
18638 +#endif
18639 +
18640 +RET_ENDP
18641 +BEGIN(dec_return_unchecked)
18642 + movl (v), %eax
18643 + movl 4(v), %edx
18644 + subl $1, %eax
18645 + sbbl $0, %edx
18646 movl %eax, (v)
18647 movl %edx, 4(v)
18648 RET_ENDP
18649 @@ -143,6 +286,13 @@ BEGIN(add_unless)
18650 adcl %edx, %edi
18651 addl (v), %eax
18652 adcl 4(v), %edx
18653 +
18654 +#ifdef CONFIG_PAX_REFCOUNT
18655 + into
18656 +1234:
18657 + _ASM_EXTABLE(1234b, 2f)
18658 +#endif
18659 +
18660 cmpl %eax, %esi
18661 je 3f
18662 1:
18663 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
18664 1:
18665 addl $1, %eax
18666 adcl $0, %edx
18667 +
18668 +#ifdef CONFIG_PAX_REFCOUNT
18669 + into
18670 +1234:
18671 + _ASM_EXTABLE(1234b, 2f)
18672 +#endif
18673 +
18674 movl %eax, (v)
18675 movl %edx, 4(v)
18676 movl $1, %eax
18677 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
18678 movl 4(v), %edx
18679 subl $1, %eax
18680 sbbl $0, %edx
18681 +
18682 +#ifdef CONFIG_PAX_REFCOUNT
18683 + into
18684 +1234:
18685 + _ASM_EXTABLE(1234b, 1f)
18686 +#endif
18687 +
18688 js 1f
18689 movl %eax, (v)
18690 movl %edx, 4(v)
18691 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
18692 index 391a083..d658e9f 100644
18693 --- a/arch/x86/lib/atomic64_cx8_32.S
18694 +++ b/arch/x86/lib/atomic64_cx8_32.S
18695 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
18696 CFI_STARTPROC
18697
18698 read64 %ecx
18699 + pax_force_retaddr
18700 ret
18701 CFI_ENDPROC
18702 ENDPROC(atomic64_read_cx8)
18703
18704 +ENTRY(atomic64_read_unchecked_cx8)
18705 + CFI_STARTPROC
18706 +
18707 + read64 %ecx
18708 + pax_force_retaddr
18709 + ret
18710 + CFI_ENDPROC
18711 +ENDPROC(atomic64_read_unchecked_cx8)
18712 +
18713 ENTRY(atomic64_set_cx8)
18714 CFI_STARTPROC
18715
18716 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
18717 cmpxchg8b (%esi)
18718 jne 1b
18719
18720 + pax_force_retaddr
18721 ret
18722 CFI_ENDPROC
18723 ENDPROC(atomic64_set_cx8)
18724
18725 +ENTRY(atomic64_set_unchecked_cx8)
18726 + CFI_STARTPROC
18727 +
18728 +1:
18729 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
18730 + * are atomic on 586 and newer */
18731 + cmpxchg8b (%esi)
18732 + jne 1b
18733 +
18734 + pax_force_retaddr
18735 + ret
18736 + CFI_ENDPROC
18737 +ENDPROC(atomic64_set_unchecked_cx8)
18738 +
18739 ENTRY(atomic64_xchg_cx8)
18740 CFI_STARTPROC
18741
18742 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
18743 cmpxchg8b (%esi)
18744 jne 1b
18745
18746 + pax_force_retaddr
18747 ret
18748 CFI_ENDPROC
18749 ENDPROC(atomic64_xchg_cx8)
18750
18751 -.macro addsub_return func ins insc
18752 -ENTRY(atomic64_\func\()_return_cx8)
18753 +.macro addsub_return func ins insc unchecked=""
18754 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18755 CFI_STARTPROC
18756 SAVE ebp
18757 SAVE ebx
18758 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
18759 movl %edx, %ecx
18760 \ins\()l %esi, %ebx
18761 \insc\()l %edi, %ecx
18762 +
18763 +.ifb \unchecked
18764 +#ifdef CONFIG_PAX_REFCOUNT
18765 + into
18766 +2:
18767 + _ASM_EXTABLE(2b, 3f)
18768 +#endif
18769 +.endif
18770 +
18771 LOCK_PREFIX
18772 cmpxchg8b (%ebp)
18773 jne 1b
18774 -
18775 -10:
18776 movl %ebx, %eax
18777 movl %ecx, %edx
18778 +
18779 +.ifb \unchecked
18780 +#ifdef CONFIG_PAX_REFCOUNT
18781 +3:
18782 +#endif
18783 +.endif
18784 +
18785 RESTORE edi
18786 RESTORE esi
18787 RESTORE ebx
18788 RESTORE ebp
18789 + pax_force_retaddr
18790 ret
18791 CFI_ENDPROC
18792 -ENDPROC(atomic64_\func\()_return_cx8)
18793 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18794 .endm
18795
18796 addsub_return add add adc
18797 addsub_return sub sub sbb
18798 +addsub_return add add adc _unchecked
18799 +addsub_return sub sub sbb _unchecked
18800
18801 -.macro incdec_return func ins insc
18802 -ENTRY(atomic64_\func\()_return_cx8)
18803 +.macro incdec_return func ins insc unchecked
18804 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
18805 CFI_STARTPROC
18806 SAVE ebx
18807
18808 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
18809 movl %edx, %ecx
18810 \ins\()l $1, %ebx
18811 \insc\()l $0, %ecx
18812 +
18813 +.ifb \unchecked
18814 +#ifdef CONFIG_PAX_REFCOUNT
18815 + into
18816 +2:
18817 + _ASM_EXTABLE(2b, 3f)
18818 +#endif
18819 +.endif
18820 +
18821 LOCK_PREFIX
18822 cmpxchg8b (%esi)
18823 jne 1b
18824
18825 -10:
18826 movl %ebx, %eax
18827 movl %ecx, %edx
18828 +
18829 +.ifb \unchecked
18830 +#ifdef CONFIG_PAX_REFCOUNT
18831 +3:
18832 +#endif
18833 +.endif
18834 +
18835 RESTORE ebx
18836 + pax_force_retaddr
18837 ret
18838 CFI_ENDPROC
18839 -ENDPROC(atomic64_\func\()_return_cx8)
18840 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
18841 .endm
18842
18843 incdec_return inc add adc
18844 incdec_return dec sub sbb
18845 +incdec_return inc add adc _unchecked
18846 +incdec_return dec sub sbb _unchecked
18847
18848 ENTRY(atomic64_dec_if_positive_cx8)
18849 CFI_STARTPROC
18850 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
18851 movl %edx, %ecx
18852 subl $1, %ebx
18853 sbb $0, %ecx
18854 +
18855 +#ifdef CONFIG_PAX_REFCOUNT
18856 + into
18857 +1234:
18858 + _ASM_EXTABLE(1234b, 2f)
18859 +#endif
18860 +
18861 js 2f
18862 LOCK_PREFIX
18863 cmpxchg8b (%esi)
18864 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
18865 movl %ebx, %eax
18866 movl %ecx, %edx
18867 RESTORE ebx
18868 + pax_force_retaddr
18869 ret
18870 CFI_ENDPROC
18871 ENDPROC(atomic64_dec_if_positive_cx8)
18872 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
18873 movl %edx, %ecx
18874 addl %esi, %ebx
18875 adcl %edi, %ecx
18876 +
18877 +#ifdef CONFIG_PAX_REFCOUNT
18878 + into
18879 +1234:
18880 + _ASM_EXTABLE(1234b, 3f)
18881 +#endif
18882 +
18883 LOCK_PREFIX
18884 cmpxchg8b (%ebp)
18885 jne 1b
18886 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
18887 CFI_ADJUST_CFA_OFFSET -8
18888 RESTORE ebx
18889 RESTORE ebp
18890 + pax_force_retaddr
18891 ret
18892 4:
18893 cmpl %edx, 4(%esp)
18894 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
18895 movl %edx, %ecx
18896 addl $1, %ebx
18897 adcl $0, %ecx
18898 +
18899 +#ifdef CONFIG_PAX_REFCOUNT
18900 + into
18901 +1234:
18902 + _ASM_EXTABLE(1234b, 3f)
18903 +#endif
18904 +
18905 LOCK_PREFIX
18906 cmpxchg8b (%esi)
18907 jne 1b
18908 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
18909 movl $1, %eax
18910 3:
18911 RESTORE ebx
18912 + pax_force_retaddr
18913 ret
18914 4:
18915 testl %edx, %edx
18916 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
18917 index 78d16a5..fbcf666 100644
18918 --- a/arch/x86/lib/checksum_32.S
18919 +++ b/arch/x86/lib/checksum_32.S
18920 @@ -28,7 +28,8 @@
18921 #include <linux/linkage.h>
18922 #include <asm/dwarf2.h>
18923 #include <asm/errno.h>
18924 -
18925 +#include <asm/segment.h>
18926 +
18927 /*
18928 * computes a partial checksum, e.g. for TCP/UDP fragments
18929 */
18930 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
18931
18932 #define ARGBASE 16
18933 #define FP 12
18934 -
18935 -ENTRY(csum_partial_copy_generic)
18936 +
18937 +ENTRY(csum_partial_copy_generic_to_user)
18938 CFI_STARTPROC
18939 +
18940 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18941 + pushl_cfi %gs
18942 + popl_cfi %es
18943 + jmp csum_partial_copy_generic
18944 +#endif
18945 +
18946 +ENTRY(csum_partial_copy_generic_from_user)
18947 +
18948 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18949 + pushl_cfi %gs
18950 + popl_cfi %ds
18951 +#endif
18952 +
18953 +ENTRY(csum_partial_copy_generic)
18954 subl $4,%esp
18955 CFI_ADJUST_CFA_OFFSET 4
18956 pushl_cfi %edi
18957 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
18958 jmp 4f
18959 SRC(1: movw (%esi), %bx )
18960 addl $2, %esi
18961 -DST( movw %bx, (%edi) )
18962 +DST( movw %bx, %es:(%edi) )
18963 addl $2, %edi
18964 addw %bx, %ax
18965 adcl $0, %eax
18966 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
18967 SRC(1: movl (%esi), %ebx )
18968 SRC( movl 4(%esi), %edx )
18969 adcl %ebx, %eax
18970 -DST( movl %ebx, (%edi) )
18971 +DST( movl %ebx, %es:(%edi) )
18972 adcl %edx, %eax
18973 -DST( movl %edx, 4(%edi) )
18974 +DST( movl %edx, %es:4(%edi) )
18975
18976 SRC( movl 8(%esi), %ebx )
18977 SRC( movl 12(%esi), %edx )
18978 adcl %ebx, %eax
18979 -DST( movl %ebx, 8(%edi) )
18980 +DST( movl %ebx, %es:8(%edi) )
18981 adcl %edx, %eax
18982 -DST( movl %edx, 12(%edi) )
18983 +DST( movl %edx, %es:12(%edi) )
18984
18985 SRC( movl 16(%esi), %ebx )
18986 SRC( movl 20(%esi), %edx )
18987 adcl %ebx, %eax
18988 -DST( movl %ebx, 16(%edi) )
18989 +DST( movl %ebx, %es:16(%edi) )
18990 adcl %edx, %eax
18991 -DST( movl %edx, 20(%edi) )
18992 +DST( movl %edx, %es:20(%edi) )
18993
18994 SRC( movl 24(%esi), %ebx )
18995 SRC( movl 28(%esi), %edx )
18996 adcl %ebx, %eax
18997 -DST( movl %ebx, 24(%edi) )
18998 +DST( movl %ebx, %es:24(%edi) )
18999 adcl %edx, %eax
19000 -DST( movl %edx, 28(%edi) )
19001 +DST( movl %edx, %es:28(%edi) )
19002
19003 lea 32(%esi), %esi
19004 lea 32(%edi), %edi
19005 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
19006 shrl $2, %edx # This clears CF
19007 SRC(3: movl (%esi), %ebx )
19008 adcl %ebx, %eax
19009 -DST( movl %ebx, (%edi) )
19010 +DST( movl %ebx, %es:(%edi) )
19011 lea 4(%esi), %esi
19012 lea 4(%edi), %edi
19013 dec %edx
19014 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
19015 jb 5f
19016 SRC( movw (%esi), %cx )
19017 leal 2(%esi), %esi
19018 -DST( movw %cx, (%edi) )
19019 +DST( movw %cx, %es:(%edi) )
19020 leal 2(%edi), %edi
19021 je 6f
19022 shll $16,%ecx
19023 SRC(5: movb (%esi), %cl )
19024 -DST( movb %cl, (%edi) )
19025 +DST( movb %cl, %es:(%edi) )
19026 6: addl %ecx, %eax
19027 adcl $0, %eax
19028 7:
19029 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
19030
19031 6001:
19032 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19033 - movl $-EFAULT, (%ebx)
19034 + movl $-EFAULT, %ss:(%ebx)
19035
19036 # zero the complete destination - computing the rest
19037 # is too much work
19038 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
19039
19040 6002:
19041 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19042 - movl $-EFAULT,(%ebx)
19043 + movl $-EFAULT,%ss:(%ebx)
19044 jmp 5000b
19045
19046 .previous
19047
19048 + pushl_cfi %ss
19049 + popl_cfi %ds
19050 + pushl_cfi %ss
19051 + popl_cfi %es
19052 popl_cfi %ebx
19053 CFI_RESTORE ebx
19054 popl_cfi %esi
19055 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
19056 popl_cfi %ecx # equivalent to addl $4,%esp
19057 ret
19058 CFI_ENDPROC
19059 -ENDPROC(csum_partial_copy_generic)
19060 +ENDPROC(csum_partial_copy_generic_to_user)
19061
19062 #else
19063
19064 /* Version for PentiumII/PPro */
19065
19066 #define ROUND1(x) \
19067 + nop; nop; nop; \
19068 SRC(movl x(%esi), %ebx ) ; \
19069 addl %ebx, %eax ; \
19070 - DST(movl %ebx, x(%edi) ) ;
19071 + DST(movl %ebx, %es:x(%edi)) ;
19072
19073 #define ROUND(x) \
19074 + nop; nop; nop; \
19075 SRC(movl x(%esi), %ebx ) ; \
19076 adcl %ebx, %eax ; \
19077 - DST(movl %ebx, x(%edi) ) ;
19078 + DST(movl %ebx, %es:x(%edi)) ;
19079
19080 #define ARGBASE 12
19081 -
19082 -ENTRY(csum_partial_copy_generic)
19083 +
19084 +ENTRY(csum_partial_copy_generic_to_user)
19085 CFI_STARTPROC
19086 +
19087 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19088 + pushl_cfi %gs
19089 + popl_cfi %es
19090 + jmp csum_partial_copy_generic
19091 +#endif
19092 +
19093 +ENTRY(csum_partial_copy_generic_from_user)
19094 +
19095 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19096 + pushl_cfi %gs
19097 + popl_cfi %ds
19098 +#endif
19099 +
19100 +ENTRY(csum_partial_copy_generic)
19101 pushl_cfi %ebx
19102 CFI_REL_OFFSET ebx, 0
19103 pushl_cfi %edi
19104 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
19105 subl %ebx, %edi
19106 lea -1(%esi),%edx
19107 andl $-32,%edx
19108 - lea 3f(%ebx,%ebx), %ebx
19109 + lea 3f(%ebx,%ebx,2), %ebx
19110 testl %esi, %esi
19111 jmp *%ebx
19112 1: addl $64,%esi
19113 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
19114 jb 5f
19115 SRC( movw (%esi), %dx )
19116 leal 2(%esi), %esi
19117 -DST( movw %dx, (%edi) )
19118 +DST( movw %dx, %es:(%edi) )
19119 leal 2(%edi), %edi
19120 je 6f
19121 shll $16,%edx
19122 5:
19123 SRC( movb (%esi), %dl )
19124 -DST( movb %dl, (%edi) )
19125 +DST( movb %dl, %es:(%edi) )
19126 6: addl %edx, %eax
19127 adcl $0, %eax
19128 7:
19129 .section .fixup, "ax"
19130 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19131 - movl $-EFAULT, (%ebx)
19132 + movl $-EFAULT, %ss:(%ebx)
19133 # zero the complete destination (computing the rest is too much work)
19134 movl ARGBASE+8(%esp),%edi # dst
19135 movl ARGBASE+12(%esp),%ecx # len
19136 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
19137 rep; stosb
19138 jmp 7b
19139 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19140 - movl $-EFAULT, (%ebx)
19141 + movl $-EFAULT, %ss:(%ebx)
19142 jmp 7b
19143 .previous
19144
19145 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19146 + pushl_cfi %ss
19147 + popl_cfi %ds
19148 + pushl_cfi %ss
19149 + popl_cfi %es
19150 +#endif
19151 +
19152 popl_cfi %esi
19153 CFI_RESTORE esi
19154 popl_cfi %edi
19155 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
19156 CFI_RESTORE ebx
19157 ret
19158 CFI_ENDPROC
19159 -ENDPROC(csum_partial_copy_generic)
19160 +ENDPROC(csum_partial_copy_generic_to_user)
19161
19162 #undef ROUND
19163 #undef ROUND1
19164 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
19165 index f2145cf..cea889d 100644
19166 --- a/arch/x86/lib/clear_page_64.S
19167 +++ b/arch/x86/lib/clear_page_64.S
19168 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
19169 movl $4096/8,%ecx
19170 xorl %eax,%eax
19171 rep stosq
19172 + pax_force_retaddr
19173 ret
19174 CFI_ENDPROC
19175 ENDPROC(clear_page_c)
19176 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
19177 movl $4096,%ecx
19178 xorl %eax,%eax
19179 rep stosb
19180 + pax_force_retaddr
19181 ret
19182 CFI_ENDPROC
19183 ENDPROC(clear_page_c_e)
19184 @@ -43,6 +45,7 @@ ENTRY(clear_page)
19185 leaq 64(%rdi),%rdi
19186 jnz .Lloop
19187 nop
19188 + pax_force_retaddr
19189 ret
19190 CFI_ENDPROC
19191 .Lclear_page_end:
19192 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
19193
19194 #include <asm/cpufeature.h>
19195
19196 - .section .altinstr_replacement,"ax"
19197 + .section .altinstr_replacement,"a"
19198 1: .byte 0xeb /* jmp <disp8> */
19199 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19200 2: .byte 0xeb /* jmp <disp8> */
19201 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
19202 index 1e572c5..2a162cd 100644
19203 --- a/arch/x86/lib/cmpxchg16b_emu.S
19204 +++ b/arch/x86/lib/cmpxchg16b_emu.S
19205 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
19206
19207 popf
19208 mov $1, %al
19209 + pax_force_retaddr
19210 ret
19211
19212 not_same:
19213 popf
19214 xor %al,%al
19215 + pax_force_retaddr
19216 ret
19217
19218 CFI_ENDPROC
19219 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
19220 index 01c805b..dccb07f 100644
19221 --- a/arch/x86/lib/copy_page_64.S
19222 +++ b/arch/x86/lib/copy_page_64.S
19223 @@ -9,6 +9,7 @@ copy_page_c:
19224 CFI_STARTPROC
19225 movl $4096/8,%ecx
19226 rep movsq
19227 + pax_force_retaddr
19228 ret
19229 CFI_ENDPROC
19230 ENDPROC(copy_page_c)
19231 @@ -39,7 +40,7 @@ ENTRY(copy_page)
19232 movq 16 (%rsi), %rdx
19233 movq 24 (%rsi), %r8
19234 movq 32 (%rsi), %r9
19235 - movq 40 (%rsi), %r10
19236 + movq 40 (%rsi), %r13
19237 movq 48 (%rsi), %r11
19238 movq 56 (%rsi), %r12
19239
19240 @@ -50,7 +51,7 @@ ENTRY(copy_page)
19241 movq %rdx, 16 (%rdi)
19242 movq %r8, 24 (%rdi)
19243 movq %r9, 32 (%rdi)
19244 - movq %r10, 40 (%rdi)
19245 + movq %r13, 40 (%rdi)
19246 movq %r11, 48 (%rdi)
19247 movq %r12, 56 (%rdi)
19248
19249 @@ -69,7 +70,7 @@ ENTRY(copy_page)
19250 movq 16 (%rsi), %rdx
19251 movq 24 (%rsi), %r8
19252 movq 32 (%rsi), %r9
19253 - movq 40 (%rsi), %r10
19254 + movq 40 (%rsi), %r13
19255 movq 48 (%rsi), %r11
19256 movq 56 (%rsi), %r12
19257
19258 @@ -78,7 +79,7 @@ ENTRY(copy_page)
19259 movq %rdx, 16 (%rdi)
19260 movq %r8, 24 (%rdi)
19261 movq %r9, 32 (%rdi)
19262 - movq %r10, 40 (%rdi)
19263 + movq %r13, 40 (%rdi)
19264 movq %r11, 48 (%rdi)
19265 movq %r12, 56 (%rdi)
19266
19267 @@ -95,6 +96,7 @@ ENTRY(copy_page)
19268 CFI_RESTORE r13
19269 addq $3*8,%rsp
19270 CFI_ADJUST_CFA_OFFSET -3*8
19271 + pax_force_retaddr
19272 ret
19273 .Lcopy_page_end:
19274 CFI_ENDPROC
19275 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
19276
19277 #include <asm/cpufeature.h>
19278
19279 - .section .altinstr_replacement,"ax"
19280 + .section .altinstr_replacement,"a"
19281 1: .byte 0xeb /* jmp <disp8> */
19282 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19283 2:
19284 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
19285 index 0248402..821c786 100644
19286 --- a/arch/x86/lib/copy_user_64.S
19287 +++ b/arch/x86/lib/copy_user_64.S
19288 @@ -16,6 +16,7 @@
19289 #include <asm/thread_info.h>
19290 #include <asm/cpufeature.h>
19291 #include <asm/alternative-asm.h>
19292 +#include <asm/pgtable.h>
19293
19294 /*
19295 * By placing feature2 after feature1 in altinstructions section, we logically
19296 @@ -29,7 +30,7 @@
19297 .byte 0xe9 /* 32bit jump */
19298 .long \orig-1f /* by default jump to orig */
19299 1:
19300 - .section .altinstr_replacement,"ax"
19301 + .section .altinstr_replacement,"a"
19302 2: .byte 0xe9 /* near jump with 32bit immediate */
19303 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
19304 3: .byte 0xe9 /* near jump with 32bit immediate */
19305 @@ -71,47 +72,20 @@
19306 #endif
19307 .endm
19308
19309 -/* Standard copy_to_user with segment limit checking */
19310 -ENTRY(_copy_to_user)
19311 - CFI_STARTPROC
19312 - GET_THREAD_INFO(%rax)
19313 - movq %rdi,%rcx
19314 - addq %rdx,%rcx
19315 - jc bad_to_user
19316 - cmpq TI_addr_limit(%rax),%rcx
19317 - ja bad_to_user
19318 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19319 - copy_user_generic_unrolled,copy_user_generic_string, \
19320 - copy_user_enhanced_fast_string
19321 - CFI_ENDPROC
19322 -ENDPROC(_copy_to_user)
19323 -
19324 -/* Standard copy_from_user with segment limit checking */
19325 -ENTRY(_copy_from_user)
19326 - CFI_STARTPROC
19327 - GET_THREAD_INFO(%rax)
19328 - movq %rsi,%rcx
19329 - addq %rdx,%rcx
19330 - jc bad_from_user
19331 - cmpq TI_addr_limit(%rax),%rcx
19332 - ja bad_from_user
19333 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
19334 - copy_user_generic_unrolled,copy_user_generic_string, \
19335 - copy_user_enhanced_fast_string
19336 - CFI_ENDPROC
19337 -ENDPROC(_copy_from_user)
19338 -
19339 .section .fixup,"ax"
19340 /* must zero dest */
19341 ENTRY(bad_from_user)
19342 bad_from_user:
19343 CFI_STARTPROC
19344 + testl %edx,%edx
19345 + js bad_to_user
19346 movl %edx,%ecx
19347 xorl %eax,%eax
19348 rep
19349 stosb
19350 bad_to_user:
19351 movl %edx,%eax
19352 + pax_force_retaddr
19353 ret
19354 CFI_ENDPROC
19355 ENDPROC(bad_from_user)
19356 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
19357 jz 17f
19358 1: movq (%rsi),%r8
19359 2: movq 1*8(%rsi),%r9
19360 -3: movq 2*8(%rsi),%r10
19361 +3: movq 2*8(%rsi),%rax
19362 4: movq 3*8(%rsi),%r11
19363 5: movq %r8,(%rdi)
19364 6: movq %r9,1*8(%rdi)
19365 -7: movq %r10,2*8(%rdi)
19366 +7: movq %rax,2*8(%rdi)
19367 8: movq %r11,3*8(%rdi)
19368 9: movq 4*8(%rsi),%r8
19369 10: movq 5*8(%rsi),%r9
19370 -11: movq 6*8(%rsi),%r10
19371 +11: movq 6*8(%rsi),%rax
19372 12: movq 7*8(%rsi),%r11
19373 13: movq %r8,4*8(%rdi)
19374 14: movq %r9,5*8(%rdi)
19375 -15: movq %r10,6*8(%rdi)
19376 +15: movq %rax,6*8(%rdi)
19377 16: movq %r11,7*8(%rdi)
19378 leaq 64(%rsi),%rsi
19379 leaq 64(%rdi),%rdi
19380 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
19381 decl %ecx
19382 jnz 21b
19383 23: xor %eax,%eax
19384 + pax_force_retaddr
19385 ret
19386
19387 .section .fixup,"ax"
19388 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
19389 3: rep
19390 movsb
19391 4: xorl %eax,%eax
19392 + pax_force_retaddr
19393 ret
19394
19395 .section .fixup,"ax"
19396 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
19397 1: rep
19398 movsb
19399 2: xorl %eax,%eax
19400 + pax_force_retaddr
19401 ret
19402
19403 .section .fixup,"ax"
19404 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
19405 index cb0c112..e3a6895 100644
19406 --- a/arch/x86/lib/copy_user_nocache_64.S
19407 +++ b/arch/x86/lib/copy_user_nocache_64.S
19408 @@ -8,12 +8,14 @@
19409
19410 #include <linux/linkage.h>
19411 #include <asm/dwarf2.h>
19412 +#include <asm/alternative-asm.h>
19413
19414 #define FIX_ALIGNMENT 1
19415
19416 #include <asm/current.h>
19417 #include <asm/asm-offsets.h>
19418 #include <asm/thread_info.h>
19419 +#include <asm/pgtable.h>
19420
19421 .macro ALIGN_DESTINATION
19422 #ifdef FIX_ALIGNMENT
19423 @@ -50,6 +52,15 @@
19424 */
19425 ENTRY(__copy_user_nocache)
19426 CFI_STARTPROC
19427 +
19428 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19429 + mov $PAX_USER_SHADOW_BASE,%rcx
19430 + cmp %rcx,%rsi
19431 + jae 1f
19432 + add %rcx,%rsi
19433 +1:
19434 +#endif
19435 +
19436 cmpl $8,%edx
19437 jb 20f /* less then 8 bytes, go to byte copy loop */
19438 ALIGN_DESTINATION
19439 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
19440 jz 17f
19441 1: movq (%rsi),%r8
19442 2: movq 1*8(%rsi),%r9
19443 -3: movq 2*8(%rsi),%r10
19444 +3: movq 2*8(%rsi),%rax
19445 4: movq 3*8(%rsi),%r11
19446 5: movnti %r8,(%rdi)
19447 6: movnti %r9,1*8(%rdi)
19448 -7: movnti %r10,2*8(%rdi)
19449 +7: movnti %rax,2*8(%rdi)
19450 8: movnti %r11,3*8(%rdi)
19451 9: movq 4*8(%rsi),%r8
19452 10: movq 5*8(%rsi),%r9
19453 -11: movq 6*8(%rsi),%r10
19454 +11: movq 6*8(%rsi),%rax
19455 12: movq 7*8(%rsi),%r11
19456 13: movnti %r8,4*8(%rdi)
19457 14: movnti %r9,5*8(%rdi)
19458 -15: movnti %r10,6*8(%rdi)
19459 +15: movnti %rax,6*8(%rdi)
19460 16: movnti %r11,7*8(%rdi)
19461 leaq 64(%rsi),%rsi
19462 leaq 64(%rdi),%rdi
19463 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
19464 jnz 21b
19465 23: xorl %eax,%eax
19466 sfence
19467 + pax_force_retaddr
19468 ret
19469
19470 .section .fixup,"ax"
19471 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
19472 index fb903b7..c92b7f7 100644
19473 --- a/arch/x86/lib/csum-copy_64.S
19474 +++ b/arch/x86/lib/csum-copy_64.S
19475 @@ -8,6 +8,7 @@
19476 #include <linux/linkage.h>
19477 #include <asm/dwarf2.h>
19478 #include <asm/errno.h>
19479 +#include <asm/alternative-asm.h>
19480
19481 /*
19482 * Checksum copy with exception handling.
19483 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
19484 CFI_RESTORE rbp
19485 addq $7*8, %rsp
19486 CFI_ADJUST_CFA_OFFSET -7*8
19487 + pax_force_retaddr 0, 1
19488 ret
19489 CFI_RESTORE_STATE
19490
19491 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
19492 index 459b58a..9570bc7 100644
19493 --- a/arch/x86/lib/csum-wrappers_64.c
19494 +++ b/arch/x86/lib/csum-wrappers_64.c
19495 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
19496 len -= 2;
19497 }
19498 }
19499 - isum = csum_partial_copy_generic((__force const void *)src,
19500 +
19501 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19502 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19503 + src += PAX_USER_SHADOW_BASE;
19504 +#endif
19505 +
19506 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
19507 dst, len, isum, errp, NULL);
19508 if (unlikely(*errp))
19509 goto out_err;
19510 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
19511 }
19512
19513 *errp = 0;
19514 - return csum_partial_copy_generic(src, (void __force *)dst,
19515 +
19516 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19517 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19518 + dst += PAX_USER_SHADOW_BASE;
19519 +#endif
19520 +
19521 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
19522 len, isum, NULL, errp);
19523 }
19524 EXPORT_SYMBOL(csum_partial_copy_to_user);
19525 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
19526 index 51f1504..ddac4c1 100644
19527 --- a/arch/x86/lib/getuser.S
19528 +++ b/arch/x86/lib/getuser.S
19529 @@ -33,15 +33,38 @@
19530 #include <asm/asm-offsets.h>
19531 #include <asm/thread_info.h>
19532 #include <asm/asm.h>
19533 +#include <asm/segment.h>
19534 +#include <asm/pgtable.h>
19535 +#include <asm/alternative-asm.h>
19536 +
19537 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19538 +#define __copyuser_seg gs;
19539 +#else
19540 +#define __copyuser_seg
19541 +#endif
19542
19543 .text
19544 ENTRY(__get_user_1)
19545 CFI_STARTPROC
19546 +
19547 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19548 GET_THREAD_INFO(%_ASM_DX)
19549 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19550 jae bad_get_user
19551 -1: movzb (%_ASM_AX),%edx
19552 +
19553 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19554 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19555 + cmp %_ASM_DX,%_ASM_AX
19556 + jae 1234f
19557 + add %_ASM_DX,%_ASM_AX
19558 +1234:
19559 +#endif
19560 +
19561 +#endif
19562 +
19563 +1: __copyuser_seg movzb (%_ASM_AX),%edx
19564 xor %eax,%eax
19565 + pax_force_retaddr
19566 ret
19567 CFI_ENDPROC
19568 ENDPROC(__get_user_1)
19569 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
19570 ENTRY(__get_user_2)
19571 CFI_STARTPROC
19572 add $1,%_ASM_AX
19573 +
19574 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19575 jc bad_get_user
19576 GET_THREAD_INFO(%_ASM_DX)
19577 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19578 jae bad_get_user
19579 -2: movzwl -1(%_ASM_AX),%edx
19580 +
19581 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19582 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19583 + cmp %_ASM_DX,%_ASM_AX
19584 + jae 1234f
19585 + add %_ASM_DX,%_ASM_AX
19586 +1234:
19587 +#endif
19588 +
19589 +#endif
19590 +
19591 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19592 xor %eax,%eax
19593 + pax_force_retaddr
19594 ret
19595 CFI_ENDPROC
19596 ENDPROC(__get_user_2)
19597 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
19598 ENTRY(__get_user_4)
19599 CFI_STARTPROC
19600 add $3,%_ASM_AX
19601 +
19602 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19603 jc bad_get_user
19604 GET_THREAD_INFO(%_ASM_DX)
19605 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19606 jae bad_get_user
19607 -3: mov -3(%_ASM_AX),%edx
19608 +
19609 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19610 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19611 + cmp %_ASM_DX,%_ASM_AX
19612 + jae 1234f
19613 + add %_ASM_DX,%_ASM_AX
19614 +1234:
19615 +#endif
19616 +
19617 +#endif
19618 +
19619 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
19620 xor %eax,%eax
19621 + pax_force_retaddr
19622 ret
19623 CFI_ENDPROC
19624 ENDPROC(__get_user_4)
19625 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
19626 GET_THREAD_INFO(%_ASM_DX)
19627 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19628 jae bad_get_user
19629 +
19630 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19631 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19632 + cmp %_ASM_DX,%_ASM_AX
19633 + jae 1234f
19634 + add %_ASM_DX,%_ASM_AX
19635 +1234:
19636 +#endif
19637 +
19638 4: movq -7(%_ASM_AX),%_ASM_DX
19639 xor %eax,%eax
19640 + pax_force_retaddr
19641 ret
19642 CFI_ENDPROC
19643 ENDPROC(__get_user_8)
19644 @@ -91,6 +152,7 @@ bad_get_user:
19645 CFI_STARTPROC
19646 xor %edx,%edx
19647 mov $(-EFAULT),%_ASM_AX
19648 + pax_force_retaddr
19649 ret
19650 CFI_ENDPROC
19651 END(bad_get_user)
19652 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
19653 index 9f33b98..dfc7678 100644
19654 --- a/arch/x86/lib/insn.c
19655 +++ b/arch/x86/lib/insn.c
19656 @@ -21,6 +21,11 @@
19657 #include <linux/string.h>
19658 #include <asm/inat.h>
19659 #include <asm/insn.h>
19660 +#ifdef __KERNEL__
19661 +#include <asm/pgtable_types.h>
19662 +#else
19663 +#define ktla_ktva(addr) addr
19664 +#endif
19665
19666 #define get_next(t, insn) \
19667 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
19668 @@ -40,8 +45,8 @@
19669 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
19670 {
19671 memset(insn, 0, sizeof(*insn));
19672 - insn->kaddr = kaddr;
19673 - insn->next_byte = kaddr;
19674 + insn->kaddr = ktla_ktva(kaddr);
19675 + insn->next_byte = ktla_ktva(kaddr);
19676 insn->x86_64 = x86_64 ? 1 : 0;
19677 insn->opnd_bytes = 4;
19678 if (x86_64)
19679 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
19680 index 05a95e7..326f2fa 100644
19681 --- a/arch/x86/lib/iomap_copy_64.S
19682 +++ b/arch/x86/lib/iomap_copy_64.S
19683 @@ -17,6 +17,7 @@
19684
19685 #include <linux/linkage.h>
19686 #include <asm/dwarf2.h>
19687 +#include <asm/alternative-asm.h>
19688
19689 /*
19690 * override generic version in lib/iomap_copy.c
19691 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
19692 CFI_STARTPROC
19693 movl %edx,%ecx
19694 rep movsd
19695 + pax_force_retaddr
19696 ret
19697 CFI_ENDPROC
19698 ENDPROC(__iowrite32_copy)
19699 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
19700 index efbf2a0..8893637 100644
19701 --- a/arch/x86/lib/memcpy_64.S
19702 +++ b/arch/x86/lib/memcpy_64.S
19703 @@ -34,6 +34,7 @@
19704 rep movsq
19705 movl %edx, %ecx
19706 rep movsb
19707 + pax_force_retaddr
19708 ret
19709 .Lmemcpy_e:
19710 .previous
19711 @@ -51,6 +52,7 @@
19712
19713 movl %edx, %ecx
19714 rep movsb
19715 + pax_force_retaddr
19716 ret
19717 .Lmemcpy_e_e:
19718 .previous
19719 @@ -81,13 +83,13 @@ ENTRY(memcpy)
19720 */
19721 movq 0*8(%rsi), %r8
19722 movq 1*8(%rsi), %r9
19723 - movq 2*8(%rsi), %r10
19724 + movq 2*8(%rsi), %rcx
19725 movq 3*8(%rsi), %r11
19726 leaq 4*8(%rsi), %rsi
19727
19728 movq %r8, 0*8(%rdi)
19729 movq %r9, 1*8(%rdi)
19730 - movq %r10, 2*8(%rdi)
19731 + movq %rcx, 2*8(%rdi)
19732 movq %r11, 3*8(%rdi)
19733 leaq 4*8(%rdi), %rdi
19734 jae .Lcopy_forward_loop
19735 @@ -110,12 +112,12 @@ ENTRY(memcpy)
19736 subq $0x20, %rdx
19737 movq -1*8(%rsi), %r8
19738 movq -2*8(%rsi), %r9
19739 - movq -3*8(%rsi), %r10
19740 + movq -3*8(%rsi), %rcx
19741 movq -4*8(%rsi), %r11
19742 leaq -4*8(%rsi), %rsi
19743 movq %r8, -1*8(%rdi)
19744 movq %r9, -2*8(%rdi)
19745 - movq %r10, -3*8(%rdi)
19746 + movq %rcx, -3*8(%rdi)
19747 movq %r11, -4*8(%rdi)
19748 leaq -4*8(%rdi), %rdi
19749 jae .Lcopy_backward_loop
19750 @@ -135,12 +137,13 @@ ENTRY(memcpy)
19751 */
19752 movq 0*8(%rsi), %r8
19753 movq 1*8(%rsi), %r9
19754 - movq -2*8(%rsi, %rdx), %r10
19755 + movq -2*8(%rsi, %rdx), %rcx
19756 movq -1*8(%rsi, %rdx), %r11
19757 movq %r8, 0*8(%rdi)
19758 movq %r9, 1*8(%rdi)
19759 - movq %r10, -2*8(%rdi, %rdx)
19760 + movq %rcx, -2*8(%rdi, %rdx)
19761 movq %r11, -1*8(%rdi, %rdx)
19762 + pax_force_retaddr
19763 retq
19764 .p2align 4
19765 .Lless_16bytes:
19766 @@ -153,6 +156,7 @@ ENTRY(memcpy)
19767 movq -1*8(%rsi, %rdx), %r9
19768 movq %r8, 0*8(%rdi)
19769 movq %r9, -1*8(%rdi, %rdx)
19770 + pax_force_retaddr
19771 retq
19772 .p2align 4
19773 .Lless_8bytes:
19774 @@ -166,6 +170,7 @@ ENTRY(memcpy)
19775 movl -4(%rsi, %rdx), %r8d
19776 movl %ecx, (%rdi)
19777 movl %r8d, -4(%rdi, %rdx)
19778 + pax_force_retaddr
19779 retq
19780 .p2align 4
19781 .Lless_3bytes:
19782 @@ -183,6 +188,7 @@ ENTRY(memcpy)
19783 jnz .Lloop_1
19784
19785 .Lend:
19786 + pax_force_retaddr
19787 retq
19788 CFI_ENDPROC
19789 ENDPROC(memcpy)
19790 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
19791 index ee16461..c39c199 100644
19792 --- a/arch/x86/lib/memmove_64.S
19793 +++ b/arch/x86/lib/memmove_64.S
19794 @@ -61,13 +61,13 @@ ENTRY(memmove)
19795 5:
19796 sub $0x20, %rdx
19797 movq 0*8(%rsi), %r11
19798 - movq 1*8(%rsi), %r10
19799 + movq 1*8(%rsi), %rcx
19800 movq 2*8(%rsi), %r9
19801 movq 3*8(%rsi), %r8
19802 leaq 4*8(%rsi), %rsi
19803
19804 movq %r11, 0*8(%rdi)
19805 - movq %r10, 1*8(%rdi)
19806 + movq %rcx, 1*8(%rdi)
19807 movq %r9, 2*8(%rdi)
19808 movq %r8, 3*8(%rdi)
19809 leaq 4*8(%rdi), %rdi
19810 @@ -81,10 +81,10 @@ ENTRY(memmove)
19811 4:
19812 movq %rdx, %rcx
19813 movq -8(%rsi, %rdx), %r11
19814 - lea -8(%rdi, %rdx), %r10
19815 + lea -8(%rdi, %rdx), %r9
19816 shrq $3, %rcx
19817 rep movsq
19818 - movq %r11, (%r10)
19819 + movq %r11, (%r9)
19820 jmp 13f
19821 .Lmemmove_end_forward:
19822
19823 @@ -95,14 +95,14 @@ ENTRY(memmove)
19824 7:
19825 movq %rdx, %rcx
19826 movq (%rsi), %r11
19827 - movq %rdi, %r10
19828 + movq %rdi, %r9
19829 leaq -8(%rsi, %rdx), %rsi
19830 leaq -8(%rdi, %rdx), %rdi
19831 shrq $3, %rcx
19832 std
19833 rep movsq
19834 cld
19835 - movq %r11, (%r10)
19836 + movq %r11, (%r9)
19837 jmp 13f
19838
19839 /*
19840 @@ -127,13 +127,13 @@ ENTRY(memmove)
19841 8:
19842 subq $0x20, %rdx
19843 movq -1*8(%rsi), %r11
19844 - movq -2*8(%rsi), %r10
19845 + movq -2*8(%rsi), %rcx
19846 movq -3*8(%rsi), %r9
19847 movq -4*8(%rsi), %r8
19848 leaq -4*8(%rsi), %rsi
19849
19850 movq %r11, -1*8(%rdi)
19851 - movq %r10, -2*8(%rdi)
19852 + movq %rcx, -2*8(%rdi)
19853 movq %r9, -3*8(%rdi)
19854 movq %r8, -4*8(%rdi)
19855 leaq -4*8(%rdi), %rdi
19856 @@ -151,11 +151,11 @@ ENTRY(memmove)
19857 * Move data from 16 bytes to 31 bytes.
19858 */
19859 movq 0*8(%rsi), %r11
19860 - movq 1*8(%rsi), %r10
19861 + movq 1*8(%rsi), %rcx
19862 movq -2*8(%rsi, %rdx), %r9
19863 movq -1*8(%rsi, %rdx), %r8
19864 movq %r11, 0*8(%rdi)
19865 - movq %r10, 1*8(%rdi)
19866 + movq %rcx, 1*8(%rdi)
19867 movq %r9, -2*8(%rdi, %rdx)
19868 movq %r8, -1*8(%rdi, %rdx)
19869 jmp 13f
19870 @@ -167,9 +167,9 @@ ENTRY(memmove)
19871 * Move data from 8 bytes to 15 bytes.
19872 */
19873 movq 0*8(%rsi), %r11
19874 - movq -1*8(%rsi, %rdx), %r10
19875 + movq -1*8(%rsi, %rdx), %r9
19876 movq %r11, 0*8(%rdi)
19877 - movq %r10, -1*8(%rdi, %rdx)
19878 + movq %r9, -1*8(%rdi, %rdx)
19879 jmp 13f
19880 10:
19881 cmpq $4, %rdx
19882 @@ -178,9 +178,9 @@ ENTRY(memmove)
19883 * Move data from 4 bytes to 7 bytes.
19884 */
19885 movl (%rsi), %r11d
19886 - movl -4(%rsi, %rdx), %r10d
19887 + movl -4(%rsi, %rdx), %r9d
19888 movl %r11d, (%rdi)
19889 - movl %r10d, -4(%rdi, %rdx)
19890 + movl %r9d, -4(%rdi, %rdx)
19891 jmp 13f
19892 11:
19893 cmp $2, %rdx
19894 @@ -189,9 +189,9 @@ ENTRY(memmove)
19895 * Move data from 2 bytes to 3 bytes.
19896 */
19897 movw (%rsi), %r11w
19898 - movw -2(%rsi, %rdx), %r10w
19899 + movw -2(%rsi, %rdx), %r9w
19900 movw %r11w, (%rdi)
19901 - movw %r10w, -2(%rdi, %rdx)
19902 + movw %r9w, -2(%rdi, %rdx)
19903 jmp 13f
19904 12:
19905 cmp $1, %rdx
19906 @@ -202,6 +202,7 @@ ENTRY(memmove)
19907 movb (%rsi), %r11b
19908 movb %r11b, (%rdi)
19909 13:
19910 + pax_force_retaddr
19911 retq
19912 CFI_ENDPROC
19913
19914 @@ -210,6 +211,7 @@ ENTRY(memmove)
19915 /* Forward moving data. */
19916 movq %rdx, %rcx
19917 rep movsb
19918 + pax_force_retaddr
19919 retq
19920 .Lmemmove_end_forward_efs:
19921 .previous
19922 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
19923 index 79bd454..dff325a 100644
19924 --- a/arch/x86/lib/memset_64.S
19925 +++ b/arch/x86/lib/memset_64.S
19926 @@ -31,6 +31,7 @@
19927 movl %r8d,%ecx
19928 rep stosb
19929 movq %r9,%rax
19930 + pax_force_retaddr
19931 ret
19932 .Lmemset_e:
19933 .previous
19934 @@ -53,6 +54,7 @@
19935 movl %edx,%ecx
19936 rep stosb
19937 movq %r9,%rax
19938 + pax_force_retaddr
19939 ret
19940 .Lmemset_e_e:
19941 .previous
19942 @@ -60,13 +62,13 @@
19943 ENTRY(memset)
19944 ENTRY(__memset)
19945 CFI_STARTPROC
19946 - movq %rdi,%r10
19947 movq %rdx,%r11
19948
19949 /* expand byte value */
19950 movzbl %sil,%ecx
19951 movabs $0x0101010101010101,%rax
19952 mul %rcx /* with rax, clobbers rdx */
19953 + movq %rdi,%rdx
19954
19955 /* align dst */
19956 movl %edi,%r9d
19957 @@ -120,7 +122,8 @@ ENTRY(__memset)
19958 jnz .Lloop_1
19959
19960 .Lende:
19961 - movq %r10,%rax
19962 + movq %rdx,%rax
19963 + pax_force_retaddr
19964 ret
19965
19966 CFI_RESTORE_STATE
19967 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
19968 index c9f2d9b..e7fd2c0 100644
19969 --- a/arch/x86/lib/mmx_32.c
19970 +++ b/arch/x86/lib/mmx_32.c
19971 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
19972 {
19973 void *p;
19974 int i;
19975 + unsigned long cr0;
19976
19977 if (unlikely(in_interrupt()))
19978 return __memcpy(to, from, len);
19979 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
19980 kernel_fpu_begin();
19981
19982 __asm__ __volatile__ (
19983 - "1: prefetch (%0)\n" /* This set is 28 bytes */
19984 - " prefetch 64(%0)\n"
19985 - " prefetch 128(%0)\n"
19986 - " prefetch 192(%0)\n"
19987 - " prefetch 256(%0)\n"
19988 + "1: prefetch (%1)\n" /* This set is 28 bytes */
19989 + " prefetch 64(%1)\n"
19990 + " prefetch 128(%1)\n"
19991 + " prefetch 192(%1)\n"
19992 + " prefetch 256(%1)\n"
19993 "2: \n"
19994 ".section .fixup, \"ax\"\n"
19995 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19996 + "3: \n"
19997 +
19998 +#ifdef CONFIG_PAX_KERNEXEC
19999 + " movl %%cr0, %0\n"
20000 + " movl %0, %%eax\n"
20001 + " andl $0xFFFEFFFF, %%eax\n"
20002 + " movl %%eax, %%cr0\n"
20003 +#endif
20004 +
20005 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20006 +
20007 +#ifdef CONFIG_PAX_KERNEXEC
20008 + " movl %0, %%cr0\n"
20009 +#endif
20010 +
20011 " jmp 2b\n"
20012 ".previous\n"
20013 _ASM_EXTABLE(1b, 3b)
20014 - : : "r" (from));
20015 + : "=&r" (cr0) : "r" (from) : "ax");
20016
20017 for ( ; i > 5; i--) {
20018 __asm__ __volatile__ (
20019 - "1: prefetch 320(%0)\n"
20020 - "2: movq (%0), %%mm0\n"
20021 - " movq 8(%0), %%mm1\n"
20022 - " movq 16(%0), %%mm2\n"
20023 - " movq 24(%0), %%mm3\n"
20024 - " movq %%mm0, (%1)\n"
20025 - " movq %%mm1, 8(%1)\n"
20026 - " movq %%mm2, 16(%1)\n"
20027 - " movq %%mm3, 24(%1)\n"
20028 - " movq 32(%0), %%mm0\n"
20029 - " movq 40(%0), %%mm1\n"
20030 - " movq 48(%0), %%mm2\n"
20031 - " movq 56(%0), %%mm3\n"
20032 - " movq %%mm0, 32(%1)\n"
20033 - " movq %%mm1, 40(%1)\n"
20034 - " movq %%mm2, 48(%1)\n"
20035 - " movq %%mm3, 56(%1)\n"
20036 + "1: prefetch 320(%1)\n"
20037 + "2: movq (%1), %%mm0\n"
20038 + " movq 8(%1), %%mm1\n"
20039 + " movq 16(%1), %%mm2\n"
20040 + " movq 24(%1), %%mm3\n"
20041 + " movq %%mm0, (%2)\n"
20042 + " movq %%mm1, 8(%2)\n"
20043 + " movq %%mm2, 16(%2)\n"
20044 + " movq %%mm3, 24(%2)\n"
20045 + " movq 32(%1), %%mm0\n"
20046 + " movq 40(%1), %%mm1\n"
20047 + " movq 48(%1), %%mm2\n"
20048 + " movq 56(%1), %%mm3\n"
20049 + " movq %%mm0, 32(%2)\n"
20050 + " movq %%mm1, 40(%2)\n"
20051 + " movq %%mm2, 48(%2)\n"
20052 + " movq %%mm3, 56(%2)\n"
20053 ".section .fixup, \"ax\"\n"
20054 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20055 + "3:\n"
20056 +
20057 +#ifdef CONFIG_PAX_KERNEXEC
20058 + " movl %%cr0, %0\n"
20059 + " movl %0, %%eax\n"
20060 + " andl $0xFFFEFFFF, %%eax\n"
20061 + " movl %%eax, %%cr0\n"
20062 +#endif
20063 +
20064 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20065 +
20066 +#ifdef CONFIG_PAX_KERNEXEC
20067 + " movl %0, %%cr0\n"
20068 +#endif
20069 +
20070 " jmp 2b\n"
20071 ".previous\n"
20072 _ASM_EXTABLE(1b, 3b)
20073 - : : "r" (from), "r" (to) : "memory");
20074 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20075
20076 from += 64;
20077 to += 64;
20078 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
20079 static void fast_copy_page(void *to, void *from)
20080 {
20081 int i;
20082 + unsigned long cr0;
20083
20084 kernel_fpu_begin();
20085
20086 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
20087 * but that is for later. -AV
20088 */
20089 __asm__ __volatile__(
20090 - "1: prefetch (%0)\n"
20091 - " prefetch 64(%0)\n"
20092 - " prefetch 128(%0)\n"
20093 - " prefetch 192(%0)\n"
20094 - " prefetch 256(%0)\n"
20095 + "1: prefetch (%1)\n"
20096 + " prefetch 64(%1)\n"
20097 + " prefetch 128(%1)\n"
20098 + " prefetch 192(%1)\n"
20099 + " prefetch 256(%1)\n"
20100 "2: \n"
20101 ".section .fixup, \"ax\"\n"
20102 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20103 + "3: \n"
20104 +
20105 +#ifdef CONFIG_PAX_KERNEXEC
20106 + " movl %%cr0, %0\n"
20107 + " movl %0, %%eax\n"
20108 + " andl $0xFFFEFFFF, %%eax\n"
20109 + " movl %%eax, %%cr0\n"
20110 +#endif
20111 +
20112 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20113 +
20114 +#ifdef CONFIG_PAX_KERNEXEC
20115 + " movl %0, %%cr0\n"
20116 +#endif
20117 +
20118 " jmp 2b\n"
20119 ".previous\n"
20120 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20121 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20122
20123 for (i = 0; i < (4096-320)/64; i++) {
20124 __asm__ __volatile__ (
20125 - "1: prefetch 320(%0)\n"
20126 - "2: movq (%0), %%mm0\n"
20127 - " movntq %%mm0, (%1)\n"
20128 - " movq 8(%0), %%mm1\n"
20129 - " movntq %%mm1, 8(%1)\n"
20130 - " movq 16(%0), %%mm2\n"
20131 - " movntq %%mm2, 16(%1)\n"
20132 - " movq 24(%0), %%mm3\n"
20133 - " movntq %%mm3, 24(%1)\n"
20134 - " movq 32(%0), %%mm4\n"
20135 - " movntq %%mm4, 32(%1)\n"
20136 - " movq 40(%0), %%mm5\n"
20137 - " movntq %%mm5, 40(%1)\n"
20138 - " movq 48(%0), %%mm6\n"
20139 - " movntq %%mm6, 48(%1)\n"
20140 - " movq 56(%0), %%mm7\n"
20141 - " movntq %%mm7, 56(%1)\n"
20142 + "1: prefetch 320(%1)\n"
20143 + "2: movq (%1), %%mm0\n"
20144 + " movntq %%mm0, (%2)\n"
20145 + " movq 8(%1), %%mm1\n"
20146 + " movntq %%mm1, 8(%2)\n"
20147 + " movq 16(%1), %%mm2\n"
20148 + " movntq %%mm2, 16(%2)\n"
20149 + " movq 24(%1), %%mm3\n"
20150 + " movntq %%mm3, 24(%2)\n"
20151 + " movq 32(%1), %%mm4\n"
20152 + " movntq %%mm4, 32(%2)\n"
20153 + " movq 40(%1), %%mm5\n"
20154 + " movntq %%mm5, 40(%2)\n"
20155 + " movq 48(%1), %%mm6\n"
20156 + " movntq %%mm6, 48(%2)\n"
20157 + " movq 56(%1), %%mm7\n"
20158 + " movntq %%mm7, 56(%2)\n"
20159 ".section .fixup, \"ax\"\n"
20160 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20161 + "3:\n"
20162 +
20163 +#ifdef CONFIG_PAX_KERNEXEC
20164 + " movl %%cr0, %0\n"
20165 + " movl %0, %%eax\n"
20166 + " andl $0xFFFEFFFF, %%eax\n"
20167 + " movl %%eax, %%cr0\n"
20168 +#endif
20169 +
20170 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20171 +
20172 +#ifdef CONFIG_PAX_KERNEXEC
20173 + " movl %0, %%cr0\n"
20174 +#endif
20175 +
20176 " jmp 2b\n"
20177 ".previous\n"
20178 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
20179 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20180
20181 from += 64;
20182 to += 64;
20183 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
20184 static void fast_copy_page(void *to, void *from)
20185 {
20186 int i;
20187 + unsigned long cr0;
20188
20189 kernel_fpu_begin();
20190
20191 __asm__ __volatile__ (
20192 - "1: prefetch (%0)\n"
20193 - " prefetch 64(%0)\n"
20194 - " prefetch 128(%0)\n"
20195 - " prefetch 192(%0)\n"
20196 - " prefetch 256(%0)\n"
20197 + "1: prefetch (%1)\n"
20198 + " prefetch 64(%1)\n"
20199 + " prefetch 128(%1)\n"
20200 + " prefetch 192(%1)\n"
20201 + " prefetch 256(%1)\n"
20202 "2: \n"
20203 ".section .fixup, \"ax\"\n"
20204 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20205 + "3: \n"
20206 +
20207 +#ifdef CONFIG_PAX_KERNEXEC
20208 + " movl %%cr0, %0\n"
20209 + " movl %0, %%eax\n"
20210 + " andl $0xFFFEFFFF, %%eax\n"
20211 + " movl %%eax, %%cr0\n"
20212 +#endif
20213 +
20214 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
20215 +
20216 +#ifdef CONFIG_PAX_KERNEXEC
20217 + " movl %0, %%cr0\n"
20218 +#endif
20219 +
20220 " jmp 2b\n"
20221 ".previous\n"
20222 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
20223 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
20224
20225 for (i = 0; i < 4096/64; i++) {
20226 __asm__ __volatile__ (
20227 - "1: prefetch 320(%0)\n"
20228 - "2: movq (%0), %%mm0\n"
20229 - " movq 8(%0), %%mm1\n"
20230 - " movq 16(%0), %%mm2\n"
20231 - " movq 24(%0), %%mm3\n"
20232 - " movq %%mm0, (%1)\n"
20233 - " movq %%mm1, 8(%1)\n"
20234 - " movq %%mm2, 16(%1)\n"
20235 - " movq %%mm3, 24(%1)\n"
20236 - " movq 32(%0), %%mm0\n"
20237 - " movq 40(%0), %%mm1\n"
20238 - " movq 48(%0), %%mm2\n"
20239 - " movq 56(%0), %%mm3\n"
20240 - " movq %%mm0, 32(%1)\n"
20241 - " movq %%mm1, 40(%1)\n"
20242 - " movq %%mm2, 48(%1)\n"
20243 - " movq %%mm3, 56(%1)\n"
20244 + "1: prefetch 320(%1)\n"
20245 + "2: movq (%1), %%mm0\n"
20246 + " movq 8(%1), %%mm1\n"
20247 + " movq 16(%1), %%mm2\n"
20248 + " movq 24(%1), %%mm3\n"
20249 + " movq %%mm0, (%2)\n"
20250 + " movq %%mm1, 8(%2)\n"
20251 + " movq %%mm2, 16(%2)\n"
20252 + " movq %%mm3, 24(%2)\n"
20253 + " movq 32(%1), %%mm0\n"
20254 + " movq 40(%1), %%mm1\n"
20255 + " movq 48(%1), %%mm2\n"
20256 + " movq 56(%1), %%mm3\n"
20257 + " movq %%mm0, 32(%2)\n"
20258 + " movq %%mm1, 40(%2)\n"
20259 + " movq %%mm2, 48(%2)\n"
20260 + " movq %%mm3, 56(%2)\n"
20261 ".section .fixup, \"ax\"\n"
20262 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20263 + "3:\n"
20264 +
20265 +#ifdef CONFIG_PAX_KERNEXEC
20266 + " movl %%cr0, %0\n"
20267 + " movl %0, %%eax\n"
20268 + " andl $0xFFFEFFFF, %%eax\n"
20269 + " movl %%eax, %%cr0\n"
20270 +#endif
20271 +
20272 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
20273 +
20274 +#ifdef CONFIG_PAX_KERNEXEC
20275 + " movl %0, %%cr0\n"
20276 +#endif
20277 +
20278 " jmp 2b\n"
20279 ".previous\n"
20280 _ASM_EXTABLE(1b, 3b)
20281 - : : "r" (from), "r" (to) : "memory");
20282 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
20283
20284 from += 64;
20285 to += 64;
20286 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
20287 index 69fa106..adda88b 100644
20288 --- a/arch/x86/lib/msr-reg.S
20289 +++ b/arch/x86/lib/msr-reg.S
20290 @@ -3,6 +3,7 @@
20291 #include <asm/dwarf2.h>
20292 #include <asm/asm.h>
20293 #include <asm/msr.h>
20294 +#include <asm/alternative-asm.h>
20295
20296 #ifdef CONFIG_X86_64
20297 /*
20298 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
20299 CFI_STARTPROC
20300 pushq_cfi %rbx
20301 pushq_cfi %rbp
20302 - movq %rdi, %r10 /* Save pointer */
20303 + movq %rdi, %r9 /* Save pointer */
20304 xorl %r11d, %r11d /* Return value */
20305 movl (%rdi), %eax
20306 movl 4(%rdi), %ecx
20307 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
20308 movl 28(%rdi), %edi
20309 CFI_REMEMBER_STATE
20310 1: \op
20311 -2: movl %eax, (%r10)
20312 +2: movl %eax, (%r9)
20313 movl %r11d, %eax /* Return value */
20314 - movl %ecx, 4(%r10)
20315 - movl %edx, 8(%r10)
20316 - movl %ebx, 12(%r10)
20317 - movl %ebp, 20(%r10)
20318 - movl %esi, 24(%r10)
20319 - movl %edi, 28(%r10)
20320 + movl %ecx, 4(%r9)
20321 + movl %edx, 8(%r9)
20322 + movl %ebx, 12(%r9)
20323 + movl %ebp, 20(%r9)
20324 + movl %esi, 24(%r9)
20325 + movl %edi, 28(%r9)
20326 popq_cfi %rbp
20327 popq_cfi %rbx
20328 + pax_force_retaddr
20329 ret
20330 3:
20331 CFI_RESTORE_STATE
20332 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
20333 index 36b0d15..d381858 100644
20334 --- a/arch/x86/lib/putuser.S
20335 +++ b/arch/x86/lib/putuser.S
20336 @@ -15,7 +15,9 @@
20337 #include <asm/thread_info.h>
20338 #include <asm/errno.h>
20339 #include <asm/asm.h>
20340 -
20341 +#include <asm/segment.h>
20342 +#include <asm/pgtable.h>
20343 +#include <asm/alternative-asm.h>
20344
20345 /*
20346 * __put_user_X
20347 @@ -29,52 +31,119 @@
20348 * as they get called from within inline assembly.
20349 */
20350
20351 -#define ENTER CFI_STARTPROC ; \
20352 - GET_THREAD_INFO(%_ASM_BX)
20353 -#define EXIT ret ; \
20354 +#define ENTER CFI_STARTPROC
20355 +#define EXIT pax_force_retaddr; ret ; \
20356 CFI_ENDPROC
20357
20358 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20359 +#define _DEST %_ASM_CX,%_ASM_BX
20360 +#else
20361 +#define _DEST %_ASM_CX
20362 +#endif
20363 +
20364 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20365 +#define __copyuser_seg gs;
20366 +#else
20367 +#define __copyuser_seg
20368 +#endif
20369 +
20370 .text
20371 ENTRY(__put_user_1)
20372 ENTER
20373 +
20374 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20375 + GET_THREAD_INFO(%_ASM_BX)
20376 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
20377 jae bad_put_user
20378 -1: movb %al,(%_ASM_CX)
20379 +
20380 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20381 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20382 + cmp %_ASM_BX,%_ASM_CX
20383 + jb 1234f
20384 + xor %ebx,%ebx
20385 +1234:
20386 +#endif
20387 +
20388 +#endif
20389 +
20390 +1: __copyuser_seg movb %al,(_DEST)
20391 xor %eax,%eax
20392 EXIT
20393 ENDPROC(__put_user_1)
20394
20395 ENTRY(__put_user_2)
20396 ENTER
20397 +
20398 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20399 + GET_THREAD_INFO(%_ASM_BX)
20400 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20401 sub $1,%_ASM_BX
20402 cmp %_ASM_BX,%_ASM_CX
20403 jae bad_put_user
20404 -2: movw %ax,(%_ASM_CX)
20405 +
20406 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20407 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20408 + cmp %_ASM_BX,%_ASM_CX
20409 + jb 1234f
20410 + xor %ebx,%ebx
20411 +1234:
20412 +#endif
20413 +
20414 +#endif
20415 +
20416 +2: __copyuser_seg movw %ax,(_DEST)
20417 xor %eax,%eax
20418 EXIT
20419 ENDPROC(__put_user_2)
20420
20421 ENTRY(__put_user_4)
20422 ENTER
20423 +
20424 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20425 + GET_THREAD_INFO(%_ASM_BX)
20426 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20427 sub $3,%_ASM_BX
20428 cmp %_ASM_BX,%_ASM_CX
20429 jae bad_put_user
20430 -3: movl %eax,(%_ASM_CX)
20431 +
20432 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20433 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20434 + cmp %_ASM_BX,%_ASM_CX
20435 + jb 1234f
20436 + xor %ebx,%ebx
20437 +1234:
20438 +#endif
20439 +
20440 +#endif
20441 +
20442 +3: __copyuser_seg movl %eax,(_DEST)
20443 xor %eax,%eax
20444 EXIT
20445 ENDPROC(__put_user_4)
20446
20447 ENTRY(__put_user_8)
20448 ENTER
20449 +
20450 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
20451 + GET_THREAD_INFO(%_ASM_BX)
20452 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
20453 sub $7,%_ASM_BX
20454 cmp %_ASM_BX,%_ASM_CX
20455 jae bad_put_user
20456 -4: mov %_ASM_AX,(%_ASM_CX)
20457 +
20458 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20459 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
20460 + cmp %_ASM_BX,%_ASM_CX
20461 + jb 1234f
20462 + xor %ebx,%ebx
20463 +1234:
20464 +#endif
20465 +
20466 +#endif
20467 +
20468 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
20469 #ifdef CONFIG_X86_32
20470 -5: movl %edx,4(%_ASM_CX)
20471 +5: __copyuser_seg movl %edx,4(_DEST)
20472 #endif
20473 xor %eax,%eax
20474 EXIT
20475 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
20476 index 1cad221..6cc4b8d 100644
20477 --- a/arch/x86/lib/rwlock.S
20478 +++ b/arch/x86/lib/rwlock.S
20479 @@ -23,6 +23,7 @@ ENTRY(__write_lock_failed)
20480 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
20481 jnz 0b
20482 ENDFRAME
20483 + pax_force_retaddr
20484 ret
20485 CFI_ENDPROC
20486 END(__write_lock_failed)
20487 @@ -39,6 +40,7 @@ ENTRY(__read_lock_failed)
20488 READ_LOCK_SIZE(dec) (%__lock_ptr)
20489 js 0b
20490 ENDFRAME
20491 + pax_force_retaddr
20492 ret
20493 CFI_ENDPROC
20494 END(__read_lock_failed)
20495 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
20496 index 5dff5f0..cadebf4 100644
20497 --- a/arch/x86/lib/rwsem.S
20498 +++ b/arch/x86/lib/rwsem.S
20499 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
20500 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20501 CFI_RESTORE __ASM_REG(dx)
20502 restore_common_regs
20503 + pax_force_retaddr
20504 ret
20505 CFI_ENDPROC
20506 ENDPROC(call_rwsem_down_read_failed)
20507 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
20508 movq %rax,%rdi
20509 call rwsem_down_write_failed
20510 restore_common_regs
20511 + pax_force_retaddr
20512 ret
20513 CFI_ENDPROC
20514 ENDPROC(call_rwsem_down_write_failed)
20515 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
20516 movq %rax,%rdi
20517 call rwsem_wake
20518 restore_common_regs
20519 -1: ret
20520 +1: pax_force_retaddr
20521 + ret
20522 CFI_ENDPROC
20523 ENDPROC(call_rwsem_wake)
20524
20525 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
20526 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
20527 CFI_RESTORE __ASM_REG(dx)
20528 restore_common_regs
20529 + pax_force_retaddr
20530 ret
20531 CFI_ENDPROC
20532 ENDPROC(call_rwsem_downgrade_wake)
20533 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
20534 index a63efd6..ccecad8 100644
20535 --- a/arch/x86/lib/thunk_64.S
20536 +++ b/arch/x86/lib/thunk_64.S
20537 @@ -8,6 +8,7 @@
20538 #include <linux/linkage.h>
20539 #include <asm/dwarf2.h>
20540 #include <asm/calling.h>
20541 +#include <asm/alternative-asm.h>
20542
20543 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
20544 .macro THUNK name, func, put_ret_addr_in_rdi=0
20545 @@ -41,5 +42,6 @@
20546 SAVE_ARGS
20547 restore:
20548 RESTORE_ARGS
20549 + pax_force_retaddr
20550 ret
20551 CFI_ENDPROC
20552 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
20553 index e218d5d..35679b4 100644
20554 --- a/arch/x86/lib/usercopy_32.c
20555 +++ b/arch/x86/lib/usercopy_32.c
20556 @@ -43,7 +43,7 @@ do { \
20557 __asm__ __volatile__( \
20558 " testl %1,%1\n" \
20559 " jz 2f\n" \
20560 - "0: lodsb\n" \
20561 + "0: "__copyuser_seg"lodsb\n" \
20562 " stosb\n" \
20563 " testb %%al,%%al\n" \
20564 " jz 1f\n" \
20565 @@ -128,10 +128,12 @@ do { \
20566 int __d0; \
20567 might_fault(); \
20568 __asm__ __volatile__( \
20569 + __COPYUSER_SET_ES \
20570 "0: rep; stosl\n" \
20571 " movl %2,%0\n" \
20572 "1: rep; stosb\n" \
20573 "2:\n" \
20574 + __COPYUSER_RESTORE_ES \
20575 ".section .fixup,\"ax\"\n" \
20576 "3: lea 0(%2,%0,4),%0\n" \
20577 " jmp 2b\n" \
20578 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
20579 might_fault();
20580
20581 __asm__ __volatile__(
20582 + __COPYUSER_SET_ES
20583 " testl %0, %0\n"
20584 " jz 3f\n"
20585 " andl %0,%%ecx\n"
20586 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
20587 " subl %%ecx,%0\n"
20588 " addl %0,%%eax\n"
20589 "1:\n"
20590 + __COPYUSER_RESTORE_ES
20591 ".section .fixup,\"ax\"\n"
20592 "2: xorl %%eax,%%eax\n"
20593 " jmp 1b\n"
20594 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
20595
20596 #ifdef CONFIG_X86_INTEL_USERCOPY
20597 static unsigned long
20598 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
20599 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
20600 {
20601 int d0, d1;
20602 __asm__ __volatile__(
20603 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20604 " .align 2,0x90\n"
20605 "3: movl 0(%4), %%eax\n"
20606 "4: movl 4(%4), %%edx\n"
20607 - "5: movl %%eax, 0(%3)\n"
20608 - "6: movl %%edx, 4(%3)\n"
20609 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
20610 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
20611 "7: movl 8(%4), %%eax\n"
20612 "8: movl 12(%4),%%edx\n"
20613 - "9: movl %%eax, 8(%3)\n"
20614 - "10: movl %%edx, 12(%3)\n"
20615 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
20616 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
20617 "11: movl 16(%4), %%eax\n"
20618 "12: movl 20(%4), %%edx\n"
20619 - "13: movl %%eax, 16(%3)\n"
20620 - "14: movl %%edx, 20(%3)\n"
20621 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
20622 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
20623 "15: movl 24(%4), %%eax\n"
20624 "16: movl 28(%4), %%edx\n"
20625 - "17: movl %%eax, 24(%3)\n"
20626 - "18: movl %%edx, 28(%3)\n"
20627 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20628 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20629 "19: movl 32(%4), %%eax\n"
20630 "20: movl 36(%4), %%edx\n"
20631 - "21: movl %%eax, 32(%3)\n"
20632 - "22: movl %%edx, 36(%3)\n"
20633 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20634 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20635 "23: movl 40(%4), %%eax\n"
20636 "24: movl 44(%4), %%edx\n"
20637 - "25: movl %%eax, 40(%3)\n"
20638 - "26: movl %%edx, 44(%3)\n"
20639 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20640 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20641 "27: movl 48(%4), %%eax\n"
20642 "28: movl 52(%4), %%edx\n"
20643 - "29: movl %%eax, 48(%3)\n"
20644 - "30: movl %%edx, 52(%3)\n"
20645 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20646 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20647 "31: movl 56(%4), %%eax\n"
20648 "32: movl 60(%4), %%edx\n"
20649 - "33: movl %%eax, 56(%3)\n"
20650 - "34: movl %%edx, 60(%3)\n"
20651 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20652 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20653 " addl $-64, %0\n"
20654 " addl $64, %4\n"
20655 " addl $64, %3\n"
20656 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
20657 " shrl $2, %0\n"
20658 " andl $3, %%eax\n"
20659 " cld\n"
20660 + __COPYUSER_SET_ES
20661 "99: rep; movsl\n"
20662 "36: movl %%eax, %0\n"
20663 "37: rep; movsb\n"
20664 "100:\n"
20665 + __COPYUSER_RESTORE_ES
20666 + ".section .fixup,\"ax\"\n"
20667 + "101: lea 0(%%eax,%0,4),%0\n"
20668 + " jmp 100b\n"
20669 + ".previous\n"
20670 + ".section __ex_table,\"a\"\n"
20671 + " .align 4\n"
20672 + " .long 1b,100b\n"
20673 + " .long 2b,100b\n"
20674 + " .long 3b,100b\n"
20675 + " .long 4b,100b\n"
20676 + " .long 5b,100b\n"
20677 + " .long 6b,100b\n"
20678 + " .long 7b,100b\n"
20679 + " .long 8b,100b\n"
20680 + " .long 9b,100b\n"
20681 + " .long 10b,100b\n"
20682 + " .long 11b,100b\n"
20683 + " .long 12b,100b\n"
20684 + " .long 13b,100b\n"
20685 + " .long 14b,100b\n"
20686 + " .long 15b,100b\n"
20687 + " .long 16b,100b\n"
20688 + " .long 17b,100b\n"
20689 + " .long 18b,100b\n"
20690 + " .long 19b,100b\n"
20691 + " .long 20b,100b\n"
20692 + " .long 21b,100b\n"
20693 + " .long 22b,100b\n"
20694 + " .long 23b,100b\n"
20695 + " .long 24b,100b\n"
20696 + " .long 25b,100b\n"
20697 + " .long 26b,100b\n"
20698 + " .long 27b,100b\n"
20699 + " .long 28b,100b\n"
20700 + " .long 29b,100b\n"
20701 + " .long 30b,100b\n"
20702 + " .long 31b,100b\n"
20703 + " .long 32b,100b\n"
20704 + " .long 33b,100b\n"
20705 + " .long 34b,100b\n"
20706 + " .long 35b,100b\n"
20707 + " .long 36b,100b\n"
20708 + " .long 37b,100b\n"
20709 + " .long 99b,101b\n"
20710 + ".previous"
20711 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
20712 + : "1"(to), "2"(from), "0"(size)
20713 + : "eax", "edx", "memory");
20714 + return size;
20715 +}
20716 +
20717 +static unsigned long
20718 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20719 +{
20720 + int d0, d1;
20721 + __asm__ __volatile__(
20722 + " .align 2,0x90\n"
20723 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20724 + " cmpl $67, %0\n"
20725 + " jbe 3f\n"
20726 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20727 + " .align 2,0x90\n"
20728 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20729 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20730 + "5: movl %%eax, 0(%3)\n"
20731 + "6: movl %%edx, 4(%3)\n"
20732 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20733 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20734 + "9: movl %%eax, 8(%3)\n"
20735 + "10: movl %%edx, 12(%3)\n"
20736 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20737 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20738 + "13: movl %%eax, 16(%3)\n"
20739 + "14: movl %%edx, 20(%3)\n"
20740 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20741 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20742 + "17: movl %%eax, 24(%3)\n"
20743 + "18: movl %%edx, 28(%3)\n"
20744 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20745 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20746 + "21: movl %%eax, 32(%3)\n"
20747 + "22: movl %%edx, 36(%3)\n"
20748 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20749 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20750 + "25: movl %%eax, 40(%3)\n"
20751 + "26: movl %%edx, 44(%3)\n"
20752 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20753 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20754 + "29: movl %%eax, 48(%3)\n"
20755 + "30: movl %%edx, 52(%3)\n"
20756 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20757 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20758 + "33: movl %%eax, 56(%3)\n"
20759 + "34: movl %%edx, 60(%3)\n"
20760 + " addl $-64, %0\n"
20761 + " addl $64, %4\n"
20762 + " addl $64, %3\n"
20763 + " cmpl $63, %0\n"
20764 + " ja 1b\n"
20765 + "35: movl %0, %%eax\n"
20766 + " shrl $2, %0\n"
20767 + " andl $3, %%eax\n"
20768 + " cld\n"
20769 + "99: rep; "__copyuser_seg" movsl\n"
20770 + "36: movl %%eax, %0\n"
20771 + "37: rep; "__copyuser_seg" movsb\n"
20772 + "100:\n"
20773 ".section .fixup,\"ax\"\n"
20774 "101: lea 0(%%eax,%0,4),%0\n"
20775 " jmp 100b\n"
20776 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20777 int d0, d1;
20778 __asm__ __volatile__(
20779 " .align 2,0x90\n"
20780 - "0: movl 32(%4), %%eax\n"
20781 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20782 " cmpl $67, %0\n"
20783 " jbe 2f\n"
20784 - "1: movl 64(%4), %%eax\n"
20785 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20786 " .align 2,0x90\n"
20787 - "2: movl 0(%4), %%eax\n"
20788 - "21: movl 4(%4), %%edx\n"
20789 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20790 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20791 " movl %%eax, 0(%3)\n"
20792 " movl %%edx, 4(%3)\n"
20793 - "3: movl 8(%4), %%eax\n"
20794 - "31: movl 12(%4),%%edx\n"
20795 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20796 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20797 " movl %%eax, 8(%3)\n"
20798 " movl %%edx, 12(%3)\n"
20799 - "4: movl 16(%4), %%eax\n"
20800 - "41: movl 20(%4), %%edx\n"
20801 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20802 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20803 " movl %%eax, 16(%3)\n"
20804 " movl %%edx, 20(%3)\n"
20805 - "10: movl 24(%4), %%eax\n"
20806 - "51: movl 28(%4), %%edx\n"
20807 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20808 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20809 " movl %%eax, 24(%3)\n"
20810 " movl %%edx, 28(%3)\n"
20811 - "11: movl 32(%4), %%eax\n"
20812 - "61: movl 36(%4), %%edx\n"
20813 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20814 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20815 " movl %%eax, 32(%3)\n"
20816 " movl %%edx, 36(%3)\n"
20817 - "12: movl 40(%4), %%eax\n"
20818 - "71: movl 44(%4), %%edx\n"
20819 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20820 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20821 " movl %%eax, 40(%3)\n"
20822 " movl %%edx, 44(%3)\n"
20823 - "13: movl 48(%4), %%eax\n"
20824 - "81: movl 52(%4), %%edx\n"
20825 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20826 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20827 " movl %%eax, 48(%3)\n"
20828 " movl %%edx, 52(%3)\n"
20829 - "14: movl 56(%4), %%eax\n"
20830 - "91: movl 60(%4), %%edx\n"
20831 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20832 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20833 " movl %%eax, 56(%3)\n"
20834 " movl %%edx, 60(%3)\n"
20835 " addl $-64, %0\n"
20836 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
20837 " shrl $2, %0\n"
20838 " andl $3, %%eax\n"
20839 " cld\n"
20840 - "6: rep; movsl\n"
20841 + "6: rep; "__copyuser_seg" movsl\n"
20842 " movl %%eax,%0\n"
20843 - "7: rep; movsb\n"
20844 + "7: rep; "__copyuser_seg" movsb\n"
20845 "8:\n"
20846 ".section .fixup,\"ax\"\n"
20847 "9: lea 0(%%eax,%0,4),%0\n"
20848 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
20849
20850 __asm__ __volatile__(
20851 " .align 2,0x90\n"
20852 - "0: movl 32(%4), %%eax\n"
20853 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20854 " cmpl $67, %0\n"
20855 " jbe 2f\n"
20856 - "1: movl 64(%4), %%eax\n"
20857 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20858 " .align 2,0x90\n"
20859 - "2: movl 0(%4), %%eax\n"
20860 - "21: movl 4(%4), %%edx\n"
20861 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20862 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20863 " movnti %%eax, 0(%3)\n"
20864 " movnti %%edx, 4(%3)\n"
20865 - "3: movl 8(%4), %%eax\n"
20866 - "31: movl 12(%4),%%edx\n"
20867 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20868 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20869 " movnti %%eax, 8(%3)\n"
20870 " movnti %%edx, 12(%3)\n"
20871 - "4: movl 16(%4), %%eax\n"
20872 - "41: movl 20(%4), %%edx\n"
20873 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20874 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20875 " movnti %%eax, 16(%3)\n"
20876 " movnti %%edx, 20(%3)\n"
20877 - "10: movl 24(%4), %%eax\n"
20878 - "51: movl 28(%4), %%edx\n"
20879 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20880 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20881 " movnti %%eax, 24(%3)\n"
20882 " movnti %%edx, 28(%3)\n"
20883 - "11: movl 32(%4), %%eax\n"
20884 - "61: movl 36(%4), %%edx\n"
20885 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20886 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20887 " movnti %%eax, 32(%3)\n"
20888 " movnti %%edx, 36(%3)\n"
20889 - "12: movl 40(%4), %%eax\n"
20890 - "71: movl 44(%4), %%edx\n"
20891 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20892 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20893 " movnti %%eax, 40(%3)\n"
20894 " movnti %%edx, 44(%3)\n"
20895 - "13: movl 48(%4), %%eax\n"
20896 - "81: movl 52(%4), %%edx\n"
20897 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20898 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20899 " movnti %%eax, 48(%3)\n"
20900 " movnti %%edx, 52(%3)\n"
20901 - "14: movl 56(%4), %%eax\n"
20902 - "91: movl 60(%4), %%edx\n"
20903 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20904 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20905 " movnti %%eax, 56(%3)\n"
20906 " movnti %%edx, 60(%3)\n"
20907 " addl $-64, %0\n"
20908 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
20909 " shrl $2, %0\n"
20910 " andl $3, %%eax\n"
20911 " cld\n"
20912 - "6: rep; movsl\n"
20913 + "6: rep; "__copyuser_seg" movsl\n"
20914 " movl %%eax,%0\n"
20915 - "7: rep; movsb\n"
20916 + "7: rep; "__copyuser_seg" movsb\n"
20917 "8:\n"
20918 ".section .fixup,\"ax\"\n"
20919 "9: lea 0(%%eax,%0,4),%0\n"
20920 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
20921
20922 __asm__ __volatile__(
20923 " .align 2,0x90\n"
20924 - "0: movl 32(%4), %%eax\n"
20925 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20926 " cmpl $67, %0\n"
20927 " jbe 2f\n"
20928 - "1: movl 64(%4), %%eax\n"
20929 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20930 " .align 2,0x90\n"
20931 - "2: movl 0(%4), %%eax\n"
20932 - "21: movl 4(%4), %%edx\n"
20933 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20934 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20935 " movnti %%eax, 0(%3)\n"
20936 " movnti %%edx, 4(%3)\n"
20937 - "3: movl 8(%4), %%eax\n"
20938 - "31: movl 12(%4),%%edx\n"
20939 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20940 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20941 " movnti %%eax, 8(%3)\n"
20942 " movnti %%edx, 12(%3)\n"
20943 - "4: movl 16(%4), %%eax\n"
20944 - "41: movl 20(%4), %%edx\n"
20945 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20946 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20947 " movnti %%eax, 16(%3)\n"
20948 " movnti %%edx, 20(%3)\n"
20949 - "10: movl 24(%4), %%eax\n"
20950 - "51: movl 28(%4), %%edx\n"
20951 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20952 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20953 " movnti %%eax, 24(%3)\n"
20954 " movnti %%edx, 28(%3)\n"
20955 - "11: movl 32(%4), %%eax\n"
20956 - "61: movl 36(%4), %%edx\n"
20957 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20958 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20959 " movnti %%eax, 32(%3)\n"
20960 " movnti %%edx, 36(%3)\n"
20961 - "12: movl 40(%4), %%eax\n"
20962 - "71: movl 44(%4), %%edx\n"
20963 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20964 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20965 " movnti %%eax, 40(%3)\n"
20966 " movnti %%edx, 44(%3)\n"
20967 - "13: movl 48(%4), %%eax\n"
20968 - "81: movl 52(%4), %%edx\n"
20969 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20970 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20971 " movnti %%eax, 48(%3)\n"
20972 " movnti %%edx, 52(%3)\n"
20973 - "14: movl 56(%4), %%eax\n"
20974 - "91: movl 60(%4), %%edx\n"
20975 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20976 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20977 " movnti %%eax, 56(%3)\n"
20978 " movnti %%edx, 60(%3)\n"
20979 " addl $-64, %0\n"
20980 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
20981 " shrl $2, %0\n"
20982 " andl $3, %%eax\n"
20983 " cld\n"
20984 - "6: rep; movsl\n"
20985 + "6: rep; "__copyuser_seg" movsl\n"
20986 " movl %%eax,%0\n"
20987 - "7: rep; movsb\n"
20988 + "7: rep; "__copyuser_seg" movsb\n"
20989 "8:\n"
20990 ".section .fixup,\"ax\"\n"
20991 "9: lea 0(%%eax,%0,4),%0\n"
20992 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
20993 */
20994 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20995 unsigned long size);
20996 -unsigned long __copy_user_intel(void __user *to, const void *from,
20997 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20998 + unsigned long size);
20999 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
21000 unsigned long size);
21001 unsigned long __copy_user_zeroing_intel_nocache(void *to,
21002 const void __user *from, unsigned long size);
21003 #endif /* CONFIG_X86_INTEL_USERCOPY */
21004
21005 /* Generic arbitrary sized copy. */
21006 -#define __copy_user(to, from, size) \
21007 +#define __copy_user(to, from, size, prefix, set, restore) \
21008 do { \
21009 int __d0, __d1, __d2; \
21010 __asm__ __volatile__( \
21011 + set \
21012 " cmp $7,%0\n" \
21013 " jbe 1f\n" \
21014 " movl %1,%0\n" \
21015 " negl %0\n" \
21016 " andl $7,%0\n" \
21017 " subl %0,%3\n" \
21018 - "4: rep; movsb\n" \
21019 + "4: rep; "prefix"movsb\n" \
21020 " movl %3,%0\n" \
21021 " shrl $2,%0\n" \
21022 " andl $3,%3\n" \
21023 " .align 2,0x90\n" \
21024 - "0: rep; movsl\n" \
21025 + "0: rep; "prefix"movsl\n" \
21026 " movl %3,%0\n" \
21027 - "1: rep; movsb\n" \
21028 + "1: rep; "prefix"movsb\n" \
21029 "2:\n" \
21030 + restore \
21031 ".section .fixup,\"ax\"\n" \
21032 "5: addl %3,%0\n" \
21033 " jmp 2b\n" \
21034 @@ -682,14 +799,14 @@ do { \
21035 " negl %0\n" \
21036 " andl $7,%0\n" \
21037 " subl %0,%3\n" \
21038 - "4: rep; movsb\n" \
21039 + "4: rep; "__copyuser_seg"movsb\n" \
21040 " movl %3,%0\n" \
21041 " shrl $2,%0\n" \
21042 " andl $3,%3\n" \
21043 " .align 2,0x90\n" \
21044 - "0: rep; movsl\n" \
21045 + "0: rep; "__copyuser_seg"movsl\n" \
21046 " movl %3,%0\n" \
21047 - "1: rep; movsb\n" \
21048 + "1: rep; "__copyuser_seg"movsb\n" \
21049 "2:\n" \
21050 ".section .fixup,\"ax\"\n" \
21051 "5: addl %3,%0\n" \
21052 @@ -775,9 +892,9 @@ survive:
21053 }
21054 #endif
21055 if (movsl_is_ok(to, from, n))
21056 - __copy_user(to, from, n);
21057 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
21058 else
21059 - n = __copy_user_intel(to, from, n);
21060 + n = __generic_copy_to_user_intel(to, from, n);
21061 return n;
21062 }
21063 EXPORT_SYMBOL(__copy_to_user_ll);
21064 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
21065 unsigned long n)
21066 {
21067 if (movsl_is_ok(to, from, n))
21068 - __copy_user(to, from, n);
21069 + __copy_user(to, from, n, __copyuser_seg, "", "");
21070 else
21071 - n = __copy_user_intel((void __user *)to,
21072 - (const void *)from, n);
21073 + n = __generic_copy_from_user_intel(to, from, n);
21074 return n;
21075 }
21076 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
21077 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
21078 if (n > 64 && cpu_has_xmm2)
21079 n = __copy_user_intel_nocache(to, from, n);
21080 else
21081 - __copy_user(to, from, n);
21082 + __copy_user(to, from, n, __copyuser_seg, "", "");
21083 #else
21084 - __copy_user(to, from, n);
21085 + __copy_user(to, from, n, __copyuser_seg, "", "");
21086 #endif
21087 return n;
21088 }
21089 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
21090
21091 -/**
21092 - * copy_to_user: - Copy a block of data into user space.
21093 - * @to: Destination address, in user space.
21094 - * @from: Source address, in kernel space.
21095 - * @n: Number of bytes to copy.
21096 - *
21097 - * Context: User context only. This function may sleep.
21098 - *
21099 - * Copy data from kernel space to user space.
21100 - *
21101 - * Returns number of bytes that could not be copied.
21102 - * On success, this will be zero.
21103 - */
21104 -unsigned long
21105 -copy_to_user(void __user *to, const void *from, unsigned long n)
21106 +void copy_from_user_overflow(void)
21107 {
21108 - if (access_ok(VERIFY_WRITE, to, n))
21109 - n = __copy_to_user(to, from, n);
21110 - return n;
21111 + WARN(1, "Buffer overflow detected!\n");
21112 }
21113 -EXPORT_SYMBOL(copy_to_user);
21114 +EXPORT_SYMBOL(copy_from_user_overflow);
21115
21116 -/**
21117 - * copy_from_user: - Copy a block of data from user space.
21118 - * @to: Destination address, in kernel space.
21119 - * @from: Source address, in user space.
21120 - * @n: Number of bytes to copy.
21121 - *
21122 - * Context: User context only. This function may sleep.
21123 - *
21124 - * Copy data from user space to kernel space.
21125 - *
21126 - * Returns number of bytes that could not be copied.
21127 - * On success, this will be zero.
21128 - *
21129 - * If some data could not be copied, this function will pad the copied
21130 - * data to the requested size using zero bytes.
21131 - */
21132 -unsigned long
21133 -_copy_from_user(void *to, const void __user *from, unsigned long n)
21134 +void copy_to_user_overflow(void)
21135 {
21136 - if (access_ok(VERIFY_READ, from, n))
21137 - n = __copy_from_user(to, from, n);
21138 - else
21139 - memset(to, 0, n);
21140 - return n;
21141 + WARN(1, "Buffer overflow detected!\n");
21142 }
21143 -EXPORT_SYMBOL(_copy_from_user);
21144 +EXPORT_SYMBOL(copy_to_user_overflow);
21145
21146 -void copy_from_user_overflow(void)
21147 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21148 +void __set_fs(mm_segment_t x)
21149 {
21150 - WARN(1, "Buffer overflow detected!\n");
21151 + switch (x.seg) {
21152 + case 0:
21153 + loadsegment(gs, 0);
21154 + break;
21155 + case TASK_SIZE_MAX:
21156 + loadsegment(gs, __USER_DS);
21157 + break;
21158 + case -1UL:
21159 + loadsegment(gs, __KERNEL_DS);
21160 + break;
21161 + default:
21162 + BUG();
21163 + }
21164 + return;
21165 }
21166 -EXPORT_SYMBOL(copy_from_user_overflow);
21167 +EXPORT_SYMBOL(__set_fs);
21168 +
21169 +void set_fs(mm_segment_t x)
21170 +{
21171 + current_thread_info()->addr_limit = x;
21172 + __set_fs(x);
21173 +}
21174 +EXPORT_SYMBOL(set_fs);
21175 +#endif
21176 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
21177 index b7c2849..5ef0f95 100644
21178 --- a/arch/x86/lib/usercopy_64.c
21179 +++ b/arch/x86/lib/usercopy_64.c
21180 @@ -42,6 +42,12 @@ long
21181 __strncpy_from_user(char *dst, const char __user *src, long count)
21182 {
21183 long res;
21184 +
21185 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21186 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21187 + src += PAX_USER_SHADOW_BASE;
21188 +#endif
21189 +
21190 __do_strncpy_from_user(dst, src, count, res);
21191 return res;
21192 }
21193 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
21194 {
21195 long __d0;
21196 might_fault();
21197 +
21198 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21199 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
21200 + addr += PAX_USER_SHADOW_BASE;
21201 +#endif
21202 +
21203 /* no memory constraint because it doesn't change any memory gcc knows
21204 about */
21205 asm volatile(
21206 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
21207
21208 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
21209 {
21210 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21211 - return copy_user_generic((__force void *)to, (__force void *)from, len);
21212 - }
21213 - return len;
21214 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
21215 +
21216 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21217 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
21218 + to += PAX_USER_SHADOW_BASE;
21219 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
21220 + from += PAX_USER_SHADOW_BASE;
21221 +#endif
21222 +
21223 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
21224 + }
21225 + return len;
21226 }
21227 EXPORT_SYMBOL(copy_in_user);
21228
21229 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
21230 * it is not necessary to optimize tail handling.
21231 */
21232 unsigned long
21233 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
21234 +copy_user_handle_tail(char __user *to, char __user *from, unsigned len, unsigned zerorest)
21235 {
21236 char c;
21237 unsigned zero_len;
21238 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
21239 index d0474ad..36e9257 100644
21240 --- a/arch/x86/mm/extable.c
21241 +++ b/arch/x86/mm/extable.c
21242 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
21243 const struct exception_table_entry *fixup;
21244
21245 #ifdef CONFIG_PNPBIOS
21246 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
21247 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
21248 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
21249 extern u32 pnp_bios_is_utter_crap;
21250 pnp_bios_is_utter_crap = 1;
21251 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
21252 index 0d17c8c..4f4764f 100644
21253 --- a/arch/x86/mm/fault.c
21254 +++ b/arch/x86/mm/fault.c
21255 @@ -13,11 +13,18 @@
21256 #include <linux/perf_event.h> /* perf_sw_event */
21257 #include <linux/hugetlb.h> /* hstate_index_to_shift */
21258 #include <linux/prefetch.h> /* prefetchw */
21259 +#include <linux/unistd.h>
21260 +#include <linux/compiler.h>
21261
21262 #include <asm/traps.h> /* dotraplinkage, ... */
21263 #include <asm/pgalloc.h> /* pgd_*(), ... */
21264 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
21265 #include <asm/vsyscall.h>
21266 +#include <asm/tlbflush.h>
21267 +
21268 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21269 +#include <asm/stacktrace.h>
21270 +#endif
21271
21272 /*
21273 * Page fault error code bits:
21274 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
21275 int ret = 0;
21276
21277 /* kprobe_running() needs smp_processor_id() */
21278 - if (kprobes_built_in() && !user_mode_vm(regs)) {
21279 + if (kprobes_built_in() && !user_mode(regs)) {
21280 preempt_disable();
21281 if (kprobe_running() && kprobe_fault_handler(regs, 14))
21282 ret = 1;
21283 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
21284 return !instr_lo || (instr_lo>>1) == 1;
21285 case 0x00:
21286 /* Prefetch instruction is 0x0F0D or 0x0F18 */
21287 - if (probe_kernel_address(instr, opcode))
21288 + if (user_mode(regs)) {
21289 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21290 + return 0;
21291 + } else if (probe_kernel_address(instr, opcode))
21292 return 0;
21293
21294 *prefetch = (instr_lo == 0xF) &&
21295 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
21296 while (instr < max_instr) {
21297 unsigned char opcode;
21298
21299 - if (probe_kernel_address(instr, opcode))
21300 + if (user_mode(regs)) {
21301 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
21302 + break;
21303 + } else if (probe_kernel_address(instr, opcode))
21304 break;
21305
21306 instr++;
21307 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
21308 force_sig_info(si_signo, &info, tsk);
21309 }
21310
21311 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21312 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
21313 +#endif
21314 +
21315 +#ifdef CONFIG_PAX_EMUTRAMP
21316 +static int pax_handle_fetch_fault(struct pt_regs *regs);
21317 +#endif
21318 +
21319 +#ifdef CONFIG_PAX_PAGEEXEC
21320 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
21321 +{
21322 + pgd_t *pgd;
21323 + pud_t *pud;
21324 + pmd_t *pmd;
21325 +
21326 + pgd = pgd_offset(mm, address);
21327 + if (!pgd_present(*pgd))
21328 + return NULL;
21329 + pud = pud_offset(pgd, address);
21330 + if (!pud_present(*pud))
21331 + return NULL;
21332 + pmd = pmd_offset(pud, address);
21333 + if (!pmd_present(*pmd))
21334 + return NULL;
21335 + return pmd;
21336 +}
21337 +#endif
21338 +
21339 DEFINE_SPINLOCK(pgd_lock);
21340 LIST_HEAD(pgd_list);
21341
21342 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
21343 for (address = VMALLOC_START & PMD_MASK;
21344 address >= TASK_SIZE && address < FIXADDR_TOP;
21345 address += PMD_SIZE) {
21346 +
21347 +#ifdef CONFIG_PAX_PER_CPU_PGD
21348 + unsigned long cpu;
21349 +#else
21350 struct page *page;
21351 +#endif
21352
21353 spin_lock(&pgd_lock);
21354 +
21355 +#ifdef CONFIG_PAX_PER_CPU_PGD
21356 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21357 + pgd_t *pgd = get_cpu_pgd(cpu);
21358 + pmd_t *ret;
21359 +#else
21360 list_for_each_entry(page, &pgd_list, lru) {
21361 + pgd_t *pgd = page_address(page);
21362 spinlock_t *pgt_lock;
21363 pmd_t *ret;
21364
21365 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
21366 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
21367
21368 spin_lock(pgt_lock);
21369 - ret = vmalloc_sync_one(page_address(page), address);
21370 +#endif
21371 +
21372 + ret = vmalloc_sync_one(pgd, address);
21373 +
21374 +#ifndef CONFIG_PAX_PER_CPU_PGD
21375 spin_unlock(pgt_lock);
21376 +#endif
21377
21378 if (!ret)
21379 break;
21380 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21381 * an interrupt in the middle of a task switch..
21382 */
21383 pgd_paddr = read_cr3();
21384 +
21385 +#ifdef CONFIG_PAX_PER_CPU_PGD
21386 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
21387 +#endif
21388 +
21389 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
21390 if (!pmd_k)
21391 return -1;
21392 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
21393 * happen within a race in page table update. In the later
21394 * case just flush:
21395 */
21396 +
21397 +#ifdef CONFIG_PAX_PER_CPU_PGD
21398 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
21399 + pgd = pgd_offset_cpu(smp_processor_id(), address);
21400 +#else
21401 pgd = pgd_offset(current->active_mm, address);
21402 +#endif
21403 +
21404 pgd_ref = pgd_offset_k(address);
21405 if (pgd_none(*pgd_ref))
21406 return -1;
21407 @@ -534,7 +604,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
21408 static int is_errata100(struct pt_regs *regs, unsigned long address)
21409 {
21410 #ifdef CONFIG_X86_64
21411 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
21412 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
21413 return 1;
21414 #endif
21415 return 0;
21416 @@ -561,7 +631,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
21417 }
21418
21419 static const char nx_warning[] = KERN_CRIT
21420 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
21421 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
21422
21423 static void
21424 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21425 @@ -570,15 +640,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
21426 if (!oops_may_print())
21427 return;
21428
21429 - if (error_code & PF_INSTR) {
21430 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
21431 unsigned int level;
21432
21433 pte_t *pte = lookup_address(address, &level);
21434
21435 if (pte && pte_present(*pte) && !pte_exec(*pte))
21436 - printk(nx_warning, current_uid());
21437 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
21438 }
21439
21440 +#ifdef CONFIG_PAX_KERNEXEC
21441 + if (init_mm.start_code <= address && address < init_mm.end_code) {
21442 + if (current->signal->curr_ip)
21443 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21444 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
21445 + else
21446 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
21447 + current->comm, task_pid_nr(current), current_uid(), current_euid());
21448 + }
21449 +#endif
21450 +
21451 printk(KERN_ALERT "BUG: unable to handle kernel ");
21452 if (address < PAGE_SIZE)
21453 printk(KERN_CONT "NULL pointer dereference");
21454 @@ -733,6 +814,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
21455 }
21456 #endif
21457
21458 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21459 + if (pax_is_fetch_fault(regs, error_code, address)) {
21460 +
21461 +#ifdef CONFIG_PAX_EMUTRAMP
21462 + switch (pax_handle_fetch_fault(regs)) {
21463 + case 2:
21464 + return;
21465 + }
21466 +#endif
21467 +
21468 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21469 + do_group_exit(SIGKILL);
21470 + }
21471 +#endif
21472 +
21473 if (unlikely(show_unhandled_signals))
21474 show_signal_msg(regs, error_code, address, tsk);
21475
21476 @@ -829,7 +925,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
21477 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
21478 printk(KERN_ERR
21479 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
21480 - tsk->comm, tsk->pid, address);
21481 + tsk->comm, task_pid_nr(tsk), address);
21482 code = BUS_MCEERR_AR;
21483 }
21484 #endif
21485 @@ -884,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
21486 return 1;
21487 }
21488
21489 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21490 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
21491 +{
21492 + pte_t *pte;
21493 + pmd_t *pmd;
21494 + spinlock_t *ptl;
21495 + unsigned char pte_mask;
21496 +
21497 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
21498 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
21499 + return 0;
21500 +
21501 + /* PaX: it's our fault, let's handle it if we can */
21502 +
21503 + /* PaX: take a look at read faults before acquiring any locks */
21504 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
21505 + /* instruction fetch attempt from a protected page in user mode */
21506 + up_read(&mm->mmap_sem);
21507 +
21508 +#ifdef CONFIG_PAX_EMUTRAMP
21509 + switch (pax_handle_fetch_fault(regs)) {
21510 + case 2:
21511 + return 1;
21512 + }
21513 +#endif
21514 +
21515 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21516 + do_group_exit(SIGKILL);
21517 + }
21518 +
21519 + pmd = pax_get_pmd(mm, address);
21520 + if (unlikely(!pmd))
21521 + return 0;
21522 +
21523 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21524 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21525 + pte_unmap_unlock(pte, ptl);
21526 + return 0;
21527 + }
21528 +
21529 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21530 + /* write attempt to a protected page in user mode */
21531 + pte_unmap_unlock(pte, ptl);
21532 + return 0;
21533 + }
21534 +
21535 +#ifdef CONFIG_SMP
21536 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21537 +#else
21538 + if (likely(address > get_limit(regs->cs)))
21539 +#endif
21540 + {
21541 + set_pte(pte, pte_mkread(*pte));
21542 + __flush_tlb_one(address);
21543 + pte_unmap_unlock(pte, ptl);
21544 + up_read(&mm->mmap_sem);
21545 + return 1;
21546 + }
21547 +
21548 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21549 +
21550 + /*
21551 + * PaX: fill DTLB with user rights and retry
21552 + */
21553 + __asm__ __volatile__ (
21554 + "orb %2,(%1)\n"
21555 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21556 +/*
21557 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21558 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21559 + * page fault when examined during a TLB load attempt. this is true not only
21560 + * for PTEs holding a non-present entry but also present entries that will
21561 + * raise a page fault (such as those set up by PaX, or the copy-on-write
21562 + * mechanism). in effect it means that we do *not* need to flush the TLBs
21563 + * for our target pages since their PTEs are simply not in the TLBs at all.
21564 +
21565 + * the best thing in omitting it is that we gain around 15-20% speed in the
21566 + * fast path of the page fault handler and can get rid of tracing since we
21567 + * can no longer flush unintended entries.
21568 + */
21569 + "invlpg (%0)\n"
21570 +#endif
21571 + __copyuser_seg"testb $0,(%0)\n"
21572 + "xorb %3,(%1)\n"
21573 + :
21574 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21575 + : "memory", "cc");
21576 + pte_unmap_unlock(pte, ptl);
21577 + up_read(&mm->mmap_sem);
21578 + return 1;
21579 +}
21580 +#endif
21581 +
21582 /*
21583 * Handle a spurious fault caused by a stale TLB entry.
21584 *
21585 @@ -956,6 +1145,9 @@ int show_unhandled_signals = 1;
21586 static inline int
21587 access_error(unsigned long error_code, struct vm_area_struct *vma)
21588 {
21589 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21590 + return 1;
21591 +
21592 if (error_code & PF_WRITE) {
21593 /* write, present and write, not present: */
21594 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21595 @@ -989,19 +1181,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21596 {
21597 struct vm_area_struct *vma;
21598 struct task_struct *tsk;
21599 - unsigned long address;
21600 struct mm_struct *mm;
21601 int fault;
21602 int write = error_code & PF_WRITE;
21603 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
21604 (write ? FAULT_FLAG_WRITE : 0);
21605
21606 + /* Get the faulting address: */
21607 + unsigned long address = read_cr2();
21608 +
21609 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21610 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21611 + if (!search_exception_tables(regs->ip)) {
21612 + bad_area_nosemaphore(regs, error_code, address);
21613 + return;
21614 + }
21615 + if (address < PAX_USER_SHADOW_BASE) {
21616 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21617 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
21618 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21619 + } else
21620 + address -= PAX_USER_SHADOW_BASE;
21621 + }
21622 +#endif
21623 +
21624 tsk = current;
21625 mm = tsk->mm;
21626
21627 - /* Get the faulting address: */
21628 - address = read_cr2();
21629 -
21630 /*
21631 * Detect and handle instructions that would cause a page fault for
21632 * both a tracked kernel page and a userspace page.
21633 @@ -1061,7 +1267,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
21634 * User-mode registers count as a user access even for any
21635 * potential system fault or CPU buglet:
21636 */
21637 - if (user_mode_vm(regs)) {
21638 + if (user_mode(regs)) {
21639 local_irq_enable();
21640 error_code |= PF_USER;
21641 } else {
21642 @@ -1116,6 +1322,11 @@ retry:
21643 might_sleep();
21644 }
21645
21646 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21647 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21648 + return;
21649 +#endif
21650 +
21651 vma = find_vma(mm, address);
21652 if (unlikely(!vma)) {
21653 bad_area(regs, error_code, address);
21654 @@ -1127,18 +1338,24 @@ retry:
21655 bad_area(regs, error_code, address);
21656 return;
21657 }
21658 - if (error_code & PF_USER) {
21659 - /*
21660 - * Accessing the stack below %sp is always a bug.
21661 - * The large cushion allows instructions like enter
21662 - * and pusha to work. ("enter $65535, $31" pushes
21663 - * 32 pointers and then decrements %sp by 65535.)
21664 - */
21665 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21666 - bad_area(regs, error_code, address);
21667 - return;
21668 - }
21669 + /*
21670 + * Accessing the stack below %sp is always a bug.
21671 + * The large cushion allows instructions like enter
21672 + * and pusha to work. ("enter $65535, $31" pushes
21673 + * 32 pointers and then decrements %sp by 65535.)
21674 + */
21675 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21676 + bad_area(regs, error_code, address);
21677 + return;
21678 + }
21679 +
21680 +#ifdef CONFIG_PAX_SEGMEXEC
21681 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21682 + bad_area(regs, error_code, address);
21683 + return;
21684 }
21685 +#endif
21686 +
21687 if (unlikely(expand_stack(vma, address))) {
21688 bad_area(regs, error_code, address);
21689 return;
21690 @@ -1193,3 +1410,240 @@ good_area:
21691
21692 up_read(&mm->mmap_sem);
21693 }
21694 +
21695 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21696 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
21697 +{
21698 + struct mm_struct *mm = current->mm;
21699 + unsigned long ip = regs->ip;
21700 +
21701 + if (v8086_mode(regs))
21702 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
21703 +
21704 +#ifdef CONFIG_PAX_PAGEEXEC
21705 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
21706 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
21707 + return true;
21708 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
21709 + return true;
21710 + return false;
21711 + }
21712 +#endif
21713 +
21714 +#ifdef CONFIG_PAX_SEGMEXEC
21715 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
21716 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
21717 + return true;
21718 + return false;
21719 + }
21720 +#endif
21721 +
21722 + return false;
21723 +}
21724 +#endif
21725 +
21726 +#ifdef CONFIG_PAX_EMUTRAMP
21727 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21728 +{
21729 + int err;
21730 +
21731 + do { /* PaX: gcc trampoline emulation #1 */
21732 + unsigned char mov1, mov2;
21733 + unsigned short jmp;
21734 + unsigned int addr1, addr2;
21735 +
21736 +#ifdef CONFIG_X86_64
21737 + if ((regs->ip + 11) >> 32)
21738 + break;
21739 +#endif
21740 +
21741 + err = get_user(mov1, (unsigned char __user *)regs->ip);
21742 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21743 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21744 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21745 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21746 +
21747 + if (err)
21748 + break;
21749 +
21750 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21751 + regs->cx = addr1;
21752 + regs->ax = addr2;
21753 + regs->ip = addr2;
21754 + return 2;
21755 + }
21756 + } while (0);
21757 +
21758 + do { /* PaX: gcc trampoline emulation #2 */
21759 + unsigned char mov, jmp;
21760 + unsigned int addr1, addr2;
21761 +
21762 +#ifdef CONFIG_X86_64
21763 + if ((regs->ip + 9) >> 32)
21764 + break;
21765 +#endif
21766 +
21767 + err = get_user(mov, (unsigned char __user *)regs->ip);
21768 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21769 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21770 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21771 +
21772 + if (err)
21773 + break;
21774 +
21775 + if (mov == 0xB9 && jmp == 0xE9) {
21776 + regs->cx = addr1;
21777 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21778 + return 2;
21779 + }
21780 + } while (0);
21781 +
21782 + return 1; /* PaX in action */
21783 +}
21784 +
21785 +#ifdef CONFIG_X86_64
21786 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21787 +{
21788 + int err;
21789 +
21790 + do { /* PaX: gcc trampoline emulation #1 */
21791 + unsigned short mov1, mov2, jmp1;
21792 + unsigned char jmp2;
21793 + unsigned int addr1;
21794 + unsigned long addr2;
21795 +
21796 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21797 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21798 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21799 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21800 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21801 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21802 +
21803 + if (err)
21804 + break;
21805 +
21806 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21807 + regs->r11 = addr1;
21808 + regs->r10 = addr2;
21809 + regs->ip = addr1;
21810 + return 2;
21811 + }
21812 + } while (0);
21813 +
21814 + do { /* PaX: gcc trampoline emulation #2 */
21815 + unsigned short mov1, mov2, jmp1;
21816 + unsigned char jmp2;
21817 + unsigned long addr1, addr2;
21818 +
21819 + err = get_user(mov1, (unsigned short __user *)regs->ip);
21820 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21821 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21822 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21823 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21824 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21825 +
21826 + if (err)
21827 + break;
21828 +
21829 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21830 + regs->r11 = addr1;
21831 + regs->r10 = addr2;
21832 + regs->ip = addr1;
21833 + return 2;
21834 + }
21835 + } while (0);
21836 +
21837 + return 1; /* PaX in action */
21838 +}
21839 +#endif
21840 +
21841 +/*
21842 + * PaX: decide what to do with offenders (regs->ip = fault address)
21843 + *
21844 + * returns 1 when task should be killed
21845 + * 2 when gcc trampoline was detected
21846 + */
21847 +static int pax_handle_fetch_fault(struct pt_regs *regs)
21848 +{
21849 + if (v8086_mode(regs))
21850 + return 1;
21851 +
21852 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21853 + return 1;
21854 +
21855 +#ifdef CONFIG_X86_32
21856 + return pax_handle_fetch_fault_32(regs);
21857 +#else
21858 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21859 + return pax_handle_fetch_fault_32(regs);
21860 + else
21861 + return pax_handle_fetch_fault_64(regs);
21862 +#endif
21863 +}
21864 +#endif
21865 +
21866 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21867 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
21868 +{
21869 + long i;
21870 +
21871 + printk(KERN_ERR "PAX: bytes at PC: ");
21872 + for (i = 0; i < 20; i++) {
21873 + unsigned char c;
21874 + if (get_user(c, (unsigned char __force_user *)pc+i))
21875 + printk(KERN_CONT "?? ");
21876 + else
21877 + printk(KERN_CONT "%02x ", c);
21878 + }
21879 + printk("\n");
21880 +
21881 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21882 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
21883 + unsigned long c;
21884 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
21885 +#ifdef CONFIG_X86_32
21886 + printk(KERN_CONT "???????? ");
21887 +#else
21888 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
21889 + printk(KERN_CONT "???????? ???????? ");
21890 + else
21891 + printk(KERN_CONT "???????????????? ");
21892 +#endif
21893 + } else {
21894 +#ifdef CONFIG_X86_64
21895 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
21896 + printk(KERN_CONT "%08x ", (unsigned int)c);
21897 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
21898 + } else
21899 +#endif
21900 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21901 + }
21902 + }
21903 + printk("\n");
21904 +}
21905 +#endif
21906 +
21907 +/**
21908 + * probe_kernel_write(): safely attempt to write to a location
21909 + * @dst: address to write to
21910 + * @src: pointer to the data that shall be written
21911 + * @size: size of the data chunk
21912 + *
21913 + * Safely write to address @dst from the buffer at @src. If a kernel fault
21914 + * happens, handle that and return -EFAULT.
21915 + */
21916 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21917 +{
21918 + long ret;
21919 + mm_segment_t old_fs = get_fs();
21920 +
21921 + set_fs(KERNEL_DS);
21922 + pagefault_disable();
21923 + pax_open_kernel();
21924 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
21925 + pax_close_kernel();
21926 + pagefault_enable();
21927 + set_fs(old_fs);
21928 +
21929 + return ret ? -EFAULT : 0;
21930 +}
21931 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
21932 index ea30585..b5e1508 100644
21933 --- a/arch/x86/mm/gup.c
21934 +++ b/arch/x86/mm/gup.c
21935 @@ -253,7 +253,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
21936 addr = start;
21937 len = (unsigned long) nr_pages << PAGE_SHIFT;
21938 end = start + len;
21939 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21940 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21941 (void __user *)start, len)))
21942 return 0;
21943
21944 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
21945 index b499626..6fd1882 100644
21946 --- a/arch/x86/mm/highmem_32.c
21947 +++ b/arch/x86/mm/highmem_32.c
21948 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
21949 idx = type + KM_TYPE_NR*smp_processor_id();
21950 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21951 BUG_ON(!pte_none(*(kmap_pte-idx)));
21952 +
21953 + pax_open_kernel();
21954 set_pte(kmap_pte-idx, mk_pte(page, prot));
21955 + pax_close_kernel();
21956
21957 return (void *)vaddr;
21958 }
21959 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
21960 index f581a18..29efd37 100644
21961 --- a/arch/x86/mm/hugetlbpage.c
21962 +++ b/arch/x86/mm/hugetlbpage.c
21963 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
21964 struct hstate *h = hstate_file(file);
21965 struct mm_struct *mm = current->mm;
21966 struct vm_area_struct *vma;
21967 - unsigned long start_addr;
21968 + unsigned long start_addr, pax_task_size = TASK_SIZE;
21969 +
21970 +#ifdef CONFIG_PAX_SEGMEXEC
21971 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21972 + pax_task_size = SEGMEXEC_TASK_SIZE;
21973 +#endif
21974 +
21975 + pax_task_size -= PAGE_SIZE;
21976
21977 if (len > mm->cached_hole_size) {
21978 - start_addr = mm->free_area_cache;
21979 + start_addr = mm->free_area_cache;
21980 } else {
21981 - start_addr = TASK_UNMAPPED_BASE;
21982 - mm->cached_hole_size = 0;
21983 + start_addr = mm->mmap_base;
21984 + mm->cached_hole_size = 0;
21985 }
21986
21987 full_search:
21988 @@ -280,26 +287,27 @@ full_search:
21989
21990 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21991 /* At this point: (!vma || addr < vma->vm_end). */
21992 - if (TASK_SIZE - len < addr) {
21993 + if (pax_task_size - len < addr) {
21994 /*
21995 * Start a new search - just in case we missed
21996 * some holes.
21997 */
21998 - if (start_addr != TASK_UNMAPPED_BASE) {
21999 - start_addr = TASK_UNMAPPED_BASE;
22000 + if (start_addr != mm->mmap_base) {
22001 + start_addr = mm->mmap_base;
22002 mm->cached_hole_size = 0;
22003 goto full_search;
22004 }
22005 return -ENOMEM;
22006 }
22007 - if (!vma || addr + len <= vma->vm_start) {
22008 - mm->free_area_cache = addr + len;
22009 - return addr;
22010 - }
22011 + if (check_heap_stack_gap(vma, addr, len))
22012 + break;
22013 if (addr + mm->cached_hole_size < vma->vm_start)
22014 mm->cached_hole_size = vma->vm_start - addr;
22015 addr = ALIGN(vma->vm_end, huge_page_size(h));
22016 }
22017 +
22018 + mm->free_area_cache = addr + len;
22019 + return addr;
22020 }
22021
22022 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22023 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22024 {
22025 struct hstate *h = hstate_file(file);
22026 struct mm_struct *mm = current->mm;
22027 - struct vm_area_struct *vma, *prev_vma;
22028 - unsigned long base = mm->mmap_base, addr = addr0;
22029 + struct vm_area_struct *vma;
22030 + unsigned long base = mm->mmap_base, addr;
22031 unsigned long largest_hole = mm->cached_hole_size;
22032 - int first_time = 1;
22033
22034 /* don't allow allocations above current base */
22035 if (mm->free_area_cache > base)
22036 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
22037 largest_hole = 0;
22038 mm->free_area_cache = base;
22039 }
22040 -try_again:
22041 +
22042 /* make sure it can fit in the remaining address space */
22043 if (mm->free_area_cache < len)
22044 goto fail;
22045
22046 /* either no address requested or can't fit in requested address hole */
22047 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
22048 + addr = (mm->free_area_cache - len);
22049 do {
22050 + addr &= huge_page_mask(h);
22051 + vma = find_vma(mm, addr);
22052 /*
22053 * Lookup failure means no vma is above this address,
22054 * i.e. return with success:
22055 - */
22056 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
22057 - return addr;
22058 -
22059 - /*
22060 * new region fits between prev_vma->vm_end and
22061 * vma->vm_start, use it:
22062 */
22063 - if (addr + len <= vma->vm_start &&
22064 - (!prev_vma || (addr >= prev_vma->vm_end))) {
22065 + if (check_heap_stack_gap(vma, addr, len)) {
22066 /* remember the address as a hint for next time */
22067 - mm->cached_hole_size = largest_hole;
22068 - return (mm->free_area_cache = addr);
22069 - } else {
22070 - /* pull free_area_cache down to the first hole */
22071 - if (mm->free_area_cache == vma->vm_end) {
22072 - mm->free_area_cache = vma->vm_start;
22073 - mm->cached_hole_size = largest_hole;
22074 - }
22075 + mm->cached_hole_size = largest_hole;
22076 + return (mm->free_area_cache = addr);
22077 + }
22078 + /* pull free_area_cache down to the first hole */
22079 + if (mm->free_area_cache == vma->vm_end) {
22080 + mm->free_area_cache = vma->vm_start;
22081 + mm->cached_hole_size = largest_hole;
22082 }
22083
22084 /* remember the largest hole we saw so far */
22085 if (addr + largest_hole < vma->vm_start)
22086 - largest_hole = vma->vm_start - addr;
22087 + largest_hole = vma->vm_start - addr;
22088
22089 /* try just below the current vma->vm_start */
22090 - addr = (vma->vm_start - len) & huge_page_mask(h);
22091 - } while (len <= vma->vm_start);
22092 + addr = skip_heap_stack_gap(vma, len);
22093 + } while (!IS_ERR_VALUE(addr));
22094
22095 fail:
22096 /*
22097 - * if hint left us with no space for the requested
22098 - * mapping then try again:
22099 - */
22100 - if (first_time) {
22101 - mm->free_area_cache = base;
22102 - largest_hole = 0;
22103 - first_time = 0;
22104 - goto try_again;
22105 - }
22106 - /*
22107 * A failed mmap() very likely causes application failure,
22108 * so fall back to the bottom-up function here. This scenario
22109 * can happen with large stack limits and large mmap()
22110 * allocations.
22111 */
22112 - mm->free_area_cache = TASK_UNMAPPED_BASE;
22113 +
22114 +#ifdef CONFIG_PAX_SEGMEXEC
22115 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22116 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22117 + else
22118 +#endif
22119 +
22120 + mm->mmap_base = TASK_UNMAPPED_BASE;
22121 +
22122 +#ifdef CONFIG_PAX_RANDMMAP
22123 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22124 + mm->mmap_base += mm->delta_mmap;
22125 +#endif
22126 +
22127 + mm->free_area_cache = mm->mmap_base;
22128 mm->cached_hole_size = ~0UL;
22129 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
22130 len, pgoff, flags);
22131 @@ -386,6 +392,7 @@ fail:
22132 /*
22133 * Restore the topdown base:
22134 */
22135 + mm->mmap_base = base;
22136 mm->free_area_cache = base;
22137 mm->cached_hole_size = ~0UL;
22138
22139 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22140 struct hstate *h = hstate_file(file);
22141 struct mm_struct *mm = current->mm;
22142 struct vm_area_struct *vma;
22143 + unsigned long pax_task_size = TASK_SIZE;
22144
22145 if (len & ~huge_page_mask(h))
22146 return -EINVAL;
22147 - if (len > TASK_SIZE)
22148 +
22149 +#ifdef CONFIG_PAX_SEGMEXEC
22150 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22151 + pax_task_size = SEGMEXEC_TASK_SIZE;
22152 +#endif
22153 +
22154 + pax_task_size -= PAGE_SIZE;
22155 +
22156 + if (len > pax_task_size)
22157 return -ENOMEM;
22158
22159 if (flags & MAP_FIXED) {
22160 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
22161 if (addr) {
22162 addr = ALIGN(addr, huge_page_size(h));
22163 vma = find_vma(mm, addr);
22164 - if (TASK_SIZE - len >= addr &&
22165 - (!vma || addr + len <= vma->vm_start))
22166 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
22167 return addr;
22168 }
22169 if (mm->get_unmapped_area == arch_get_unmapped_area)
22170 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
22171 index 87488b9..7129f32 100644
22172 --- a/arch/x86/mm/init.c
22173 +++ b/arch/x86/mm/init.c
22174 @@ -31,7 +31,7 @@ int direct_gbpages
22175 static void __init find_early_table_space(unsigned long end, int use_pse,
22176 int use_gbpages)
22177 {
22178 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
22179 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
22180 phys_addr_t base;
22181
22182 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
22183 @@ -312,8 +312,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
22184 */
22185 int devmem_is_allowed(unsigned long pagenr)
22186 {
22187 - if (pagenr <= 256)
22188 +#ifdef CONFIG_GRKERNSEC_KMEM
22189 + /* allow BDA */
22190 + if (!pagenr)
22191 + return 1;
22192 + /* allow EBDA */
22193 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22194 + return 1;
22195 +#else
22196 + if (!pagenr)
22197 + return 1;
22198 +#ifdef CONFIG_VM86
22199 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
22200 + return 1;
22201 +#endif
22202 +#endif
22203 +
22204 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22205 return 1;
22206 +#ifdef CONFIG_GRKERNSEC_KMEM
22207 + /* throw out everything else below 1MB */
22208 + if (pagenr <= 256)
22209 + return 0;
22210 +#endif
22211 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22212 return 0;
22213 if (!page_is_ram(pagenr))
22214 @@ -372,6 +393,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
22215
22216 void free_initmem(void)
22217 {
22218 +
22219 +#ifdef CONFIG_PAX_KERNEXEC
22220 +#ifdef CONFIG_X86_32
22221 + /* PaX: limit KERNEL_CS to actual size */
22222 + unsigned long addr, limit;
22223 + struct desc_struct d;
22224 + int cpu;
22225 +
22226 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22227 + limit = (limit - 1UL) >> PAGE_SHIFT;
22228 +
22229 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22230 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22231 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22232 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22233 + }
22234 +
22235 + /* PaX: make KERNEL_CS read-only */
22236 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22237 + if (!paravirt_enabled())
22238 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22239 +/*
22240 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22241 + pgd = pgd_offset_k(addr);
22242 + pud = pud_offset(pgd, addr);
22243 + pmd = pmd_offset(pud, addr);
22244 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22245 + }
22246 +*/
22247 +#ifdef CONFIG_X86_PAE
22248 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22249 +/*
22250 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22251 + pgd = pgd_offset_k(addr);
22252 + pud = pud_offset(pgd, addr);
22253 + pmd = pmd_offset(pud, addr);
22254 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22255 + }
22256 +*/
22257 +#endif
22258 +
22259 +#ifdef CONFIG_MODULES
22260 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22261 +#endif
22262 +
22263 +#else
22264 + pgd_t *pgd;
22265 + pud_t *pud;
22266 + pmd_t *pmd;
22267 + unsigned long addr, end;
22268 +
22269 + /* PaX: make kernel code/rodata read-only, rest non-executable */
22270 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22271 + pgd = pgd_offset_k(addr);
22272 + pud = pud_offset(pgd, addr);
22273 + pmd = pmd_offset(pud, addr);
22274 + if (!pmd_present(*pmd))
22275 + continue;
22276 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22277 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22278 + else
22279 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22280 + }
22281 +
22282 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22283 + end = addr + KERNEL_IMAGE_SIZE;
22284 + for (; addr < end; addr += PMD_SIZE) {
22285 + pgd = pgd_offset_k(addr);
22286 + pud = pud_offset(pgd, addr);
22287 + pmd = pmd_offset(pud, addr);
22288 + if (!pmd_present(*pmd))
22289 + continue;
22290 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22291 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22292 + }
22293 +#endif
22294 +
22295 + flush_tlb_all();
22296 +#endif
22297 +
22298 free_init_pages("unused kernel memory",
22299 (unsigned long)(&__init_begin),
22300 (unsigned long)(&__init_end));
22301 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
22302 index 29f7c6d..b46b35b 100644
22303 --- a/arch/x86/mm/init_32.c
22304 +++ b/arch/x86/mm/init_32.c
22305 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
22306 }
22307
22308 /*
22309 - * Creates a middle page table and puts a pointer to it in the
22310 - * given global directory entry. This only returns the gd entry
22311 - * in non-PAE compilation mode, since the middle layer is folded.
22312 - */
22313 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
22314 -{
22315 - pud_t *pud;
22316 - pmd_t *pmd_table;
22317 -
22318 -#ifdef CONFIG_X86_PAE
22319 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
22320 - if (after_bootmem)
22321 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
22322 - else
22323 - pmd_table = (pmd_t *)alloc_low_page();
22324 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
22325 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
22326 - pud = pud_offset(pgd, 0);
22327 - BUG_ON(pmd_table != pmd_offset(pud, 0));
22328 -
22329 - return pmd_table;
22330 - }
22331 -#endif
22332 - pud = pud_offset(pgd, 0);
22333 - pmd_table = pmd_offset(pud, 0);
22334 -
22335 - return pmd_table;
22336 -}
22337 -
22338 -/*
22339 * Create a page table and place a pointer to it in a middle page
22340 * directory entry:
22341 */
22342 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
22343 page_table = (pte_t *)alloc_low_page();
22344
22345 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
22346 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
22347 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
22348 +#else
22349 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
22350 +#endif
22351 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
22352 }
22353
22354 return pte_offset_kernel(pmd, 0);
22355 }
22356
22357 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
22358 +{
22359 + pud_t *pud;
22360 + pmd_t *pmd_table;
22361 +
22362 + pud = pud_offset(pgd, 0);
22363 + pmd_table = pmd_offset(pud, 0);
22364 +
22365 + return pmd_table;
22366 +}
22367 +
22368 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
22369 {
22370 int pgd_idx = pgd_index(vaddr);
22371 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22372 int pgd_idx, pmd_idx;
22373 unsigned long vaddr;
22374 pgd_t *pgd;
22375 + pud_t *pud;
22376 pmd_t *pmd;
22377 pte_t *pte = NULL;
22378
22379 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22380 pgd = pgd_base + pgd_idx;
22381
22382 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
22383 - pmd = one_md_table_init(pgd);
22384 - pmd = pmd + pmd_index(vaddr);
22385 + pud = pud_offset(pgd, vaddr);
22386 + pmd = pmd_offset(pud, vaddr);
22387 +
22388 +#ifdef CONFIG_X86_PAE
22389 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22390 +#endif
22391 +
22392 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
22393 pmd++, pmd_idx++) {
22394 pte = page_table_kmap_check(one_page_table_init(pmd),
22395 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
22396 }
22397 }
22398
22399 -static inline int is_kernel_text(unsigned long addr)
22400 +static inline int is_kernel_text(unsigned long start, unsigned long end)
22401 {
22402 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
22403 - return 1;
22404 - return 0;
22405 + if ((start > ktla_ktva((unsigned long)_etext) ||
22406 + end <= ktla_ktva((unsigned long)_stext)) &&
22407 + (start > ktla_ktva((unsigned long)_einittext) ||
22408 + end <= ktla_ktva((unsigned long)_sinittext)) &&
22409 +
22410 +#ifdef CONFIG_ACPI_SLEEP
22411 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
22412 +#endif
22413 +
22414 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
22415 + return 0;
22416 + return 1;
22417 }
22418
22419 /*
22420 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
22421 unsigned long last_map_addr = end;
22422 unsigned long start_pfn, end_pfn;
22423 pgd_t *pgd_base = swapper_pg_dir;
22424 - int pgd_idx, pmd_idx, pte_ofs;
22425 + unsigned int pgd_idx, pmd_idx, pte_ofs;
22426 unsigned long pfn;
22427 pgd_t *pgd;
22428 + pud_t *pud;
22429 pmd_t *pmd;
22430 pte_t *pte;
22431 unsigned pages_2m, pages_4k;
22432 @@ -281,8 +282,13 @@ repeat:
22433 pfn = start_pfn;
22434 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22435 pgd = pgd_base + pgd_idx;
22436 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
22437 - pmd = one_md_table_init(pgd);
22438 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
22439 + pud = pud_offset(pgd, 0);
22440 + pmd = pmd_offset(pud, 0);
22441 +
22442 +#ifdef CONFIG_X86_PAE
22443 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
22444 +#endif
22445
22446 if (pfn >= end_pfn)
22447 continue;
22448 @@ -294,14 +300,13 @@ repeat:
22449 #endif
22450 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
22451 pmd++, pmd_idx++) {
22452 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
22453 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
22454
22455 /*
22456 * Map with big pages if possible, otherwise
22457 * create normal page tables:
22458 */
22459 if (use_pse) {
22460 - unsigned int addr2;
22461 pgprot_t prot = PAGE_KERNEL_LARGE;
22462 /*
22463 * first pass will use the same initial
22464 @@ -311,11 +316,7 @@ repeat:
22465 __pgprot(PTE_IDENT_ATTR |
22466 _PAGE_PSE);
22467
22468 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
22469 - PAGE_OFFSET + PAGE_SIZE-1;
22470 -
22471 - if (is_kernel_text(addr) ||
22472 - is_kernel_text(addr2))
22473 + if (is_kernel_text(address, address + PMD_SIZE))
22474 prot = PAGE_KERNEL_LARGE_EXEC;
22475
22476 pages_2m++;
22477 @@ -332,7 +333,7 @@ repeat:
22478 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
22479 pte += pte_ofs;
22480 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
22481 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
22482 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
22483 pgprot_t prot = PAGE_KERNEL;
22484 /*
22485 * first pass will use the same initial
22486 @@ -340,7 +341,7 @@ repeat:
22487 */
22488 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
22489
22490 - if (is_kernel_text(addr))
22491 + if (is_kernel_text(address, address + PAGE_SIZE))
22492 prot = PAGE_KERNEL_EXEC;
22493
22494 pages_4k++;
22495 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
22496
22497 pud = pud_offset(pgd, va);
22498 pmd = pmd_offset(pud, va);
22499 - if (!pmd_present(*pmd))
22500 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
22501 break;
22502
22503 pte = pte_offset_kernel(pmd, va);
22504 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_range_init(void)
22505
22506 static void __init pagetable_init(void)
22507 {
22508 - pgd_t *pgd_base = swapper_pg_dir;
22509 -
22510 - permanent_kmaps_init(pgd_base);
22511 + permanent_kmaps_init(swapper_pg_dir);
22512 }
22513
22514 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22515 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
22516 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22517
22518 /* user-defined highmem size */
22519 @@ -757,6 +756,12 @@ void __init mem_init(void)
22520
22521 pci_iommu_alloc();
22522
22523 +#ifdef CONFIG_PAX_PER_CPU_PGD
22524 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22525 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22526 + KERNEL_PGD_PTRS);
22527 +#endif
22528 +
22529 #ifdef CONFIG_FLATMEM
22530 BUG_ON(!mem_map);
22531 #endif
22532 @@ -774,7 +779,7 @@ void __init mem_init(void)
22533 set_highmem_pages_init();
22534
22535 codesize = (unsigned long) &_etext - (unsigned long) &_text;
22536 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
22537 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
22538 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
22539
22540 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
22541 @@ -815,10 +820,10 @@ void __init mem_init(void)
22542 ((unsigned long)&__init_end -
22543 (unsigned long)&__init_begin) >> 10,
22544
22545 - (unsigned long)&_etext, (unsigned long)&_edata,
22546 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
22547 + (unsigned long)&_sdata, (unsigned long)&_edata,
22548 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
22549
22550 - (unsigned long)&_text, (unsigned long)&_etext,
22551 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
22552 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
22553
22554 /*
22555 @@ -896,6 +901,7 @@ void set_kernel_text_rw(void)
22556 if (!kernel_set_to_readonly)
22557 return;
22558
22559 + start = ktla_ktva(start);
22560 pr_debug("Set kernel text: %lx - %lx for read write\n",
22561 start, start+size);
22562
22563 @@ -910,6 +916,7 @@ void set_kernel_text_ro(void)
22564 if (!kernel_set_to_readonly)
22565 return;
22566
22567 + start = ktla_ktva(start);
22568 pr_debug("Set kernel text: %lx - %lx for read only\n",
22569 start, start+size);
22570
22571 @@ -938,6 +945,7 @@ void mark_rodata_ro(void)
22572 unsigned long start = PFN_ALIGN(_text);
22573 unsigned long size = PFN_ALIGN(_etext) - start;
22574
22575 + start = ktla_ktva(start);
22576 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
22577 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
22578 size >> 10);
22579 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
22580 index bbaaa00..16dffad 100644
22581 --- a/arch/x86/mm/init_64.c
22582 +++ b/arch/x86/mm/init_64.c
22583 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
22584 * around without checking the pgd every time.
22585 */
22586
22587 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
22588 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
22589 EXPORT_SYMBOL_GPL(__supported_pte_mask);
22590
22591 int force_personality32;
22592 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22593
22594 for (address = start; address <= end; address += PGDIR_SIZE) {
22595 const pgd_t *pgd_ref = pgd_offset_k(address);
22596 +
22597 +#ifdef CONFIG_PAX_PER_CPU_PGD
22598 + unsigned long cpu;
22599 +#else
22600 struct page *page;
22601 +#endif
22602
22603 if (pgd_none(*pgd_ref))
22604 continue;
22605
22606 spin_lock(&pgd_lock);
22607 +
22608 +#ifdef CONFIG_PAX_PER_CPU_PGD
22609 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22610 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
22611 +#else
22612 list_for_each_entry(page, &pgd_list, lru) {
22613 pgd_t *pgd;
22614 spinlock_t *pgt_lock;
22615 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22616 /* the pgt_lock only for Xen */
22617 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
22618 spin_lock(pgt_lock);
22619 +#endif
22620
22621 if (pgd_none(*pgd))
22622 set_pgd(pgd, *pgd_ref);
22623 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
22624 BUG_ON(pgd_page_vaddr(*pgd)
22625 != pgd_page_vaddr(*pgd_ref));
22626
22627 +#ifndef CONFIG_PAX_PER_CPU_PGD
22628 spin_unlock(pgt_lock);
22629 +#endif
22630 +
22631 }
22632 spin_unlock(&pgd_lock);
22633 }
22634 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
22635 pmd = fill_pmd(pud, vaddr);
22636 pte = fill_pte(pmd, vaddr);
22637
22638 + pax_open_kernel();
22639 set_pte(pte, new_pte);
22640 + pax_close_kernel();
22641
22642 /*
22643 * It's enough to flush this one mapping.
22644 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
22645 pgd = pgd_offset_k((unsigned long)__va(phys));
22646 if (pgd_none(*pgd)) {
22647 pud = (pud_t *) spp_getpage();
22648 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
22649 - _PAGE_USER));
22650 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
22651 }
22652 pud = pud_offset(pgd, (unsigned long)__va(phys));
22653 if (pud_none(*pud)) {
22654 pmd = (pmd_t *) spp_getpage();
22655 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
22656 - _PAGE_USER));
22657 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
22658 }
22659 pmd = pmd_offset(pud, phys);
22660 BUG_ON(!pmd_none(*pmd));
22661 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
22662 if (pfn >= pgt_buf_top)
22663 panic("alloc_low_page: ran out of memory");
22664
22665 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22666 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
22667 clear_page(adr);
22668 *phys = pfn * PAGE_SIZE;
22669 return adr;
22670 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
22671
22672 phys = __pa(virt);
22673 left = phys & (PAGE_SIZE - 1);
22674 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22675 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
22676 adr = (void *)(((unsigned long)adr) | left);
22677
22678 return adr;
22679 @@ -693,6 +707,12 @@ void __init mem_init(void)
22680
22681 pci_iommu_alloc();
22682
22683 +#ifdef CONFIG_PAX_PER_CPU_PGD
22684 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
22685 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22686 + KERNEL_PGD_PTRS);
22687 +#endif
22688 +
22689 /* clear_bss() already clear the empty_zero_page */
22690
22691 reservedpages = 0;
22692 @@ -853,8 +873,8 @@ int kern_addr_valid(unsigned long addr)
22693 static struct vm_area_struct gate_vma = {
22694 .vm_start = VSYSCALL_START,
22695 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
22696 - .vm_page_prot = PAGE_READONLY_EXEC,
22697 - .vm_flags = VM_READ | VM_EXEC
22698 + .vm_page_prot = PAGE_READONLY,
22699 + .vm_flags = VM_READ
22700 };
22701
22702 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
22703 @@ -888,7 +908,7 @@ int in_gate_area_no_mm(unsigned long addr)
22704
22705 const char *arch_vma_name(struct vm_area_struct *vma)
22706 {
22707 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22708 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22709 return "[vdso]";
22710 if (vma == &gate_vma)
22711 return "[vsyscall]";
22712 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
22713 index 7b179b4..6bd1777 100644
22714 --- a/arch/x86/mm/iomap_32.c
22715 +++ b/arch/x86/mm/iomap_32.c
22716 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
22717 type = kmap_atomic_idx_push();
22718 idx = type + KM_TYPE_NR * smp_processor_id();
22719 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22720 +
22721 + pax_open_kernel();
22722 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22723 + pax_close_kernel();
22724 +
22725 arch_flush_lazy_mmu_mode();
22726
22727 return (void *)vaddr;
22728 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
22729 index be1ef57..9680edc 100644
22730 --- a/arch/x86/mm/ioremap.c
22731 +++ b/arch/x86/mm/ioremap.c
22732 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
22733 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
22734 int is_ram = page_is_ram(pfn);
22735
22736 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22737 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22738 return NULL;
22739 WARN_ON_ONCE(is_ram);
22740 }
22741 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_setup(char *str)
22742 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22743
22744 static __initdata int after_paging_init;
22745 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22746 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22747
22748 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22749 {
22750 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
22751 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22752
22753 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22754 - memset(bm_pte, 0, sizeof(bm_pte));
22755 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
22756 + pmd_populate_user(&init_mm, pmd, bm_pte);
22757
22758 /*
22759 * The boot-ioremap range spans multiple pmds, for which
22760 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
22761 index d87dd6d..bf3fa66 100644
22762 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
22763 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
22764 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
22765 * memory (e.g. tracked pages)? For now, we need this to avoid
22766 * invoking kmemcheck for PnP BIOS calls.
22767 */
22768 - if (regs->flags & X86_VM_MASK)
22769 + if (v8086_mode(regs))
22770 return false;
22771 - if (regs->cs != __KERNEL_CS)
22772 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22773 return false;
22774
22775 pte = kmemcheck_pte_lookup(address);
22776 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
22777 index 1dab519..60a7e5f 100644
22778 --- a/arch/x86/mm/mmap.c
22779 +++ b/arch/x86/mm/mmap.c
22780 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
22781 * Leave an at least ~128 MB hole with possible stack randomization.
22782 */
22783 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22784 -#define MAX_GAP (TASK_SIZE/6*5)
22785 +#define MAX_GAP (pax_task_size/6*5)
22786
22787 /*
22788 * True on X86_32 or when emulating IA32 on X86_64
22789 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22790 return rnd << PAGE_SHIFT;
22791 }
22792
22793 -static unsigned long mmap_base(void)
22794 +static unsigned long mmap_base(struct mm_struct *mm)
22795 {
22796 unsigned long gap = rlimit(RLIMIT_STACK);
22797 + unsigned long pax_task_size = TASK_SIZE;
22798 +
22799 +#ifdef CONFIG_PAX_SEGMEXEC
22800 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22801 + pax_task_size = SEGMEXEC_TASK_SIZE;
22802 +#endif
22803
22804 if (gap < MIN_GAP)
22805 gap = MIN_GAP;
22806 else if (gap > MAX_GAP)
22807 gap = MAX_GAP;
22808
22809 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22810 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22811 }
22812
22813 /*
22814 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22815 * does, but not when emulating X86_32
22816 */
22817 -static unsigned long mmap_legacy_base(void)
22818 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
22819 {
22820 - if (mmap_is_ia32())
22821 + if (mmap_is_ia32()) {
22822 +
22823 +#ifdef CONFIG_PAX_SEGMEXEC
22824 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
22825 + return SEGMEXEC_TASK_UNMAPPED_BASE;
22826 + else
22827 +#endif
22828 +
22829 return TASK_UNMAPPED_BASE;
22830 - else
22831 + } else
22832 return TASK_UNMAPPED_BASE + mmap_rnd();
22833 }
22834
22835 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
22836 void arch_pick_mmap_layout(struct mm_struct *mm)
22837 {
22838 if (mmap_is_legacy()) {
22839 - mm->mmap_base = mmap_legacy_base();
22840 + mm->mmap_base = mmap_legacy_base(mm);
22841 +
22842 +#ifdef CONFIG_PAX_RANDMMAP
22843 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22844 + mm->mmap_base += mm->delta_mmap;
22845 +#endif
22846 +
22847 mm->get_unmapped_area = arch_get_unmapped_area;
22848 mm->unmap_area = arch_unmap_area;
22849 } else {
22850 - mm->mmap_base = mmap_base();
22851 + mm->mmap_base = mmap_base(mm);
22852 +
22853 +#ifdef CONFIG_PAX_RANDMMAP
22854 + if (mm->pax_flags & MF_PAX_RANDMMAP)
22855 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22856 +#endif
22857 +
22858 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22859 mm->unmap_area = arch_unmap_area_topdown;
22860 }
22861 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
22862 index 67421f3..8d6b107 100644
22863 --- a/arch/x86/mm/mmio-mod.c
22864 +++ b/arch/x86/mm/mmio-mod.c
22865 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
22866 break;
22867 default:
22868 {
22869 - unsigned char *ip = (unsigned char *)instptr;
22870 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22871 my_trace->opcode = MMIO_UNKNOWN_OP;
22872 my_trace->width = 0;
22873 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22874 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
22875 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22876 void __iomem *addr)
22877 {
22878 - static atomic_t next_id;
22879 + static atomic_unchecked_t next_id;
22880 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22881 /* These are page-unaligned. */
22882 struct mmiotrace_map map = {
22883 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22884 .private = trace
22885 },
22886 .phys = offset,
22887 - .id = atomic_inc_return(&next_id)
22888 + .id = atomic_inc_return_unchecked(&next_id)
22889 };
22890 map.map_id = trace->id;
22891
22892 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
22893 index b008656..773eac2 100644
22894 --- a/arch/x86/mm/pageattr-test.c
22895 +++ b/arch/x86/mm/pageattr-test.c
22896 @@ -36,7 +36,7 @@ enum {
22897
22898 static int pte_testbit(pte_t pte)
22899 {
22900 - return pte_flags(pte) & _PAGE_UNUSED1;
22901 + return pte_flags(pte) & _PAGE_CPA_TEST;
22902 }
22903
22904 struct split_state {
22905 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
22906 index f9e5267..6f6e27f 100644
22907 --- a/arch/x86/mm/pageattr.c
22908 +++ b/arch/x86/mm/pageattr.c
22909 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
22910 */
22911 #ifdef CONFIG_PCI_BIOS
22912 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22913 - pgprot_val(forbidden) |= _PAGE_NX;
22914 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22915 #endif
22916
22917 /*
22918 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
22919 * Does not cover __inittext since that is gone later on. On
22920 * 64bit we do not enforce !NX on the low mapping
22921 */
22922 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
22923 - pgprot_val(forbidden) |= _PAGE_NX;
22924 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22925 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22926
22927 +#ifdef CONFIG_DEBUG_RODATA
22928 /*
22929 * The .rodata section needs to be read-only. Using the pfn
22930 * catches all aliases.
22931 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
22932 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22933 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22934 pgprot_val(forbidden) |= _PAGE_RW;
22935 +#endif
22936
22937 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
22938 /*
22939 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
22940 }
22941 #endif
22942
22943 +#ifdef CONFIG_PAX_KERNEXEC
22944 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22945 + pgprot_val(forbidden) |= _PAGE_RW;
22946 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22947 + }
22948 +#endif
22949 +
22950 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22951
22952 return prot;
22953 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22954 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22955 {
22956 /* change init_mm */
22957 + pax_open_kernel();
22958 set_pte_atomic(kpte, pte);
22959 +
22960 #ifdef CONFIG_X86_32
22961 if (!SHARED_KERNEL_PMD) {
22962 +
22963 +#ifdef CONFIG_PAX_PER_CPU_PGD
22964 + unsigned long cpu;
22965 +#else
22966 struct page *page;
22967 +#endif
22968
22969 +#ifdef CONFIG_PAX_PER_CPU_PGD
22970 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22971 + pgd_t *pgd = get_cpu_pgd(cpu);
22972 +#else
22973 list_for_each_entry(page, &pgd_list, lru) {
22974 - pgd_t *pgd;
22975 + pgd_t *pgd = (pgd_t *)page_address(page);
22976 +#endif
22977 +
22978 pud_t *pud;
22979 pmd_t *pmd;
22980
22981 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
22982 + pgd += pgd_index(address);
22983 pud = pud_offset(pgd, address);
22984 pmd = pmd_offset(pud, address);
22985 set_pte_atomic((pte_t *)pmd, pte);
22986 }
22987 }
22988 #endif
22989 + pax_close_kernel();
22990 }
22991
22992 static int
22993 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
22994 index f6ff57b..481690f 100644
22995 --- a/arch/x86/mm/pat.c
22996 +++ b/arch/x86/mm/pat.c
22997 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
22998
22999 if (!entry) {
23000 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
23001 - current->comm, current->pid, start, end);
23002 + current->comm, task_pid_nr(current), start, end);
23003 return -EINVAL;
23004 }
23005
23006 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23007 while (cursor < to) {
23008 if (!devmem_is_allowed(pfn)) {
23009 printk(KERN_INFO
23010 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23011 - current->comm, from, to);
23012 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
23013 + current->comm, from, to, cursor);
23014 return 0;
23015 }
23016 cursor += PAGE_SIZE;
23017 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
23018 printk(KERN_INFO
23019 "%s:%d ioremap_change_attr failed %s "
23020 "for %Lx-%Lx\n",
23021 - current->comm, current->pid,
23022 + current->comm, task_pid_nr(current),
23023 cattr_name(flags),
23024 base, (unsigned long long)(base + size));
23025 return -EINVAL;
23026 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23027 if (want_flags != flags) {
23028 printk(KERN_WARNING
23029 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
23030 - current->comm, current->pid,
23031 + current->comm, task_pid_nr(current),
23032 cattr_name(want_flags),
23033 (unsigned long long)paddr,
23034 (unsigned long long)(paddr + size),
23035 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
23036 free_memtype(paddr, paddr + size);
23037 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
23038 " for %Lx-%Lx, got %s\n",
23039 - current->comm, current->pid,
23040 + current->comm, task_pid_nr(current),
23041 cattr_name(want_flags),
23042 (unsigned long long)paddr,
23043 (unsigned long long)(paddr + size),
23044 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
23045 index 9f0614d..92ae64a 100644
23046 --- a/arch/x86/mm/pf_in.c
23047 +++ b/arch/x86/mm/pf_in.c
23048 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
23049 int i;
23050 enum reason_type rv = OTHERS;
23051
23052 - p = (unsigned char *)ins_addr;
23053 + p = (unsigned char *)ktla_ktva(ins_addr);
23054 p += skip_prefix(p, &prf);
23055 p += get_opcode(p, &opcode);
23056
23057 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
23058 struct prefix_bits prf;
23059 int i;
23060
23061 - p = (unsigned char *)ins_addr;
23062 + p = (unsigned char *)ktla_ktva(ins_addr);
23063 p += skip_prefix(p, &prf);
23064 p += get_opcode(p, &opcode);
23065
23066 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
23067 struct prefix_bits prf;
23068 int i;
23069
23070 - p = (unsigned char *)ins_addr;
23071 + p = (unsigned char *)ktla_ktva(ins_addr);
23072 p += skip_prefix(p, &prf);
23073 p += get_opcode(p, &opcode);
23074
23075 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
23076 struct prefix_bits prf;
23077 int i;
23078
23079 - p = (unsigned char *)ins_addr;
23080 + p = (unsigned char *)ktla_ktva(ins_addr);
23081 p += skip_prefix(p, &prf);
23082 p += get_opcode(p, &opcode);
23083 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
23084 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
23085 struct prefix_bits prf;
23086 int i;
23087
23088 - p = (unsigned char *)ins_addr;
23089 + p = (unsigned char *)ktla_ktva(ins_addr);
23090 p += skip_prefix(p, &prf);
23091 p += get_opcode(p, &opcode);
23092 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
23093 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
23094 index 8573b83..6372501 100644
23095 --- a/arch/x86/mm/pgtable.c
23096 +++ b/arch/x86/mm/pgtable.c
23097 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *pgd)
23098 list_del(&page->lru);
23099 }
23100
23101 -#define UNSHARED_PTRS_PER_PGD \
23102 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23103 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23104 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
23105
23106 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23107 +{
23108 + while (count--)
23109 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
23110 +}
23111 +#endif
23112 +
23113 +#ifdef CONFIG_PAX_PER_CPU_PGD
23114 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
23115 +{
23116 + while (count--)
23117 +
23118 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23119 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
23120 +#else
23121 + *dst++ = *src++;
23122 +#endif
23123 +
23124 +}
23125 +#endif
23126 +
23127 +#ifdef CONFIG_X86_64
23128 +#define pxd_t pud_t
23129 +#define pyd_t pgd_t
23130 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
23131 +#define pxd_free(mm, pud) pud_free((mm), (pud))
23132 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
23133 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
23134 +#define PYD_SIZE PGDIR_SIZE
23135 +#else
23136 +#define pxd_t pmd_t
23137 +#define pyd_t pud_t
23138 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
23139 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
23140 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
23141 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
23142 +#define PYD_SIZE PUD_SIZE
23143 +#endif
23144
23145 +#ifdef CONFIG_PAX_PER_CPU_PGD
23146 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
23147 +static inline void pgd_dtor(pgd_t *pgd) {}
23148 +#else
23149 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
23150 {
23151 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
23152 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
23153 pgd_list_del(pgd);
23154 spin_unlock(&pgd_lock);
23155 }
23156 +#endif
23157
23158 /*
23159 * List of all pgd's needed for non-PAE so it can invalidate entries
23160 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
23161 * -- wli
23162 */
23163
23164 -#ifdef CONFIG_X86_PAE
23165 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
23166 /*
23167 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
23168 * updating the top-level pagetable entries to guarantee the
23169 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
23170 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
23171 * and initialize the kernel pmds here.
23172 */
23173 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
23174 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
23175
23176 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23177 {
23178 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
23179 */
23180 flush_tlb_mm(mm);
23181 }
23182 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
23183 +#define PREALLOCATED_PXDS USER_PGD_PTRS
23184 #else /* !CONFIG_X86_PAE */
23185
23186 /* No need to prepopulate any pagetable entries in non-PAE modes. */
23187 -#define PREALLOCATED_PMDS 0
23188 +#define PREALLOCATED_PXDS 0
23189
23190 #endif /* CONFIG_X86_PAE */
23191
23192 -static void free_pmds(pmd_t *pmds[])
23193 +static void free_pxds(pxd_t *pxds[])
23194 {
23195 int i;
23196
23197 - for(i = 0; i < PREALLOCATED_PMDS; i++)
23198 - if (pmds[i])
23199 - free_page((unsigned long)pmds[i]);
23200 + for(i = 0; i < PREALLOCATED_PXDS; i++)
23201 + if (pxds[i])
23202 + free_page((unsigned long)pxds[i]);
23203 }
23204
23205 -static int preallocate_pmds(pmd_t *pmds[])
23206 +static int preallocate_pxds(pxd_t *pxds[])
23207 {
23208 int i;
23209 bool failed = false;
23210
23211 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23212 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
23213 - if (pmd == NULL)
23214 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23215 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
23216 + if (pxd == NULL)
23217 failed = true;
23218 - pmds[i] = pmd;
23219 + pxds[i] = pxd;
23220 }
23221
23222 if (failed) {
23223 - free_pmds(pmds);
23224 + free_pxds(pxds);
23225 return -ENOMEM;
23226 }
23227
23228 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[])
23229 * preallocate which never got a corresponding vma will need to be
23230 * freed manually.
23231 */
23232 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
23233 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
23234 {
23235 int i;
23236
23237 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
23238 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
23239 pgd_t pgd = pgdp[i];
23240
23241 if (pgd_val(pgd) != 0) {
23242 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
23243 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
23244
23245 - pgdp[i] = native_make_pgd(0);
23246 + set_pgd(pgdp + i, native_make_pgd(0));
23247
23248 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
23249 - pmd_free(mm, pmd);
23250 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
23251 + pxd_free(mm, pxd);
23252 }
23253 }
23254 }
23255
23256 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
23257 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
23258 {
23259 - pud_t *pud;
23260 + pyd_t *pyd;
23261 unsigned long addr;
23262 int i;
23263
23264 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
23265 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
23266 return;
23267
23268 - pud = pud_offset(pgd, 0);
23269 +#ifdef CONFIG_X86_64
23270 + pyd = pyd_offset(mm, 0L);
23271 +#else
23272 + pyd = pyd_offset(pgd, 0L);
23273 +#endif
23274
23275 - for (addr = i = 0; i < PREALLOCATED_PMDS;
23276 - i++, pud++, addr += PUD_SIZE) {
23277 - pmd_t *pmd = pmds[i];
23278 + for (addr = i = 0; i < PREALLOCATED_PXDS;
23279 + i++, pyd++, addr += PYD_SIZE) {
23280 + pxd_t *pxd = pxds[i];
23281
23282 if (i >= KERNEL_PGD_BOUNDARY)
23283 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23284 - sizeof(pmd_t) * PTRS_PER_PMD);
23285 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
23286 + sizeof(pxd_t) * PTRS_PER_PMD);
23287
23288 - pud_populate(mm, pud, pmd);
23289 + pyd_populate(mm, pyd, pxd);
23290 }
23291 }
23292
23293 pgd_t *pgd_alloc(struct mm_struct *mm)
23294 {
23295 pgd_t *pgd;
23296 - pmd_t *pmds[PREALLOCATED_PMDS];
23297 + pxd_t *pxds[PREALLOCATED_PXDS];
23298
23299 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
23300
23301 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23302
23303 mm->pgd = pgd;
23304
23305 - if (preallocate_pmds(pmds) != 0)
23306 + if (preallocate_pxds(pxds) != 0)
23307 goto out_free_pgd;
23308
23309 if (paravirt_pgd_alloc(mm) != 0)
23310 - goto out_free_pmds;
23311 + goto out_free_pxds;
23312
23313 /*
23314 * Make sure that pre-populating the pmds is atomic with
23315 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
23316 spin_lock(&pgd_lock);
23317
23318 pgd_ctor(mm, pgd);
23319 - pgd_prepopulate_pmd(mm, pgd, pmds);
23320 + pgd_prepopulate_pxd(mm, pgd, pxds);
23321
23322 spin_unlock(&pgd_lock);
23323
23324 return pgd;
23325
23326 -out_free_pmds:
23327 - free_pmds(pmds);
23328 +out_free_pxds:
23329 + free_pxds(pxds);
23330 out_free_pgd:
23331 free_page((unsigned long)pgd);
23332 out:
23333 @@ -295,7 +344,7 @@ out:
23334
23335 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23336 {
23337 - pgd_mop_up_pmds(mm, pgd);
23338 + pgd_mop_up_pxds(mm, pgd);
23339 pgd_dtor(pgd);
23340 paravirt_pgd_free(mm, pgd);
23341 free_page((unsigned long)pgd);
23342 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
23343 index cac7184..09a39fa 100644
23344 --- a/arch/x86/mm/pgtable_32.c
23345 +++ b/arch/x86/mm/pgtable_32.c
23346 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
23347 return;
23348 }
23349 pte = pte_offset_kernel(pmd, vaddr);
23350 +
23351 + pax_open_kernel();
23352 if (pte_val(pteval))
23353 set_pte_at(&init_mm, vaddr, pte, pteval);
23354 else
23355 pte_clear(&init_mm, vaddr, pte);
23356 + pax_close_kernel();
23357
23358 /*
23359 * It's enough to flush this one mapping.
23360 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
23361 index 410531d..0f16030 100644
23362 --- a/arch/x86/mm/setup_nx.c
23363 +++ b/arch/x86/mm/setup_nx.c
23364 @@ -5,8 +5,10 @@
23365 #include <asm/pgtable.h>
23366 #include <asm/proto.h>
23367
23368 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23369 static int disable_nx __cpuinitdata;
23370
23371 +#ifndef CONFIG_PAX_PAGEEXEC
23372 /*
23373 * noexec = on|off
23374 *
23375 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
23376 return 0;
23377 }
23378 early_param("noexec", noexec_setup);
23379 +#endif
23380 +
23381 +#endif
23382
23383 void __cpuinit x86_configure_nx(void)
23384 {
23385 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23386 if (cpu_has_nx && !disable_nx)
23387 __supported_pte_mask |= _PAGE_NX;
23388 else
23389 +#endif
23390 __supported_pte_mask &= ~_PAGE_NX;
23391 }
23392
23393 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
23394 index d6c0418..06a0ad5 100644
23395 --- a/arch/x86/mm/tlb.c
23396 +++ b/arch/x86/mm/tlb.c
23397 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
23398 BUG();
23399 cpumask_clear_cpu(cpu,
23400 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
23401 +
23402 +#ifndef CONFIG_PAX_PER_CPU_PGD
23403 load_cr3(swapper_pg_dir);
23404 +#endif
23405 +
23406 }
23407 EXPORT_SYMBOL_GPL(leave_mm);
23408
23409 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
23410 index 6687022..ceabcfa 100644
23411 --- a/arch/x86/net/bpf_jit.S
23412 +++ b/arch/x86/net/bpf_jit.S
23413 @@ -9,6 +9,7 @@
23414 */
23415 #include <linux/linkage.h>
23416 #include <asm/dwarf2.h>
23417 +#include <asm/alternative-asm.h>
23418
23419 /*
23420 * Calling convention :
23421 @@ -35,6 +36,7 @@ sk_load_word:
23422 jle bpf_slow_path_word
23423 mov (SKBDATA,%rsi),%eax
23424 bswap %eax /* ntohl() */
23425 + pax_force_retaddr
23426 ret
23427
23428
23429 @@ -53,6 +55,7 @@ sk_load_half:
23430 jle bpf_slow_path_half
23431 movzwl (SKBDATA,%rsi),%eax
23432 rol $8,%ax # ntohs()
23433 + pax_force_retaddr
23434 ret
23435
23436 sk_load_byte_ind:
23437 @@ -66,6 +69,7 @@ sk_load_byte:
23438 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
23439 jle bpf_slow_path_byte
23440 movzbl (SKBDATA,%rsi),%eax
23441 + pax_force_retaddr
23442 ret
23443
23444 /**
23445 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
23446 movzbl (SKBDATA,%rsi),%ebx
23447 and $15,%bl
23448 shl $2,%bl
23449 + pax_force_retaddr
23450 ret
23451 CFI_ENDPROC
23452 ENDPROC(sk_load_byte_msh)
23453 @@ -91,6 +96,7 @@ bpf_error:
23454 xor %eax,%eax
23455 mov -8(%rbp),%rbx
23456 leaveq
23457 + pax_force_retaddr
23458 ret
23459
23460 /* rsi contains offset and can be scratched */
23461 @@ -113,6 +119,7 @@ bpf_slow_path_word:
23462 js bpf_error
23463 mov -12(%rbp),%eax
23464 bswap %eax
23465 + pax_force_retaddr
23466 ret
23467
23468 bpf_slow_path_half:
23469 @@ -121,12 +128,14 @@ bpf_slow_path_half:
23470 mov -12(%rbp),%ax
23471 rol $8,%ax
23472 movzwl %ax,%eax
23473 + pax_force_retaddr
23474 ret
23475
23476 bpf_slow_path_byte:
23477 bpf_slow_path_common(1)
23478 js bpf_error
23479 movzbl -12(%rbp),%eax
23480 + pax_force_retaddr
23481 ret
23482
23483 bpf_slow_path_byte_msh:
23484 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
23485 and $15,%al
23486 shl $2,%al
23487 xchg %eax,%ebx
23488 + pax_force_retaddr
23489 ret
23490 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
23491 index bfab3fa..05aac3a 100644
23492 --- a/arch/x86/net/bpf_jit_comp.c
23493 +++ b/arch/x86/net/bpf_jit_comp.c
23494 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
23495 set_fs(old_fs);
23496 }
23497
23498 +struct bpf_jit_work {
23499 + struct work_struct work;
23500 + void *image;
23501 +};
23502
23503 void bpf_jit_compile(struct sk_filter *fp)
23504 {
23505 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
23506 if (addrs == NULL)
23507 return;
23508
23509 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
23510 + if (!fp->work)
23511 + goto out;
23512 +
23513 /* Before first pass, make a rough estimation of addrs[]
23514 * each bpf instruction is translated to less than 64 bytes
23515 */
23516 @@ -585,11 +593,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23517 if (image) {
23518 if (unlikely(proglen + ilen > oldproglen)) {
23519 pr_err("bpb_jit_compile fatal error\n");
23520 - kfree(addrs);
23521 - module_free(NULL, image);
23522 - return;
23523 + module_free_exec(NULL, image);
23524 + goto out;
23525 }
23526 + pax_open_kernel();
23527 memcpy(image + proglen, temp, ilen);
23528 + pax_close_kernel();
23529 }
23530 proglen += ilen;
23531 addrs[i] = proglen;
23532 @@ -609,7 +618,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23533 break;
23534 }
23535 if (proglen == oldproglen) {
23536 - image = module_alloc(max_t(unsigned int,
23537 + image = module_alloc_exec(max_t(unsigned int,
23538 proglen,
23539 sizeof(struct work_struct)));
23540 if (!image)
23541 @@ -631,24 +640,27 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
23542 fp->bpf_func = (void *)image;
23543 }
23544 out:
23545 + kfree(fp->work);
23546 kfree(addrs);
23547 return;
23548 }
23549
23550 static void jit_free_defer(struct work_struct *arg)
23551 {
23552 - module_free(NULL, arg);
23553 + module_free_exec(NULL, ((struct bpf_jit_work*)arg)->image);
23554 + kfree(arg);
23555 }
23556
23557 /* run from softirq, we must use a work_struct to call
23558 - * module_free() from process context
23559 + * module_free_exec() from process context
23560 */
23561 void bpf_jit_free(struct sk_filter *fp)
23562 {
23563 if (fp->bpf_func != sk_run_filter) {
23564 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
23565 + struct work_struct *work = &fp->work->work;
23566
23567 INIT_WORK(work, jit_free_defer);
23568 + fp->work->image = fp->bpf_func;
23569 schedule_work(work);
23570 }
23571 }
23572 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
23573 index bff89df..377758a 100644
23574 --- a/arch/x86/oprofile/backtrace.c
23575 +++ b/arch/x86/oprofile/backtrace.c
23576 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
23577 struct stack_frame_ia32 *fp;
23578 unsigned long bytes;
23579
23580 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23581 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23582 if (bytes != sizeof(bufhead))
23583 return NULL;
23584
23585 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
23586 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
23587
23588 oprofile_add_trace(bufhead[0].return_address);
23589
23590 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
23591 struct stack_frame bufhead[2];
23592 unsigned long bytes;
23593
23594 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
23595 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
23596 if (bytes != sizeof(bufhead))
23597 return NULL;
23598
23599 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
23600 {
23601 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
23602
23603 - if (!user_mode_vm(regs)) {
23604 + if (!user_mode(regs)) {
23605 unsigned long stack = kernel_stack_pointer(regs);
23606 if (depth)
23607 dump_trace(NULL, regs, (unsigned long *)stack, 0,
23608 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
23609 index cb29191..036766d 100644
23610 --- a/arch/x86/pci/mrst.c
23611 +++ b/arch/x86/pci/mrst.c
23612 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
23613 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
23614 pci_mmcfg_late_init();
23615 pcibios_enable_irq = mrst_pci_irq_enable;
23616 - pci_root_ops = pci_mrst_ops;
23617 + pax_open_kernel();
23618 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
23619 + pax_close_kernel();
23620 /* Continue with standard init */
23621 return 1;
23622 }
23623 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
23624 index f685535..2b76a81 100644
23625 --- a/arch/x86/pci/pcbios.c
23626 +++ b/arch/x86/pci/pcbios.c
23627 @@ -79,50 +79,93 @@ union bios32 {
23628 static struct {
23629 unsigned long address;
23630 unsigned short segment;
23631 -} bios32_indirect = { 0, __KERNEL_CS };
23632 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
23633
23634 /*
23635 * Returns the entry point for the given service, NULL on error
23636 */
23637
23638 -static unsigned long bios32_service(unsigned long service)
23639 +static unsigned long __devinit bios32_service(unsigned long service)
23640 {
23641 unsigned char return_code; /* %al */
23642 unsigned long address; /* %ebx */
23643 unsigned long length; /* %ecx */
23644 unsigned long entry; /* %edx */
23645 unsigned long flags;
23646 + struct desc_struct d, *gdt;
23647
23648 local_irq_save(flags);
23649 - __asm__("lcall *(%%edi); cld"
23650 +
23651 + gdt = get_cpu_gdt_table(smp_processor_id());
23652 +
23653 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23654 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23655 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23656 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23657 +
23658 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23659 : "=a" (return_code),
23660 "=b" (address),
23661 "=c" (length),
23662 "=d" (entry)
23663 : "0" (service),
23664 "1" (0),
23665 - "D" (&bios32_indirect));
23666 + "D" (&bios32_indirect),
23667 + "r"(__PCIBIOS_DS)
23668 + : "memory");
23669 +
23670 + pax_open_kernel();
23671 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23672 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23673 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23674 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23675 + pax_close_kernel();
23676 +
23677 local_irq_restore(flags);
23678
23679 switch (return_code) {
23680 - case 0:
23681 - return address + entry;
23682 - case 0x80: /* Not present */
23683 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23684 - return 0;
23685 - default: /* Shouldn't happen */
23686 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23687 - service, return_code);
23688 + case 0: {
23689 + int cpu;
23690 + unsigned char flags;
23691 +
23692 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23693 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23694 + printk(KERN_WARNING "bios32_service: not valid\n");
23695 return 0;
23696 + }
23697 + address = address + PAGE_OFFSET;
23698 + length += 16UL; /* some BIOSs underreport this... */
23699 + flags = 4;
23700 + if (length >= 64*1024*1024) {
23701 + length >>= PAGE_SHIFT;
23702 + flags |= 8;
23703 + }
23704 +
23705 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
23706 + gdt = get_cpu_gdt_table(cpu);
23707 + pack_descriptor(&d, address, length, 0x9b, flags);
23708 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23709 + pack_descriptor(&d, address, length, 0x93, flags);
23710 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23711 + }
23712 + return entry;
23713 + }
23714 + case 0x80: /* Not present */
23715 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23716 + return 0;
23717 + default: /* Shouldn't happen */
23718 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23719 + service, return_code);
23720 + return 0;
23721 }
23722 }
23723
23724 static struct {
23725 unsigned long address;
23726 unsigned short segment;
23727 -} pci_indirect = { 0, __KERNEL_CS };
23728 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23729
23730 -static int pci_bios_present;
23731 +static int pci_bios_present __read_only;
23732
23733 static int __devinit check_pcibios(void)
23734 {
23735 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
23736 unsigned long flags, pcibios_entry;
23737
23738 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23739 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23740 + pci_indirect.address = pcibios_entry;
23741
23742 local_irq_save(flags);
23743 - __asm__(
23744 - "lcall *(%%edi); cld\n\t"
23745 + __asm__("movw %w6, %%ds\n\t"
23746 + "lcall *%%ss:(%%edi); cld\n\t"
23747 + "push %%ss\n\t"
23748 + "pop %%ds\n\t"
23749 "jc 1f\n\t"
23750 "xor %%ah, %%ah\n"
23751 "1:"
23752 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
23753 "=b" (ebx),
23754 "=c" (ecx)
23755 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23756 - "D" (&pci_indirect)
23757 + "D" (&pci_indirect),
23758 + "r" (__PCIBIOS_DS)
23759 : "memory");
23760 local_irq_restore(flags);
23761
23762 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23763
23764 switch (len) {
23765 case 1:
23766 - __asm__("lcall *(%%esi); cld\n\t"
23767 + __asm__("movw %w6, %%ds\n\t"
23768 + "lcall *%%ss:(%%esi); cld\n\t"
23769 + "push %%ss\n\t"
23770 + "pop %%ds\n\t"
23771 "jc 1f\n\t"
23772 "xor %%ah, %%ah\n"
23773 "1:"
23774 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23775 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23776 "b" (bx),
23777 "D" ((long)reg),
23778 - "S" (&pci_indirect));
23779 + "S" (&pci_indirect),
23780 + "r" (__PCIBIOS_DS));
23781 /*
23782 * Zero-extend the result beyond 8 bits, do not trust the
23783 * BIOS having done it:
23784 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23785 *value &= 0xff;
23786 break;
23787 case 2:
23788 - __asm__("lcall *(%%esi); cld\n\t"
23789 + __asm__("movw %w6, %%ds\n\t"
23790 + "lcall *%%ss:(%%esi); cld\n\t"
23791 + "push %%ss\n\t"
23792 + "pop %%ds\n\t"
23793 "jc 1f\n\t"
23794 "xor %%ah, %%ah\n"
23795 "1:"
23796 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23797 : "1" (PCIBIOS_READ_CONFIG_WORD),
23798 "b" (bx),
23799 "D" ((long)reg),
23800 - "S" (&pci_indirect));
23801 + "S" (&pci_indirect),
23802 + "r" (__PCIBIOS_DS));
23803 /*
23804 * Zero-extend the result beyond 16 bits, do not trust the
23805 * BIOS having done it:
23806 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23807 *value &= 0xffff;
23808 break;
23809 case 4:
23810 - __asm__("lcall *(%%esi); cld\n\t"
23811 + __asm__("movw %w6, %%ds\n\t"
23812 + "lcall *%%ss:(%%esi); cld\n\t"
23813 + "push %%ss\n\t"
23814 + "pop %%ds\n\t"
23815 "jc 1f\n\t"
23816 "xor %%ah, %%ah\n"
23817 "1:"
23818 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
23819 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23820 "b" (bx),
23821 "D" ((long)reg),
23822 - "S" (&pci_indirect));
23823 + "S" (&pci_indirect),
23824 + "r" (__PCIBIOS_DS));
23825 break;
23826 }
23827
23828 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23829
23830 switch (len) {
23831 case 1:
23832 - __asm__("lcall *(%%esi); cld\n\t"
23833 + __asm__("movw %w6, %%ds\n\t"
23834 + "lcall *%%ss:(%%esi); cld\n\t"
23835 + "push %%ss\n\t"
23836 + "pop %%ds\n\t"
23837 "jc 1f\n\t"
23838 "xor %%ah, %%ah\n"
23839 "1:"
23840 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23841 "c" (value),
23842 "b" (bx),
23843 "D" ((long)reg),
23844 - "S" (&pci_indirect));
23845 + "S" (&pci_indirect),
23846 + "r" (__PCIBIOS_DS));
23847 break;
23848 case 2:
23849 - __asm__("lcall *(%%esi); cld\n\t"
23850 + __asm__("movw %w6, %%ds\n\t"
23851 + "lcall *%%ss:(%%esi); cld\n\t"
23852 + "push %%ss\n\t"
23853 + "pop %%ds\n\t"
23854 "jc 1f\n\t"
23855 "xor %%ah, %%ah\n"
23856 "1:"
23857 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23858 "c" (value),
23859 "b" (bx),
23860 "D" ((long)reg),
23861 - "S" (&pci_indirect));
23862 + "S" (&pci_indirect),
23863 + "r" (__PCIBIOS_DS));
23864 break;
23865 case 4:
23866 - __asm__("lcall *(%%esi); cld\n\t"
23867 + __asm__("movw %w6, %%ds\n\t"
23868 + "lcall *%%ss:(%%esi); cld\n\t"
23869 + "push %%ss\n\t"
23870 + "pop %%ds\n\t"
23871 "jc 1f\n\t"
23872 "xor %%ah, %%ah\n"
23873 "1:"
23874 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
23875 "c" (value),
23876 "b" (bx),
23877 "D" ((long)reg),
23878 - "S" (&pci_indirect));
23879 + "S" (&pci_indirect),
23880 + "r" (__PCIBIOS_DS));
23881 break;
23882 }
23883
23884 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
23885
23886 DBG("PCI: Fetching IRQ routing table... ");
23887 __asm__("push %%es\n\t"
23888 + "movw %w8, %%ds\n\t"
23889 "push %%ds\n\t"
23890 "pop %%es\n\t"
23891 - "lcall *(%%esi); cld\n\t"
23892 + "lcall *%%ss:(%%esi); cld\n\t"
23893 "pop %%es\n\t"
23894 + "push %%ss\n\t"
23895 + "pop %%ds\n"
23896 "jc 1f\n\t"
23897 "xor %%ah, %%ah\n"
23898 "1:"
23899 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
23900 "1" (0),
23901 "D" ((long) &opt),
23902 "S" (&pci_indirect),
23903 - "m" (opt)
23904 + "m" (opt),
23905 + "r" (__PCIBIOS_DS)
23906 : "memory");
23907 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23908 if (ret & 0xff00)
23909 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
23910 {
23911 int ret;
23912
23913 - __asm__("lcall *(%%esi); cld\n\t"
23914 + __asm__("movw %w5, %%ds\n\t"
23915 + "lcall *%%ss:(%%esi); cld\n\t"
23916 + "push %%ss\n\t"
23917 + "pop %%ds\n"
23918 "jc 1f\n\t"
23919 "xor %%ah, %%ah\n"
23920 "1:"
23921 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
23922 : "0" (PCIBIOS_SET_PCI_HW_INT),
23923 "b" ((dev->bus->number << 8) | dev->devfn),
23924 "c" ((irq << 8) | (pin + 10)),
23925 - "S" (&pci_indirect));
23926 + "S" (&pci_indirect),
23927 + "r" (__PCIBIOS_DS));
23928 return !(ret & 0xff00);
23929 }
23930 EXPORT_SYMBOL(pcibios_set_irq_routing);
23931 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
23932 index 5cab48e..b025f9b 100644
23933 --- a/arch/x86/platform/efi/efi_32.c
23934 +++ b/arch/x86/platform/efi/efi_32.c
23935 @@ -38,70 +38,56 @@
23936 */
23937
23938 static unsigned long efi_rt_eflags;
23939 -static pgd_t efi_bak_pg_dir_pointer[2];
23940 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
23941
23942 -void efi_call_phys_prelog(void)
23943 +void __init efi_call_phys_prelog(void)
23944 {
23945 - unsigned long cr4;
23946 - unsigned long temp;
23947 struct desc_ptr gdt_descr;
23948
23949 +#ifdef CONFIG_PAX_KERNEXEC
23950 + struct desc_struct d;
23951 +#endif
23952 +
23953 local_irq_save(efi_rt_eflags);
23954
23955 - /*
23956 - * If I don't have PAE, I should just duplicate two entries in page
23957 - * directory. If I have PAE, I just need to duplicate one entry in
23958 - * page directory.
23959 - */
23960 - cr4 = read_cr4_safe();
23961 -
23962 - if (cr4 & X86_CR4_PAE) {
23963 - efi_bak_pg_dir_pointer[0].pgd =
23964 - swapper_pg_dir[pgd_index(0)].pgd;
23965 - swapper_pg_dir[0].pgd =
23966 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
23967 - } else {
23968 - efi_bak_pg_dir_pointer[0].pgd =
23969 - swapper_pg_dir[pgd_index(0)].pgd;
23970 - efi_bak_pg_dir_pointer[1].pgd =
23971 - swapper_pg_dir[pgd_index(0x400000)].pgd;
23972 - swapper_pg_dir[pgd_index(0)].pgd =
23973 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
23974 - temp = PAGE_OFFSET + 0x400000;
23975 - swapper_pg_dir[pgd_index(0x400000)].pgd =
23976 - swapper_pg_dir[pgd_index(temp)].pgd;
23977 - }
23978 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
23979 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
23980 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
23981
23982 /*
23983 * After the lock is released, the original page table is restored.
23984 */
23985 __flush_tlb_all();
23986
23987 +#ifdef CONFIG_PAX_KERNEXEC
23988 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
23989 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
23990 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
23991 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
23992 +#endif
23993 +
23994 gdt_descr.address = __pa(get_cpu_gdt_table(0));
23995 gdt_descr.size = GDT_SIZE - 1;
23996 load_gdt(&gdt_descr);
23997 }
23998
23999 -void efi_call_phys_epilog(void)
24000 +void __init efi_call_phys_epilog(void)
24001 {
24002 - unsigned long cr4;
24003 struct desc_ptr gdt_descr;
24004
24005 +#ifdef CONFIG_PAX_KERNEXEC
24006 + struct desc_struct d;
24007 +
24008 + memset(&d, 0, sizeof d);
24009 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
24010 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
24011 +#endif
24012 +
24013 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
24014 gdt_descr.size = GDT_SIZE - 1;
24015 load_gdt(&gdt_descr);
24016
24017 - cr4 = read_cr4_safe();
24018 -
24019 - if (cr4 & X86_CR4_PAE) {
24020 - swapper_pg_dir[pgd_index(0)].pgd =
24021 - efi_bak_pg_dir_pointer[0].pgd;
24022 - } else {
24023 - swapper_pg_dir[pgd_index(0)].pgd =
24024 - efi_bak_pg_dir_pointer[0].pgd;
24025 - swapper_pg_dir[pgd_index(0x400000)].pgd =
24026 - efi_bak_pg_dir_pointer[1].pgd;
24027 - }
24028 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
24029
24030 /*
24031 * After the lock is released, the original page table is restored.
24032 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
24033 index fbe66e6..c5c0dd2 100644
24034 --- a/arch/x86/platform/efi/efi_stub_32.S
24035 +++ b/arch/x86/platform/efi/efi_stub_32.S
24036 @@ -6,7 +6,9 @@
24037 */
24038
24039 #include <linux/linkage.h>
24040 +#include <linux/init.h>
24041 #include <asm/page_types.h>
24042 +#include <asm/segment.h>
24043
24044 /*
24045 * efi_call_phys(void *, ...) is a function with variable parameters.
24046 @@ -20,7 +22,7 @@
24047 * service functions will comply with gcc calling convention, too.
24048 */
24049
24050 -.text
24051 +__INIT
24052 ENTRY(efi_call_phys)
24053 /*
24054 * 0. The function can only be called in Linux kernel. So CS has been
24055 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
24056 * The mapping of lower virtual memory has been created in prelog and
24057 * epilog.
24058 */
24059 - movl $1f, %edx
24060 - subl $__PAGE_OFFSET, %edx
24061 - jmp *%edx
24062 + movl $(__KERNEXEC_EFI_DS), %edx
24063 + mov %edx, %ds
24064 + mov %edx, %es
24065 + mov %edx, %ss
24066 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
24067 1:
24068
24069 /*
24070 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
24071 * parameter 2, ..., param n. To make things easy, we save the return
24072 * address of efi_call_phys in a global variable.
24073 */
24074 - popl %edx
24075 - movl %edx, saved_return_addr
24076 - /* get the function pointer into ECX*/
24077 - popl %ecx
24078 - movl %ecx, efi_rt_function_ptr
24079 - movl $2f, %edx
24080 - subl $__PAGE_OFFSET, %edx
24081 - pushl %edx
24082 + popl (saved_return_addr)
24083 + popl (efi_rt_function_ptr)
24084
24085 /*
24086 * 3. Clear PG bit in %CR0.
24087 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
24088 /*
24089 * 5. Call the physical function.
24090 */
24091 - jmp *%ecx
24092 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
24093
24094 -2:
24095 /*
24096 * 6. After EFI runtime service returns, control will return to
24097 * following instruction. We'd better readjust stack pointer first.
24098 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
24099 movl %cr0, %edx
24100 orl $0x80000000, %edx
24101 movl %edx, %cr0
24102 - jmp 1f
24103 -1:
24104 +
24105 /*
24106 * 8. Now restore the virtual mode from flat mode by
24107 * adding EIP with PAGE_OFFSET.
24108 */
24109 - movl $1f, %edx
24110 - jmp *%edx
24111 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
24112 1:
24113 + movl $(__KERNEL_DS), %edx
24114 + mov %edx, %ds
24115 + mov %edx, %es
24116 + mov %edx, %ss
24117
24118 /*
24119 * 9. Balance the stack. And because EAX contain the return value,
24120 * we'd better not clobber it.
24121 */
24122 - leal efi_rt_function_ptr, %edx
24123 - movl (%edx), %ecx
24124 - pushl %ecx
24125 + pushl (efi_rt_function_ptr)
24126
24127 /*
24128 - * 10. Push the saved return address onto the stack and return.
24129 + * 10. Return to the saved return address.
24130 */
24131 - leal saved_return_addr, %edx
24132 - movl (%edx), %ecx
24133 - pushl %ecx
24134 - ret
24135 + jmpl *(saved_return_addr)
24136 ENDPROC(efi_call_phys)
24137 .previous
24138
24139 -.data
24140 +__INITDATA
24141 saved_return_addr:
24142 .long 0
24143 efi_rt_function_ptr:
24144 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
24145 index 4c07cca..2c8427d 100644
24146 --- a/arch/x86/platform/efi/efi_stub_64.S
24147 +++ b/arch/x86/platform/efi/efi_stub_64.S
24148 @@ -7,6 +7,7 @@
24149 */
24150
24151 #include <linux/linkage.h>
24152 +#include <asm/alternative-asm.h>
24153
24154 #define SAVE_XMM \
24155 mov %rsp, %rax; \
24156 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
24157 call *%rdi
24158 addq $32, %rsp
24159 RESTORE_XMM
24160 + pax_force_retaddr 0, 1
24161 ret
24162 ENDPROC(efi_call0)
24163
24164 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
24165 call *%rdi
24166 addq $32, %rsp
24167 RESTORE_XMM
24168 + pax_force_retaddr 0, 1
24169 ret
24170 ENDPROC(efi_call1)
24171
24172 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
24173 call *%rdi
24174 addq $32, %rsp
24175 RESTORE_XMM
24176 + pax_force_retaddr 0, 1
24177 ret
24178 ENDPROC(efi_call2)
24179
24180 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
24181 call *%rdi
24182 addq $32, %rsp
24183 RESTORE_XMM
24184 + pax_force_retaddr 0, 1
24185 ret
24186 ENDPROC(efi_call3)
24187
24188 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
24189 call *%rdi
24190 addq $32, %rsp
24191 RESTORE_XMM
24192 + pax_force_retaddr 0, 1
24193 ret
24194 ENDPROC(efi_call4)
24195
24196 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
24197 call *%rdi
24198 addq $48, %rsp
24199 RESTORE_XMM
24200 + pax_force_retaddr 0, 1
24201 ret
24202 ENDPROC(efi_call5)
24203
24204 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
24205 call *%rdi
24206 addq $48, %rsp
24207 RESTORE_XMM
24208 + pax_force_retaddr 0, 1
24209 ret
24210 ENDPROC(efi_call6)
24211 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
24212 index fe73276..70fe25a 100644
24213 --- a/arch/x86/platform/mrst/mrst.c
24214 +++ b/arch/x86/platform/mrst/mrst.c
24215 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
24216 }
24217
24218 /* Reboot and power off are handled by the SCU on a MID device */
24219 -static void mrst_power_off(void)
24220 +static __noreturn void mrst_power_off(void)
24221 {
24222 intel_scu_ipc_simple_command(0xf1, 1);
24223 + BUG();
24224 }
24225
24226 -static void mrst_reboot(void)
24227 +static __noreturn void mrst_reboot(void)
24228 {
24229 intel_scu_ipc_simple_command(0xf1, 0);
24230 + BUG();
24231 }
24232
24233 /*
24234 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
24235 index 5b55219..b326540 100644
24236 --- a/arch/x86/platform/uv/tlb_uv.c
24237 +++ b/arch/x86/platform/uv/tlb_uv.c
24238 @@ -377,6 +377,8 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
24239 struct bau_control *smaster = bcp->socket_master;
24240 struct reset_args reset_args;
24241
24242 + pax_track_stack();
24243 +
24244 reset_args.sender = sender;
24245 cpus_clear(*mask);
24246 /* find a single cpu for each uvhub in this distribution mask */
24247 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
24248 index 87bb35e..eff2da8 100644
24249 --- a/arch/x86/power/cpu.c
24250 +++ b/arch/x86/power/cpu.c
24251 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
24252 static void fix_processor_context(void)
24253 {
24254 int cpu = smp_processor_id();
24255 - struct tss_struct *t = &per_cpu(init_tss, cpu);
24256 + struct tss_struct *t = init_tss + cpu;
24257
24258 set_tss_desc(cpu, t); /*
24259 * This just modifies memory; should not be
24260 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
24261 */
24262
24263 #ifdef CONFIG_X86_64
24264 + pax_open_kernel();
24265 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
24266 + pax_close_kernel();
24267
24268 syscall_init(); /* This sets MSR_*STAR and related */
24269 #endif
24270 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
24271 index 5d17950..2253fc9 100644
24272 --- a/arch/x86/vdso/Makefile
24273 +++ b/arch/x86/vdso/Makefile
24274 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
24275 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
24276 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
24277
24278 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24279 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
24280 GCOV_PROFILE := n
24281
24282 #
24283 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
24284 index 468d591..8e80a0a 100644
24285 --- a/arch/x86/vdso/vdso32-setup.c
24286 +++ b/arch/x86/vdso/vdso32-setup.c
24287 @@ -25,6 +25,7 @@
24288 #include <asm/tlbflush.h>
24289 #include <asm/vdso.h>
24290 #include <asm/proto.h>
24291 +#include <asm/mman.h>
24292
24293 enum {
24294 VDSO_DISABLED = 0,
24295 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
24296 void enable_sep_cpu(void)
24297 {
24298 int cpu = get_cpu();
24299 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
24300 + struct tss_struct *tss = init_tss + cpu;
24301
24302 if (!boot_cpu_has(X86_FEATURE_SEP)) {
24303 put_cpu();
24304 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
24305 gate_vma.vm_start = FIXADDR_USER_START;
24306 gate_vma.vm_end = FIXADDR_USER_END;
24307 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
24308 - gate_vma.vm_page_prot = __P101;
24309 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
24310 /*
24311 * Make sure the vDSO gets into every core dump.
24312 * Dumping its contents makes post-mortem fully interpretable later
24313 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24314 if (compat)
24315 addr = VDSO_HIGH_BASE;
24316 else {
24317 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
24318 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
24319 if (IS_ERR_VALUE(addr)) {
24320 ret = addr;
24321 goto up_fail;
24322 }
24323 }
24324
24325 - current->mm->context.vdso = (void *)addr;
24326 + current->mm->context.vdso = addr;
24327
24328 if (compat_uses_vma || !compat) {
24329 /*
24330 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24331 }
24332
24333 current_thread_info()->sysenter_return =
24334 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24335 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
24336
24337 up_fail:
24338 if (ret)
24339 - current->mm->context.vdso = NULL;
24340 + current->mm->context.vdso = 0;
24341
24342 up_write(&mm->mmap_sem);
24343
24344 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
24345
24346 const char *arch_vma_name(struct vm_area_struct *vma)
24347 {
24348 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24349 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24350 return "[vdso]";
24351 +
24352 +#ifdef CONFIG_PAX_SEGMEXEC
24353 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
24354 + return "[vdso]";
24355 +#endif
24356 +
24357 return NULL;
24358 }
24359
24360 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24361 * Check to see if the corresponding task was created in compat vdso
24362 * mode.
24363 */
24364 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
24365 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
24366 return &gate_vma;
24367 return NULL;
24368 }
24369 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
24370 index 316fbca..4638633 100644
24371 --- a/arch/x86/vdso/vma.c
24372 +++ b/arch/x86/vdso/vma.c
24373 @@ -16,8 +16,6 @@
24374 #include <asm/vdso.h>
24375 #include <asm/page.h>
24376
24377 -unsigned int __read_mostly vdso_enabled = 1;
24378 -
24379 extern char vdso_start[], vdso_end[];
24380 extern unsigned short vdso_sync_cpuid;
24381
24382 @@ -97,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
24383 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24384 {
24385 struct mm_struct *mm = current->mm;
24386 - unsigned long addr;
24387 + unsigned long addr = 0;
24388 int ret;
24389
24390 - if (!vdso_enabled)
24391 - return 0;
24392 -
24393 down_write(&mm->mmap_sem);
24394 +
24395 +#ifdef CONFIG_PAX_RANDMMAP
24396 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
24397 +#endif
24398 +
24399 addr = vdso_addr(mm->start_stack, vdso_size);
24400 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
24401 if (IS_ERR_VALUE(addr)) {
24402 @@ -111,26 +111,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
24403 goto up_fail;
24404 }
24405
24406 - current->mm->context.vdso = (void *)addr;
24407 + mm->context.vdso = addr;
24408
24409 ret = install_special_mapping(mm, addr, vdso_size,
24410 VM_READ|VM_EXEC|
24411 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
24412 VM_ALWAYSDUMP,
24413 vdso_pages);
24414 - if (ret) {
24415 - current->mm->context.vdso = NULL;
24416 - goto up_fail;
24417 - }
24418 +
24419 + if (ret)
24420 + mm->context.vdso = 0;
24421
24422 up_fail:
24423 up_write(&mm->mmap_sem);
24424 return ret;
24425 }
24426 -
24427 -static __init int vdso_setup(char *s)
24428 -{
24429 - vdso_enabled = simple_strtoul(s, NULL, 0);
24430 - return 0;
24431 -}
24432 -__setup("vdso=", vdso_setup);
24433 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
24434 index 46c8069..6330d3c 100644
24435 --- a/arch/x86/xen/enlighten.c
24436 +++ b/arch/x86/xen/enlighten.c
24437 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
24438
24439 struct shared_info xen_dummy_shared_info;
24440
24441 -void *xen_initial_gdt;
24442 -
24443 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
24444 __read_mostly int xen_have_vector_callback;
24445 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
24446 @@ -1028,7 +1026,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
24447 #endif
24448 };
24449
24450 -static void xen_reboot(int reason)
24451 +static __noreturn void xen_reboot(int reason)
24452 {
24453 struct sched_shutdown r = { .reason = reason };
24454
24455 @@ -1036,17 +1034,17 @@ static void xen_reboot(int reason)
24456 BUG();
24457 }
24458
24459 -static void xen_restart(char *msg)
24460 +static __noreturn void xen_restart(char *msg)
24461 {
24462 xen_reboot(SHUTDOWN_reboot);
24463 }
24464
24465 -static void xen_emergency_restart(void)
24466 +static __noreturn void xen_emergency_restart(void)
24467 {
24468 xen_reboot(SHUTDOWN_reboot);
24469 }
24470
24471 -static void xen_machine_halt(void)
24472 +static __noreturn void xen_machine_halt(void)
24473 {
24474 xen_reboot(SHUTDOWN_poweroff);
24475 }
24476 @@ -1152,7 +1150,17 @@ asmlinkage void __init xen_start_kernel(void)
24477 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
24478
24479 /* Work out if we support NX */
24480 - x86_configure_nx();
24481 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
24482 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
24483 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
24484 + unsigned l, h;
24485 +
24486 + __supported_pte_mask |= _PAGE_NX;
24487 + rdmsr(MSR_EFER, l, h);
24488 + l |= EFER_NX;
24489 + wrmsr(MSR_EFER, l, h);
24490 + }
24491 +#endif
24492
24493 xen_setup_features();
24494
24495 @@ -1183,13 +1191,6 @@ asmlinkage void __init xen_start_kernel(void)
24496
24497 machine_ops = xen_machine_ops;
24498
24499 - /*
24500 - * The only reliable way to retain the initial address of the
24501 - * percpu gdt_page is to remember it here, so we can go and
24502 - * mark it RW later, when the initial percpu area is freed.
24503 - */
24504 - xen_initial_gdt = &per_cpu(gdt_page, 0);
24505 -
24506 xen_smp_init();
24507
24508 #ifdef CONFIG_ACPI_NUMA
24509 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
24510 index 3dd53f9..9e8ba48 100644
24511 --- a/arch/x86/xen/mmu.c
24512 +++ b/arch/x86/xen/mmu.c
24513 @@ -1768,6 +1768,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24514 convert_pfn_mfn(init_level4_pgt);
24515 convert_pfn_mfn(level3_ident_pgt);
24516 convert_pfn_mfn(level3_kernel_pgt);
24517 + convert_pfn_mfn(level3_vmalloc_pgt);
24518 + convert_pfn_mfn(level3_vmemmap_pgt);
24519
24520 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
24521 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
24522 @@ -1786,7 +1788,10 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
24523 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
24524 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
24525 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
24526 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
24527 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
24528 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
24529 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
24530 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
24531 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
24532
24533 @@ -2000,6 +2005,7 @@ static void __init xen_post_allocator_init(void)
24534 pv_mmu_ops.set_pud = xen_set_pud;
24535 #if PAGETABLE_LEVELS == 4
24536 pv_mmu_ops.set_pgd = xen_set_pgd;
24537 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
24538 #endif
24539
24540 /* This will work as long as patching hasn't happened yet
24541 @@ -2081,6 +2087,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
24542 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24543 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24544 .set_pgd = xen_set_pgd_hyper,
24545 + .set_pgd_batched = xen_set_pgd_hyper,
24546
24547 .alloc_pud = xen_alloc_pmd_init,
24548 .release_pud = xen_release_pmd_init,
24549 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
24550 index 041d4fe..7666b7e 100644
24551 --- a/arch/x86/xen/smp.c
24552 +++ b/arch/x86/xen/smp.c
24553 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
24554 {
24555 BUG_ON(smp_processor_id() != 0);
24556 native_smp_prepare_boot_cpu();
24557 -
24558 - /* We've switched to the "real" per-cpu gdt, so make sure the
24559 - old memory can be recycled */
24560 - make_lowmem_page_readwrite(xen_initial_gdt);
24561 -
24562 xen_filter_cpu_maps();
24563 xen_setup_vcpu_info_placement();
24564 }
24565 @@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
24566 gdt = get_cpu_gdt_table(cpu);
24567
24568 ctxt->flags = VGCF_IN_KERNEL;
24569 - ctxt->user_regs.ds = __USER_DS;
24570 - ctxt->user_regs.es = __USER_DS;
24571 + ctxt->user_regs.ds = __KERNEL_DS;
24572 + ctxt->user_regs.es = __KERNEL_DS;
24573 ctxt->user_regs.ss = __KERNEL_DS;
24574 #ifdef CONFIG_X86_32
24575 ctxt->user_regs.fs = __KERNEL_PERCPU;
24576 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
24577 + savesegment(gs, ctxt->user_regs.gs);
24578 #else
24579 ctxt->gs_base_kernel = per_cpu_offset(cpu);
24580 #endif
24581 @@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
24582 int rc;
24583
24584 per_cpu(current_task, cpu) = idle;
24585 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
24586 #ifdef CONFIG_X86_32
24587 irq_ctx_init(cpu);
24588 #else
24589 clear_tsk_thread_flag(idle, TIF_FORK);
24590 - per_cpu(kernel_stack, cpu) =
24591 - (unsigned long)task_stack_page(idle) -
24592 - KERNEL_STACK_OFFSET + THREAD_SIZE;
24593 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
24594 #endif
24595 xen_setup_runstate_info(cpu);
24596 xen_setup_timer(cpu);
24597 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
24598 index b040b0e..8cc4fe0 100644
24599 --- a/arch/x86/xen/xen-asm_32.S
24600 +++ b/arch/x86/xen/xen-asm_32.S
24601 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
24602 ESP_OFFSET=4 # bytes pushed onto stack
24603
24604 /*
24605 - * Store vcpu_info pointer for easy access. Do it this way to
24606 - * avoid having to reload %fs
24607 + * Store vcpu_info pointer for easy access.
24608 */
24609 #ifdef CONFIG_SMP
24610 - GET_THREAD_INFO(%eax)
24611 - movl TI_cpu(%eax), %eax
24612 - movl __per_cpu_offset(,%eax,4), %eax
24613 - mov xen_vcpu(%eax), %eax
24614 + push %fs
24615 + mov $(__KERNEL_PERCPU), %eax
24616 + mov %eax, %fs
24617 + mov PER_CPU_VAR(xen_vcpu), %eax
24618 + pop %fs
24619 #else
24620 movl xen_vcpu, %eax
24621 #endif
24622 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
24623 index aaa7291..3f77960 100644
24624 --- a/arch/x86/xen/xen-head.S
24625 +++ b/arch/x86/xen/xen-head.S
24626 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
24627 #ifdef CONFIG_X86_32
24628 mov %esi,xen_start_info
24629 mov $init_thread_union+THREAD_SIZE,%esp
24630 +#ifdef CONFIG_SMP
24631 + movl $cpu_gdt_table,%edi
24632 + movl $__per_cpu_load,%eax
24633 + movw %ax,__KERNEL_PERCPU + 2(%edi)
24634 + rorl $16,%eax
24635 + movb %al,__KERNEL_PERCPU + 4(%edi)
24636 + movb %ah,__KERNEL_PERCPU + 7(%edi)
24637 + movl $__per_cpu_end - 1,%eax
24638 + subl $__per_cpu_start,%eax
24639 + movw %ax,__KERNEL_PERCPU + 0(%edi)
24640 +#endif
24641 #else
24642 mov %rsi,xen_start_info
24643 mov $init_thread_union+THREAD_SIZE,%rsp
24644 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
24645 index b095739..8c17bcd 100644
24646 --- a/arch/x86/xen/xen-ops.h
24647 +++ b/arch/x86/xen/xen-ops.h
24648 @@ -10,8 +10,6 @@
24649 extern const char xen_hypervisor_callback[];
24650 extern const char xen_failsafe_callback[];
24651
24652 -extern void *xen_initial_gdt;
24653 -
24654 struct trap_info;
24655 void xen_copy_trap_info(struct trap_info *traps);
24656
24657 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
24658 index 58916af..9cb880b 100644
24659 --- a/block/blk-iopoll.c
24660 +++ b/block/blk-iopoll.c
24661 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
24662 }
24663 EXPORT_SYMBOL(blk_iopoll_complete);
24664
24665 -static void blk_iopoll_softirq(struct softirq_action *h)
24666 +static void blk_iopoll_softirq(void)
24667 {
24668 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
24669 int rearm = 0, budget = blk_iopoll_budget;
24670 diff --git a/block/blk-map.c b/block/blk-map.c
24671 index 164cd00..6d96fc1 100644
24672 --- a/block/blk-map.c
24673 +++ b/block/blk-map.c
24674 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
24675 if (!len || !kbuf)
24676 return -EINVAL;
24677
24678 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
24679 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
24680 if (do_copy)
24681 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
24682 else
24683 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
24684 index 1366a89..e17f54b 100644
24685 --- a/block/blk-softirq.c
24686 +++ b/block/blk-softirq.c
24687 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
24688 * Softirq action handler - move entries to local list and loop over them
24689 * while passing them to the queue registered handler.
24690 */
24691 -static void blk_done_softirq(struct softirq_action *h)
24692 +static void blk_done_softirq(void)
24693 {
24694 struct list_head *cpu_list, local_list;
24695
24696 diff --git a/block/bsg.c b/block/bsg.c
24697 index 702f131..37808bf 100644
24698 --- a/block/bsg.c
24699 +++ b/block/bsg.c
24700 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
24701 struct sg_io_v4 *hdr, struct bsg_device *bd,
24702 fmode_t has_write_perm)
24703 {
24704 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24705 + unsigned char *cmdptr;
24706 +
24707 if (hdr->request_len > BLK_MAX_CDB) {
24708 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
24709 if (!rq->cmd)
24710 return -ENOMEM;
24711 - }
24712 + cmdptr = rq->cmd;
24713 + } else
24714 + cmdptr = tmpcmd;
24715
24716 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
24717 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
24718 hdr->request_len))
24719 return -EFAULT;
24720
24721 + if (cmdptr != rq->cmd)
24722 + memcpy(rq->cmd, cmdptr, hdr->request_len);
24723 +
24724 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
24725 if (blk_verify_command(rq->cmd, has_write_perm))
24726 return -EPERM;
24727 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
24728 index 7b72502..646105c 100644
24729 --- a/block/compat_ioctl.c
24730 +++ b/block/compat_ioctl.c
24731 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
24732 err |= __get_user(f->spec1, &uf->spec1);
24733 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
24734 err |= __get_user(name, &uf->name);
24735 - f->name = compat_ptr(name);
24736 + f->name = (void __force_kernel *)compat_ptr(name);
24737 if (err) {
24738 err = -EFAULT;
24739 goto out;
24740 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
24741 index 4f4230b..0feae9a 100644
24742 --- a/block/scsi_ioctl.c
24743 +++ b/block/scsi_ioctl.c
24744 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
24745 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
24746 struct sg_io_hdr *hdr, fmode_t mode)
24747 {
24748 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
24749 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24750 + unsigned char *cmdptr;
24751 +
24752 + if (rq->cmd != rq->__cmd)
24753 + cmdptr = rq->cmd;
24754 + else
24755 + cmdptr = tmpcmd;
24756 +
24757 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
24758 return -EFAULT;
24759 +
24760 + if (cmdptr != rq->cmd)
24761 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
24762 +
24763 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
24764 return -EPERM;
24765
24766 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24767 int err;
24768 unsigned int in_len, out_len, bytes, opcode, cmdlen;
24769 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
24770 + unsigned char tmpcmd[sizeof(rq->__cmd)];
24771 + unsigned char *cmdptr;
24772
24773 if (!sic)
24774 return -EINVAL;
24775 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
24776 */
24777 err = -EFAULT;
24778 rq->cmd_len = cmdlen;
24779 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
24780 +
24781 + if (rq->cmd != rq->__cmd)
24782 + cmdptr = rq->cmd;
24783 + else
24784 + cmdptr = tmpcmd;
24785 +
24786 + if (copy_from_user(cmdptr, sic->data, cmdlen))
24787 goto error;
24788
24789 + if (rq->cmd != cmdptr)
24790 + memcpy(rq->cmd, cmdptr, cmdlen);
24791 +
24792 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
24793 goto error;
24794
24795 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
24796 index 671d4d6..5f24030 100644
24797 --- a/crypto/cryptd.c
24798 +++ b/crypto/cryptd.c
24799 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
24800
24801 struct cryptd_blkcipher_request_ctx {
24802 crypto_completion_t complete;
24803 -};
24804 +} __no_const;
24805
24806 struct cryptd_hash_ctx {
24807 struct crypto_shash *child;
24808 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
24809
24810 struct cryptd_aead_request_ctx {
24811 crypto_completion_t complete;
24812 -};
24813 +} __no_const;
24814
24815 static void cryptd_queue_worker(struct work_struct *work);
24816
24817 diff --git a/crypto/serpent.c b/crypto/serpent.c
24818 index b651a55..a9ddd79b 100644
24819 --- a/crypto/serpent.c
24820 +++ b/crypto/serpent.c
24821 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
24822 u32 r0,r1,r2,r3,r4;
24823 int i;
24824
24825 + pax_track_stack();
24826 +
24827 /* Copy key, add padding */
24828
24829 for (i = 0; i < keylen; ++i)
24830 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
24831 index 5d41894..22021e4 100644
24832 --- a/drivers/acpi/apei/cper.c
24833 +++ b/drivers/acpi/apei/cper.c
24834 @@ -38,12 +38,12 @@
24835 */
24836 u64 cper_next_record_id(void)
24837 {
24838 - static atomic64_t seq;
24839 + static atomic64_unchecked_t seq;
24840
24841 - if (!atomic64_read(&seq))
24842 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
24843 + if (!atomic64_read_unchecked(&seq))
24844 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
24845
24846 - return atomic64_inc_return(&seq);
24847 + return atomic64_inc_return_unchecked(&seq);
24848 }
24849 EXPORT_SYMBOL_GPL(cper_next_record_id);
24850
24851 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
24852 index 22f918b..9fafb84 100644
24853 --- a/drivers/acpi/ec_sys.c
24854 +++ b/drivers/acpi/ec_sys.c
24855 @@ -11,6 +11,7 @@
24856 #include <linux/kernel.h>
24857 #include <linux/acpi.h>
24858 #include <linux/debugfs.h>
24859 +#include <asm/uaccess.h>
24860 #include "internal.h"
24861
24862 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
24863 @@ -39,7 +40,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
24864 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
24865 */
24866 unsigned int size = EC_SPACE_SIZE;
24867 - u8 *data = (u8 *) buf;
24868 + u8 data;
24869 loff_t init_off = *off;
24870 int err = 0;
24871
24872 @@ -52,9 +53,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
24873 size = count;
24874
24875 while (size) {
24876 - err = ec_read(*off, &data[*off - init_off]);
24877 + err = ec_read(*off, &data);
24878 if (err)
24879 return err;
24880 + if (put_user(data, &buf[*off - init_off]))
24881 + return -EFAULT;
24882 *off += 1;
24883 size--;
24884 }
24885 @@ -70,7 +73,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
24886
24887 unsigned int size = count;
24888 loff_t init_off = *off;
24889 - u8 *data = (u8 *) buf;
24890 int err = 0;
24891
24892 if (*off >= EC_SPACE_SIZE)
24893 @@ -81,7 +83,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
24894 }
24895
24896 while (size) {
24897 - u8 byte_write = data[*off - init_off];
24898 + u8 byte_write;
24899 + if (get_user(byte_write, &buf[*off - init_off]))
24900 + return -EFAULT;
24901 err = ec_write(*off, byte_write);
24902 if (err)
24903 return err;
24904 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
24905 index f5f9869..da87aeb 100644
24906 --- a/drivers/acpi/proc.c
24907 +++ b/drivers/acpi/proc.c
24908 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct file *file,
24909 size_t count, loff_t * ppos)
24910 {
24911 struct list_head *node, *next;
24912 - char strbuf[5];
24913 - char str[5] = "";
24914 - unsigned int len = count;
24915 + char strbuf[5] = {0};
24916
24917 - if (len > 4)
24918 - len = 4;
24919 - if (len < 0)
24920 + if (count > 4)
24921 + count = 4;
24922 + if (copy_from_user(strbuf, buffer, count))
24923 return -EFAULT;
24924 -
24925 - if (copy_from_user(strbuf, buffer, len))
24926 - return -EFAULT;
24927 - strbuf[len] = '\0';
24928 - sscanf(strbuf, "%s", str);
24929 + strbuf[count] = '\0';
24930
24931 mutex_lock(&acpi_device_lock);
24932 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24933 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct file *file,
24934 if (!dev->wakeup.flags.valid)
24935 continue;
24936
24937 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
24938 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24939 if (device_can_wakeup(&dev->dev)) {
24940 bool enable = !device_may_wakeup(&dev->dev);
24941 device_set_wakeup_enable(&dev->dev, enable);
24942 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
24943 index a4e0f1b..9793b28 100644
24944 --- a/drivers/acpi/processor_driver.c
24945 +++ b/drivers/acpi/processor_driver.c
24946 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
24947 return 0;
24948 #endif
24949
24950 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24951 + BUG_ON(pr->id >= nr_cpu_ids);
24952
24953 /*
24954 * Buggy BIOS check
24955 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
24956 index 4a3a5ae..cbee192 100644
24957 --- a/drivers/ata/libata-core.c
24958 +++ b/drivers/ata/libata-core.c
24959 @@ -4733,7 +4733,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
24960 struct ata_port *ap;
24961 unsigned int tag;
24962
24963 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24964 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24965 ap = qc->ap;
24966
24967 qc->flags = 0;
24968 @@ -4749,7 +4749,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
24969 struct ata_port *ap;
24970 struct ata_link *link;
24971
24972 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24973 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24974 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24975 ap = qc->ap;
24976 link = qc->dev->link;
24977 @@ -5754,6 +5754,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
24978 return;
24979
24980 spin_lock(&lock);
24981 + pax_open_kernel();
24982
24983 for (cur = ops->inherits; cur; cur = cur->inherits) {
24984 void **inherit = (void **)cur;
24985 @@ -5767,8 +5768,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
24986 if (IS_ERR(*pp))
24987 *pp = NULL;
24988
24989 - ops->inherits = NULL;
24990 + *(struct ata_port_operations **)&ops->inherits = NULL;
24991
24992 + pax_close_kernel();
24993 spin_unlock(&lock);
24994 }
24995
24996 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
24997 index ed16fbe..fc92cb8 100644
24998 --- a/drivers/ata/libata-eh.c
24999 +++ b/drivers/ata/libata-eh.c
25000 @@ -2515,6 +2515,8 @@ void ata_eh_report(struct ata_port *ap)
25001 {
25002 struct ata_link *link;
25003
25004 + pax_track_stack();
25005 +
25006 ata_for_each_link(link, ap, HOST_FIRST)
25007 ata_eh_link_report(link);
25008 }
25009 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
25010 index 719bb73..79ce858 100644
25011 --- a/drivers/ata/pata_arasan_cf.c
25012 +++ b/drivers/ata/pata_arasan_cf.c
25013 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
25014 /* Handle platform specific quirks */
25015 if (pdata->quirk) {
25016 if (pdata->quirk & CF_BROKEN_PIO) {
25017 - ap->ops->set_piomode = NULL;
25018 + pax_open_kernel();
25019 + *(void **)&ap->ops->set_piomode = NULL;
25020 + pax_close_kernel();
25021 ap->pio_mask = 0;
25022 }
25023 if (pdata->quirk & CF_BROKEN_MWDMA)
25024 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
25025 index f9b983a..887b9d8 100644
25026 --- a/drivers/atm/adummy.c
25027 +++ b/drivers/atm/adummy.c
25028 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
25029 vcc->pop(vcc, skb);
25030 else
25031 dev_kfree_skb_any(skb);
25032 - atomic_inc(&vcc->stats->tx);
25033 + atomic_inc_unchecked(&vcc->stats->tx);
25034
25035 return 0;
25036 }
25037 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
25038 index f8f41e0..1f987dd 100644
25039 --- a/drivers/atm/ambassador.c
25040 +++ b/drivers/atm/ambassador.c
25041 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
25042 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25043
25044 // VC layer stats
25045 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25046 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25047
25048 // free the descriptor
25049 kfree (tx_descr);
25050 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25051 dump_skb ("<<<", vc, skb);
25052
25053 // VC layer stats
25054 - atomic_inc(&atm_vcc->stats->rx);
25055 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25056 __net_timestamp(skb);
25057 // end of our responsibility
25058 atm_vcc->push (atm_vcc, skb);
25059 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
25060 } else {
25061 PRINTK (KERN_INFO, "dropped over-size frame");
25062 // should we count this?
25063 - atomic_inc(&atm_vcc->stats->rx_drop);
25064 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25065 }
25066
25067 } else {
25068 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
25069 }
25070
25071 if (check_area (skb->data, skb->len)) {
25072 - atomic_inc(&atm_vcc->stats->tx_err);
25073 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25074 return -ENOMEM; // ?
25075 }
25076
25077 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
25078 index b22d71c..d6e1049 100644
25079 --- a/drivers/atm/atmtcp.c
25080 +++ b/drivers/atm/atmtcp.c
25081 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25082 if (vcc->pop) vcc->pop(vcc,skb);
25083 else dev_kfree_skb(skb);
25084 if (dev_data) return 0;
25085 - atomic_inc(&vcc->stats->tx_err);
25086 + atomic_inc_unchecked(&vcc->stats->tx_err);
25087 return -ENOLINK;
25088 }
25089 size = skb->len+sizeof(struct atmtcp_hdr);
25090 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25091 if (!new_skb) {
25092 if (vcc->pop) vcc->pop(vcc,skb);
25093 else dev_kfree_skb(skb);
25094 - atomic_inc(&vcc->stats->tx_err);
25095 + atomic_inc_unchecked(&vcc->stats->tx_err);
25096 return -ENOBUFS;
25097 }
25098 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25099 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
25100 if (vcc->pop) vcc->pop(vcc,skb);
25101 else dev_kfree_skb(skb);
25102 out_vcc->push(out_vcc,new_skb);
25103 - atomic_inc(&vcc->stats->tx);
25104 - atomic_inc(&out_vcc->stats->rx);
25105 + atomic_inc_unchecked(&vcc->stats->tx);
25106 + atomic_inc_unchecked(&out_vcc->stats->rx);
25107 return 0;
25108 }
25109
25110 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25111 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25112 read_unlock(&vcc_sklist_lock);
25113 if (!out_vcc) {
25114 - atomic_inc(&vcc->stats->tx_err);
25115 + atomic_inc_unchecked(&vcc->stats->tx_err);
25116 goto done;
25117 }
25118 skb_pull(skb,sizeof(struct atmtcp_hdr));
25119 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
25120 __net_timestamp(new_skb);
25121 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25122 out_vcc->push(out_vcc,new_skb);
25123 - atomic_inc(&vcc->stats->tx);
25124 - atomic_inc(&out_vcc->stats->rx);
25125 + atomic_inc_unchecked(&vcc->stats->tx);
25126 + atomic_inc_unchecked(&out_vcc->stats->rx);
25127 done:
25128 if (vcc->pop) vcc->pop(vcc,skb);
25129 else dev_kfree_skb(skb);
25130 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
25131 index 9307141..d8521bf 100644
25132 --- a/drivers/atm/eni.c
25133 +++ b/drivers/atm/eni.c
25134 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25135 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25136 vcc->dev->number);
25137 length = 0;
25138 - atomic_inc(&vcc->stats->rx_err);
25139 + atomic_inc_unchecked(&vcc->stats->rx_err);
25140 }
25141 else {
25142 length = ATM_CELL_SIZE-1; /* no HEC */
25143 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25144 size);
25145 }
25146 eff = length = 0;
25147 - atomic_inc(&vcc->stats->rx_err);
25148 + atomic_inc_unchecked(&vcc->stats->rx_err);
25149 }
25150 else {
25151 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25152 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25153 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25154 vcc->dev->number,vcc->vci,length,size << 2,descr);
25155 length = eff = 0;
25156 - atomic_inc(&vcc->stats->rx_err);
25157 + atomic_inc_unchecked(&vcc->stats->rx_err);
25158 }
25159 }
25160 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25161 @@ -771,7 +771,7 @@ rx_dequeued++;
25162 vcc->push(vcc,skb);
25163 pushed++;
25164 }
25165 - atomic_inc(&vcc->stats->rx);
25166 + atomic_inc_unchecked(&vcc->stats->rx);
25167 }
25168 wake_up(&eni_dev->rx_wait);
25169 }
25170 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *dev)
25171 PCI_DMA_TODEVICE);
25172 if (vcc->pop) vcc->pop(vcc,skb);
25173 else dev_kfree_skb_irq(skb);
25174 - atomic_inc(&vcc->stats->tx);
25175 + atomic_inc_unchecked(&vcc->stats->tx);
25176 wake_up(&eni_dev->tx_wait);
25177 dma_complete++;
25178 }
25179 @@ -1568,7 +1568,7 @@ tx_complete++;
25180 /*--------------------------------- entries ---------------------------------*/
25181
25182
25183 -static const char *media_name[] __devinitdata = {
25184 +static const char *media_name[] __devinitconst = {
25185 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
25186 "UTP", "05?", "06?", "07?", /* 4- 7 */
25187 "TAXI","09?", "10?", "11?", /* 8-11 */
25188 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
25189 index 5072f8a..fa52520 100644
25190 --- a/drivers/atm/firestream.c
25191 +++ b/drivers/atm/firestream.c
25192 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
25193 }
25194 }
25195
25196 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25197 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25198
25199 fs_dprintk (FS_DEBUG_TXMEM, "i");
25200 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25201 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25202 #endif
25203 skb_put (skb, qe->p1 & 0xffff);
25204 ATM_SKB(skb)->vcc = atm_vcc;
25205 - atomic_inc(&atm_vcc->stats->rx);
25206 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25207 __net_timestamp(skb);
25208 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25209 atm_vcc->push (atm_vcc, skb);
25210 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
25211 kfree (pe);
25212 }
25213 if (atm_vcc)
25214 - atomic_inc(&atm_vcc->stats->rx_drop);
25215 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25216 break;
25217 case 0x1f: /* Reassembly abort: no buffers. */
25218 /* Silently increment error counter. */
25219 if (atm_vcc)
25220 - atomic_inc(&atm_vcc->stats->rx_drop);
25221 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25222 break;
25223 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25224 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25225 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
25226 index 361f5ae..7fc552d 100644
25227 --- a/drivers/atm/fore200e.c
25228 +++ b/drivers/atm/fore200e.c
25229 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
25230 #endif
25231 /* check error condition */
25232 if (*entry->status & STATUS_ERROR)
25233 - atomic_inc(&vcc->stats->tx_err);
25234 + atomic_inc_unchecked(&vcc->stats->tx_err);
25235 else
25236 - atomic_inc(&vcc->stats->tx);
25237 + atomic_inc_unchecked(&vcc->stats->tx);
25238 }
25239 }
25240
25241 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25242 if (skb == NULL) {
25243 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25244
25245 - atomic_inc(&vcc->stats->rx_drop);
25246 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25247 return -ENOMEM;
25248 }
25249
25250 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
25251
25252 dev_kfree_skb_any(skb);
25253
25254 - atomic_inc(&vcc->stats->rx_drop);
25255 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25256 return -ENOMEM;
25257 }
25258
25259 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25260
25261 vcc->push(vcc, skb);
25262 - atomic_inc(&vcc->stats->rx);
25263 + atomic_inc_unchecked(&vcc->stats->rx);
25264
25265 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25266
25267 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
25268 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25269 fore200e->atm_dev->number,
25270 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25271 - atomic_inc(&vcc->stats->rx_err);
25272 + atomic_inc_unchecked(&vcc->stats->rx_err);
25273 }
25274 }
25275
25276 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
25277 goto retry_here;
25278 }
25279
25280 - atomic_inc(&vcc->stats->tx_err);
25281 + atomic_inc_unchecked(&vcc->stats->tx_err);
25282
25283 fore200e->tx_sat++;
25284 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25285 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
25286 index 9a51df4..f3bb5f8 100644
25287 --- a/drivers/atm/he.c
25288 +++ b/drivers/atm/he.c
25289 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25290
25291 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25292 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25293 - atomic_inc(&vcc->stats->rx_drop);
25294 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25295 goto return_host_buffers;
25296 }
25297
25298 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25299 RBRQ_LEN_ERR(he_dev->rbrq_head)
25300 ? "LEN_ERR" : "",
25301 vcc->vpi, vcc->vci);
25302 - atomic_inc(&vcc->stats->rx_err);
25303 + atomic_inc_unchecked(&vcc->stats->rx_err);
25304 goto return_host_buffers;
25305 }
25306
25307 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
25308 vcc->push(vcc, skb);
25309 spin_lock(&he_dev->global_lock);
25310
25311 - atomic_inc(&vcc->stats->rx);
25312 + atomic_inc_unchecked(&vcc->stats->rx);
25313
25314 return_host_buffers:
25315 ++pdus_assembled;
25316 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
25317 tpd->vcc->pop(tpd->vcc, tpd->skb);
25318 else
25319 dev_kfree_skb_any(tpd->skb);
25320 - atomic_inc(&tpd->vcc->stats->tx_err);
25321 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25322 }
25323 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25324 return;
25325 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25326 vcc->pop(vcc, skb);
25327 else
25328 dev_kfree_skb_any(skb);
25329 - atomic_inc(&vcc->stats->tx_err);
25330 + atomic_inc_unchecked(&vcc->stats->tx_err);
25331 return -EINVAL;
25332 }
25333
25334 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25335 vcc->pop(vcc, skb);
25336 else
25337 dev_kfree_skb_any(skb);
25338 - atomic_inc(&vcc->stats->tx_err);
25339 + atomic_inc_unchecked(&vcc->stats->tx_err);
25340 return -EINVAL;
25341 }
25342 #endif
25343 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25344 vcc->pop(vcc, skb);
25345 else
25346 dev_kfree_skb_any(skb);
25347 - atomic_inc(&vcc->stats->tx_err);
25348 + atomic_inc_unchecked(&vcc->stats->tx_err);
25349 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25350 return -ENOMEM;
25351 }
25352 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25353 vcc->pop(vcc, skb);
25354 else
25355 dev_kfree_skb_any(skb);
25356 - atomic_inc(&vcc->stats->tx_err);
25357 + atomic_inc_unchecked(&vcc->stats->tx_err);
25358 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25359 return -ENOMEM;
25360 }
25361 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
25362 __enqueue_tpd(he_dev, tpd, cid);
25363 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25364
25365 - atomic_inc(&vcc->stats->tx);
25366 + atomic_inc_unchecked(&vcc->stats->tx);
25367
25368 return 0;
25369 }
25370 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
25371 index b812103..e391a49 100644
25372 --- a/drivers/atm/horizon.c
25373 +++ b/drivers/atm/horizon.c
25374 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
25375 {
25376 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25377 // VC layer stats
25378 - atomic_inc(&vcc->stats->rx);
25379 + atomic_inc_unchecked(&vcc->stats->rx);
25380 __net_timestamp(skb);
25381 // end of our responsibility
25382 vcc->push (vcc, skb);
25383 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
25384 dev->tx_iovec = NULL;
25385
25386 // VC layer stats
25387 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25388 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25389
25390 // free the skb
25391 hrz_kfree_skb (skb);
25392 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
25393 index db06f34..dcebb61 100644
25394 --- a/drivers/atm/idt77252.c
25395 +++ b/drivers/atm/idt77252.c
25396 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
25397 else
25398 dev_kfree_skb(skb);
25399
25400 - atomic_inc(&vcc->stats->tx);
25401 + atomic_inc_unchecked(&vcc->stats->tx);
25402 }
25403
25404 atomic_dec(&scq->used);
25405 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25406 if ((sb = dev_alloc_skb(64)) == NULL) {
25407 printk("%s: Can't allocate buffers for aal0.\n",
25408 card->name);
25409 - atomic_add(i, &vcc->stats->rx_drop);
25410 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25411 break;
25412 }
25413 if (!atm_charge(vcc, sb->truesize)) {
25414 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25415 card->name);
25416 - atomic_add(i - 1, &vcc->stats->rx_drop);
25417 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25418 dev_kfree_skb(sb);
25419 break;
25420 }
25421 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25422 ATM_SKB(sb)->vcc = vcc;
25423 __net_timestamp(sb);
25424 vcc->push(vcc, sb);
25425 - atomic_inc(&vcc->stats->rx);
25426 + atomic_inc_unchecked(&vcc->stats->rx);
25427
25428 cell += ATM_CELL_PAYLOAD;
25429 }
25430 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25431 "(CDC: %08x)\n",
25432 card->name, len, rpp->len, readl(SAR_REG_CDC));
25433 recycle_rx_pool_skb(card, rpp);
25434 - atomic_inc(&vcc->stats->rx_err);
25435 + atomic_inc_unchecked(&vcc->stats->rx_err);
25436 return;
25437 }
25438 if (stat & SAR_RSQE_CRC) {
25439 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25440 recycle_rx_pool_skb(card, rpp);
25441 - atomic_inc(&vcc->stats->rx_err);
25442 + atomic_inc_unchecked(&vcc->stats->rx_err);
25443 return;
25444 }
25445 if (skb_queue_len(&rpp->queue) > 1) {
25446 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25447 RXPRINTK("%s: Can't alloc RX skb.\n",
25448 card->name);
25449 recycle_rx_pool_skb(card, rpp);
25450 - atomic_inc(&vcc->stats->rx_err);
25451 + atomic_inc_unchecked(&vcc->stats->rx_err);
25452 return;
25453 }
25454 if (!atm_charge(vcc, skb->truesize)) {
25455 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25456 __net_timestamp(skb);
25457
25458 vcc->push(vcc, skb);
25459 - atomic_inc(&vcc->stats->rx);
25460 + atomic_inc_unchecked(&vcc->stats->rx);
25461
25462 return;
25463 }
25464 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
25465 __net_timestamp(skb);
25466
25467 vcc->push(vcc, skb);
25468 - atomic_inc(&vcc->stats->rx);
25469 + atomic_inc_unchecked(&vcc->stats->rx);
25470
25471 if (skb->truesize > SAR_FB_SIZE_3)
25472 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25473 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
25474 if (vcc->qos.aal != ATM_AAL0) {
25475 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25476 card->name, vpi, vci);
25477 - atomic_inc(&vcc->stats->rx_drop);
25478 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25479 goto drop;
25480 }
25481
25482 if ((sb = dev_alloc_skb(64)) == NULL) {
25483 printk("%s: Can't allocate buffers for AAL0.\n",
25484 card->name);
25485 - atomic_inc(&vcc->stats->rx_err);
25486 + atomic_inc_unchecked(&vcc->stats->rx_err);
25487 goto drop;
25488 }
25489
25490 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
25491 ATM_SKB(sb)->vcc = vcc;
25492 __net_timestamp(sb);
25493 vcc->push(vcc, sb);
25494 - atomic_inc(&vcc->stats->rx);
25495 + atomic_inc_unchecked(&vcc->stats->rx);
25496
25497 drop:
25498 skb_pull(queue, 64);
25499 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25500
25501 if (vc == NULL) {
25502 printk("%s: NULL connection in send().\n", card->name);
25503 - atomic_inc(&vcc->stats->tx_err);
25504 + atomic_inc_unchecked(&vcc->stats->tx_err);
25505 dev_kfree_skb(skb);
25506 return -EINVAL;
25507 }
25508 if (!test_bit(VCF_TX, &vc->flags)) {
25509 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25510 - atomic_inc(&vcc->stats->tx_err);
25511 + atomic_inc_unchecked(&vcc->stats->tx_err);
25512 dev_kfree_skb(skb);
25513 return -EINVAL;
25514 }
25515 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25516 break;
25517 default:
25518 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25519 - atomic_inc(&vcc->stats->tx_err);
25520 + atomic_inc_unchecked(&vcc->stats->tx_err);
25521 dev_kfree_skb(skb);
25522 return -EINVAL;
25523 }
25524
25525 if (skb_shinfo(skb)->nr_frags != 0) {
25526 printk("%s: No scatter-gather yet.\n", card->name);
25527 - atomic_inc(&vcc->stats->tx_err);
25528 + atomic_inc_unchecked(&vcc->stats->tx_err);
25529 dev_kfree_skb(skb);
25530 return -EINVAL;
25531 }
25532 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
25533
25534 err = queue_skb(card, vc, skb, oam);
25535 if (err) {
25536 - atomic_inc(&vcc->stats->tx_err);
25537 + atomic_inc_unchecked(&vcc->stats->tx_err);
25538 dev_kfree_skb(skb);
25539 return err;
25540 }
25541 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
25542 skb = dev_alloc_skb(64);
25543 if (!skb) {
25544 printk("%s: Out of memory in send_oam().\n", card->name);
25545 - atomic_inc(&vcc->stats->tx_err);
25546 + atomic_inc_unchecked(&vcc->stats->tx_err);
25547 return -ENOMEM;
25548 }
25549 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25550 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
25551 index cb90f7a..bd33566 100644
25552 --- a/drivers/atm/iphase.c
25553 +++ b/drivers/atm/iphase.c
25554 @@ -1121,7 +1121,7 @@ static int rx_pkt(struct atm_dev *dev)
25555 status = (u_short) (buf_desc_ptr->desc_mode);
25556 if (status & (RX_CER | RX_PTE | RX_OFL))
25557 {
25558 - atomic_inc(&vcc->stats->rx_err);
25559 + atomic_inc_unchecked(&vcc->stats->rx_err);
25560 IF_ERR(printk("IA: bad packet, dropping it");)
25561 if (status & RX_CER) {
25562 IF_ERR(printk(" cause: packet CRC error\n");)
25563 @@ -1144,7 +1144,7 @@ static int rx_pkt(struct atm_dev *dev)
25564 len = dma_addr - buf_addr;
25565 if (len > iadev->rx_buf_sz) {
25566 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25567 - atomic_inc(&vcc->stats->rx_err);
25568 + atomic_inc_unchecked(&vcc->stats->rx_err);
25569 goto out_free_desc;
25570 }
25571
25572 @@ -1294,7 +1294,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25573 ia_vcc = INPH_IA_VCC(vcc);
25574 if (ia_vcc == NULL)
25575 {
25576 - atomic_inc(&vcc->stats->rx_err);
25577 + atomic_inc_unchecked(&vcc->stats->rx_err);
25578 dev_kfree_skb_any(skb);
25579 atm_return(vcc, atm_guess_pdu2truesize(len));
25580 goto INCR_DLE;
25581 @@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25582 if ((length > iadev->rx_buf_sz) || (length >
25583 (skb->len - sizeof(struct cpcs_trailer))))
25584 {
25585 - atomic_inc(&vcc->stats->rx_err);
25586 + atomic_inc_unchecked(&vcc->stats->rx_err);
25587 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25588 length, skb->len);)
25589 dev_kfree_skb_any(skb);
25590 @@ -1322,7 +1322,7 @@ static void rx_dle_intr(struct atm_dev *dev)
25591
25592 IF_RX(printk("rx_dle_intr: skb push");)
25593 vcc->push(vcc,skb);
25594 - atomic_inc(&vcc->stats->rx);
25595 + atomic_inc_unchecked(&vcc->stats->rx);
25596 iadev->rx_pkt_cnt++;
25597 }
25598 INCR_DLE:
25599 @@ -2802,15 +2802,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
25600 {
25601 struct k_sonet_stats *stats;
25602 stats = &PRIV(_ia_dev[board])->sonet_stats;
25603 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25604 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25605 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25606 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25607 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25608 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25609 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25610 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25611 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25612 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25613 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25614 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25615 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25616 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25617 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25618 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25619 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25620 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25621 }
25622 ia_cmds.status = 0;
25623 break;
25624 @@ -2915,7 +2915,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25625 if ((desc == 0) || (desc > iadev->num_tx_desc))
25626 {
25627 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25628 - atomic_inc(&vcc->stats->tx);
25629 + atomic_inc_unchecked(&vcc->stats->tx);
25630 if (vcc->pop)
25631 vcc->pop(vcc, skb);
25632 else
25633 @@ -3020,14 +3020,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
25634 ATM_DESC(skb) = vcc->vci;
25635 skb_queue_tail(&iadev->tx_dma_q, skb);
25636
25637 - atomic_inc(&vcc->stats->tx);
25638 + atomic_inc_unchecked(&vcc->stats->tx);
25639 iadev->tx_pkt_cnt++;
25640 /* Increment transaction counter */
25641 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25642
25643 #if 0
25644 /* add flow control logic */
25645 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25646 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25647 if (iavcc->vc_desc_cnt > 10) {
25648 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25649 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25650 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
25651 index e828c54..ae83976 100644
25652 --- a/drivers/atm/lanai.c
25653 +++ b/drivers/atm/lanai.c
25654 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
25655 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25656 lanai_endtx(lanai, lvcc);
25657 lanai_free_skb(lvcc->tx.atmvcc, skb);
25658 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25659 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25660 }
25661
25662 /* Try to fill the buffer - don't call unless there is backlog */
25663 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
25664 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25665 __net_timestamp(skb);
25666 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25667 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25668 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25669 out:
25670 lvcc->rx.buf.ptr = end;
25671 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25672 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25673 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25674 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25675 lanai->stats.service_rxnotaal5++;
25676 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25677 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25678 return 0;
25679 }
25680 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25681 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25682 int bytes;
25683 read_unlock(&vcc_sklist_lock);
25684 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25685 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25686 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25687 lvcc->stats.x.aal5.service_trash++;
25688 bytes = (SERVICE_GET_END(s) * 16) -
25689 (((unsigned long) lvcc->rx.buf.ptr) -
25690 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25691 }
25692 if (s & SERVICE_STREAM) {
25693 read_unlock(&vcc_sklist_lock);
25694 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25695 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25696 lvcc->stats.x.aal5.service_stream++;
25697 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25698 "PDU on VCI %d!\n", lanai->number, vci);
25699 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
25700 return 0;
25701 }
25702 DPRINTK("got rx crc error on vci %d\n", vci);
25703 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25704 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25705 lvcc->stats.x.aal5.service_rxcrc++;
25706 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25707 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25708 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
25709 index 1c70c45..300718d 100644
25710 --- a/drivers/atm/nicstar.c
25711 +++ b/drivers/atm/nicstar.c
25712 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25713 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
25714 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
25715 card->index);
25716 - atomic_inc(&vcc->stats->tx_err);
25717 + atomic_inc_unchecked(&vcc->stats->tx_err);
25718 dev_kfree_skb_any(skb);
25719 return -EINVAL;
25720 }
25721 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25722 if (!vc->tx) {
25723 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
25724 card->index);
25725 - atomic_inc(&vcc->stats->tx_err);
25726 + atomic_inc_unchecked(&vcc->stats->tx_err);
25727 dev_kfree_skb_any(skb);
25728 return -EINVAL;
25729 }
25730 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25731 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
25732 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
25733 card->index);
25734 - atomic_inc(&vcc->stats->tx_err);
25735 + atomic_inc_unchecked(&vcc->stats->tx_err);
25736 dev_kfree_skb_any(skb);
25737 return -EINVAL;
25738 }
25739
25740 if (skb_shinfo(skb)->nr_frags != 0) {
25741 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25742 - atomic_inc(&vcc->stats->tx_err);
25743 + atomic_inc_unchecked(&vcc->stats->tx_err);
25744 dev_kfree_skb_any(skb);
25745 return -EINVAL;
25746 }
25747 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
25748 }
25749
25750 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
25751 - atomic_inc(&vcc->stats->tx_err);
25752 + atomic_inc_unchecked(&vcc->stats->tx_err);
25753 dev_kfree_skb_any(skb);
25754 return -EIO;
25755 }
25756 - atomic_inc(&vcc->stats->tx);
25757 + atomic_inc_unchecked(&vcc->stats->tx);
25758
25759 return 0;
25760 }
25761 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25762 printk
25763 ("nicstar%d: Can't allocate buffers for aal0.\n",
25764 card->index);
25765 - atomic_add(i, &vcc->stats->rx_drop);
25766 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25767 break;
25768 }
25769 if (!atm_charge(vcc, sb->truesize)) {
25770 RXPRINTK
25771 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
25772 card->index);
25773 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25774 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
25775 dev_kfree_skb_any(sb);
25776 break;
25777 }
25778 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25779 ATM_SKB(sb)->vcc = vcc;
25780 __net_timestamp(sb);
25781 vcc->push(vcc, sb);
25782 - atomic_inc(&vcc->stats->rx);
25783 + atomic_inc_unchecked(&vcc->stats->rx);
25784 cell += ATM_CELL_PAYLOAD;
25785 }
25786
25787 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25788 if (iovb == NULL) {
25789 printk("nicstar%d: Out of iovec buffers.\n",
25790 card->index);
25791 - atomic_inc(&vcc->stats->rx_drop);
25792 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25793 recycle_rx_buf(card, skb);
25794 return;
25795 }
25796 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25797 small or large buffer itself. */
25798 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
25799 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25800 - atomic_inc(&vcc->stats->rx_err);
25801 + atomic_inc_unchecked(&vcc->stats->rx_err);
25802 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25803 NS_MAX_IOVECS);
25804 NS_PRV_IOVCNT(iovb) = 0;
25805 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25806 ("nicstar%d: Expected a small buffer, and this is not one.\n",
25807 card->index);
25808 which_list(card, skb);
25809 - atomic_inc(&vcc->stats->rx_err);
25810 + atomic_inc_unchecked(&vcc->stats->rx_err);
25811 recycle_rx_buf(card, skb);
25812 vc->rx_iov = NULL;
25813 recycle_iov_buf(card, iovb);
25814 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25815 ("nicstar%d: Expected a large buffer, and this is not one.\n",
25816 card->index);
25817 which_list(card, skb);
25818 - atomic_inc(&vcc->stats->rx_err);
25819 + atomic_inc_unchecked(&vcc->stats->rx_err);
25820 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25821 NS_PRV_IOVCNT(iovb));
25822 vc->rx_iov = NULL;
25823 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25824 printk(" - PDU size mismatch.\n");
25825 else
25826 printk(".\n");
25827 - atomic_inc(&vcc->stats->rx_err);
25828 + atomic_inc_unchecked(&vcc->stats->rx_err);
25829 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
25830 NS_PRV_IOVCNT(iovb));
25831 vc->rx_iov = NULL;
25832 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25833 /* skb points to a small buffer */
25834 if (!atm_charge(vcc, skb->truesize)) {
25835 push_rxbufs(card, skb);
25836 - atomic_inc(&vcc->stats->rx_drop);
25837 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25838 } else {
25839 skb_put(skb, len);
25840 dequeue_sm_buf(card, skb);
25841 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25842 ATM_SKB(skb)->vcc = vcc;
25843 __net_timestamp(skb);
25844 vcc->push(vcc, skb);
25845 - atomic_inc(&vcc->stats->rx);
25846 + atomic_inc_unchecked(&vcc->stats->rx);
25847 }
25848 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
25849 struct sk_buff *sb;
25850 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25851 if (len <= NS_SMBUFSIZE) {
25852 if (!atm_charge(vcc, sb->truesize)) {
25853 push_rxbufs(card, sb);
25854 - atomic_inc(&vcc->stats->rx_drop);
25855 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25856 } else {
25857 skb_put(sb, len);
25858 dequeue_sm_buf(card, sb);
25859 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25860 ATM_SKB(sb)->vcc = vcc;
25861 __net_timestamp(sb);
25862 vcc->push(vcc, sb);
25863 - atomic_inc(&vcc->stats->rx);
25864 + atomic_inc_unchecked(&vcc->stats->rx);
25865 }
25866
25867 push_rxbufs(card, skb);
25868 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25869
25870 if (!atm_charge(vcc, skb->truesize)) {
25871 push_rxbufs(card, skb);
25872 - atomic_inc(&vcc->stats->rx_drop);
25873 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25874 } else {
25875 dequeue_lg_buf(card, skb);
25876 #ifdef NS_USE_DESTRUCTORS
25877 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25878 ATM_SKB(skb)->vcc = vcc;
25879 __net_timestamp(skb);
25880 vcc->push(vcc, skb);
25881 - atomic_inc(&vcc->stats->rx);
25882 + atomic_inc_unchecked(&vcc->stats->rx);
25883 }
25884
25885 push_rxbufs(card, sb);
25886 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25887 printk
25888 ("nicstar%d: Out of huge buffers.\n",
25889 card->index);
25890 - atomic_inc(&vcc->stats->rx_drop);
25891 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25892 recycle_iovec_rx_bufs(card,
25893 (struct iovec *)
25894 iovb->data,
25895 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25896 card->hbpool.count++;
25897 } else
25898 dev_kfree_skb_any(hb);
25899 - atomic_inc(&vcc->stats->rx_drop);
25900 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25901 } else {
25902 /* Copy the small buffer to the huge buffer */
25903 sb = (struct sk_buff *)iov->iov_base;
25904 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
25905 #endif /* NS_USE_DESTRUCTORS */
25906 __net_timestamp(hb);
25907 vcc->push(vcc, hb);
25908 - atomic_inc(&vcc->stats->rx);
25909 + atomic_inc_unchecked(&vcc->stats->rx);
25910 }
25911 }
25912
25913 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
25914 index 5d1d076..4f31f42 100644
25915 --- a/drivers/atm/solos-pci.c
25916 +++ b/drivers/atm/solos-pci.c
25917 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
25918 }
25919 atm_charge(vcc, skb->truesize);
25920 vcc->push(vcc, skb);
25921 - atomic_inc(&vcc->stats->rx);
25922 + atomic_inc_unchecked(&vcc->stats->rx);
25923 break;
25924
25925 case PKT_STATUS:
25926 @@ -899,6 +899,8 @@ static int print_buffer(struct sk_buff *buf)
25927 char msg[500];
25928 char item[10];
25929
25930 + pax_track_stack();
25931 +
25932 len = buf->len;
25933 for (i = 0; i < len; i++){
25934 if(i % 8 == 0)
25935 @@ -1008,7 +1010,7 @@ static uint32_t fpga_tx(struct solos_card *card)
25936 vcc = SKB_CB(oldskb)->vcc;
25937
25938 if (vcc) {
25939 - atomic_inc(&vcc->stats->tx);
25940 + atomic_inc_unchecked(&vcc->stats->tx);
25941 solos_pop(vcc, oldskb);
25942 } else
25943 dev_kfree_skb_irq(oldskb);
25944 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
25945 index 90f1ccc..04c4a1e 100644
25946 --- a/drivers/atm/suni.c
25947 +++ b/drivers/atm/suni.c
25948 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25949
25950
25951 #define ADD_LIMITED(s,v) \
25952 - atomic_add((v),&stats->s); \
25953 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25954 + atomic_add_unchecked((v),&stats->s); \
25955 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25956
25957
25958 static void suni_hz(unsigned long from_timer)
25959 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
25960 index 5120a96..e2572bd 100644
25961 --- a/drivers/atm/uPD98402.c
25962 +++ b/drivers/atm/uPD98402.c
25963 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
25964 struct sonet_stats tmp;
25965 int error = 0;
25966
25967 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25968 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25969 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25970 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25971 if (zero && !error) {
25972 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
25973
25974
25975 #define ADD_LIMITED(s,v) \
25976 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25977 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25978 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25979 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25980 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25981 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25982
25983
25984 static void stat_event(struct atm_dev *dev)
25985 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
25986 if (reason & uPD98402_INT_PFM) stat_event(dev);
25987 if (reason & uPD98402_INT_PCO) {
25988 (void) GET(PCOCR); /* clear interrupt cause */
25989 - atomic_add(GET(HECCT),
25990 + atomic_add_unchecked(GET(HECCT),
25991 &PRIV(dev)->sonet_stats.uncorr_hcs);
25992 }
25993 if ((reason & uPD98402_INT_RFO) &&
25994 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
25995 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25996 uPD98402_INT_LOS),PIMR); /* enable them */
25997 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25998 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25999 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26000 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26001 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26002 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26003 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26004 return 0;
26005 }
26006
26007 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
26008 index d889f56..17eb71e 100644
26009 --- a/drivers/atm/zatm.c
26010 +++ b/drivers/atm/zatm.c
26011 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26012 }
26013 if (!size) {
26014 dev_kfree_skb_irq(skb);
26015 - if (vcc) atomic_inc(&vcc->stats->rx_err);
26016 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26017 continue;
26018 }
26019 if (!atm_charge(vcc,skb->truesize)) {
26020 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
26021 skb->len = size;
26022 ATM_SKB(skb)->vcc = vcc;
26023 vcc->push(vcc,skb);
26024 - atomic_inc(&vcc->stats->rx);
26025 + atomic_inc_unchecked(&vcc->stats->rx);
26026 }
26027 zout(pos & 0xffff,MTA(mbx));
26028 #if 0 /* probably a stupid idea */
26029 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
26030 skb_queue_head(&zatm_vcc->backlog,skb);
26031 break;
26032 }
26033 - atomic_inc(&vcc->stats->tx);
26034 + atomic_inc_unchecked(&vcc->stats->tx);
26035 wake_up(&zatm_vcc->tx_wait);
26036 }
26037
26038 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
26039 index a4760e0..51283cf 100644
26040 --- a/drivers/base/devtmpfs.c
26041 +++ b/drivers/base/devtmpfs.c
26042 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
26043 if (!thread)
26044 return 0;
26045
26046 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
26047 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
26048 if (err)
26049 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
26050 else
26051 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
26052 index 84f7c7d..37cfd87 100644
26053 --- a/drivers/base/power/wakeup.c
26054 +++ b/drivers/base/power/wakeup.c
26055 @@ -29,14 +29,14 @@ bool events_check_enabled;
26056 * They need to be modified together atomically, so it's better to use one
26057 * atomic variable to hold them both.
26058 */
26059 -static atomic_t combined_event_count = ATOMIC_INIT(0);
26060 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
26061
26062 #define IN_PROGRESS_BITS (sizeof(int) * 4)
26063 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
26064
26065 static void split_counters(unsigned int *cnt, unsigned int *inpr)
26066 {
26067 - unsigned int comb = atomic_read(&combined_event_count);
26068 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
26069
26070 *cnt = (comb >> IN_PROGRESS_BITS);
26071 *inpr = comb & MAX_IN_PROGRESS;
26072 @@ -350,7 +350,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
26073 ws->last_time = ktime_get();
26074
26075 /* Increment the counter of events in progress. */
26076 - atomic_inc(&combined_event_count);
26077 + atomic_inc_unchecked(&combined_event_count);
26078 }
26079
26080 /**
26081 @@ -440,7 +440,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
26082 * Increment the counter of registered wakeup events and decrement the
26083 * couter of wakeup events in progress simultaneously.
26084 */
26085 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
26086 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
26087 }
26088
26089 /**
26090 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
26091 index e086fbb..398e1fe 100644
26092 --- a/drivers/block/DAC960.c
26093 +++ b/drivers/block/DAC960.c
26094 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
26095 unsigned long flags;
26096 int Channel, TargetID;
26097
26098 + pax_track_stack();
26099 +
26100 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26101 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26102 sizeof(DAC960_SCSI_Inquiry_T) +
26103 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
26104 index c2f9b3e..5911988 100644
26105 --- a/drivers/block/cciss.c
26106 +++ b/drivers/block/cciss.c
26107 @@ -1179,6 +1179,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
26108 int err;
26109 u32 cp;
26110
26111 + memset(&arg64, 0, sizeof(arg64));
26112 +
26113 err = 0;
26114 err |=
26115 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26116 @@ -2986,7 +2988,7 @@ static void start_io(ctlr_info_t *h)
26117 while (!list_empty(&h->reqQ)) {
26118 c = list_entry(h->reqQ.next, CommandList_struct, list);
26119 /* can't do anything if fifo is full */
26120 - if ((h->access.fifo_full(h))) {
26121 + if ((h->access->fifo_full(h))) {
26122 dev_warn(&h->pdev->dev, "fifo full\n");
26123 break;
26124 }
26125 @@ -2996,7 +2998,7 @@ static void start_io(ctlr_info_t *h)
26126 h->Qdepth--;
26127
26128 /* Tell the controller execute command */
26129 - h->access.submit_command(h, c);
26130 + h->access->submit_command(h, c);
26131
26132 /* Put job onto the completed Q */
26133 addQ(&h->cmpQ, c);
26134 @@ -3422,17 +3424,17 @@ startio:
26135
26136 static inline unsigned long get_next_completion(ctlr_info_t *h)
26137 {
26138 - return h->access.command_completed(h);
26139 + return h->access->command_completed(h);
26140 }
26141
26142 static inline int interrupt_pending(ctlr_info_t *h)
26143 {
26144 - return h->access.intr_pending(h);
26145 + return h->access->intr_pending(h);
26146 }
26147
26148 static inline long interrupt_not_for_us(ctlr_info_t *h)
26149 {
26150 - return ((h->access.intr_pending(h) == 0) ||
26151 + return ((h->access->intr_pending(h) == 0) ||
26152 (h->interrupts_enabled == 0));
26153 }
26154
26155 @@ -3465,7 +3467,7 @@ static inline u32 next_command(ctlr_info_t *h)
26156 u32 a;
26157
26158 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
26159 - return h->access.command_completed(h);
26160 + return h->access->command_completed(h);
26161
26162 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
26163 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
26164 @@ -4020,7 +4022,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
26165 trans_support & CFGTBL_Trans_use_short_tags);
26166
26167 /* Change the access methods to the performant access methods */
26168 - h->access = SA5_performant_access;
26169 + h->access = &SA5_performant_access;
26170 h->transMethod = CFGTBL_Trans_Performant;
26171
26172 return;
26173 @@ -4292,7 +4294,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
26174 if (prod_index < 0)
26175 return -ENODEV;
26176 h->product_name = products[prod_index].product_name;
26177 - h->access = *(products[prod_index].access);
26178 + h->access = products[prod_index].access;
26179
26180 if (cciss_board_disabled(h)) {
26181 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
26182 @@ -5009,7 +5011,7 @@ reinit_after_soft_reset:
26183 }
26184
26185 /* make sure the board interrupts are off */
26186 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26187 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26188 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
26189 if (rc)
26190 goto clean2;
26191 @@ -5061,7 +5063,7 @@ reinit_after_soft_reset:
26192 * fake ones to scoop up any residual completions.
26193 */
26194 spin_lock_irqsave(&h->lock, flags);
26195 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26196 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26197 spin_unlock_irqrestore(&h->lock, flags);
26198 free_irq(h->intr[PERF_MODE_INT], h);
26199 rc = cciss_request_irq(h, cciss_msix_discard_completions,
26200 @@ -5081,9 +5083,9 @@ reinit_after_soft_reset:
26201 dev_info(&h->pdev->dev, "Board READY.\n");
26202 dev_info(&h->pdev->dev,
26203 "Waiting for stale completions to drain.\n");
26204 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26205 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26206 msleep(10000);
26207 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26208 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26209
26210 rc = controller_reset_failed(h->cfgtable);
26211 if (rc)
26212 @@ -5106,7 +5108,7 @@ reinit_after_soft_reset:
26213 cciss_scsi_setup(h);
26214
26215 /* Turn the interrupts on so we can service requests */
26216 - h->access.set_intr_mask(h, CCISS_INTR_ON);
26217 + h->access->set_intr_mask(h, CCISS_INTR_ON);
26218
26219 /* Get the firmware version */
26220 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
26221 @@ -5178,7 +5180,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
26222 kfree(flush_buf);
26223 if (return_code != IO_OK)
26224 dev_warn(&h->pdev->dev, "Error flushing cache\n");
26225 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
26226 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
26227 free_irq(h->intr[PERF_MODE_INT], h);
26228 }
26229
26230 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
26231 index c049548..a09cb6e 100644
26232 --- a/drivers/block/cciss.h
26233 +++ b/drivers/block/cciss.h
26234 @@ -100,7 +100,7 @@ struct ctlr_info
26235 /* information about each logical volume */
26236 drive_info_struct *drv[CISS_MAX_LUN];
26237
26238 - struct access_method access;
26239 + struct access_method *access;
26240
26241 /* queue and queue Info */
26242 struct list_head reqQ;
26243 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
26244 index b2fceb5..87fec83 100644
26245 --- a/drivers/block/cpqarray.c
26246 +++ b/drivers/block/cpqarray.c
26247 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26248 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
26249 goto Enomem4;
26250 }
26251 - hba[i]->access.set_intr_mask(hba[i], 0);
26252 + hba[i]->access->set_intr_mask(hba[i], 0);
26253 if (request_irq(hba[i]->intr, do_ida_intr,
26254 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
26255 {
26256 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
26257 add_timer(&hba[i]->timer);
26258
26259 /* Enable IRQ now that spinlock and rate limit timer are set up */
26260 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26261 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
26262
26263 for(j=0; j<NWD; j++) {
26264 struct gendisk *disk = ida_gendisk[i][j];
26265 @@ -694,7 +694,7 @@ DBGINFO(
26266 for(i=0; i<NR_PRODUCTS; i++) {
26267 if (board_id == products[i].board_id) {
26268 c->product_name = products[i].product_name;
26269 - c->access = *(products[i].access);
26270 + c->access = products[i].access;
26271 break;
26272 }
26273 }
26274 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
26275 hba[ctlr]->intr = intr;
26276 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
26277 hba[ctlr]->product_name = products[j].product_name;
26278 - hba[ctlr]->access = *(products[j].access);
26279 + hba[ctlr]->access = products[j].access;
26280 hba[ctlr]->ctlr = ctlr;
26281 hba[ctlr]->board_id = board_id;
26282 hba[ctlr]->pci_dev = NULL; /* not PCI */
26283 @@ -911,6 +911,8 @@ static void do_ida_request(struct request_queue *q)
26284 struct scatterlist tmp_sg[SG_MAX];
26285 int i, dir, seg;
26286
26287 + pax_track_stack();
26288 +
26289 queue_next:
26290 creq = blk_peek_request(q);
26291 if (!creq)
26292 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
26293
26294 while((c = h->reqQ) != NULL) {
26295 /* Can't do anything if we're busy */
26296 - if (h->access.fifo_full(h) == 0)
26297 + if (h->access->fifo_full(h) == 0)
26298 return;
26299
26300 /* Get the first entry from the request Q */
26301 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
26302 h->Qdepth--;
26303
26304 /* Tell the controller to do our bidding */
26305 - h->access.submit_command(h, c);
26306 + h->access->submit_command(h, c);
26307
26308 /* Get onto the completion Q */
26309 addQ(&h->cmpQ, c);
26310 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26311 unsigned long flags;
26312 __u32 a,a1;
26313
26314 - istat = h->access.intr_pending(h);
26315 + istat = h->access->intr_pending(h);
26316 /* Is this interrupt for us? */
26317 if (istat == 0)
26318 return IRQ_NONE;
26319 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
26320 */
26321 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
26322 if (istat & FIFO_NOT_EMPTY) {
26323 - while((a = h->access.command_completed(h))) {
26324 + while((a = h->access->command_completed(h))) {
26325 a1 = a; a &= ~3;
26326 if ((c = h->cmpQ) == NULL)
26327 {
26328 @@ -1449,11 +1451,11 @@ static int sendcmd(
26329 /*
26330 * Disable interrupt
26331 */
26332 - info_p->access.set_intr_mask(info_p, 0);
26333 + info_p->access->set_intr_mask(info_p, 0);
26334 /* Make sure there is room in the command FIFO */
26335 /* Actually it should be completely empty at this time. */
26336 for (i = 200000; i > 0; i--) {
26337 - temp = info_p->access.fifo_full(info_p);
26338 + temp = info_p->access->fifo_full(info_p);
26339 if (temp != 0) {
26340 break;
26341 }
26342 @@ -1466,7 +1468,7 @@ DBG(
26343 /*
26344 * Send the cmd
26345 */
26346 - info_p->access.submit_command(info_p, c);
26347 + info_p->access->submit_command(info_p, c);
26348 complete = pollcomplete(ctlr);
26349
26350 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
26351 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t *host)
26352 * we check the new geometry. Then turn interrupts back on when
26353 * we're done.
26354 */
26355 - host->access.set_intr_mask(host, 0);
26356 + host->access->set_intr_mask(host, 0);
26357 getgeometry(ctlr);
26358 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
26359 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
26360
26361 for(i=0; i<NWD; i++) {
26362 struct gendisk *disk = ida_gendisk[ctlr][i];
26363 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
26364 /* Wait (up to 2 seconds) for a command to complete */
26365
26366 for (i = 200000; i > 0; i--) {
26367 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
26368 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
26369 if (done == 0) {
26370 udelay(10); /* a short fixed delay */
26371 } else
26372 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
26373 index be73e9d..7fbf140 100644
26374 --- a/drivers/block/cpqarray.h
26375 +++ b/drivers/block/cpqarray.h
26376 @@ -99,7 +99,7 @@ struct ctlr_info {
26377 drv_info_t drv[NWD];
26378 struct proc_dir_entry *proc;
26379
26380 - struct access_method access;
26381 + struct access_method *access;
26382
26383 cmdlist_t *reqQ;
26384 cmdlist_t *cmpQ;
26385 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
26386 index ef2ceed..c9cb18e 100644
26387 --- a/drivers/block/drbd/drbd_int.h
26388 +++ b/drivers/block/drbd/drbd_int.h
26389 @@ -737,7 +737,7 @@ struct drbd_request;
26390 struct drbd_epoch {
26391 struct list_head list;
26392 unsigned int barrier_nr;
26393 - atomic_t epoch_size; /* increased on every request added. */
26394 + atomic_unchecked_t epoch_size; /* increased on every request added. */
26395 atomic_t active; /* increased on every req. added, and dec on every finished. */
26396 unsigned long flags;
26397 };
26398 @@ -1109,7 +1109,7 @@ struct drbd_conf {
26399 void *int_dig_in;
26400 void *int_dig_vv;
26401 wait_queue_head_t seq_wait;
26402 - atomic_t packet_seq;
26403 + atomic_unchecked_t packet_seq;
26404 unsigned int peer_seq;
26405 spinlock_t peer_seq_lock;
26406 unsigned int minor;
26407 @@ -1618,30 +1618,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
26408
26409 static inline void drbd_tcp_cork(struct socket *sock)
26410 {
26411 - int __user val = 1;
26412 + int val = 1;
26413 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26414 - (char __user *)&val, sizeof(val));
26415 + (char __force_user *)&val, sizeof(val));
26416 }
26417
26418 static inline void drbd_tcp_uncork(struct socket *sock)
26419 {
26420 - int __user val = 0;
26421 + int val = 0;
26422 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
26423 - (char __user *)&val, sizeof(val));
26424 + (char __force_user *)&val, sizeof(val));
26425 }
26426
26427 static inline void drbd_tcp_nodelay(struct socket *sock)
26428 {
26429 - int __user val = 1;
26430 + int val = 1;
26431 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
26432 - (char __user *)&val, sizeof(val));
26433 + (char __force_user *)&val, sizeof(val));
26434 }
26435
26436 static inline void drbd_tcp_quickack(struct socket *sock)
26437 {
26438 - int __user val = 2;
26439 + int val = 2;
26440 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
26441 - (char __user *)&val, sizeof(val));
26442 + (char __force_user *)&val, sizeof(val));
26443 }
26444
26445 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
26446 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
26447 index 0358e55..bc33689 100644
26448 --- a/drivers/block/drbd/drbd_main.c
26449 +++ b/drivers/block/drbd/drbd_main.c
26450 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
26451 p.sector = sector;
26452 p.block_id = block_id;
26453 p.blksize = blksize;
26454 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
26455 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
26456
26457 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
26458 return false;
26459 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
26460 p.sector = cpu_to_be64(req->sector);
26461 p.block_id = (unsigned long)req;
26462 p.seq_num = cpu_to_be32(req->seq_num =
26463 - atomic_add_return(1, &mdev->packet_seq));
26464 + atomic_add_return_unchecked(1, &mdev->packet_seq));
26465
26466 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
26467
26468 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
26469 atomic_set(&mdev->unacked_cnt, 0);
26470 atomic_set(&mdev->local_cnt, 0);
26471 atomic_set(&mdev->net_cnt, 0);
26472 - atomic_set(&mdev->packet_seq, 0);
26473 + atomic_set_unchecked(&mdev->packet_seq, 0);
26474 atomic_set(&mdev->pp_in_use, 0);
26475 atomic_set(&mdev->pp_in_use_by_net, 0);
26476 atomic_set(&mdev->rs_sect_in, 0);
26477 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
26478 mdev->receiver.t_state);
26479
26480 /* no need to lock it, I'm the only thread alive */
26481 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
26482 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
26483 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
26484 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
26485 mdev->al_writ_cnt =
26486 mdev->bm_writ_cnt =
26487 mdev->read_cnt =
26488 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
26489 index 0feab26..5d9b3dd 100644
26490 --- a/drivers/block/drbd/drbd_nl.c
26491 +++ b/drivers/block/drbd/drbd_nl.c
26492 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
26493 module_put(THIS_MODULE);
26494 }
26495
26496 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26497 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
26498
26499 static unsigned short *
26500 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
26501 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
26502 cn_reply->id.idx = CN_IDX_DRBD;
26503 cn_reply->id.val = CN_VAL_DRBD;
26504
26505 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26506 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26507 cn_reply->ack = 0; /* not used here. */
26508 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26509 (int)((char *)tl - (char *)reply->tag_list);
26510 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
26511 cn_reply->id.idx = CN_IDX_DRBD;
26512 cn_reply->id.val = CN_VAL_DRBD;
26513
26514 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26515 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26516 cn_reply->ack = 0; /* not used here. */
26517 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26518 (int)((char *)tl - (char *)reply->tag_list);
26519 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
26520 cn_reply->id.idx = CN_IDX_DRBD;
26521 cn_reply->id.val = CN_VAL_DRBD;
26522
26523 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
26524 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
26525 cn_reply->ack = 0; // not used here.
26526 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26527 (int)((char*)tl - (char*)reply->tag_list);
26528 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
26529 cn_reply->id.idx = CN_IDX_DRBD;
26530 cn_reply->id.val = CN_VAL_DRBD;
26531
26532 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
26533 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
26534 cn_reply->ack = 0; /* not used here. */
26535 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
26536 (int)((char *)tl - (char *)reply->tag_list);
26537 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
26538 index 43beaca..4a5b1dd 100644
26539 --- a/drivers/block/drbd/drbd_receiver.c
26540 +++ b/drivers/block/drbd/drbd_receiver.c
26541 @@ -894,7 +894,7 @@ retry:
26542 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
26543 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
26544
26545 - atomic_set(&mdev->packet_seq, 0);
26546 + atomic_set_unchecked(&mdev->packet_seq, 0);
26547 mdev->peer_seq = 0;
26548
26549 drbd_thread_start(&mdev->asender);
26550 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26551 do {
26552 next_epoch = NULL;
26553
26554 - epoch_size = atomic_read(&epoch->epoch_size);
26555 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
26556
26557 switch (ev & ~EV_CLEANUP) {
26558 case EV_PUT:
26559 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
26560 rv = FE_DESTROYED;
26561 } else {
26562 epoch->flags = 0;
26563 - atomic_set(&epoch->epoch_size, 0);
26564 + atomic_set_unchecked(&epoch->epoch_size, 0);
26565 /* atomic_set(&epoch->active, 0); is already zero */
26566 if (rv == FE_STILL_LIVE)
26567 rv = FE_RECYCLED;
26568 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26569 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
26570 drbd_flush(mdev);
26571
26572 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26573 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26574 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
26575 if (epoch)
26576 break;
26577 }
26578
26579 epoch = mdev->current_epoch;
26580 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
26581 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
26582
26583 D_ASSERT(atomic_read(&epoch->active) == 0);
26584 D_ASSERT(epoch->flags == 0);
26585 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
26586 }
26587
26588 epoch->flags = 0;
26589 - atomic_set(&epoch->epoch_size, 0);
26590 + atomic_set_unchecked(&epoch->epoch_size, 0);
26591 atomic_set(&epoch->active, 0);
26592
26593 spin_lock(&mdev->epoch_lock);
26594 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
26595 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
26596 list_add(&epoch->list, &mdev->current_epoch->list);
26597 mdev->current_epoch = epoch;
26598 mdev->epochs++;
26599 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26600 spin_unlock(&mdev->peer_seq_lock);
26601
26602 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
26603 - atomic_inc(&mdev->current_epoch->epoch_size);
26604 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
26605 return drbd_drain_block(mdev, data_size);
26606 }
26607
26608 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
26609
26610 spin_lock(&mdev->epoch_lock);
26611 e->epoch = mdev->current_epoch;
26612 - atomic_inc(&e->epoch->epoch_size);
26613 + atomic_inc_unchecked(&e->epoch->epoch_size);
26614 atomic_inc(&e->epoch->active);
26615 spin_unlock(&mdev->epoch_lock);
26616
26617 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
26618 D_ASSERT(list_empty(&mdev->done_ee));
26619
26620 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
26621 - atomic_set(&mdev->current_epoch->epoch_size, 0);
26622 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
26623 D_ASSERT(list_empty(&mdev->current_epoch->list));
26624 }
26625
26626 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
26627 index 4720c7a..2c49af1 100644
26628 --- a/drivers/block/loop.c
26629 +++ b/drivers/block/loop.c
26630 @@ -283,7 +283,7 @@ static int __do_lo_send_write(struct file *file,
26631 mm_segment_t old_fs = get_fs();
26632
26633 set_fs(get_ds());
26634 - bw = file->f_op->write(file, buf, len, &pos);
26635 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
26636 set_fs(old_fs);
26637 if (likely(bw == len))
26638 return 0;
26639 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
26640 index f533f33..6177bcb 100644
26641 --- a/drivers/block/nbd.c
26642 +++ b/drivers/block/nbd.c
26643 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
26644 struct kvec iov;
26645 sigset_t blocked, oldset;
26646
26647 + pax_track_stack();
26648 +
26649 if (unlikely(!sock)) {
26650 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26651 lo->disk->disk_name, (send ? "send" : "recv"));
26652 @@ -572,6 +574,8 @@ static void do_nbd_request(struct request_queue *q)
26653 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26654 unsigned int cmd, unsigned long arg)
26655 {
26656 + pax_track_stack();
26657 +
26658 switch (cmd) {
26659 case NBD_DISCONNECT: {
26660 struct request sreq;
26661 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
26662 index 423fd56..06d3be0 100644
26663 --- a/drivers/char/Kconfig
26664 +++ b/drivers/char/Kconfig
26665 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
26666
26667 config DEVKMEM
26668 bool "/dev/kmem virtual device support"
26669 - default y
26670 + default n
26671 + depends on !GRKERNSEC_KMEM
26672 help
26673 Say Y here if you want to support the /dev/kmem device. The
26674 /dev/kmem device is rarely used, but can be used for certain
26675 @@ -596,6 +597,7 @@ config DEVPORT
26676 bool
26677 depends on !M68K
26678 depends on ISA || PCI
26679 + depends on !GRKERNSEC_KMEM
26680 default y
26681
26682 source "drivers/s390/char/Kconfig"
26683 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
26684 index 2e04433..22afc64 100644
26685 --- a/drivers/char/agp/frontend.c
26686 +++ b/drivers/char/agp/frontend.c
26687 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
26688 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26689 return -EFAULT;
26690
26691 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26692 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26693 return -EFAULT;
26694
26695 client = agp_find_client_by_pid(reserve.pid);
26696 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
26697 index 095ab90..afad0a4 100644
26698 --- a/drivers/char/briq_panel.c
26699 +++ b/drivers/char/briq_panel.c
26700 @@ -9,6 +9,7 @@
26701 #include <linux/types.h>
26702 #include <linux/errno.h>
26703 #include <linux/tty.h>
26704 +#include <linux/mutex.h>
26705 #include <linux/timer.h>
26706 #include <linux/kernel.h>
26707 #include <linux/wait.h>
26708 @@ -34,6 +35,7 @@ static int vfd_is_open;
26709 static unsigned char vfd[40];
26710 static int vfd_cursor;
26711 static unsigned char ledpb, led;
26712 +static DEFINE_MUTEX(vfd_mutex);
26713
26714 static void update_vfd(void)
26715 {
26716 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26717 if (!vfd_is_open)
26718 return -EBUSY;
26719
26720 + mutex_lock(&vfd_mutex);
26721 for (;;) {
26722 char c;
26723 if (!indx)
26724 break;
26725 - if (get_user(c, buf))
26726 + if (get_user(c, buf)) {
26727 + mutex_unlock(&vfd_mutex);
26728 return -EFAULT;
26729 + }
26730 if (esc) {
26731 set_led(c);
26732 esc = 0;
26733 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
26734 buf++;
26735 }
26736 update_vfd();
26737 + mutex_unlock(&vfd_mutex);
26738
26739 return len;
26740 }
26741 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
26742 index f773a9d..65cd683 100644
26743 --- a/drivers/char/genrtc.c
26744 +++ b/drivers/char/genrtc.c
26745 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
26746 switch (cmd) {
26747
26748 case RTC_PLL_GET:
26749 + memset(&pll, 0, sizeof(pll));
26750 if (get_rtc_pll(&pll))
26751 return -EINVAL;
26752 else
26753 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
26754 index 0833896..cccce52 100644
26755 --- a/drivers/char/hpet.c
26756 +++ b/drivers/char/hpet.c
26757 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
26758 }
26759
26760 static int
26761 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
26762 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
26763 struct hpet_info *info)
26764 {
26765 struct hpet_timer __iomem *timer;
26766 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
26767 index 58c0e63..25aed94 100644
26768 --- a/drivers/char/ipmi/ipmi_msghandler.c
26769 +++ b/drivers/char/ipmi/ipmi_msghandler.c
26770 @@ -415,7 +415,7 @@ struct ipmi_smi {
26771 struct proc_dir_entry *proc_dir;
26772 char proc_dir_name[10];
26773
26774 - atomic_t stats[IPMI_NUM_STATS];
26775 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26776
26777 /*
26778 * run_to_completion duplicate of smb_info, smi_info
26779 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26780
26781
26782 #define ipmi_inc_stat(intf, stat) \
26783 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26784 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26785 #define ipmi_get_stat(intf, stat) \
26786 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26787 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26788
26789 static int is_lan_addr(struct ipmi_addr *addr)
26790 {
26791 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
26792 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26793 init_waitqueue_head(&intf->waitq);
26794 for (i = 0; i < IPMI_NUM_STATS; i++)
26795 - atomic_set(&intf->stats[i], 0);
26796 + atomic_set_unchecked(&intf->stats[i], 0);
26797
26798 intf->proc_dir = NULL;
26799
26800 @@ -4220,6 +4220,8 @@ static void send_panic_events(char *str)
26801 struct ipmi_smi_msg smi_msg;
26802 struct ipmi_recv_msg recv_msg;
26803
26804 + pax_track_stack();
26805 +
26806 si = (struct ipmi_system_interface_addr *) &addr;
26807 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26808 si->channel = IPMI_BMC_CHANNEL;
26809 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
26810 index 9397ab4..d01bee1 100644
26811 --- a/drivers/char/ipmi/ipmi_si_intf.c
26812 +++ b/drivers/char/ipmi/ipmi_si_intf.c
26813 @@ -277,7 +277,7 @@ struct smi_info {
26814 unsigned char slave_addr;
26815
26816 /* Counters and things for the proc filesystem. */
26817 - atomic_t stats[SI_NUM_STATS];
26818 + atomic_unchecked_t stats[SI_NUM_STATS];
26819
26820 struct task_struct *thread;
26821
26822 @@ -286,9 +286,9 @@ struct smi_info {
26823 };
26824
26825 #define smi_inc_stat(smi, stat) \
26826 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26827 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26828 #define smi_get_stat(smi, stat) \
26829 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26830 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26831
26832 #define SI_MAX_PARMS 4
26833
26834 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
26835 atomic_set(&new_smi->req_events, 0);
26836 new_smi->run_to_completion = 0;
26837 for (i = 0; i < SI_NUM_STATS; i++)
26838 - atomic_set(&new_smi->stats[i], 0);
26839 + atomic_set_unchecked(&new_smi->stats[i], 0);
26840
26841 new_smi->interrupt_disabled = 1;
26842 atomic_set(&new_smi->stop_operation, 0);
26843 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
26844 index 1aeaaba..e018570 100644
26845 --- a/drivers/char/mbcs.c
26846 +++ b/drivers/char/mbcs.c
26847 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
26848 return 0;
26849 }
26850
26851 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
26852 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
26853 {
26854 .part_num = MBCS_PART_NUM,
26855 .mfg_num = MBCS_MFG_NUM,
26856 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
26857 index 8fc04b4..cebdeec 100644
26858 --- a/drivers/char/mem.c
26859 +++ b/drivers/char/mem.c
26860 @@ -18,6 +18,7 @@
26861 #include <linux/raw.h>
26862 #include <linux/tty.h>
26863 #include <linux/capability.h>
26864 +#include <linux/security.h>
26865 #include <linux/ptrace.h>
26866 #include <linux/device.h>
26867 #include <linux/highmem.h>
26868 @@ -34,6 +35,10 @@
26869 # include <linux/efi.h>
26870 #endif
26871
26872 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26873 +extern const struct file_operations grsec_fops;
26874 +#endif
26875 +
26876 static inline unsigned long size_inside_page(unsigned long start,
26877 unsigned long size)
26878 {
26879 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26880
26881 while (cursor < to) {
26882 if (!devmem_is_allowed(pfn)) {
26883 +#ifdef CONFIG_GRKERNSEC_KMEM
26884 + gr_handle_mem_readwrite(from, to);
26885 +#else
26886 printk(KERN_INFO
26887 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26888 current->comm, from, to);
26889 +#endif
26890 return 0;
26891 }
26892 cursor += PAGE_SIZE;
26893 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26894 }
26895 return 1;
26896 }
26897 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26898 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26899 +{
26900 + return 0;
26901 +}
26902 #else
26903 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26904 {
26905 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
26906
26907 while (count > 0) {
26908 unsigned long remaining;
26909 + char *temp;
26910
26911 sz = size_inside_page(p, count);
26912
26913 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
26914 if (!ptr)
26915 return -EFAULT;
26916
26917 - remaining = copy_to_user(buf, ptr, sz);
26918 +#ifdef CONFIG_PAX_USERCOPY
26919 + temp = kmalloc(sz, GFP_KERNEL);
26920 + if (!temp) {
26921 + unxlate_dev_mem_ptr(p, ptr);
26922 + return -ENOMEM;
26923 + }
26924 + memcpy(temp, ptr, sz);
26925 +#else
26926 + temp = ptr;
26927 +#endif
26928 +
26929 + remaining = copy_to_user(buf, temp, sz);
26930 +
26931 +#ifdef CONFIG_PAX_USERCOPY
26932 + kfree(temp);
26933 +#endif
26934 +
26935 unxlate_dev_mem_ptr(p, ptr);
26936 if (remaining)
26937 return -EFAULT;
26938 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
26939 size_t count, loff_t *ppos)
26940 {
26941 unsigned long p = *ppos;
26942 - ssize_t low_count, read, sz;
26943 + ssize_t low_count, read, sz, err = 0;
26944 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26945 - int err = 0;
26946
26947 read = 0;
26948 if (p < (unsigned long) high_memory) {
26949 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
26950 }
26951 #endif
26952 while (low_count > 0) {
26953 + char *temp;
26954 +
26955 sz = size_inside_page(p, low_count);
26956
26957 /*
26958 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
26959 */
26960 kbuf = xlate_dev_kmem_ptr((char *)p);
26961
26962 - if (copy_to_user(buf, kbuf, sz))
26963 +#ifdef CONFIG_PAX_USERCOPY
26964 + temp = kmalloc(sz, GFP_KERNEL);
26965 + if (!temp)
26966 + return -ENOMEM;
26967 + memcpy(temp, kbuf, sz);
26968 +#else
26969 + temp = kbuf;
26970 +#endif
26971 +
26972 + err = copy_to_user(buf, temp, sz);
26973 +
26974 +#ifdef CONFIG_PAX_USERCOPY
26975 + kfree(temp);
26976 +#endif
26977 +
26978 + if (err)
26979 return -EFAULT;
26980 buf += sz;
26981 p += sz;
26982 @@ -866,6 +913,9 @@ static const struct memdev {
26983 #ifdef CONFIG_CRASH_DUMP
26984 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26985 #endif
26986 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26987 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26988 +#endif
26989 };
26990
26991 static int memory_open(struct inode *inode, struct file *filp)
26992 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
26993 index da3cfee..a5a6606 100644
26994 --- a/drivers/char/nvram.c
26995 +++ b/drivers/char/nvram.c
26996 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
26997
26998 spin_unlock_irq(&rtc_lock);
26999
27000 - if (copy_to_user(buf, contents, tmp - contents))
27001 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
27002 return -EFAULT;
27003
27004 *ppos = i;
27005 diff --git a/drivers/char/random.c b/drivers/char/random.c
27006 index c35a785..6d82202 100644
27007 --- a/drivers/char/random.c
27008 +++ b/drivers/char/random.c
27009 @@ -261,8 +261,13 @@
27010 /*
27011 * Configuration information
27012 */
27013 +#ifdef CONFIG_GRKERNSEC_RANDNET
27014 +#define INPUT_POOL_WORDS 512
27015 +#define OUTPUT_POOL_WORDS 128
27016 +#else
27017 #define INPUT_POOL_WORDS 128
27018 #define OUTPUT_POOL_WORDS 32
27019 +#endif
27020 #define SEC_XFER_SIZE 512
27021 #define EXTRACT_SIZE 10
27022
27023 @@ -300,10 +305,17 @@ static struct poolinfo {
27024 int poolwords;
27025 int tap1, tap2, tap3, tap4, tap5;
27026 } poolinfo_table[] = {
27027 +#ifdef CONFIG_GRKERNSEC_RANDNET
27028 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27029 + { 512, 411, 308, 208, 104, 1 },
27030 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27031 + { 128, 103, 76, 51, 25, 1 },
27032 +#else
27033 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27034 { 128, 103, 76, 51, 25, 1 },
27035 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27036 { 32, 26, 20, 14, 7, 1 },
27037 +#endif
27038 #if 0
27039 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27040 { 2048, 1638, 1231, 819, 411, 1 },
27041 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
27042
27043 extract_buf(r, tmp);
27044 i = min_t(int, nbytes, EXTRACT_SIZE);
27045 - if (copy_to_user(buf, tmp, i)) {
27046 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
27047 ret = -EFAULT;
27048 break;
27049 }
27050 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27051 #include <linux/sysctl.h>
27052
27053 static int min_read_thresh = 8, min_write_thresh;
27054 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27055 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27056 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27057 static char sysctl_bootid[16];
27058
27059 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
27060 index 1ee8ce7..b778bef 100644
27061 --- a/drivers/char/sonypi.c
27062 +++ b/drivers/char/sonypi.c
27063 @@ -55,6 +55,7 @@
27064 #include <asm/uaccess.h>
27065 #include <asm/io.h>
27066 #include <asm/system.h>
27067 +#include <asm/local.h>
27068
27069 #include <linux/sonypi.h>
27070
27071 @@ -491,7 +492,7 @@ static struct sonypi_device {
27072 spinlock_t fifo_lock;
27073 wait_queue_head_t fifo_proc_list;
27074 struct fasync_struct *fifo_async;
27075 - int open_count;
27076 + local_t open_count;
27077 int model;
27078 struct input_dev *input_jog_dev;
27079 struct input_dev *input_key_dev;
27080 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
27081 static int sonypi_misc_release(struct inode *inode, struct file *file)
27082 {
27083 mutex_lock(&sonypi_device.lock);
27084 - sonypi_device.open_count--;
27085 + local_dec(&sonypi_device.open_count);
27086 mutex_unlock(&sonypi_device.lock);
27087 return 0;
27088 }
27089 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
27090 {
27091 mutex_lock(&sonypi_device.lock);
27092 /* Flush input queue on first open */
27093 - if (!sonypi_device.open_count)
27094 + if (!local_read(&sonypi_device.open_count))
27095 kfifo_reset(&sonypi_device.fifo);
27096 - sonypi_device.open_count++;
27097 + local_inc(&sonypi_device.open_count);
27098 mutex_unlock(&sonypi_device.lock);
27099
27100 return 0;
27101 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
27102 index 9ca5c02..7ce352c 100644
27103 --- a/drivers/char/tpm/tpm.c
27104 +++ b/drivers/char/tpm/tpm.c
27105 @@ -414,7 +414,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
27106 chip->vendor.req_complete_val)
27107 goto out_recv;
27108
27109 - if ((status == chip->vendor.req_canceled)) {
27110 + if (status == chip->vendor.req_canceled) {
27111 dev_err(chip->dev, "Operation Canceled\n");
27112 rc = -ECANCELED;
27113 goto out;
27114 @@ -862,6 +862,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
27115
27116 struct tpm_chip *chip = dev_get_drvdata(dev);
27117
27118 + pax_track_stack();
27119 +
27120 tpm_cmd.header.in = tpm_readpubek_header;
27121 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27122 "attempting to read the PUBEK");
27123 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
27124 index 0636520..169c1d0 100644
27125 --- a/drivers/char/tpm/tpm_bios.c
27126 +++ b/drivers/char/tpm/tpm_bios.c
27127 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
27128 event = addr;
27129
27130 if ((event->event_type == 0 && event->event_size == 0) ||
27131 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27132 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27133 return NULL;
27134
27135 return addr;
27136 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
27137 return NULL;
27138
27139 if ((event->event_type == 0 && event->event_size == 0) ||
27140 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27141 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27142 return NULL;
27143
27144 (*pos)++;
27145 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
27146 int i;
27147
27148 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27149 - seq_putc(m, data[i]);
27150 + if (!seq_putc(m, data[i]))
27151 + return -EFAULT;
27152
27153 return 0;
27154 }
27155 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
27156 log->bios_event_log_end = log->bios_event_log + len;
27157
27158 virt = acpi_os_map_memory(start, len);
27159 + if (!virt) {
27160 + kfree(log->bios_event_log);
27161 + log->bios_event_log = NULL;
27162 + return -EFAULT;
27163 + }
27164
27165 - memcpy(log->bios_event_log, virt, len);
27166 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
27167
27168 acpi_os_unmap_memory(virt, len);
27169 return 0;
27170 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
27171 index fb68b12..0f6c6ca 100644
27172 --- a/drivers/char/virtio_console.c
27173 +++ b/drivers/char/virtio_console.c
27174 @@ -555,7 +555,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
27175 if (to_user) {
27176 ssize_t ret;
27177
27178 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
27179 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
27180 if (ret)
27181 return -EFAULT;
27182 } else {
27183 @@ -654,7 +654,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
27184 if (!port_has_data(port) && !port->host_connected)
27185 return 0;
27186
27187 - return fill_readbuf(port, ubuf, count, true);
27188 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
27189 }
27190
27191 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
27192 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
27193 index a84250a..68c725e 100644
27194 --- a/drivers/crypto/hifn_795x.c
27195 +++ b/drivers/crypto/hifn_795x.c
27196 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
27197 0xCA, 0x34, 0x2B, 0x2E};
27198 struct scatterlist sg;
27199
27200 + pax_track_stack();
27201 +
27202 memset(src, 0, sizeof(src));
27203 memset(ctx.key, 0, sizeof(ctx.key));
27204
27205 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
27206 index db33d30..7823369 100644
27207 --- a/drivers/crypto/padlock-aes.c
27208 +++ b/drivers/crypto/padlock-aes.c
27209 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
27210 struct crypto_aes_ctx gen_aes;
27211 int cpu;
27212
27213 + pax_track_stack();
27214 +
27215 if (key_len % 8) {
27216 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27217 return -EINVAL;
27218 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
27219 index 9a8bebc..b1e4989 100644
27220 --- a/drivers/edac/amd64_edac.c
27221 +++ b/drivers/edac/amd64_edac.c
27222 @@ -2670,7 +2670,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
27223 * PCI core identifies what devices are on a system during boot, and then
27224 * inquiry this table to see if this driver is for a given device found.
27225 */
27226 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
27227 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
27228 {
27229 .vendor = PCI_VENDOR_ID_AMD,
27230 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
27231 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
27232 index e47e73b..348e0bd 100644
27233 --- a/drivers/edac/amd76x_edac.c
27234 +++ b/drivers/edac/amd76x_edac.c
27235 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
27236 edac_mc_free(mci);
27237 }
27238
27239 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
27240 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
27241 {
27242 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27243 AMD762},
27244 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
27245 index 1af531a..3a8ff27 100644
27246 --- a/drivers/edac/e752x_edac.c
27247 +++ b/drivers/edac/e752x_edac.c
27248 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
27249 edac_mc_free(mci);
27250 }
27251
27252 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
27253 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
27254 {
27255 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27256 E7520},
27257 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
27258 index 6ffb6d2..383d8d7 100644
27259 --- a/drivers/edac/e7xxx_edac.c
27260 +++ b/drivers/edac/e7xxx_edac.c
27261 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
27262 edac_mc_free(mci);
27263 }
27264
27265 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
27266 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
27267 {
27268 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27269 E7205},
27270 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
27271 index 495198a..ac08c85 100644
27272 --- a/drivers/edac/edac_pci_sysfs.c
27273 +++ b/drivers/edac/edac_pci_sysfs.c
27274 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
27275 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27276 static int edac_pci_poll_msec = 1000; /* one second workq period */
27277
27278 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27279 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27280 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27281 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27282
27283 static struct kobject *edac_pci_top_main_kobj;
27284 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27285 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27286 edac_printk(KERN_CRIT, EDAC_PCI,
27287 "Signaled System Error on %s\n",
27288 pci_name(dev));
27289 - atomic_inc(&pci_nonparity_count);
27290 + atomic_inc_unchecked(&pci_nonparity_count);
27291 }
27292
27293 if (status & (PCI_STATUS_PARITY)) {
27294 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27295 "Master Data Parity Error on %s\n",
27296 pci_name(dev));
27297
27298 - atomic_inc(&pci_parity_count);
27299 + atomic_inc_unchecked(&pci_parity_count);
27300 }
27301
27302 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27303 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27304 "Detected Parity Error on %s\n",
27305 pci_name(dev));
27306
27307 - atomic_inc(&pci_parity_count);
27308 + atomic_inc_unchecked(&pci_parity_count);
27309 }
27310 }
27311
27312 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27313 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27314 "Signaled System Error on %s\n",
27315 pci_name(dev));
27316 - atomic_inc(&pci_nonparity_count);
27317 + atomic_inc_unchecked(&pci_nonparity_count);
27318 }
27319
27320 if (status & (PCI_STATUS_PARITY)) {
27321 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27322 "Master Data Parity Error on "
27323 "%s\n", pci_name(dev));
27324
27325 - atomic_inc(&pci_parity_count);
27326 + atomic_inc_unchecked(&pci_parity_count);
27327 }
27328
27329 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27330 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
27331 "Detected Parity Error on %s\n",
27332 pci_name(dev));
27333
27334 - atomic_inc(&pci_parity_count);
27335 + atomic_inc_unchecked(&pci_parity_count);
27336 }
27337 }
27338 }
27339 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
27340 if (!check_pci_errors)
27341 return;
27342
27343 - before_count = atomic_read(&pci_parity_count);
27344 + before_count = atomic_read_unchecked(&pci_parity_count);
27345
27346 /* scan all PCI devices looking for a Parity Error on devices and
27347 * bridges.
27348 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
27349 /* Only if operator has selected panic on PCI Error */
27350 if (edac_pci_get_panic_on_pe()) {
27351 /* If the count is different 'after' from 'before' */
27352 - if (before_count != atomic_read(&pci_parity_count))
27353 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27354 panic("EDAC: PCI Parity Error");
27355 }
27356 }
27357 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
27358 index c0510b3..6e2a954 100644
27359 --- a/drivers/edac/i3000_edac.c
27360 +++ b/drivers/edac/i3000_edac.c
27361 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
27362 edac_mc_free(mci);
27363 }
27364
27365 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
27366 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
27367 {
27368 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27369 I3000},
27370 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
27371 index aa08497..7e6822a 100644
27372 --- a/drivers/edac/i3200_edac.c
27373 +++ b/drivers/edac/i3200_edac.c
27374 @@ -456,7 +456,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
27375 edac_mc_free(mci);
27376 }
27377
27378 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
27379 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
27380 {
27381 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27382 I3200},
27383 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
27384 index 4dc3ac2..67d05a6 100644
27385 --- a/drivers/edac/i5000_edac.c
27386 +++ b/drivers/edac/i5000_edac.c
27387 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
27388 *
27389 * The "E500P" device is the first device supported.
27390 */
27391 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
27392 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
27393 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
27394 .driver_data = I5000P},
27395
27396 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
27397 index bcbdeec..9886d16 100644
27398 --- a/drivers/edac/i5100_edac.c
27399 +++ b/drivers/edac/i5100_edac.c
27400 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
27401 edac_mc_free(mci);
27402 }
27403
27404 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
27405 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
27406 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
27407 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
27408 { 0, }
27409 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
27410 index 74d6ec34..baff517 100644
27411 --- a/drivers/edac/i5400_edac.c
27412 +++ b/drivers/edac/i5400_edac.c
27413 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
27414 *
27415 * The "E500P" device is the first device supported.
27416 */
27417 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
27418 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
27419 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
27420 {0,} /* 0 terminated list. */
27421 };
27422 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
27423 index a76fe83..15479e6 100644
27424 --- a/drivers/edac/i7300_edac.c
27425 +++ b/drivers/edac/i7300_edac.c
27426 @@ -1191,7 +1191,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
27427 *
27428 * Has only 8086:360c PCI ID
27429 */
27430 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
27431 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
27432 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
27433 {0,} /* 0 terminated list. */
27434 };
27435 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
27436 index f6cf448..3f612e9 100644
27437 --- a/drivers/edac/i7core_edac.c
27438 +++ b/drivers/edac/i7core_edac.c
27439 @@ -359,7 +359,7 @@ static const struct pci_id_table pci_dev_table[] = {
27440 /*
27441 * pci_device_id table for which devices we are looking for
27442 */
27443 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
27444 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
27445 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
27446 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
27447 {0,} /* 0 terminated list. */
27448 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
27449 index 4329d39..f3022ef 100644
27450 --- a/drivers/edac/i82443bxgx_edac.c
27451 +++ b/drivers/edac/i82443bxgx_edac.c
27452 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
27453
27454 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
27455
27456 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
27457 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
27458 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
27459 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
27460 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
27461 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
27462 index 931a057..fd28340 100644
27463 --- a/drivers/edac/i82860_edac.c
27464 +++ b/drivers/edac/i82860_edac.c
27465 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
27466 edac_mc_free(mci);
27467 }
27468
27469 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
27470 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
27471 {
27472 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27473 I82860},
27474 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
27475 index 33864c6..01edc61 100644
27476 --- a/drivers/edac/i82875p_edac.c
27477 +++ b/drivers/edac/i82875p_edac.c
27478 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
27479 edac_mc_free(mci);
27480 }
27481
27482 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
27483 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
27484 {
27485 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27486 I82875P},
27487 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
27488 index a5da732..983363b 100644
27489 --- a/drivers/edac/i82975x_edac.c
27490 +++ b/drivers/edac/i82975x_edac.c
27491 @@ -604,7 +604,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
27492 edac_mc_free(mci);
27493 }
27494
27495 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
27496 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
27497 {
27498 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27499 I82975X
27500 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
27501 index 795a320..3bbc3d3 100644
27502 --- a/drivers/edac/mce_amd.h
27503 +++ b/drivers/edac/mce_amd.h
27504 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
27505 bool (*dc_mce)(u16, u8);
27506 bool (*ic_mce)(u16, u8);
27507 bool (*nb_mce)(u16, u8);
27508 -};
27509 +} __no_const;
27510
27511 void amd_report_gart_errors(bool);
27512 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
27513 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
27514 index b153674..ad2ba9b 100644
27515 --- a/drivers/edac/r82600_edac.c
27516 +++ b/drivers/edac/r82600_edac.c
27517 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
27518 edac_mc_free(mci);
27519 }
27520
27521 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
27522 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
27523 {
27524 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
27525 },
27526 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
27527 index b6f47de..c5acf3a 100644
27528 --- a/drivers/edac/x38_edac.c
27529 +++ b/drivers/edac/x38_edac.c
27530 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
27531 edac_mc_free(mci);
27532 }
27533
27534 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
27535 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
27536 {
27537 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
27538 X38},
27539 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
27540 index 85661b0..c784559a 100644
27541 --- a/drivers/firewire/core-card.c
27542 +++ b/drivers/firewire/core-card.c
27543 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
27544
27545 void fw_core_remove_card(struct fw_card *card)
27546 {
27547 - struct fw_card_driver dummy_driver = dummy_driver_template;
27548 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
27549
27550 card->driver->update_phy_reg(card, 4,
27551 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
27552 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
27553 index 4799393..37bd3ab 100644
27554 --- a/drivers/firewire/core-cdev.c
27555 +++ b/drivers/firewire/core-cdev.c
27556 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
27557 int ret;
27558
27559 if ((request->channels == 0 && request->bandwidth == 0) ||
27560 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27561 - request->bandwidth < 0)
27562 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27563 return -EINVAL;
27564
27565 r = kmalloc(sizeof(*r), GFP_KERNEL);
27566 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
27567 index 334b82a..ea5261d 100644
27568 --- a/drivers/firewire/core-transaction.c
27569 +++ b/drivers/firewire/core-transaction.c
27570 @@ -37,6 +37,7 @@
27571 #include <linux/timer.h>
27572 #include <linux/types.h>
27573 #include <linux/workqueue.h>
27574 +#include <linux/sched.h>
27575
27576 #include <asm/byteorder.h>
27577
27578 @@ -422,6 +423,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
27579 struct transaction_callback_data d;
27580 struct fw_transaction t;
27581
27582 + pax_track_stack();
27583 +
27584 init_timer_on_stack(&t.split_timeout_timer);
27585 init_completion(&d.done);
27586 d.payload = payload;
27587 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
27588 index b45be57..5fad18b 100644
27589 --- a/drivers/firewire/core.h
27590 +++ b/drivers/firewire/core.h
27591 @@ -101,6 +101,7 @@ struct fw_card_driver {
27592
27593 int (*stop_iso)(struct fw_iso_context *ctx);
27594 };
27595 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
27596
27597 void fw_card_initialize(struct fw_card *card,
27598 const struct fw_card_driver *driver, struct device *device);
27599 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
27600 index bcb1126..2cc2121 100644
27601 --- a/drivers/firmware/dmi_scan.c
27602 +++ b/drivers/firmware/dmi_scan.c
27603 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
27604 }
27605 }
27606 else {
27607 - /*
27608 - * no iounmap() for that ioremap(); it would be a no-op, but
27609 - * it's so early in setup that sucker gets confused into doing
27610 - * what it shouldn't if we actually call it.
27611 - */
27612 p = dmi_ioremap(0xF0000, 0x10000);
27613 if (p == NULL)
27614 goto error;
27615 @@ -725,7 +720,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
27616 if (buf == NULL)
27617 return -1;
27618
27619 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
27620 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
27621
27622 iounmap(buf);
27623 return 0;
27624 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
27625 index 98723cb..10ca85b 100644
27626 --- a/drivers/gpio/gpio-vr41xx.c
27627 +++ b/drivers/gpio/gpio-vr41xx.c
27628 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27629 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27630 maskl, pendl, maskh, pendh);
27631
27632 - atomic_inc(&irq_err_count);
27633 + atomic_inc_unchecked(&irq_err_count);
27634
27635 return -EINVAL;
27636 }
27637 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
27638 index fe738f0..2d03563 100644
27639 --- a/drivers/gpu/drm/drm_crtc.c
27640 +++ b/drivers/gpu/drm/drm_crtc.c
27641 @@ -1374,7 +1374,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27642 */
27643 if ((out_resp->count_modes >= mode_count) && mode_count) {
27644 copied = 0;
27645 - mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
27646 + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
27647 list_for_each_entry(mode, &connector->modes, head) {
27648 drm_crtc_convert_to_umode(&u_mode, mode);
27649 if (copy_to_user(mode_ptr + copied,
27650 @@ -1389,8 +1389,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27651
27652 if ((out_resp->count_props >= props_count) && props_count) {
27653 copied = 0;
27654 - prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
27655 - prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
27656 + prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
27657 + prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
27658 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
27659 if (connector->property_ids[i] != 0) {
27660 if (put_user(connector->property_ids[i],
27661 @@ -1412,7 +1412,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
27662
27663 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
27664 copied = 0;
27665 - encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
27666 + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
27667 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
27668 if (connector->encoder_ids[i] != 0) {
27669 if (put_user(connector->encoder_ids[i],
27670 @@ -1571,7 +1571,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
27671 }
27672
27673 for (i = 0; i < crtc_req->count_connectors; i++) {
27674 - set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
27675 + set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
27676 if (get_user(out_id, &set_connectors_ptr[i])) {
27677 ret = -EFAULT;
27678 goto out;
27679 @@ -1852,7 +1852,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27680 fb = obj_to_fb(obj);
27681
27682 num_clips = r->num_clips;
27683 - clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
27684 + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
27685
27686 if (!num_clips != !clips_ptr) {
27687 ret = -EINVAL;
27688 @@ -1868,6 +1868,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
27689 }
27690
27691 if (num_clips && clips_ptr) {
27692 + if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
27693 + ret = -EINVAL;
27694 + goto out_err1;
27695 + }
27696 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
27697 if (!clips) {
27698 ret = -ENOMEM;
27699 @@ -2272,7 +2276,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27700 out_resp->flags = property->flags;
27701
27702 if ((out_resp->count_values >= value_count) && value_count) {
27703 - values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
27704 + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
27705 for (i = 0; i < value_count; i++) {
27706 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
27707 ret = -EFAULT;
27708 @@ -2285,7 +2289,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27709 if (property->flags & DRM_MODE_PROP_ENUM) {
27710 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
27711 copied = 0;
27712 - enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
27713 + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
27714 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
27715
27716 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
27717 @@ -2308,7 +2312,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
27718 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
27719 copied = 0;
27720 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
27721 - blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
27722 + blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
27723
27724 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
27725 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
27726 @@ -2369,7 +2373,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27727 struct drm_mode_get_blob *out_resp = data;
27728 struct drm_property_blob *blob;
27729 int ret = 0;
27730 - void *blob_ptr;
27731 + void __user *blob_ptr;
27732
27733 if (!drm_core_check_feature(dev, DRIVER_MODESET))
27734 return -EINVAL;
27735 @@ -2383,7 +2387,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
27736 blob = obj_to_blob(obj);
27737
27738 if (out_resp->length == blob->length) {
27739 - blob_ptr = (void *)(unsigned long)out_resp->data;
27740 + blob_ptr = (void __user *)(unsigned long)out_resp->data;
27741 if (copy_to_user(blob_ptr, blob->data, blob->length)){
27742 ret = -EFAULT;
27743 goto done;
27744 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
27745 index f88a9b2..8f4078f 100644
27746 --- a/drivers/gpu/drm/drm_crtc_helper.c
27747 +++ b/drivers/gpu/drm/drm_crtc_helper.c
27748 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
27749 struct drm_crtc *tmp;
27750 int crtc_mask = 1;
27751
27752 - WARN(!crtc, "checking null crtc?\n");
27753 + BUG_ON(!crtc);
27754
27755 dev = crtc->dev;
27756
27757 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
27758 struct drm_encoder *encoder;
27759 bool ret = true;
27760
27761 + pax_track_stack();
27762 +
27763 crtc->enabled = drm_helper_crtc_in_use(crtc);
27764 if (!crtc->enabled)
27765 return true;
27766 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
27767 index 93a112d..c8b065d 100644
27768 --- a/drivers/gpu/drm/drm_drv.c
27769 +++ b/drivers/gpu/drm/drm_drv.c
27770 @@ -307,7 +307,7 @@ module_exit(drm_core_exit);
27771 /**
27772 * Copy and IOCTL return string to user space
27773 */
27774 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
27775 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
27776 {
27777 int len;
27778
27779 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
27780
27781 dev = file_priv->minor->dev;
27782 atomic_inc(&dev->ioctl_count);
27783 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27784 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27785 ++file_priv->ioctl_count;
27786
27787 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27788 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
27789 index 2ec7d48..be14bb1 100644
27790 --- a/drivers/gpu/drm/drm_fops.c
27791 +++ b/drivers/gpu/drm/drm_fops.c
27792 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device * dev)
27793 }
27794
27795 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27796 - atomic_set(&dev->counts[i], 0);
27797 + atomic_set_unchecked(&dev->counts[i], 0);
27798
27799 dev->sigdata.lock = NULL;
27800
27801 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct file *filp)
27802
27803 retcode = drm_open_helper(inode, filp, dev);
27804 if (!retcode) {
27805 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27806 - if (!dev->open_count++)
27807 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27808 + if (local_inc_return(&dev->open_count) == 1)
27809 retcode = drm_setup(dev);
27810 }
27811 if (!retcode) {
27812 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, struct file *filp)
27813
27814 mutex_lock(&drm_global_mutex);
27815
27816 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27817 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27818
27819 if (dev->driver->preclose)
27820 dev->driver->preclose(dev, file_priv);
27821 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, struct file *filp)
27822 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27823 task_pid_nr(current),
27824 (long)old_encode_dev(file_priv->minor->device),
27825 - dev->open_count);
27826 + local_read(&dev->open_count));
27827
27828 /* if the master has gone away we can't do anything with the lock */
27829 if (file_priv->minor->master)
27830 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, struct file *filp)
27831 * End inline drm_release
27832 */
27833
27834 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27835 - if (!--dev->open_count) {
27836 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27837 + if (local_dec_and_test(&dev->open_count)) {
27838 if (atomic_read(&dev->ioctl_count)) {
27839 DRM_ERROR("Device busy: %d\n",
27840 atomic_read(&dev->ioctl_count));
27841 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
27842 index c87dc96..326055d 100644
27843 --- a/drivers/gpu/drm/drm_global.c
27844 +++ b/drivers/gpu/drm/drm_global.c
27845 @@ -36,7 +36,7 @@
27846 struct drm_global_item {
27847 struct mutex mutex;
27848 void *object;
27849 - int refcount;
27850 + atomic_t refcount;
27851 };
27852
27853 static struct drm_global_item glob[DRM_GLOBAL_NUM];
27854 @@ -49,7 +49,7 @@ void drm_global_init(void)
27855 struct drm_global_item *item = &glob[i];
27856 mutex_init(&item->mutex);
27857 item->object = NULL;
27858 - item->refcount = 0;
27859 + atomic_set(&item->refcount, 0);
27860 }
27861 }
27862
27863 @@ -59,7 +59,7 @@ void drm_global_release(void)
27864 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
27865 struct drm_global_item *item = &glob[i];
27866 BUG_ON(item->object != NULL);
27867 - BUG_ON(item->refcount != 0);
27868 + BUG_ON(atomic_read(&item->refcount) != 0);
27869 }
27870 }
27871
27872 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27873 void *object;
27874
27875 mutex_lock(&item->mutex);
27876 - if (item->refcount == 0) {
27877 + if (atomic_read(&item->refcount) == 0) {
27878 item->object = kzalloc(ref->size, GFP_KERNEL);
27879 if (unlikely(item->object == NULL)) {
27880 ret = -ENOMEM;
27881 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
27882 goto out_err;
27883
27884 }
27885 - ++item->refcount;
27886 + atomic_inc(&item->refcount);
27887 ref->object = item->object;
27888 object = item->object;
27889 mutex_unlock(&item->mutex);
27890 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
27891 struct drm_global_item *item = &glob[ref->global_type];
27892
27893 mutex_lock(&item->mutex);
27894 - BUG_ON(item->refcount == 0);
27895 + BUG_ON(atomic_read(&item->refcount) == 0);
27896 BUG_ON(ref->object != item->object);
27897 - if (--item->refcount == 0) {
27898 + if (atomic_dec_and_test(&item->refcount)) {
27899 ref->release(ref);
27900 item->object = NULL;
27901 }
27902 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
27903 index ab1162d..42587b2 100644
27904 --- a/drivers/gpu/drm/drm_info.c
27905 +++ b/drivers/gpu/drm/drm_info.c
27906 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
27907 struct drm_local_map *map;
27908 struct drm_map_list *r_list;
27909
27910 - /* Hardcoded from _DRM_FRAME_BUFFER,
27911 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27912 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27913 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27914 + static const char * const types[] = {
27915 + [_DRM_FRAME_BUFFER] = "FB",
27916 + [_DRM_REGISTERS] = "REG",
27917 + [_DRM_SHM] = "SHM",
27918 + [_DRM_AGP] = "AGP",
27919 + [_DRM_SCATTER_GATHER] = "SG",
27920 + [_DRM_CONSISTENT] = "PCI",
27921 + [_DRM_GEM] = "GEM" };
27922 const char *type;
27923 int i;
27924
27925 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
27926 map = r_list->map;
27927 if (!map)
27928 continue;
27929 - if (map->type < 0 || map->type > 5)
27930 + if (map->type >= ARRAY_SIZE(types))
27931 type = "??";
27932 else
27933 type = types[map->type];
27934 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
27935 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27936 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27937 vma->vm_flags & VM_IO ? 'i' : '-',
27938 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27939 + 0);
27940 +#else
27941 vma->vm_pgoff);
27942 +#endif
27943
27944 #if defined(__i386__)
27945 pgprot = pgprot_val(vma->vm_page_prot);
27946 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
27947 index 4a058c7..b42cd92 100644
27948 --- a/drivers/gpu/drm/drm_ioc32.c
27949 +++ b/drivers/gpu/drm/drm_ioc32.c
27950 @@ -455,7 +455,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
27951 request = compat_alloc_user_space(nbytes);
27952 if (!access_ok(VERIFY_WRITE, request, nbytes))
27953 return -EFAULT;
27954 - list = (struct drm_buf_desc *) (request + 1);
27955 + list = (struct drm_buf_desc __user *) (request + 1);
27956
27957 if (__put_user(count, &request->count)
27958 || __put_user(list, &request->list))
27959 @@ -516,7 +516,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
27960 request = compat_alloc_user_space(nbytes);
27961 if (!access_ok(VERIFY_WRITE, request, nbytes))
27962 return -EFAULT;
27963 - list = (struct drm_buf_pub *) (request + 1);
27964 + list = (struct drm_buf_pub __user *) (request + 1);
27965
27966 if (__put_user(count, &request->count)
27967 || __put_user(list, &request->list))
27968 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
27969 index 904d7e9..ab88581 100644
27970 --- a/drivers/gpu/drm/drm_ioctl.c
27971 +++ b/drivers/gpu/drm/drm_ioctl.c
27972 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, void *data,
27973 stats->data[i].value =
27974 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27975 else
27976 - stats->data[i].value = atomic_read(&dev->counts[i]);
27977 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27978 stats->data[i].type = dev->types[i];
27979 }
27980
27981 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
27982 index 632ae24..244cf4a 100644
27983 --- a/drivers/gpu/drm/drm_lock.c
27984 +++ b/drivers/gpu/drm/drm_lock.c
27985 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
27986 if (drm_lock_take(&master->lock, lock->context)) {
27987 master->lock.file_priv = file_priv;
27988 master->lock.lock_time = jiffies;
27989 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
27990 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
27991 break; /* Got lock */
27992 }
27993
27994 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
27995 return -EINVAL;
27996 }
27997
27998 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
27999 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28000
28001 if (drm_lock_free(&master->lock, lock->context)) {
28002 /* FIXME: Should really bail out here. */
28003 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
28004 index 8f371e8..9f85d52 100644
28005 --- a/drivers/gpu/drm/i810/i810_dma.c
28006 +++ b/drivers/gpu/drm/i810/i810_dma.c
28007 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
28008 dma->buflist[vertex->idx],
28009 vertex->discard, vertex->used);
28010
28011 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28012 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28013 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28014 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28015 sarea_priv->last_enqueue = dev_priv->counter - 1;
28016 sarea_priv->last_dispatch = (int)hw_status[5];
28017
28018 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
28019 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28020 mc->last_render);
28021
28022 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28023 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28024 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28025 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28026 sarea_priv->last_enqueue = dev_priv->counter - 1;
28027 sarea_priv->last_dispatch = (int)hw_status[5];
28028
28029 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
28030 index c9339f4..f5e1b9d 100644
28031 --- a/drivers/gpu/drm/i810/i810_drv.h
28032 +++ b/drivers/gpu/drm/i810/i810_drv.h
28033 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28034 int page_flipping;
28035
28036 wait_queue_head_t irq_queue;
28037 - atomic_t irq_received;
28038 - atomic_t irq_emitted;
28039 + atomic_unchecked_t irq_received;
28040 + atomic_unchecked_t irq_emitted;
28041
28042 int front_offset;
28043 } drm_i810_private_t;
28044 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
28045 index 3c395a5..02889c2 100644
28046 --- a/drivers/gpu/drm/i915/i915_debugfs.c
28047 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
28048 @@ -497,7 +497,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
28049 I915_READ(GTIMR));
28050 }
28051 seq_printf(m, "Interrupts received: %d\n",
28052 - atomic_read(&dev_priv->irq_received));
28053 + atomic_read_unchecked(&dev_priv->irq_received));
28054 for (i = 0; i < I915_NUM_RINGS; i++) {
28055 if (IS_GEN6(dev) || IS_GEN7(dev)) {
28056 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
28057 @@ -1185,7 +1185,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
28058 return ret;
28059
28060 if (opregion->header)
28061 - seq_write(m, opregion->header, OPREGION_SIZE);
28062 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
28063
28064 mutex_unlock(&dev->struct_mutex);
28065
28066 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
28067 index 8a3942c..1b73bf1 100644
28068 --- a/drivers/gpu/drm/i915/i915_dma.c
28069 +++ b/drivers/gpu/drm/i915/i915_dma.c
28070 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
28071 bool can_switch;
28072
28073 spin_lock(&dev->count_lock);
28074 - can_switch = (dev->open_count == 0);
28075 + can_switch = (local_read(&dev->open_count) == 0);
28076 spin_unlock(&dev->count_lock);
28077 return can_switch;
28078 }
28079 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
28080 index 7916bd9..7c17a0f 100644
28081 --- a/drivers/gpu/drm/i915/i915_drv.h
28082 +++ b/drivers/gpu/drm/i915/i915_drv.h
28083 @@ -222,7 +222,7 @@ struct drm_i915_display_funcs {
28084 /* render clock increase/decrease */
28085 /* display clock increase/decrease */
28086 /* pll clock increase/decrease */
28087 -};
28088 +} __no_const;
28089
28090 struct intel_device_info {
28091 u8 gen;
28092 @@ -305,7 +305,7 @@ typedef struct drm_i915_private {
28093 int current_page;
28094 int page_flipping;
28095
28096 - atomic_t irq_received;
28097 + atomic_unchecked_t irq_received;
28098
28099 /* protects the irq masks */
28100 spinlock_t irq_lock;
28101 @@ -882,7 +882,7 @@ struct drm_i915_gem_object {
28102 * will be page flipped away on the next vblank. When it
28103 * reaches 0, dev_priv->pending_flip_queue will be woken up.
28104 */
28105 - atomic_t pending_flip;
28106 + atomic_unchecked_t pending_flip;
28107 };
28108
28109 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
28110 @@ -1262,7 +1262,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
28111 extern void intel_teardown_gmbus(struct drm_device *dev);
28112 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
28113 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
28114 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28115 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
28116 {
28117 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
28118 }
28119 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28120 index 4934cf8..52e8e83 100644
28121 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28122 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
28123 @@ -188,7 +188,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
28124 i915_gem_clflush_object(obj);
28125
28126 if (obj->base.pending_write_domain)
28127 - cd->flips |= atomic_read(&obj->pending_flip);
28128 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
28129
28130 /* The actual obj->write_domain will be updated with
28131 * pending_write_domain after we emit the accumulated flush for all
28132 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
28133 index 9cbb0cd..958a31f 100644
28134 --- a/drivers/gpu/drm/i915/i915_irq.c
28135 +++ b/drivers/gpu/drm/i915/i915_irq.c
28136 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
28137 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
28138 struct drm_i915_master_private *master_priv;
28139
28140 - atomic_inc(&dev_priv->irq_received);
28141 + atomic_inc_unchecked(&dev_priv->irq_received);
28142
28143 /* disable master interrupt before clearing iir */
28144 de_ier = I915_READ(DEIER);
28145 @@ -565,7 +565,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
28146 struct drm_i915_master_private *master_priv;
28147 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
28148
28149 - atomic_inc(&dev_priv->irq_received);
28150 + atomic_inc_unchecked(&dev_priv->irq_received);
28151
28152 if (IS_GEN6(dev))
28153 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
28154 @@ -1228,7 +1228,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
28155 int ret = IRQ_NONE, pipe;
28156 bool blc_event = false;
28157
28158 - atomic_inc(&dev_priv->irq_received);
28159 + atomic_inc_unchecked(&dev_priv->irq_received);
28160
28161 iir = I915_READ(IIR);
28162
28163 @@ -1740,7 +1740,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
28164 {
28165 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28166
28167 - atomic_set(&dev_priv->irq_received, 0);
28168 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28169
28170 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28171 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28172 @@ -1904,7 +1904,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
28173 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28174 int pipe;
28175
28176 - atomic_set(&dev_priv->irq_received, 0);
28177 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28178
28179 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28180 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28181 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
28182 index e1340a2..24f40c3 100644
28183 --- a/drivers/gpu/drm/i915/intel_display.c
28184 +++ b/drivers/gpu/drm/i915/intel_display.c
28185 @@ -2205,7 +2205,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
28186
28187 wait_event(dev_priv->pending_flip_queue,
28188 atomic_read(&dev_priv->mm.wedged) ||
28189 - atomic_read(&obj->pending_flip) == 0);
28190 + atomic_read_unchecked(&obj->pending_flip) == 0);
28191
28192 /* Big Hammer, we also need to ensure that any pending
28193 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
28194 @@ -2826,7 +2826,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
28195 obj = to_intel_framebuffer(crtc->fb)->obj;
28196 dev_priv = crtc->dev->dev_private;
28197 wait_event(dev_priv->pending_flip_queue,
28198 - atomic_read(&obj->pending_flip) == 0);
28199 + atomic_read_unchecked(&obj->pending_flip) == 0);
28200 }
28201
28202 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
28203 @@ -6676,7 +6676,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
28204
28205 atomic_clear_mask(1 << intel_crtc->plane,
28206 &obj->pending_flip.counter);
28207 - if (atomic_read(&obj->pending_flip) == 0)
28208 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
28209 wake_up(&dev_priv->pending_flip_queue);
28210
28211 schedule_work(&work->work);
28212 @@ -6965,7 +6965,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28213 /* Block clients from rendering to the new back buffer until
28214 * the flip occurs and the object is no longer visible.
28215 */
28216 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28217 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28218
28219 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
28220 if (ret)
28221 @@ -6979,7 +6979,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
28222 return 0;
28223
28224 cleanup_pending:
28225 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28226 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
28227 cleanup_objs:
28228 drm_gem_object_unreference(&work->old_fb_obj->base);
28229 drm_gem_object_unreference(&obj->base);
28230 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
28231 index 54558a0..2d97005 100644
28232 --- a/drivers/gpu/drm/mga/mga_drv.h
28233 +++ b/drivers/gpu/drm/mga/mga_drv.h
28234 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28235 u32 clear_cmd;
28236 u32 maccess;
28237
28238 - atomic_t vbl_received; /**< Number of vblanks received. */
28239 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28240 wait_queue_head_t fence_queue;
28241 - atomic_t last_fence_retired;
28242 + atomic_unchecked_t last_fence_retired;
28243 u32 next_fence_to_post;
28244
28245 unsigned int fb_cpp;
28246 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
28247 index 2581202..f230a8d9 100644
28248 --- a/drivers/gpu/drm/mga/mga_irq.c
28249 +++ b/drivers/gpu/drm/mga/mga_irq.c
28250 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
28251 if (crtc != 0)
28252 return 0;
28253
28254 - return atomic_read(&dev_priv->vbl_received);
28255 + return atomic_read_unchecked(&dev_priv->vbl_received);
28256 }
28257
28258
28259 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28260 /* VBLANK interrupt */
28261 if (status & MGA_VLINEPEN) {
28262 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28263 - atomic_inc(&dev_priv->vbl_received);
28264 + atomic_inc_unchecked(&dev_priv->vbl_received);
28265 drm_handle_vblank(dev, 0);
28266 handled = 1;
28267 }
28268 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
28269 if ((prim_start & ~0x03) != (prim_end & ~0x03))
28270 MGA_WRITE(MGA_PRIMEND, prim_end);
28271
28272 - atomic_inc(&dev_priv->last_fence_retired);
28273 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28274 DRM_WAKEUP(&dev_priv->fence_queue);
28275 handled = 1;
28276 }
28277 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
28278 * using fences.
28279 */
28280 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28281 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28282 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28283 - *sequence) <= (1 << 23)));
28284
28285 *sequence = cur_fence;
28286 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
28287 index b311fab..dc11d6a 100644
28288 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
28289 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
28290 @@ -201,7 +201,7 @@ struct methods {
28291 const char desc[8];
28292 void (*loadbios)(struct drm_device *, uint8_t *);
28293 const bool rw;
28294 -};
28295 +} __do_const;
28296
28297 static struct methods shadow_methods[] = {
28298 { "PRAMIN", load_vbios_pramin, true },
28299 @@ -5489,7 +5489,7 @@ parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
28300 struct bit_table {
28301 const char id;
28302 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
28303 -};
28304 +} __no_const;
28305
28306 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
28307
28308 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
28309 index d7d51de..7c6a7f1 100644
28310 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
28311 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
28312 @@ -238,7 +238,7 @@ struct nouveau_channel {
28313 struct list_head pending;
28314 uint32_t sequence;
28315 uint32_t sequence_ack;
28316 - atomic_t last_sequence_irq;
28317 + atomic_unchecked_t last_sequence_irq;
28318 struct nouveau_vma vma;
28319 } fence;
28320
28321 @@ -319,7 +319,7 @@ struct nouveau_exec_engine {
28322 u32 handle, u16 class);
28323 void (*set_tile_region)(struct drm_device *dev, int i);
28324 void (*tlb_flush)(struct drm_device *, int engine);
28325 -};
28326 +} __no_const;
28327
28328 struct nouveau_instmem_engine {
28329 void *priv;
28330 @@ -341,13 +341,13 @@ struct nouveau_instmem_engine {
28331 struct nouveau_mc_engine {
28332 int (*init)(struct drm_device *dev);
28333 void (*takedown)(struct drm_device *dev);
28334 -};
28335 +} __no_const;
28336
28337 struct nouveau_timer_engine {
28338 int (*init)(struct drm_device *dev);
28339 void (*takedown)(struct drm_device *dev);
28340 uint64_t (*read)(struct drm_device *dev);
28341 -};
28342 +} __no_const;
28343
28344 struct nouveau_fb_engine {
28345 int num_tiles;
28346 @@ -513,7 +513,7 @@ struct nouveau_vram_engine {
28347 void (*put)(struct drm_device *, struct nouveau_mem **);
28348
28349 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
28350 -};
28351 +} __no_const;
28352
28353 struct nouveau_engine {
28354 struct nouveau_instmem_engine instmem;
28355 @@ -660,7 +660,7 @@ struct drm_nouveau_private {
28356 struct drm_global_reference mem_global_ref;
28357 struct ttm_bo_global_ref bo_global_ref;
28358 struct ttm_bo_device bdev;
28359 - atomic_t validate_sequence;
28360 + atomic_unchecked_t validate_sequence;
28361 } ttm;
28362
28363 struct {
28364 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
28365 index ae22dfa..4f09960 100644
28366 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
28367 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
28368 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
28369 if (USE_REFCNT(dev))
28370 sequence = nvchan_rd32(chan, 0x48);
28371 else
28372 - sequence = atomic_read(&chan->fence.last_sequence_irq);
28373 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
28374
28375 if (chan->fence.sequence_ack == sequence)
28376 goto out;
28377 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
28378 return ret;
28379 }
28380
28381 - atomic_set(&chan->fence.last_sequence_irq, 0);
28382 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
28383 return 0;
28384 }
28385
28386 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
28387 index 5f0bc57..eb9fac8 100644
28388 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
28389 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
28390 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
28391 int trycnt = 0;
28392 int ret, i;
28393
28394 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
28395 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
28396 retry:
28397 if (++trycnt > 100000) {
28398 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
28399 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
28400 index 10656e4..59bf2a4 100644
28401 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
28402 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
28403 @@ -496,7 +496,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
28404 bool can_switch;
28405
28406 spin_lock(&dev->count_lock);
28407 - can_switch = (dev->open_count == 0);
28408 + can_switch = (local_read(&dev->open_count) == 0);
28409 spin_unlock(&dev->count_lock);
28410 return can_switch;
28411 }
28412 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
28413 index dbdea8e..cd6eeeb 100644
28414 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
28415 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
28416 @@ -554,7 +554,7 @@ static int
28417 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
28418 u32 class, u32 mthd, u32 data)
28419 {
28420 - atomic_set(&chan->fence.last_sequence_irq, data);
28421 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
28422 return 0;
28423 }
28424
28425 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
28426 index 570e190..084a31a 100644
28427 --- a/drivers/gpu/drm/r128/r128_cce.c
28428 +++ b/drivers/gpu/drm/r128/r128_cce.c
28429 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
28430
28431 /* GH: Simple idle check.
28432 */
28433 - atomic_set(&dev_priv->idle_count, 0);
28434 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28435
28436 /* We don't support anything other than bus-mastering ring mode,
28437 * but the ring can be in either AGP or PCI space for the ring
28438 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
28439 index 930c71b..499aded 100644
28440 --- a/drivers/gpu/drm/r128/r128_drv.h
28441 +++ b/drivers/gpu/drm/r128/r128_drv.h
28442 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28443 int is_pci;
28444 unsigned long cce_buffers_offset;
28445
28446 - atomic_t idle_count;
28447 + atomic_unchecked_t idle_count;
28448
28449 int page_flipping;
28450 int current_page;
28451 u32 crtc_offset;
28452 u32 crtc_offset_cntl;
28453
28454 - atomic_t vbl_received;
28455 + atomic_unchecked_t vbl_received;
28456
28457 u32 color_fmt;
28458 unsigned int front_offset;
28459 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
28460 index 429d5a0..7e899ed 100644
28461 --- a/drivers/gpu/drm/r128/r128_irq.c
28462 +++ b/drivers/gpu/drm/r128/r128_irq.c
28463 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
28464 if (crtc != 0)
28465 return 0;
28466
28467 - return atomic_read(&dev_priv->vbl_received);
28468 + return atomic_read_unchecked(&dev_priv->vbl_received);
28469 }
28470
28471 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28472 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28473 /* VBLANK interrupt */
28474 if (status & R128_CRTC_VBLANK_INT) {
28475 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28476 - atomic_inc(&dev_priv->vbl_received);
28477 + atomic_inc_unchecked(&dev_priv->vbl_received);
28478 drm_handle_vblank(dev, 0);
28479 return IRQ_HANDLED;
28480 }
28481 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
28482 index a9e33ce..09edd4b 100644
28483 --- a/drivers/gpu/drm/r128/r128_state.c
28484 +++ b/drivers/gpu/drm/r128/r128_state.c
28485 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
28486
28487 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
28488 {
28489 - if (atomic_read(&dev_priv->idle_count) == 0)
28490 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
28491 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28492 else
28493 - atomic_set(&dev_priv->idle_count, 0);
28494 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28495 }
28496
28497 #endif
28498 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
28499 index 14cc88a..cc7b3a5 100644
28500 --- a/drivers/gpu/drm/radeon/atom.c
28501 +++ b/drivers/gpu/drm/radeon/atom.c
28502 @@ -1254,6 +1254,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
28503 char name[512];
28504 int i;
28505
28506 + pax_track_stack();
28507 +
28508 if (!ctx)
28509 return NULL;
28510
28511 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
28512 index 5a82b6b..9e69c73 100644
28513 --- a/drivers/gpu/drm/radeon/mkregtable.c
28514 +++ b/drivers/gpu/drm/radeon/mkregtable.c
28515 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
28516 regex_t mask_rex;
28517 regmatch_t match[4];
28518 char buf[1024];
28519 - size_t end;
28520 + long end;
28521 int len;
28522 int done = 0;
28523 int r;
28524 unsigned o;
28525 struct offset *offset;
28526 char last_reg_s[10];
28527 - int last_reg;
28528 + unsigned long last_reg;
28529
28530 if (regcomp
28531 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28532 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
28533 index 184628c..30e1725 100644
28534 --- a/drivers/gpu/drm/radeon/radeon.h
28535 +++ b/drivers/gpu/drm/radeon/radeon.h
28536 @@ -192,7 +192,7 @@ extern int sumo_get_temp(struct radeon_device *rdev);
28537 */
28538 struct radeon_fence_driver {
28539 uint32_t scratch_reg;
28540 - atomic_t seq;
28541 + atomic_unchecked_t seq;
28542 uint32_t last_seq;
28543 unsigned long last_jiffies;
28544 unsigned long last_timeout;
28545 @@ -962,7 +962,7 @@ struct radeon_asic {
28546 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
28547 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
28548 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
28549 -};
28550 +} __no_const;
28551
28552 /*
28553 * Asic structures
28554 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
28555 index bf2b615..c821ec8 100644
28556 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
28557 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
28558 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
28559 struct radeon_gpio_rec gpio;
28560 struct radeon_hpd hpd;
28561
28562 + pax_track_stack();
28563 +
28564 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
28565 return false;
28566
28567 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
28568 index b51e157..8f14fb9 100644
28569 --- a/drivers/gpu/drm/radeon/radeon_device.c
28570 +++ b/drivers/gpu/drm/radeon/radeon_device.c
28571 @@ -684,7 +684,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
28572 bool can_switch;
28573
28574 spin_lock(&dev->count_lock);
28575 - can_switch = (dev->open_count == 0);
28576 + can_switch = (local_read(&dev->open_count) == 0);
28577 spin_unlock(&dev->count_lock);
28578 return can_switch;
28579 }
28580 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
28581 index 6adb3e5..b91553e2 100644
28582 --- a/drivers/gpu/drm/radeon/radeon_display.c
28583 +++ b/drivers/gpu/drm/radeon/radeon_display.c
28584 @@ -925,6 +925,8 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
28585 uint32_t post_div;
28586 u32 pll_out_min, pll_out_max;
28587
28588 + pax_track_stack();
28589 +
28590 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
28591 freq = freq * 1000;
28592
28593 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
28594 index a1b59ca..86f2d44 100644
28595 --- a/drivers/gpu/drm/radeon/radeon_drv.h
28596 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
28597 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
28598
28599 /* SW interrupt */
28600 wait_queue_head_t swi_queue;
28601 - atomic_t swi_emitted;
28602 + atomic_unchecked_t swi_emitted;
28603 int vblank_crtc;
28604 uint32_t irq_enable_reg;
28605 uint32_t r500_disp_irq_reg;
28606 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
28607 index 7fd4e3e..9748ab5 100644
28608 --- a/drivers/gpu/drm/radeon/radeon_fence.c
28609 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
28610 @@ -78,7 +78,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
28611 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28612 return 0;
28613 }
28614 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28615 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28616 if (!rdev->cp.ready)
28617 /* FIXME: cp is not running assume everythings is done right
28618 * away
28619 @@ -373,7 +373,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
28620 return r;
28621 }
28622 radeon_fence_write(rdev, 0);
28623 - atomic_set(&rdev->fence_drv.seq, 0);
28624 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28625 INIT_LIST_HEAD(&rdev->fence_drv.created);
28626 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28627 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28628 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
28629 index 48b7cea..342236f 100644
28630 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
28631 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
28632 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
28633 request = compat_alloc_user_space(sizeof(*request));
28634 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28635 || __put_user(req32.param, &request->param)
28636 - || __put_user((void __user *)(unsigned long)req32.value,
28637 + || __put_user((unsigned long)req32.value,
28638 &request->value))
28639 return -EFAULT;
28640
28641 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
28642 index 465746b..cb2b055 100644
28643 --- a/drivers/gpu/drm/radeon/radeon_irq.c
28644 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
28645 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
28646 unsigned int ret;
28647 RING_LOCALS;
28648
28649 - atomic_inc(&dev_priv->swi_emitted);
28650 - ret = atomic_read(&dev_priv->swi_emitted);
28651 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28652 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28653
28654 BEGIN_RING(4);
28655 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28656 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
28657 drm_radeon_private_t *dev_priv =
28658 (drm_radeon_private_t *) dev->dev_private;
28659
28660 - atomic_set(&dev_priv->swi_emitted, 0);
28661 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28662 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28663
28664 dev->max_vblank_count = 0x001fffff;
28665 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
28666 index 92e7ea7..147ffad 100644
28667 --- a/drivers/gpu/drm/radeon/radeon_state.c
28668 +++ b/drivers/gpu/drm/radeon/radeon_state.c
28669 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
28670 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
28671 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
28672
28673 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28674 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
28675 sarea_priv->nbox * sizeof(depth_boxes[0])))
28676 return -EFAULT;
28677
28678 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
28679 {
28680 drm_radeon_private_t *dev_priv = dev->dev_private;
28681 drm_radeon_getparam_t *param = data;
28682 - int value;
28683 + int value = 0;
28684
28685 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28686
28687 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
28688 index 0b5468b..9c4b308 100644
28689 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
28690 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
28691 @@ -672,8 +672,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28692 }
28693 if (unlikely(ttm_vm_ops == NULL)) {
28694 ttm_vm_ops = vma->vm_ops;
28695 - radeon_ttm_vm_ops = *ttm_vm_ops;
28696 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28697 + pax_open_kernel();
28698 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
28699 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28700 + pax_close_kernel();
28701 }
28702 vma->vm_ops = &radeon_ttm_vm_ops;
28703 return 0;
28704 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
28705 index a9049ed..501f284 100644
28706 --- a/drivers/gpu/drm/radeon/rs690.c
28707 +++ b/drivers/gpu/drm/radeon/rs690.c
28708 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
28709 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28710 rdev->pm.sideport_bandwidth.full)
28711 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28712 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
28713 + read_delay_latency.full = dfixed_const(800 * 1000);
28714 read_delay_latency.full = dfixed_div(read_delay_latency,
28715 rdev->pm.igp_sideport_mclk);
28716 + a.full = dfixed_const(370);
28717 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
28718 } else {
28719 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28720 rdev->pm.k8_bandwidth.full)
28721 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28722 index 727e93d..1565650 100644
28723 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
28724 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
28725 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
28726 static int ttm_pool_mm_shrink(struct shrinker *shrink,
28727 struct shrink_control *sc)
28728 {
28729 - static atomic_t start_pool = ATOMIC_INIT(0);
28730 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
28731 unsigned i;
28732 - unsigned pool_offset = atomic_add_return(1, &start_pool);
28733 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
28734 struct ttm_page_pool *pool;
28735 int shrink_pages = sc->nr_to_scan;
28736
28737 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
28738 index 9cf87d9..2000b7d 100644
28739 --- a/drivers/gpu/drm/via/via_drv.h
28740 +++ b/drivers/gpu/drm/via/via_drv.h
28741 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28742 typedef uint32_t maskarray_t[5];
28743
28744 typedef struct drm_via_irq {
28745 - atomic_t irq_received;
28746 + atomic_unchecked_t irq_received;
28747 uint32_t pending_mask;
28748 uint32_t enable_mask;
28749 wait_queue_head_t irq_queue;
28750 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28751 struct timeval last_vblank;
28752 int last_vblank_valid;
28753 unsigned usec_per_vblank;
28754 - atomic_t vbl_received;
28755 + atomic_unchecked_t vbl_received;
28756 drm_via_state_t hc_state;
28757 char pci_buf[VIA_PCI_BUF_SIZE];
28758 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28759 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
28760 index d391f48..10c8ca3 100644
28761 --- a/drivers/gpu/drm/via/via_irq.c
28762 +++ b/drivers/gpu/drm/via/via_irq.c
28763 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
28764 if (crtc != 0)
28765 return 0;
28766
28767 - return atomic_read(&dev_priv->vbl_received);
28768 + return atomic_read_unchecked(&dev_priv->vbl_received);
28769 }
28770
28771 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28772 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28773
28774 status = VIA_READ(VIA_REG_INTERRUPT);
28775 if (status & VIA_IRQ_VBLANK_PENDING) {
28776 - atomic_inc(&dev_priv->vbl_received);
28777 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28778 + atomic_inc_unchecked(&dev_priv->vbl_received);
28779 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28780 do_gettimeofday(&cur_vblank);
28781 if (dev_priv->last_vblank_valid) {
28782 dev_priv->usec_per_vblank =
28783 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28784 dev_priv->last_vblank = cur_vblank;
28785 dev_priv->last_vblank_valid = 1;
28786 }
28787 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28788 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28789 DRM_DEBUG("US per vblank is: %u\n",
28790 dev_priv->usec_per_vblank);
28791 }
28792 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28793
28794 for (i = 0; i < dev_priv->num_irqs; ++i) {
28795 if (status & cur_irq->pending_mask) {
28796 - atomic_inc(&cur_irq->irq_received);
28797 + atomic_inc_unchecked(&cur_irq->irq_received);
28798 DRM_WAKEUP(&cur_irq->irq_queue);
28799 handled = 1;
28800 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
28801 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
28802 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28803 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28804 masks[irq][4]));
28805 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28806 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28807 } else {
28808 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28809 (((cur_irq_sequence =
28810 - atomic_read(&cur_irq->irq_received)) -
28811 + atomic_read_unchecked(&cur_irq->irq_received)) -
28812 *sequence) <= (1 << 23)));
28813 }
28814 *sequence = cur_irq_sequence;
28815 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
28816 }
28817
28818 for (i = 0; i < dev_priv->num_irqs; ++i) {
28819 - atomic_set(&cur_irq->irq_received, 0);
28820 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28821 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28822 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28823 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28824 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
28825 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28826 case VIA_IRQ_RELATIVE:
28827 irqwait->request.sequence +=
28828 - atomic_read(&cur_irq->irq_received);
28829 + atomic_read_unchecked(&cur_irq->irq_received);
28830 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28831 case VIA_IRQ_ABSOLUTE:
28832 break;
28833 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28834 index 10fc01f..b4e9822 100644
28835 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28836 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
28837 @@ -240,7 +240,7 @@ struct vmw_private {
28838 * Fencing and IRQs.
28839 */
28840
28841 - atomic_t fence_seq;
28842 + atomic_unchecked_t fence_seq;
28843 wait_queue_head_t fence_queue;
28844 wait_queue_head_t fifo_queue;
28845 atomic_t fence_queue_waiters;
28846 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28847 index 41b95ed..69ea504 100644
28848 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28849 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
28850 @@ -610,7 +610,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
28851 struct drm_vmw_fence_rep fence_rep;
28852 struct drm_vmw_fence_rep __user *user_fence_rep;
28853 int ret;
28854 - void *user_cmd;
28855 + void __user *user_cmd;
28856 void *cmd;
28857 uint32_t sequence;
28858 struct vmw_sw_context *sw_context = &dev_priv->ctx;
28859 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
28860 index 61eacc1..ee38ce8 100644
28861 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
28862 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
28863 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
28864 while (!vmw_lag_lt(queue, us)) {
28865 spin_lock(&queue->lock);
28866 if (list_empty(&queue->head))
28867 - sequence = atomic_read(&dev_priv->fence_seq);
28868 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
28869 else {
28870 fence = list_first_entry(&queue->head,
28871 struct vmw_fence, head);
28872 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28873 index 635c0ff..2641bbb 100644
28874 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28875 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
28876 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
28877 (unsigned int) min,
28878 (unsigned int) fifo->capabilities);
28879
28880 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
28881 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
28882 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
28883 vmw_fence_queue_init(&fifo->fence_queue);
28884 return vmw_fifo_send_fence(dev_priv, &dummy);
28885 @@ -356,7 +356,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
28886 if (reserveable)
28887 iowrite32(bytes, fifo_mem +
28888 SVGA_FIFO_RESERVED);
28889 - return fifo_mem + (next_cmd >> 2);
28890 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
28891 } else {
28892 need_bounce = true;
28893 }
28894 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
28895
28896 fm = vmw_fifo_reserve(dev_priv, bytes);
28897 if (unlikely(fm == NULL)) {
28898 - *sequence = atomic_read(&dev_priv->fence_seq);
28899 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
28900 ret = -ENOMEM;
28901 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
28902 false, 3*HZ);
28903 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
28904 }
28905
28906 do {
28907 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
28908 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
28909 } while (*sequence == 0);
28910
28911 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
28912 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28913 index e92298a..f68f2d6 100644
28914 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28915 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
28916 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
28917 * emitted. Then the fence is stale and signaled.
28918 */
28919
28920 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
28921 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
28922 > VMW_FENCE_WRAP);
28923
28924 return ret;
28925 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
28926
28927 if (fifo_idle)
28928 down_read(&fifo_state->rwsem);
28929 - signal_seq = atomic_read(&dev_priv->fence_seq);
28930 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
28931 ret = 0;
28932
28933 for (;;) {
28934 diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
28935 index c72f1c0..18376f1 100644
28936 --- a/drivers/gpu/vga/vgaarb.c
28937 +++ b/drivers/gpu/vga/vgaarb.c
28938 @@ -993,14 +993,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
28939 uc = &priv->cards[i];
28940 }
28941
28942 - if (!uc)
28943 - return -EINVAL;
28944 + if (!uc) {
28945 + ret_val = -EINVAL;
28946 + goto done;
28947 + }
28948
28949 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
28950 - return -EINVAL;
28951 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
28952 + ret_val = -EINVAL;
28953 + goto done;
28954 + }
28955
28956 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
28957 - return -EINVAL;
28958 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
28959 + ret_val = -EINVAL;
28960 + goto done;
28961 + }
28962
28963 vga_put(pdev, io_state);
28964
28965 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
28966 index 5be9f47..aa81d42 100644
28967 --- a/drivers/hid/hid-core.c
28968 +++ b/drivers/hid/hid-core.c
28969 @@ -1951,7 +1951,7 @@ static bool hid_ignore(struct hid_device *hdev)
28970
28971 int hid_add_device(struct hid_device *hdev)
28972 {
28973 - static atomic_t id = ATOMIC_INIT(0);
28974 + static atomic_unchecked_t id = ATOMIC_INIT(0);
28975 int ret;
28976
28977 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28978 @@ -1966,7 +1966,7 @@ int hid_add_device(struct hid_device *hdev)
28979 /* XXX hack, any other cleaner solution after the driver core
28980 * is converted to allow more than 20 bytes as the device name? */
28981 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28982 - hdev->vendor, hdev->product, atomic_inc_return(&id));
28983 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28984
28985 hid_debug_register(hdev, dev_name(&hdev->dev));
28986 ret = device_add(&hdev->dev);
28987 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
28988 index 7c1188b..5a64357 100644
28989 --- a/drivers/hid/usbhid/hiddev.c
28990 +++ b/drivers/hid/usbhid/hiddev.c
28991 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
28992 break;
28993
28994 case HIDIOCAPPLICATION:
28995 - if (arg < 0 || arg >= hid->maxapplication)
28996 + if (arg >= hid->maxapplication)
28997 break;
28998
28999 for (i = 0; i < hid->maxcollection; i++)
29000 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
29001 index 66f6729..2d6de0a 100644
29002 --- a/drivers/hwmon/acpi_power_meter.c
29003 +++ b/drivers/hwmon/acpi_power_meter.c
29004 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
29005 return res;
29006
29007 temp /= 1000;
29008 - if (temp < 0)
29009 - return -EINVAL;
29010
29011 mutex_lock(&resource->lock);
29012 resource->trip[attr->index - 7] = temp;
29013 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
29014 index fe4104c..346febb 100644
29015 --- a/drivers/hwmon/sht15.c
29016 +++ b/drivers/hwmon/sht15.c
29017 @@ -166,7 +166,7 @@ struct sht15_data {
29018 int supply_uV;
29019 bool supply_uV_valid;
29020 struct work_struct update_supply_work;
29021 - atomic_t interrupt_handled;
29022 + atomic_unchecked_t interrupt_handled;
29023 };
29024
29025 /**
29026 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
29027 return ret;
29028
29029 gpio_direction_input(data->pdata->gpio_data);
29030 - atomic_set(&data->interrupt_handled, 0);
29031 + atomic_set_unchecked(&data->interrupt_handled, 0);
29032
29033 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29034 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29035 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29036 /* Only relevant if the interrupt hasn't occurred. */
29037 - if (!atomic_read(&data->interrupt_handled))
29038 + if (!atomic_read_unchecked(&data->interrupt_handled))
29039 schedule_work(&data->read_work);
29040 }
29041 ret = wait_event_timeout(data->wait_queue,
29042 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
29043
29044 /* First disable the interrupt */
29045 disable_irq_nosync(irq);
29046 - atomic_inc(&data->interrupt_handled);
29047 + atomic_inc_unchecked(&data->interrupt_handled);
29048 /* Then schedule a reading work struct */
29049 if (data->state != SHT15_READING_NOTHING)
29050 schedule_work(&data->read_work);
29051 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
29052 * If not, then start the interrupt again - care here as could
29053 * have gone low in meantime so verify it hasn't!
29054 */
29055 - atomic_set(&data->interrupt_handled, 0);
29056 + atomic_set_unchecked(&data->interrupt_handled, 0);
29057 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29058 /* If still not occurred or another handler has been scheduled */
29059 if (gpio_get_value(data->pdata->gpio_data)
29060 - || atomic_read(&data->interrupt_handled))
29061 + || atomic_read_unchecked(&data->interrupt_handled))
29062 return;
29063 }
29064
29065 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
29066 index 378fcb5..5e91fa8 100644
29067 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
29068 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
29069 @@ -43,7 +43,7 @@
29070 extern struct i2c_adapter amd756_smbus;
29071
29072 static struct i2c_adapter *s4882_adapter;
29073 -static struct i2c_algorithm *s4882_algo;
29074 +static i2c_algorithm_no_const *s4882_algo;
29075
29076 /* Wrapper access functions for multiplexed SMBus */
29077 static DEFINE_MUTEX(amd756_lock);
29078 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
29079 index 29015eb..af2d8e9 100644
29080 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
29081 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
29082 @@ -41,7 +41,7 @@
29083 extern struct i2c_adapter *nforce2_smbus;
29084
29085 static struct i2c_adapter *s4985_adapter;
29086 -static struct i2c_algorithm *s4985_algo;
29087 +static i2c_algorithm_no_const *s4985_algo;
29088
29089 /* Wrapper access functions for multiplexed SMBus */
29090 static DEFINE_MUTEX(nforce2_lock);
29091 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
29092 index d7a4833..7fae376 100644
29093 --- a/drivers/i2c/i2c-mux.c
29094 +++ b/drivers/i2c/i2c-mux.c
29095 @@ -28,7 +28,7 @@
29096 /* multiplexer per channel data */
29097 struct i2c_mux_priv {
29098 struct i2c_adapter adap;
29099 - struct i2c_algorithm algo;
29100 + i2c_algorithm_no_const algo;
29101
29102 struct i2c_adapter *parent;
29103 void *mux_dev; /* the mux chip/device */
29104 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
29105 index 57d00ca..0145194 100644
29106 --- a/drivers/ide/aec62xx.c
29107 +++ b/drivers/ide/aec62xx.c
29108 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
29109 .cable_detect = atp86x_cable_detect,
29110 };
29111
29112 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
29113 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
29114 { /* 0: AEC6210 */
29115 .name = DRV_NAME,
29116 .init_chipset = init_chipset_aec62xx,
29117 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
29118 index 2c8016a..911a27c 100644
29119 --- a/drivers/ide/alim15x3.c
29120 +++ b/drivers/ide/alim15x3.c
29121 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
29122 .dma_sff_read_status = ide_dma_sff_read_status,
29123 };
29124
29125 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
29126 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
29127 .name = DRV_NAME,
29128 .init_chipset = init_chipset_ali15x3,
29129 .init_hwif = init_hwif_ali15x3,
29130 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
29131 index 3747b25..56fc995 100644
29132 --- a/drivers/ide/amd74xx.c
29133 +++ b/drivers/ide/amd74xx.c
29134 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
29135 .udma_mask = udma, \
29136 }
29137
29138 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
29139 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
29140 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
29141 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
29142 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
29143 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
29144 index 15f0ead..cb43480 100644
29145 --- a/drivers/ide/atiixp.c
29146 +++ b/drivers/ide/atiixp.c
29147 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
29148 .cable_detect = atiixp_cable_detect,
29149 };
29150
29151 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
29152 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
29153 { /* 0: IXP200/300/400/700 */
29154 .name = DRV_NAME,
29155 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
29156 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
29157 index 5f80312..d1fc438 100644
29158 --- a/drivers/ide/cmd64x.c
29159 +++ b/drivers/ide/cmd64x.c
29160 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
29161 .dma_sff_read_status = ide_dma_sff_read_status,
29162 };
29163
29164 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
29165 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
29166 { /* 0: CMD643 */
29167 .name = DRV_NAME,
29168 .init_chipset = init_chipset_cmd64x,
29169 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
29170 index 2c1e5f7..1444762 100644
29171 --- a/drivers/ide/cs5520.c
29172 +++ b/drivers/ide/cs5520.c
29173 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
29174 .set_dma_mode = cs5520_set_dma_mode,
29175 };
29176
29177 -static const struct ide_port_info cyrix_chipset __devinitdata = {
29178 +static const struct ide_port_info cyrix_chipset __devinitconst = {
29179 .name = DRV_NAME,
29180 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
29181 .port_ops = &cs5520_port_ops,
29182 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
29183 index 4dc4eb9..49b40ad 100644
29184 --- a/drivers/ide/cs5530.c
29185 +++ b/drivers/ide/cs5530.c
29186 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
29187 .udma_filter = cs5530_udma_filter,
29188 };
29189
29190 -static const struct ide_port_info cs5530_chipset __devinitdata = {
29191 +static const struct ide_port_info cs5530_chipset __devinitconst = {
29192 .name = DRV_NAME,
29193 .init_chipset = init_chipset_cs5530,
29194 .init_hwif = init_hwif_cs5530,
29195 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
29196 index 5059faf..18d4c85 100644
29197 --- a/drivers/ide/cs5535.c
29198 +++ b/drivers/ide/cs5535.c
29199 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
29200 .cable_detect = cs5535_cable_detect,
29201 };
29202
29203 -static const struct ide_port_info cs5535_chipset __devinitdata = {
29204 +static const struct ide_port_info cs5535_chipset __devinitconst = {
29205 .name = DRV_NAME,
29206 .port_ops = &cs5535_port_ops,
29207 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
29208 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
29209 index 67cbcfa..37ea151 100644
29210 --- a/drivers/ide/cy82c693.c
29211 +++ b/drivers/ide/cy82c693.c
29212 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
29213 .set_dma_mode = cy82c693_set_dma_mode,
29214 };
29215
29216 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
29217 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
29218 .name = DRV_NAME,
29219 .init_iops = init_iops_cy82c693,
29220 .port_ops = &cy82c693_port_ops,
29221 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
29222 index 58c51cd..4aec3b8 100644
29223 --- a/drivers/ide/hpt366.c
29224 +++ b/drivers/ide/hpt366.c
29225 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
29226 }
29227 };
29228
29229 -static const struct hpt_info hpt36x __devinitdata = {
29230 +static const struct hpt_info hpt36x __devinitconst = {
29231 .chip_name = "HPT36x",
29232 .chip_type = HPT36x,
29233 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
29234 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
29235 .timings = &hpt36x_timings
29236 };
29237
29238 -static const struct hpt_info hpt370 __devinitdata = {
29239 +static const struct hpt_info hpt370 __devinitconst = {
29240 .chip_name = "HPT370",
29241 .chip_type = HPT370,
29242 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29243 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
29244 .timings = &hpt37x_timings
29245 };
29246
29247 -static const struct hpt_info hpt370a __devinitdata = {
29248 +static const struct hpt_info hpt370a __devinitconst = {
29249 .chip_name = "HPT370A",
29250 .chip_type = HPT370A,
29251 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
29252 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
29253 .timings = &hpt37x_timings
29254 };
29255
29256 -static const struct hpt_info hpt374 __devinitdata = {
29257 +static const struct hpt_info hpt374 __devinitconst = {
29258 .chip_name = "HPT374",
29259 .chip_type = HPT374,
29260 .udma_mask = ATA_UDMA5,
29261 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
29262 .timings = &hpt37x_timings
29263 };
29264
29265 -static const struct hpt_info hpt372 __devinitdata = {
29266 +static const struct hpt_info hpt372 __devinitconst = {
29267 .chip_name = "HPT372",
29268 .chip_type = HPT372,
29269 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29270 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
29271 .timings = &hpt37x_timings
29272 };
29273
29274 -static const struct hpt_info hpt372a __devinitdata = {
29275 +static const struct hpt_info hpt372a __devinitconst = {
29276 .chip_name = "HPT372A",
29277 .chip_type = HPT372A,
29278 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29279 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
29280 .timings = &hpt37x_timings
29281 };
29282
29283 -static const struct hpt_info hpt302 __devinitdata = {
29284 +static const struct hpt_info hpt302 __devinitconst = {
29285 .chip_name = "HPT302",
29286 .chip_type = HPT302,
29287 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29288 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
29289 .timings = &hpt37x_timings
29290 };
29291
29292 -static const struct hpt_info hpt371 __devinitdata = {
29293 +static const struct hpt_info hpt371 __devinitconst = {
29294 .chip_name = "HPT371",
29295 .chip_type = HPT371,
29296 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29297 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
29298 .timings = &hpt37x_timings
29299 };
29300
29301 -static const struct hpt_info hpt372n __devinitdata = {
29302 +static const struct hpt_info hpt372n __devinitconst = {
29303 .chip_name = "HPT372N",
29304 .chip_type = HPT372N,
29305 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29306 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
29307 .timings = &hpt37x_timings
29308 };
29309
29310 -static const struct hpt_info hpt302n __devinitdata = {
29311 +static const struct hpt_info hpt302n __devinitconst = {
29312 .chip_name = "HPT302N",
29313 .chip_type = HPT302N,
29314 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29315 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
29316 .timings = &hpt37x_timings
29317 };
29318
29319 -static const struct hpt_info hpt371n __devinitdata = {
29320 +static const struct hpt_info hpt371n __devinitconst = {
29321 .chip_name = "HPT371N",
29322 .chip_type = HPT371N,
29323 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
29324 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
29325 .dma_sff_read_status = ide_dma_sff_read_status,
29326 };
29327
29328 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
29329 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
29330 { /* 0: HPT36x */
29331 .name = DRV_NAME,
29332 .init_chipset = init_chipset_hpt366,
29333 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
29334 index 04b0956..f5b47dc 100644
29335 --- a/drivers/ide/ide-cd.c
29336 +++ b/drivers/ide/ide-cd.c
29337 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
29338 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29339 if ((unsigned long)buf & alignment
29340 || blk_rq_bytes(rq) & q->dma_pad_mask
29341 - || object_is_on_stack(buf))
29342 + || object_starts_on_stack(buf))
29343 drive->dma = 0;
29344 }
29345 }
29346 diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
29347 index 61fdf54..2834ea6 100644
29348 --- a/drivers/ide/ide-floppy.c
29349 +++ b/drivers/ide/ide-floppy.c
29350 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
29351 u8 pc_buf[256], header_len, desc_cnt;
29352 int i, rc = 1, blocks, length;
29353
29354 + pax_track_stack();
29355 +
29356 ide_debug_log(IDE_DBG_FUNC, "enter");
29357
29358 drive->bios_cyl = 0;
29359 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
29360 index a743e68..1cfd674 100644
29361 --- a/drivers/ide/ide-pci-generic.c
29362 +++ b/drivers/ide/ide-pci-generic.c
29363 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
29364 .udma_mask = ATA_UDMA6, \
29365 }
29366
29367 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
29368 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
29369 /* 0: Unknown */
29370 DECLARE_GENERIC_PCI_DEV(0),
29371
29372 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
29373 index 560e66d..d5dd180 100644
29374 --- a/drivers/ide/it8172.c
29375 +++ b/drivers/ide/it8172.c
29376 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
29377 .set_dma_mode = it8172_set_dma_mode,
29378 };
29379
29380 -static const struct ide_port_info it8172_port_info __devinitdata = {
29381 +static const struct ide_port_info it8172_port_info __devinitconst = {
29382 .name = DRV_NAME,
29383 .port_ops = &it8172_port_ops,
29384 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
29385 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
29386 index 46816ba..1847aeb 100644
29387 --- a/drivers/ide/it8213.c
29388 +++ b/drivers/ide/it8213.c
29389 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
29390 .cable_detect = it8213_cable_detect,
29391 };
29392
29393 -static const struct ide_port_info it8213_chipset __devinitdata = {
29394 +static const struct ide_port_info it8213_chipset __devinitconst = {
29395 .name = DRV_NAME,
29396 .enablebits = { {0x41, 0x80, 0x80} },
29397 .port_ops = &it8213_port_ops,
29398 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
29399 index 2e3169f..c5611db 100644
29400 --- a/drivers/ide/it821x.c
29401 +++ b/drivers/ide/it821x.c
29402 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
29403 .cable_detect = it821x_cable_detect,
29404 };
29405
29406 -static const struct ide_port_info it821x_chipset __devinitdata = {
29407 +static const struct ide_port_info it821x_chipset __devinitconst = {
29408 .name = DRV_NAME,
29409 .init_chipset = init_chipset_it821x,
29410 .init_hwif = init_hwif_it821x,
29411 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
29412 index 74c2c4a..efddd7d 100644
29413 --- a/drivers/ide/jmicron.c
29414 +++ b/drivers/ide/jmicron.c
29415 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
29416 .cable_detect = jmicron_cable_detect,
29417 };
29418
29419 -static const struct ide_port_info jmicron_chipset __devinitdata = {
29420 +static const struct ide_port_info jmicron_chipset __devinitconst = {
29421 .name = DRV_NAME,
29422 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
29423 .port_ops = &jmicron_port_ops,
29424 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
29425 index 95327a2..73f78d8 100644
29426 --- a/drivers/ide/ns87415.c
29427 +++ b/drivers/ide/ns87415.c
29428 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
29429 .dma_sff_read_status = superio_dma_sff_read_status,
29430 };
29431
29432 -static const struct ide_port_info ns87415_chipset __devinitdata = {
29433 +static const struct ide_port_info ns87415_chipset __devinitconst = {
29434 .name = DRV_NAME,
29435 .init_hwif = init_hwif_ns87415,
29436 .tp_ops = &ns87415_tp_ops,
29437 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
29438 index 1a53a4c..39edc66 100644
29439 --- a/drivers/ide/opti621.c
29440 +++ b/drivers/ide/opti621.c
29441 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
29442 .set_pio_mode = opti621_set_pio_mode,
29443 };
29444
29445 -static const struct ide_port_info opti621_chipset __devinitdata = {
29446 +static const struct ide_port_info opti621_chipset __devinitconst = {
29447 .name = DRV_NAME,
29448 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
29449 .port_ops = &opti621_port_ops,
29450 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
29451 index 9546fe2..2e5ceb6 100644
29452 --- a/drivers/ide/pdc202xx_new.c
29453 +++ b/drivers/ide/pdc202xx_new.c
29454 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
29455 .udma_mask = udma, \
29456 }
29457
29458 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
29459 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
29460 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
29461 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
29462 };
29463 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
29464 index 3a35ec6..5634510 100644
29465 --- a/drivers/ide/pdc202xx_old.c
29466 +++ b/drivers/ide/pdc202xx_old.c
29467 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
29468 .max_sectors = sectors, \
29469 }
29470
29471 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
29472 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
29473 { /* 0: PDC20246 */
29474 .name = DRV_NAME,
29475 .init_chipset = init_chipset_pdc202xx,
29476 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
29477 index b59d04c..368c2a7 100644
29478 --- a/drivers/ide/piix.c
29479 +++ b/drivers/ide/piix.c
29480 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
29481 .udma_mask = udma, \
29482 }
29483
29484 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
29485 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
29486 /* 0: MPIIX */
29487 { /*
29488 * MPIIX actually has only a single IDE channel mapped to
29489 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
29490 index a6414a8..c04173e 100644
29491 --- a/drivers/ide/rz1000.c
29492 +++ b/drivers/ide/rz1000.c
29493 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
29494 }
29495 }
29496
29497 -static const struct ide_port_info rz1000_chipset __devinitdata = {
29498 +static const struct ide_port_info rz1000_chipset __devinitconst = {
29499 .name = DRV_NAME,
29500 .host_flags = IDE_HFLAG_NO_DMA,
29501 };
29502 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
29503 index 356b9b5..d4758eb 100644
29504 --- a/drivers/ide/sc1200.c
29505 +++ b/drivers/ide/sc1200.c
29506 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
29507 .dma_sff_read_status = ide_dma_sff_read_status,
29508 };
29509
29510 -static const struct ide_port_info sc1200_chipset __devinitdata = {
29511 +static const struct ide_port_info sc1200_chipset __devinitconst = {
29512 .name = DRV_NAME,
29513 .port_ops = &sc1200_port_ops,
29514 .dma_ops = &sc1200_dma_ops,
29515 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
29516 index b7f5b0c..9701038 100644
29517 --- a/drivers/ide/scc_pata.c
29518 +++ b/drivers/ide/scc_pata.c
29519 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
29520 .dma_sff_read_status = scc_dma_sff_read_status,
29521 };
29522
29523 -static const struct ide_port_info scc_chipset __devinitdata = {
29524 +static const struct ide_port_info scc_chipset __devinitconst = {
29525 .name = "sccIDE",
29526 .init_iops = init_iops_scc,
29527 .init_dma = scc_init_dma,
29528 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
29529 index 35fb8da..24d72ef 100644
29530 --- a/drivers/ide/serverworks.c
29531 +++ b/drivers/ide/serverworks.c
29532 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
29533 .cable_detect = svwks_cable_detect,
29534 };
29535
29536 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
29537 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
29538 { /* 0: OSB4 */
29539 .name = DRV_NAME,
29540 .init_chipset = init_chipset_svwks,
29541 diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
29542 index ab3db61..afed580 100644
29543 --- a/drivers/ide/setup-pci.c
29544 +++ b/drivers/ide/setup-pci.c
29545 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
29546 int ret, i, n_ports = dev2 ? 4 : 2;
29547 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29548
29549 + pax_track_stack();
29550 +
29551 for (i = 0; i < n_ports / 2; i++) {
29552 ret = ide_setup_pci_controller(pdev[i], d, !i);
29553 if (ret < 0)
29554 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
29555 index ddeda44..46f7e30 100644
29556 --- a/drivers/ide/siimage.c
29557 +++ b/drivers/ide/siimage.c
29558 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
29559 .udma_mask = ATA_UDMA6, \
29560 }
29561
29562 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
29563 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
29564 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
29565 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
29566 };
29567 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
29568 index 4a00225..09e61b4 100644
29569 --- a/drivers/ide/sis5513.c
29570 +++ b/drivers/ide/sis5513.c
29571 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
29572 .cable_detect = sis_cable_detect,
29573 };
29574
29575 -static const struct ide_port_info sis5513_chipset __devinitdata = {
29576 +static const struct ide_port_info sis5513_chipset __devinitconst = {
29577 .name = DRV_NAME,
29578 .init_chipset = init_chipset_sis5513,
29579 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
29580 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
29581 index f21dc2a..d051cd2 100644
29582 --- a/drivers/ide/sl82c105.c
29583 +++ b/drivers/ide/sl82c105.c
29584 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
29585 .dma_sff_read_status = ide_dma_sff_read_status,
29586 };
29587
29588 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
29589 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
29590 .name = DRV_NAME,
29591 .init_chipset = init_chipset_sl82c105,
29592 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
29593 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
29594 index 864ffe0..863a5e9 100644
29595 --- a/drivers/ide/slc90e66.c
29596 +++ b/drivers/ide/slc90e66.c
29597 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
29598 .cable_detect = slc90e66_cable_detect,
29599 };
29600
29601 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
29602 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
29603 .name = DRV_NAME,
29604 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
29605 .port_ops = &slc90e66_port_ops,
29606 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
29607 index e444d24..ba577de 100644
29608 --- a/drivers/ide/tc86c001.c
29609 +++ b/drivers/ide/tc86c001.c
29610 @@ -191,7 +191,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
29611 .dma_sff_read_status = ide_dma_sff_read_status,
29612 };
29613
29614 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
29615 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
29616 .name = DRV_NAME,
29617 .init_hwif = init_hwif_tc86c001,
29618 .port_ops = &tc86c001_port_ops,
29619 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
29620 index e53a1b7..d11aff7 100644
29621 --- a/drivers/ide/triflex.c
29622 +++ b/drivers/ide/triflex.c
29623 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
29624 .set_dma_mode = triflex_set_mode,
29625 };
29626
29627 -static const struct ide_port_info triflex_device __devinitdata = {
29628 +static const struct ide_port_info triflex_device __devinitconst = {
29629 .name = DRV_NAME,
29630 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
29631 .port_ops = &triflex_port_ops,
29632 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
29633 index 4b42ca0..e494a98 100644
29634 --- a/drivers/ide/trm290.c
29635 +++ b/drivers/ide/trm290.c
29636 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
29637 .dma_check = trm290_dma_check,
29638 };
29639
29640 -static const struct ide_port_info trm290_chipset __devinitdata = {
29641 +static const struct ide_port_info trm290_chipset __devinitconst = {
29642 .name = DRV_NAME,
29643 .init_hwif = init_hwif_trm290,
29644 .tp_ops = &trm290_tp_ops,
29645 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
29646 index f46f49c..eb77678 100644
29647 --- a/drivers/ide/via82cxxx.c
29648 +++ b/drivers/ide/via82cxxx.c
29649 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
29650 .cable_detect = via82cxxx_cable_detect,
29651 };
29652
29653 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
29654 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
29655 .name = DRV_NAME,
29656 .init_chipset = init_chipset_via82cxxx,
29657 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
29658 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
29659 index fc0f2bd..ac2f8a5 100644
29660 --- a/drivers/infiniband/core/cm.c
29661 +++ b/drivers/infiniband/core/cm.c
29662 @@ -113,7 +113,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
29663
29664 struct cm_counter_group {
29665 struct kobject obj;
29666 - atomic_long_t counter[CM_ATTR_COUNT];
29667 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29668 };
29669
29670 struct cm_counter_attribute {
29671 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm_work *work,
29672 struct ib_mad_send_buf *msg = NULL;
29673 int ret;
29674
29675 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29676 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29677 counter[CM_REQ_COUNTER]);
29678
29679 /* Quick state check to discard duplicate REQs. */
29680 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
29681 if (!cm_id_priv)
29682 return;
29683
29684 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29685 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29686 counter[CM_REP_COUNTER]);
29687 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29688 if (ret)
29689 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work *work)
29690 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29691 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29692 spin_unlock_irq(&cm_id_priv->lock);
29693 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29694 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29695 counter[CM_RTU_COUNTER]);
29696 goto out;
29697 }
29698 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_work *work)
29699 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29700 dreq_msg->local_comm_id);
29701 if (!cm_id_priv) {
29702 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29703 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29704 counter[CM_DREQ_COUNTER]);
29705 cm_issue_drep(work->port, work->mad_recv_wc);
29706 return -EINVAL;
29707 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_work *work)
29708 case IB_CM_MRA_REP_RCVD:
29709 break;
29710 case IB_CM_TIMEWAIT:
29711 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29712 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29713 counter[CM_DREQ_COUNTER]);
29714 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29715 goto unlock;
29716 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
29717 cm_free_msg(msg);
29718 goto deref;
29719 case IB_CM_DREQ_RCVD:
29720 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29721 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29722 counter[CM_DREQ_COUNTER]);
29723 goto unlock;
29724 default:
29725 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work *work)
29726 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29727 cm_id_priv->msg, timeout)) {
29728 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29729 - atomic_long_inc(&work->port->
29730 + atomic_long_inc_unchecked(&work->port->
29731 counter_group[CM_RECV_DUPLICATES].
29732 counter[CM_MRA_COUNTER]);
29733 goto out;
29734 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work *work)
29735 break;
29736 case IB_CM_MRA_REQ_RCVD:
29737 case IB_CM_MRA_REP_RCVD:
29738 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29739 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29740 counter[CM_MRA_COUNTER]);
29741 /* fall through */
29742 default:
29743 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work *work)
29744 case IB_CM_LAP_IDLE:
29745 break;
29746 case IB_CM_MRA_LAP_SENT:
29747 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29748 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29749 counter[CM_LAP_COUNTER]);
29750 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29751 goto unlock;
29752 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work *work)
29753 cm_free_msg(msg);
29754 goto deref;
29755 case IB_CM_LAP_RCVD:
29756 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29757 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29758 counter[CM_LAP_COUNTER]);
29759 goto unlock;
29760 default:
29761 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
29762 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29763 if (cur_cm_id_priv) {
29764 spin_unlock_irq(&cm.lock);
29765 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29766 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29767 counter[CM_SIDR_REQ_COUNTER]);
29768 goto out; /* Duplicate message. */
29769 }
29770 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
29771 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29772 msg->retries = 1;
29773
29774 - atomic_long_add(1 + msg->retries,
29775 + atomic_long_add_unchecked(1 + msg->retries,
29776 &port->counter_group[CM_XMIT].counter[attr_index]);
29777 if (msg->retries)
29778 - atomic_long_add(msg->retries,
29779 + atomic_long_add_unchecked(msg->retries,
29780 &port->counter_group[CM_XMIT_RETRIES].
29781 counter[attr_index]);
29782
29783 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
29784 }
29785
29786 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29787 - atomic_long_inc(&port->counter_group[CM_RECV].
29788 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29789 counter[attr_id - CM_ATTR_ID_OFFSET]);
29790
29791 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29792 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
29793 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29794
29795 return sprintf(buf, "%ld\n",
29796 - atomic_long_read(&group->counter[cm_attr->index]));
29797 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29798 }
29799
29800 static const struct sysfs_ops cm_counter_ops = {
29801 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
29802 index 4507043..14ad522 100644
29803 --- a/drivers/infiniband/core/fmr_pool.c
29804 +++ b/drivers/infiniband/core/fmr_pool.c
29805 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29806
29807 struct task_struct *thread;
29808
29809 - atomic_t req_ser;
29810 - atomic_t flush_ser;
29811 + atomic_unchecked_t req_ser;
29812 + atomic_unchecked_t flush_ser;
29813
29814 wait_queue_head_t force_wait;
29815 };
29816 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29817 struct ib_fmr_pool *pool = pool_ptr;
29818
29819 do {
29820 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29821 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29822 ib_fmr_batch_release(pool);
29823
29824 - atomic_inc(&pool->flush_ser);
29825 + atomic_inc_unchecked(&pool->flush_ser);
29826 wake_up_interruptible(&pool->force_wait);
29827
29828 if (pool->flush_function)
29829 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
29830 }
29831
29832 set_current_state(TASK_INTERRUPTIBLE);
29833 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29834 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29835 !kthread_should_stop())
29836 schedule();
29837 __set_current_state(TASK_RUNNING);
29838 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
29839 pool->dirty_watermark = params->dirty_watermark;
29840 pool->dirty_len = 0;
29841 spin_lock_init(&pool->pool_lock);
29842 - atomic_set(&pool->req_ser, 0);
29843 - atomic_set(&pool->flush_ser, 0);
29844 + atomic_set_unchecked(&pool->req_ser, 0);
29845 + atomic_set_unchecked(&pool->flush_ser, 0);
29846 init_waitqueue_head(&pool->force_wait);
29847
29848 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29849 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
29850 }
29851 spin_unlock_irq(&pool->pool_lock);
29852
29853 - serial = atomic_inc_return(&pool->req_ser);
29854 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29855 wake_up_process(pool->thread);
29856
29857 if (wait_event_interruptible(pool->force_wait,
29858 - atomic_read(&pool->flush_ser) - serial >= 0))
29859 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29860 return -EINTR;
29861
29862 return 0;
29863 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
29864 } else {
29865 list_add_tail(&fmr->list, &pool->dirty_list);
29866 if (++pool->dirty_len >= pool->dirty_watermark) {
29867 - atomic_inc(&pool->req_ser);
29868 + atomic_inc_unchecked(&pool->req_ser);
29869 wake_up_process(pool->thread);
29870 }
29871 }
29872 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
29873 index 40c8353..946b0e4 100644
29874 --- a/drivers/infiniband/hw/cxgb4/mem.c
29875 +++ b/drivers/infiniband/hw/cxgb4/mem.c
29876 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29877 int err;
29878 struct fw_ri_tpte tpt;
29879 u32 stag_idx;
29880 - static atomic_t key;
29881 + static atomic_unchecked_t key;
29882
29883 if (c4iw_fatal_error(rdev))
29884 return -EIO;
29885 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
29886 &rdev->resource.tpt_fifo_lock);
29887 if (!stag_idx)
29888 return -ENOMEM;
29889 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
29890 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
29891 }
29892 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
29893 __func__, stag_state, type, pdid, stag_idx);
29894 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
29895 index 31ae1b1..2f5b038 100644
29896 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
29897 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
29898 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
29899 struct infinipath_counters counters;
29900 struct ipath_devdata *dd;
29901
29902 + pax_track_stack();
29903 +
29904 dd = file->f_path.dentry->d_inode->i_private;
29905 dd->ipath_f_read_counters(dd, &counters);
29906
29907 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
29908 index 79b3dbc..96e5fcc 100644
29909 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
29910 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
29911 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29912 struct ib_atomic_eth *ateth;
29913 struct ipath_ack_entry *e;
29914 u64 vaddr;
29915 - atomic64_t *maddr;
29916 + atomic64_unchecked_t *maddr;
29917 u64 sdata;
29918 u32 rkey;
29919 u8 next;
29920 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
29921 IB_ACCESS_REMOTE_ATOMIC)))
29922 goto nack_acc_unlck;
29923 /* Perform atomic OP and save result. */
29924 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29925 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29926 sdata = be64_to_cpu(ateth->swap_data);
29927 e = &qp->s_ack_queue[qp->r_head_ack_queue];
29928 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
29929 - (u64) atomic64_add_return(sdata, maddr) - sdata :
29930 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29931 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29932 be64_to_cpu(ateth->compare_data),
29933 sdata);
29934 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
29935 index 1f95bba..9530f87 100644
29936 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
29937 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
29938 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
29939 unsigned long flags;
29940 struct ib_wc wc;
29941 u64 sdata;
29942 - atomic64_t *maddr;
29943 + atomic64_unchecked_t *maddr;
29944 enum ib_wc_status send_status;
29945
29946 /*
29947 @@ -382,11 +382,11 @@ again:
29948 IB_ACCESS_REMOTE_ATOMIC)))
29949 goto acc_err;
29950 /* Perform atomic OP and save result. */
29951 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
29952 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
29953 sdata = wqe->wr.wr.atomic.compare_add;
29954 *(u64 *) sqp->s_sge.sge.vaddr =
29955 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
29956 - (u64) atomic64_add_return(sdata, maddr) - sdata :
29957 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
29958 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
29959 sdata, wqe->wr.wr.atomic.swap);
29960 goto send_comp;
29961 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
29962 index 2d668c6..3312bb7 100644
29963 --- a/drivers/infiniband/hw/nes/nes.c
29964 +++ b/drivers/infiniband/hw/nes/nes.c
29965 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
29966 LIST_HEAD(nes_adapter_list);
29967 static LIST_HEAD(nes_dev_list);
29968
29969 -atomic_t qps_destroyed;
29970 +atomic_unchecked_t qps_destroyed;
29971
29972 static unsigned int ee_flsh_adapter;
29973 static unsigned int sysfs_nonidx_addr;
29974 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
29975 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
29976 struct nes_adapter *nesadapter = nesdev->nesadapter;
29977
29978 - atomic_inc(&qps_destroyed);
29979 + atomic_inc_unchecked(&qps_destroyed);
29980
29981 /* Free the control structures */
29982
29983 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
29984 index 6fe7987..68637b5 100644
29985 --- a/drivers/infiniband/hw/nes/nes.h
29986 +++ b/drivers/infiniband/hw/nes/nes.h
29987 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
29988 extern unsigned int wqm_quanta;
29989 extern struct list_head nes_adapter_list;
29990
29991 -extern atomic_t cm_connects;
29992 -extern atomic_t cm_accepts;
29993 -extern atomic_t cm_disconnects;
29994 -extern atomic_t cm_closes;
29995 -extern atomic_t cm_connecteds;
29996 -extern atomic_t cm_connect_reqs;
29997 -extern atomic_t cm_rejects;
29998 -extern atomic_t mod_qp_timouts;
29999 -extern atomic_t qps_created;
30000 -extern atomic_t qps_destroyed;
30001 -extern atomic_t sw_qps_destroyed;
30002 +extern atomic_unchecked_t cm_connects;
30003 +extern atomic_unchecked_t cm_accepts;
30004 +extern atomic_unchecked_t cm_disconnects;
30005 +extern atomic_unchecked_t cm_closes;
30006 +extern atomic_unchecked_t cm_connecteds;
30007 +extern atomic_unchecked_t cm_connect_reqs;
30008 +extern atomic_unchecked_t cm_rejects;
30009 +extern atomic_unchecked_t mod_qp_timouts;
30010 +extern atomic_unchecked_t qps_created;
30011 +extern atomic_unchecked_t qps_destroyed;
30012 +extern atomic_unchecked_t sw_qps_destroyed;
30013 extern u32 mh_detected;
30014 extern u32 mh_pauses_sent;
30015 extern u32 cm_packets_sent;
30016 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
30017 extern u32 cm_packets_received;
30018 extern u32 cm_packets_dropped;
30019 extern u32 cm_packets_retrans;
30020 -extern atomic_t cm_listens_created;
30021 -extern atomic_t cm_listens_destroyed;
30022 +extern atomic_unchecked_t cm_listens_created;
30023 +extern atomic_unchecked_t cm_listens_destroyed;
30024 extern u32 cm_backlog_drops;
30025 -extern atomic_t cm_loopbacks;
30026 -extern atomic_t cm_nodes_created;
30027 -extern atomic_t cm_nodes_destroyed;
30028 -extern atomic_t cm_accel_dropped_pkts;
30029 -extern atomic_t cm_resets_recvd;
30030 +extern atomic_unchecked_t cm_loopbacks;
30031 +extern atomic_unchecked_t cm_nodes_created;
30032 +extern atomic_unchecked_t cm_nodes_destroyed;
30033 +extern atomic_unchecked_t cm_accel_dropped_pkts;
30034 +extern atomic_unchecked_t cm_resets_recvd;
30035
30036 extern u32 int_mod_timer_init;
30037 extern u32 int_mod_cq_depth_256;
30038 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
30039 index c118663..049a3ab 100644
30040 --- a/drivers/infiniband/hw/nes/nes_cm.c
30041 +++ b/drivers/infiniband/hw/nes/nes_cm.c
30042 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
30043 u32 cm_packets_retrans;
30044 u32 cm_packets_created;
30045 u32 cm_packets_received;
30046 -atomic_t cm_listens_created;
30047 -atomic_t cm_listens_destroyed;
30048 +atomic_unchecked_t cm_listens_created;
30049 +atomic_unchecked_t cm_listens_destroyed;
30050 u32 cm_backlog_drops;
30051 -atomic_t cm_loopbacks;
30052 -atomic_t cm_nodes_created;
30053 -atomic_t cm_nodes_destroyed;
30054 -atomic_t cm_accel_dropped_pkts;
30055 -atomic_t cm_resets_recvd;
30056 +atomic_unchecked_t cm_loopbacks;
30057 +atomic_unchecked_t cm_nodes_created;
30058 +atomic_unchecked_t cm_nodes_destroyed;
30059 +atomic_unchecked_t cm_accel_dropped_pkts;
30060 +atomic_unchecked_t cm_resets_recvd;
30061
30062 static inline int mini_cm_accelerated(struct nes_cm_core *,
30063 struct nes_cm_node *);
30064 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
30065
30066 static struct nes_cm_core *g_cm_core;
30067
30068 -atomic_t cm_connects;
30069 -atomic_t cm_accepts;
30070 -atomic_t cm_disconnects;
30071 -atomic_t cm_closes;
30072 -atomic_t cm_connecteds;
30073 -atomic_t cm_connect_reqs;
30074 -atomic_t cm_rejects;
30075 +atomic_unchecked_t cm_connects;
30076 +atomic_unchecked_t cm_accepts;
30077 +atomic_unchecked_t cm_disconnects;
30078 +atomic_unchecked_t cm_closes;
30079 +atomic_unchecked_t cm_connecteds;
30080 +atomic_unchecked_t cm_connect_reqs;
30081 +atomic_unchecked_t cm_rejects;
30082
30083
30084 /**
30085 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
30086 kfree(listener);
30087 listener = NULL;
30088 ret = 0;
30089 - atomic_inc(&cm_listens_destroyed);
30090 + atomic_inc_unchecked(&cm_listens_destroyed);
30091 } else {
30092 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
30093 }
30094 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
30095 cm_node->rem_mac);
30096
30097 add_hte_node(cm_core, cm_node);
30098 - atomic_inc(&cm_nodes_created);
30099 + atomic_inc_unchecked(&cm_nodes_created);
30100
30101 return cm_node;
30102 }
30103 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
30104 }
30105
30106 atomic_dec(&cm_core->node_cnt);
30107 - atomic_inc(&cm_nodes_destroyed);
30108 + atomic_inc_unchecked(&cm_nodes_destroyed);
30109 nesqp = cm_node->nesqp;
30110 if (nesqp) {
30111 nesqp->cm_node = NULL;
30112 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
30113
30114 static void drop_packet(struct sk_buff *skb)
30115 {
30116 - atomic_inc(&cm_accel_dropped_pkts);
30117 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30118 dev_kfree_skb_any(skb);
30119 }
30120
30121 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
30122 {
30123
30124 int reset = 0; /* whether to send reset in case of err.. */
30125 - atomic_inc(&cm_resets_recvd);
30126 + atomic_inc_unchecked(&cm_resets_recvd);
30127 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30128 " refcnt=%d\n", cm_node, cm_node->state,
30129 atomic_read(&cm_node->ref_count));
30130 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
30131 rem_ref_cm_node(cm_node->cm_core, cm_node);
30132 return NULL;
30133 }
30134 - atomic_inc(&cm_loopbacks);
30135 + atomic_inc_unchecked(&cm_loopbacks);
30136 loopbackremotenode->loopbackpartner = cm_node;
30137 loopbackremotenode->tcp_cntxt.rcv_wscale =
30138 NES_CM_DEFAULT_RCV_WND_SCALE;
30139 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
30140 add_ref_cm_node(cm_node);
30141 } else if (cm_node->state == NES_CM_STATE_TSA) {
30142 rem_ref_cm_node(cm_core, cm_node);
30143 - atomic_inc(&cm_accel_dropped_pkts);
30144 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
30145 dev_kfree_skb_any(skb);
30146 break;
30147 }
30148 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30149
30150 if ((cm_id) && (cm_id->event_handler)) {
30151 if (issue_disconn) {
30152 - atomic_inc(&cm_disconnects);
30153 + atomic_inc_unchecked(&cm_disconnects);
30154 cm_event.event = IW_CM_EVENT_DISCONNECT;
30155 cm_event.status = disconn_status;
30156 cm_event.local_addr = cm_id->local_addr;
30157 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
30158 }
30159
30160 if (issue_close) {
30161 - atomic_inc(&cm_closes);
30162 + atomic_inc_unchecked(&cm_closes);
30163 nes_disconnect(nesqp, 1);
30164
30165 cm_id->provider_data = nesqp;
30166 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30167
30168 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30169 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30170 - atomic_inc(&cm_accepts);
30171 + atomic_inc_unchecked(&cm_accepts);
30172
30173 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30174 netdev_refcnt_read(nesvnic->netdev));
30175 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
30176
30177 struct nes_cm_core *cm_core;
30178
30179 - atomic_inc(&cm_rejects);
30180 + atomic_inc_unchecked(&cm_rejects);
30181 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30182 loopback = cm_node->loopbackpartner;
30183 cm_core = cm_node->cm_core;
30184 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
30185 ntohl(cm_id->local_addr.sin_addr.s_addr),
30186 ntohs(cm_id->local_addr.sin_port));
30187
30188 - atomic_inc(&cm_connects);
30189 + atomic_inc_unchecked(&cm_connects);
30190 nesqp->active_conn = 1;
30191
30192 /* cache the cm_id in the qp */
30193 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
30194 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
30195 return err;
30196 }
30197 - atomic_inc(&cm_listens_created);
30198 + atomic_inc_unchecked(&cm_listens_created);
30199 }
30200
30201 cm_id->add_ref(cm_id);
30202 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct nes_cm_event *event)
30203 if (nesqp->destroyed) {
30204 return;
30205 }
30206 - atomic_inc(&cm_connecteds);
30207 + atomic_inc_unchecked(&cm_connecteds);
30208 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30209 " local port 0x%04X. jiffies = %lu.\n",
30210 nesqp->hwqp.qp_id,
30211 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm_event *event)
30212
30213 cm_id->add_ref(cm_id);
30214 ret = cm_id->event_handler(cm_id, &cm_event);
30215 - atomic_inc(&cm_closes);
30216 + atomic_inc_unchecked(&cm_closes);
30217 cm_event.event = IW_CM_EVENT_CLOSE;
30218 cm_event.status = 0;
30219 cm_event.provider_data = cm_id->provider_data;
30220 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
30221 return;
30222 cm_id = cm_node->cm_id;
30223
30224 - atomic_inc(&cm_connect_reqs);
30225 + atomic_inc_unchecked(&cm_connect_reqs);
30226 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30227 cm_node, cm_id, jiffies);
30228
30229 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
30230 return;
30231 cm_id = cm_node->cm_id;
30232
30233 - atomic_inc(&cm_connect_reqs);
30234 + atomic_inc_unchecked(&cm_connect_reqs);
30235 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30236 cm_node, cm_id, jiffies);
30237
30238 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
30239 index 9d7ffeb..a95dd7d 100644
30240 --- a/drivers/infiniband/hw/nes/nes_nic.c
30241 +++ b/drivers/infiniband/hw/nes/nes_nic.c
30242 @@ -1274,31 +1274,31 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
30243 target_stat_values[++index] = mh_detected;
30244 target_stat_values[++index] = mh_pauses_sent;
30245 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30246 - target_stat_values[++index] = atomic_read(&cm_connects);
30247 - target_stat_values[++index] = atomic_read(&cm_accepts);
30248 - target_stat_values[++index] = atomic_read(&cm_disconnects);
30249 - target_stat_values[++index] = atomic_read(&cm_connecteds);
30250 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30251 - target_stat_values[++index] = atomic_read(&cm_rejects);
30252 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30253 - target_stat_values[++index] = atomic_read(&qps_created);
30254 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30255 - target_stat_values[++index] = atomic_read(&qps_destroyed);
30256 - target_stat_values[++index] = atomic_read(&cm_closes);
30257 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30258 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30259 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30260 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30261 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30262 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30263 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30264 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30265 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30266 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30267 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30268 target_stat_values[++index] = cm_packets_sent;
30269 target_stat_values[++index] = cm_packets_bounced;
30270 target_stat_values[++index] = cm_packets_created;
30271 target_stat_values[++index] = cm_packets_received;
30272 target_stat_values[++index] = cm_packets_dropped;
30273 target_stat_values[++index] = cm_packets_retrans;
30274 - target_stat_values[++index] = atomic_read(&cm_listens_created);
30275 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
30276 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
30277 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
30278 target_stat_values[++index] = cm_backlog_drops;
30279 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
30280 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
30281 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30282 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30283 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30284 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30285 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30286 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30287 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30288 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30289 target_stat_values[++index] = nesadapter->free_4kpbl;
30290 target_stat_values[++index] = nesadapter->free_256pbl;
30291 target_stat_values[++index] = int_mod_timer_init;
30292 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
30293 index 9f2f7d4..6d2fee2 100644
30294 --- a/drivers/infiniband/hw/nes/nes_verbs.c
30295 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
30296 @@ -46,9 +46,9 @@
30297
30298 #include <rdma/ib_umem.h>
30299
30300 -atomic_t mod_qp_timouts;
30301 -atomic_t qps_created;
30302 -atomic_t sw_qps_destroyed;
30303 +atomic_unchecked_t mod_qp_timouts;
30304 +atomic_unchecked_t qps_created;
30305 +atomic_unchecked_t sw_qps_destroyed;
30306
30307 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30308
30309 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
30310 if (init_attr->create_flags)
30311 return ERR_PTR(-EINVAL);
30312
30313 - atomic_inc(&qps_created);
30314 + atomic_inc_unchecked(&qps_created);
30315 switch (init_attr->qp_type) {
30316 case IB_QPT_RC:
30317 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30318 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
30319 struct iw_cm_event cm_event;
30320 int ret;
30321
30322 - atomic_inc(&sw_qps_destroyed);
30323 + atomic_inc_unchecked(&sw_qps_destroyed);
30324 nesqp->destroyed = 1;
30325
30326 /* Blow away the connection if it exists. */
30327 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
30328 index c9624ea..e025b66 100644
30329 --- a/drivers/infiniband/hw/qib/qib.h
30330 +++ b/drivers/infiniband/hw/qib/qib.h
30331 @@ -51,6 +51,7 @@
30332 #include <linux/completion.h>
30333 #include <linux/kref.h>
30334 #include <linux/sched.h>
30335 +#include <linux/slab.h>
30336
30337 #include "qib_common.h"
30338 #include "qib_verbs.h"
30339 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
30340 index c351aa4..e6967c2 100644
30341 --- a/drivers/input/gameport/gameport.c
30342 +++ b/drivers/input/gameport/gameport.c
30343 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
30344 */
30345 static void gameport_init_port(struct gameport *gameport)
30346 {
30347 - static atomic_t gameport_no = ATOMIC_INIT(0);
30348 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30349
30350 __module_get(THIS_MODULE);
30351
30352 mutex_init(&gameport->drv_mutex);
30353 device_initialize(&gameport->dev);
30354 dev_set_name(&gameport->dev, "gameport%lu",
30355 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
30356 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30357 gameport->dev.bus = &gameport_bus;
30358 gameport->dev.release = gameport_release_port;
30359 if (gameport->parent)
30360 diff --git a/drivers/input/input.c b/drivers/input/input.c
30361 index da38d97..2aa0b79 100644
30362 --- a/drivers/input/input.c
30363 +++ b/drivers/input/input.c
30364 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
30365 */
30366 int input_register_device(struct input_dev *dev)
30367 {
30368 - static atomic_t input_no = ATOMIC_INIT(0);
30369 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30370 struct input_handler *handler;
30371 const char *path;
30372 int error;
30373 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
30374 dev->setkeycode = input_default_setkeycode;
30375
30376 dev_set_name(&dev->dev, "input%ld",
30377 - (unsigned long) atomic_inc_return(&input_no) - 1);
30378 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30379
30380 error = device_add(&dev->dev);
30381 if (error)
30382 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
30383 index b8d8611..15f8d2c 100644
30384 --- a/drivers/input/joystick/sidewinder.c
30385 +++ b/drivers/input/joystick/sidewinder.c
30386 @@ -30,6 +30,7 @@
30387 #include <linux/kernel.h>
30388 #include <linux/module.h>
30389 #include <linux/slab.h>
30390 +#include <linux/sched.h>
30391 #include <linux/init.h>
30392 #include <linux/input.h>
30393 #include <linux/gameport.h>
30394 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30395 unsigned char buf[SW_LENGTH];
30396 int i;
30397
30398 + pax_track_stack();
30399 +
30400 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30401
30402 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30403 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
30404 index d728875..844c89b 100644
30405 --- a/drivers/input/joystick/xpad.c
30406 +++ b/drivers/input/joystick/xpad.c
30407 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
30408
30409 static int xpad_led_probe(struct usb_xpad *xpad)
30410 {
30411 - static atomic_t led_seq = ATOMIC_INIT(0);
30412 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30413 long led_no;
30414 struct xpad_led *led;
30415 struct led_classdev *led_cdev;
30416 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
30417 if (!led)
30418 return -ENOMEM;
30419
30420 - led_no = (long)atomic_inc_return(&led_seq) - 1;
30421 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30422
30423 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30424 led->xpad = xpad;
30425 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
30426 index 0110b5a..d3ad144 100644
30427 --- a/drivers/input/mousedev.c
30428 +++ b/drivers/input/mousedev.c
30429 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
30430
30431 spin_unlock_irq(&client->packet_lock);
30432
30433 - if (copy_to_user(buffer, data, count))
30434 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
30435 return -EFAULT;
30436
30437 return count;
30438 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
30439 index ba70058..571d25d 100644
30440 --- a/drivers/input/serio/serio.c
30441 +++ b/drivers/input/serio/serio.c
30442 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
30443 */
30444 static void serio_init_port(struct serio *serio)
30445 {
30446 - static atomic_t serio_no = ATOMIC_INIT(0);
30447 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30448
30449 __module_get(THIS_MODULE);
30450
30451 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
30452 mutex_init(&serio->drv_mutex);
30453 device_initialize(&serio->dev);
30454 dev_set_name(&serio->dev, "serio%ld",
30455 - (long)atomic_inc_return(&serio_no) - 1);
30456 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
30457 serio->dev.bus = &serio_bus;
30458 serio->dev.release = serio_release_port;
30459 serio->dev.groups = serio_device_attr_groups;
30460 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
30461 index e44933d..9ba484a 100644
30462 --- a/drivers/isdn/capi/capi.c
30463 +++ b/drivers/isdn/capi/capi.c
30464 @@ -83,8 +83,8 @@ struct capiminor {
30465
30466 struct capi20_appl *ap;
30467 u32 ncci;
30468 - atomic_t datahandle;
30469 - atomic_t msgid;
30470 + atomic_unchecked_t datahandle;
30471 + atomic_unchecked_t msgid;
30472
30473 struct tty_port port;
30474 int ttyinstop;
30475 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
30476 capimsg_setu16(s, 2, mp->ap->applid);
30477 capimsg_setu8 (s, 4, CAPI_DATA_B3);
30478 capimsg_setu8 (s, 5, CAPI_RESP);
30479 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
30480 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
30481 capimsg_setu32(s, 8, mp->ncci);
30482 capimsg_setu16(s, 12, datahandle);
30483 }
30484 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
30485 mp->outbytes -= len;
30486 spin_unlock_bh(&mp->outlock);
30487
30488 - datahandle = atomic_inc_return(&mp->datahandle);
30489 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
30490 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
30491 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30492 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
30493 capimsg_setu16(skb->data, 2, mp->ap->applid);
30494 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
30495 capimsg_setu8 (skb->data, 5, CAPI_REQ);
30496 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
30497 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
30498 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
30499 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
30500 capimsg_setu16(skb->data, 16, len); /* Data length */
30501 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
30502 index db621db..825ea1a 100644
30503 --- a/drivers/isdn/gigaset/common.c
30504 +++ b/drivers/isdn/gigaset/common.c
30505 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
30506 cs->commands_pending = 0;
30507 cs->cur_at_seq = 0;
30508 cs->gotfwver = -1;
30509 - cs->open_count = 0;
30510 + local_set(&cs->open_count, 0);
30511 cs->dev = NULL;
30512 cs->tty = NULL;
30513 cs->tty_dev = NULL;
30514 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
30515 index 212efaf..f187c6b 100644
30516 --- a/drivers/isdn/gigaset/gigaset.h
30517 +++ b/drivers/isdn/gigaset/gigaset.h
30518 @@ -35,6 +35,7 @@
30519 #include <linux/tty_driver.h>
30520 #include <linux/list.h>
30521 #include <linux/atomic.h>
30522 +#include <asm/local.h>
30523
30524 #define GIG_VERSION {0, 5, 0, 0}
30525 #define GIG_COMPAT {0, 4, 0, 0}
30526 @@ -433,7 +434,7 @@ struct cardstate {
30527 spinlock_t cmdlock;
30528 unsigned curlen, cmdbytes;
30529
30530 - unsigned open_count;
30531 + local_t open_count;
30532 struct tty_struct *tty;
30533 struct tasklet_struct if_wake_tasklet;
30534 unsigned control_state;
30535 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
30536 index e35058b..5898a8b 100644
30537 --- a/drivers/isdn/gigaset/interface.c
30538 +++ b/drivers/isdn/gigaset/interface.c
30539 @@ -162,9 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
30540 }
30541 tty->driver_data = cs;
30542
30543 - ++cs->open_count;
30544 -
30545 - if (cs->open_count == 1) {
30546 + if (local_inc_return(&cs->open_count) == 1) {
30547 spin_lock_irqsave(&cs->lock, flags);
30548 cs->tty = tty;
30549 spin_unlock_irqrestore(&cs->lock, flags);
30550 @@ -192,10 +190,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
30551
30552 if (!cs->connected)
30553 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30554 - else if (!cs->open_count)
30555 + else if (!local_read(&cs->open_count))
30556 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30557 else {
30558 - if (!--cs->open_count) {
30559 + if (!local_dec_return(&cs->open_count)) {
30560 spin_lock_irqsave(&cs->lock, flags);
30561 cs->tty = NULL;
30562 spin_unlock_irqrestore(&cs->lock, flags);
30563 @@ -230,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty,
30564 if (!cs->connected) {
30565 gig_dbg(DEBUG_IF, "not connected");
30566 retval = -ENODEV;
30567 - } else if (!cs->open_count)
30568 + } else if (!local_read(&cs->open_count))
30569 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30570 else {
30571 retval = 0;
30572 @@ -360,7 +358,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
30573 retval = -ENODEV;
30574 goto done;
30575 }
30576 - if (!cs->open_count) {
30577 + if (!local_read(&cs->open_count)) {
30578 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30579 retval = -ENODEV;
30580 goto done;
30581 @@ -413,7 +411,7 @@ static int if_write_room(struct tty_struct *tty)
30582 if (!cs->connected) {
30583 gig_dbg(DEBUG_IF, "not connected");
30584 retval = -ENODEV;
30585 - } else if (!cs->open_count)
30586 + } else if (!local_read(&cs->open_count))
30587 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30588 else if (cs->mstate != MS_LOCKED) {
30589 dev_warn(cs->dev, "can't write to unlocked device\n");
30590 @@ -443,7 +441,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
30591
30592 if (!cs->connected)
30593 gig_dbg(DEBUG_IF, "not connected");
30594 - else if (!cs->open_count)
30595 + else if (!local_read(&cs->open_count))
30596 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30597 else if (cs->mstate != MS_LOCKED)
30598 dev_warn(cs->dev, "can't write to unlocked device\n");
30599 @@ -471,7 +469,7 @@ static void if_throttle(struct tty_struct *tty)
30600
30601 if (!cs->connected)
30602 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30603 - else if (!cs->open_count)
30604 + else if (!local_read(&cs->open_count))
30605 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30606 else
30607 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30608 @@ -495,7 +493,7 @@ static void if_unthrottle(struct tty_struct *tty)
30609
30610 if (!cs->connected)
30611 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30612 - else if (!cs->open_count)
30613 + else if (!local_read(&cs->open_count))
30614 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30615 else
30616 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
30617 @@ -526,7 +524,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
30618 goto out;
30619 }
30620
30621 - if (!cs->open_count) {
30622 + if (!local_read(&cs->open_count)) {
30623 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30624 goto out;
30625 }
30626 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
30627 index 2a57da59..e7a12ed 100644
30628 --- a/drivers/isdn/hardware/avm/b1.c
30629 +++ b/drivers/isdn/hardware/avm/b1.c
30630 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
30631 }
30632 if (left) {
30633 if (t4file->user) {
30634 - if (copy_from_user(buf, dp, left))
30635 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30636 return -EFAULT;
30637 } else {
30638 memcpy(buf, dp, left);
30639 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
30640 }
30641 if (left) {
30642 if (config->user) {
30643 - if (copy_from_user(buf, dp, left))
30644 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30645 return -EFAULT;
30646 } else {
30647 memcpy(buf, dp, left);
30648 diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
30649 index f130724..c373c68 100644
30650 --- a/drivers/isdn/hardware/eicon/capidtmf.c
30651 +++ b/drivers/isdn/hardware/eicon/capidtmf.c
30652 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
30653 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30654 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30655
30656 + pax_track_stack();
30657
30658 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30659 {
30660 diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
30661 index 4d425c6..a9be6c4 100644
30662 --- a/drivers/isdn/hardware/eicon/capifunc.c
30663 +++ b/drivers/isdn/hardware/eicon/capifunc.c
30664 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30665 IDI_SYNC_REQ req;
30666 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30667
30668 + pax_track_stack();
30669 +
30670 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30671
30672 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30673 diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
30674 index 3029234..ef0d9e2 100644
30675 --- a/drivers/isdn/hardware/eicon/diddfunc.c
30676 +++ b/drivers/isdn/hardware/eicon/diddfunc.c
30677 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30678 IDI_SYNC_REQ req;
30679 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30680
30681 + pax_track_stack();
30682 +
30683 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30684
30685 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30686 diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
30687 index 0bbee78..a0d0a01 100644
30688 --- a/drivers/isdn/hardware/eicon/divasfunc.c
30689 +++ b/drivers/isdn/hardware/eicon/divasfunc.c
30690 @@ -160,6 +160,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30691 IDI_SYNC_REQ req;
30692 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30693
30694 + pax_track_stack();
30695 +
30696 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30697
30698 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30699 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
30700 index 85784a7..a19ca98 100644
30701 --- a/drivers/isdn/hardware/eicon/divasync.h
30702 +++ b/drivers/isdn/hardware/eicon/divasync.h
30703 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
30704 } diva_didd_add_adapter_t;
30705 typedef struct _diva_didd_remove_adapter {
30706 IDI_CALL p_request;
30707 -} diva_didd_remove_adapter_t;
30708 +} __no_const diva_didd_remove_adapter_t;
30709 typedef struct _diva_didd_read_adapter_array {
30710 void * buffer;
30711 dword length;
30712 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
30713 index db87d51..7d09acf 100644
30714 --- a/drivers/isdn/hardware/eicon/idifunc.c
30715 +++ b/drivers/isdn/hardware/eicon/idifunc.c
30716 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30717 IDI_SYNC_REQ req;
30718 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30719
30720 + pax_track_stack();
30721 +
30722 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30723
30724 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30725 diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
30726 index a339598..b6a8bfc 100644
30727 --- a/drivers/isdn/hardware/eicon/message.c
30728 +++ b/drivers/isdn/hardware/eicon/message.c
30729 @@ -4886,6 +4886,8 @@ static void sig_ind(PLCI *plci)
30730 dword d;
30731 word w;
30732
30733 + pax_track_stack();
30734 +
30735 a = plci->adapter;
30736 Id = ((word)plci->Id<<8)|a->Id;
30737 PUT_WORD(&SS_Ind[4],0x0000);
30738 @@ -7480,6 +7482,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
30739 word j, n, w;
30740 dword d;
30741
30742 + pax_track_stack();
30743 +
30744
30745 for(i=0;i<8;i++) bp_parms[i].length = 0;
30746 for(i=0;i<2;i++) global_config[i].length = 0;
30747 @@ -7954,6 +7958,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
30748 const byte llc3[] = {4,3,2,2,6,6,0};
30749 const byte header[] = {0,2,3,3,0,0,0};
30750
30751 + pax_track_stack();
30752 +
30753 for(i=0;i<8;i++) bp_parms[i].length = 0;
30754 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30755 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30756 @@ -14741,6 +14747,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
30757 word appl_number_group_type[MAX_APPL];
30758 PLCI *auxplci;
30759
30760 + pax_track_stack();
30761 +
30762 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30763
30764 if(!a->group_optimization_enabled)
30765 diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
30766 index a564b75..f3cf8b5 100644
30767 --- a/drivers/isdn/hardware/eicon/mntfunc.c
30768 +++ b/drivers/isdn/hardware/eicon/mntfunc.c
30769 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
30770 IDI_SYNC_REQ req;
30771 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30772
30773 + pax_track_stack();
30774 +
30775 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30776
30777 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30778 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
30779 index a3bd163..8956575 100644
30780 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
30781 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
30782 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
30783 typedef struct _diva_os_idi_adapter_interface {
30784 diva_init_card_proc_t cleanup_adapter_proc;
30785 diva_cmd_card_proc_t cmd_proc;
30786 -} diva_os_idi_adapter_interface_t;
30787 +} __no_const diva_os_idi_adapter_interface_t;
30788
30789 typedef struct _diva_os_xdi_adapter {
30790 struct list_head link;
30791 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
30792 index 6ed82ad..b05ac05 100644
30793 --- a/drivers/isdn/i4l/isdn_common.c
30794 +++ b/drivers/isdn/i4l/isdn_common.c
30795 @@ -1286,6 +1286,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
30796 } iocpar;
30797 void __user *argp = (void __user *)arg;
30798
30799 + pax_track_stack();
30800 +
30801 #define name iocpar.name
30802 #define bname iocpar.bname
30803 #define iocts iocpar.iocts
30804 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
30805 index 1f355bb..43f1fea 100644
30806 --- a/drivers/isdn/icn/icn.c
30807 +++ b/drivers/isdn/icn/icn.c
30808 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
30809 if (count > len)
30810 count = len;
30811 if (user) {
30812 - if (copy_from_user(msg, buf, count))
30813 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30814 return -EFAULT;
30815 } else
30816 memcpy(msg, buf, count);
30817 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
30818 index 2535933..09a8e86 100644
30819 --- a/drivers/lguest/core.c
30820 +++ b/drivers/lguest/core.c
30821 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
30822 * it's worked so far. The end address needs +1 because __get_vm_area
30823 * allocates an extra guard page, so we need space for that.
30824 */
30825 +
30826 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30827 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30828 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30829 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30830 +#else
30831 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30832 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30833 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30834 +#endif
30835 +
30836 if (!switcher_vma) {
30837 err = -ENOMEM;
30838 printk("lguest: could not map switcher pages high\n");
30839 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
30840 * Now the Switcher is mapped at the right address, we can't fail!
30841 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
30842 */
30843 - memcpy(switcher_vma->addr, start_switcher_text,
30844 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30845 end_switcher_text - start_switcher_text);
30846
30847 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30848 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
30849 index 65af42f..530c87a 100644
30850 --- a/drivers/lguest/x86/core.c
30851 +++ b/drivers/lguest/x86/core.c
30852 @@ -59,7 +59,7 @@ static struct {
30853 /* Offset from where switcher.S was compiled to where we've copied it */
30854 static unsigned long switcher_offset(void)
30855 {
30856 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30857 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30858 }
30859
30860 /* This cpu's struct lguest_pages. */
30861 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
30862 * These copies are pretty cheap, so we do them unconditionally: */
30863 /* Save the current Host top-level page directory.
30864 */
30865 +
30866 +#ifdef CONFIG_PAX_PER_CPU_PGD
30867 + pages->state.host_cr3 = read_cr3();
30868 +#else
30869 pages->state.host_cr3 = __pa(current->mm->pgd);
30870 +#endif
30871 +
30872 /*
30873 * Set up the Guest's page tables to see this CPU's pages (and no
30874 * other CPU's pages).
30875 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
30876 * compiled-in switcher code and the high-mapped copy we just made.
30877 */
30878 for (i = 0; i < IDT_ENTRIES; i++)
30879 - default_idt_entries[i] += switcher_offset();
30880 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30881
30882 /*
30883 * Set up the Switcher's per-cpu areas.
30884 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
30885 * it will be undisturbed when we switch. To change %cs and jump we
30886 * need this structure to feed to Intel's "lcall" instruction.
30887 */
30888 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30889 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30890 lguest_entry.segment = LGUEST_CS;
30891
30892 /*
30893 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
30894 index 40634b0..4f5855e 100644
30895 --- a/drivers/lguest/x86/switcher_32.S
30896 +++ b/drivers/lguest/x86/switcher_32.S
30897 @@ -87,6 +87,7 @@
30898 #include <asm/page.h>
30899 #include <asm/segment.h>
30900 #include <asm/lguest.h>
30901 +#include <asm/processor-flags.h>
30902
30903 // We mark the start of the code to copy
30904 // It's placed in .text tho it's never run here
30905 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30906 // Changes type when we load it: damn Intel!
30907 // For after we switch over our page tables
30908 // That entry will be read-only: we'd crash.
30909 +
30910 +#ifdef CONFIG_PAX_KERNEXEC
30911 + mov %cr0, %edx
30912 + xor $X86_CR0_WP, %edx
30913 + mov %edx, %cr0
30914 +#endif
30915 +
30916 movl $(GDT_ENTRY_TSS*8), %edx
30917 ltr %dx
30918
30919 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30920 // Let's clear it again for our return.
30921 // The GDT descriptor of the Host
30922 // Points to the table after two "size" bytes
30923 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30924 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30925 // Clear "used" from type field (byte 5, bit 2)
30926 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30927 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30928 +
30929 +#ifdef CONFIG_PAX_KERNEXEC
30930 + mov %cr0, %eax
30931 + xor $X86_CR0_WP, %eax
30932 + mov %eax, %cr0
30933 +#endif
30934
30935 // Once our page table's switched, the Guest is live!
30936 // The Host fades as we run this final step.
30937 @@ -295,13 +309,12 @@ deliver_to_host:
30938 // I consulted gcc, and it gave
30939 // These instructions, which I gladly credit:
30940 leal (%edx,%ebx,8), %eax
30941 - movzwl (%eax),%edx
30942 - movl 4(%eax), %eax
30943 - xorw %ax, %ax
30944 - orl %eax, %edx
30945 + movl 4(%eax), %edx
30946 + movw (%eax), %dx
30947 // Now the address of the handler's in %edx
30948 // We call it now: its "iret" drops us home.
30949 - jmp *%edx
30950 + ljmp $__KERNEL_CS, $1f
30951 +1: jmp *%edx
30952
30953 // Every interrupt can come to us here
30954 // But we must truly tell each apart.
30955 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
30956 index 4daf9e5..b8d1d0f 100644
30957 --- a/drivers/macintosh/macio_asic.c
30958 +++ b/drivers/macintosh/macio_asic.c
30959 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
30960 * MacIO is matched against any Apple ID, it's probe() function
30961 * will then decide wether it applies or not
30962 */
30963 -static const struct pci_device_id __devinitdata pci_ids [] = { {
30964 +static const struct pci_device_id __devinitconst pci_ids [] = { {
30965 .vendor = PCI_VENDOR_ID_APPLE,
30966 .device = PCI_ANY_ID,
30967 .subvendor = PCI_ANY_ID,
30968 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
30969 index 2e9a3ca..c2fb229 100644
30970 --- a/drivers/md/dm-ioctl.c
30971 +++ b/drivers/md/dm-ioctl.c
30972 @@ -1578,7 +1578,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
30973 cmd == DM_LIST_VERSIONS_CMD)
30974 return 0;
30975
30976 - if ((cmd == DM_DEV_CREATE_CMD)) {
30977 + if (cmd == DM_DEV_CREATE_CMD) {
30978 if (!*param->name) {
30979 DMWARN("name not supplied when creating device");
30980 return -EINVAL;
30981 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
30982 index 9bfd057..01180bc 100644
30983 --- a/drivers/md/dm-raid1.c
30984 +++ b/drivers/md/dm-raid1.c
30985 @@ -40,7 +40,7 @@ enum dm_raid1_error {
30986
30987 struct mirror {
30988 struct mirror_set *ms;
30989 - atomic_t error_count;
30990 + atomic_unchecked_t error_count;
30991 unsigned long error_type;
30992 struct dm_dev *dev;
30993 sector_t offset;
30994 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
30995 struct mirror *m;
30996
30997 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
30998 - if (!atomic_read(&m->error_count))
30999 + if (!atomic_read_unchecked(&m->error_count))
31000 return m;
31001
31002 return NULL;
31003 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
31004 * simple way to tell if a device has encountered
31005 * errors.
31006 */
31007 - atomic_inc(&m->error_count);
31008 + atomic_inc_unchecked(&m->error_count);
31009
31010 if (test_and_set_bit(error_type, &m->error_type))
31011 return;
31012 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
31013 struct mirror *m = get_default_mirror(ms);
31014
31015 do {
31016 - if (likely(!atomic_read(&m->error_count)))
31017 + if (likely(!atomic_read_unchecked(&m->error_count)))
31018 return m;
31019
31020 if (m-- == ms->mirror)
31021 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
31022 {
31023 struct mirror *default_mirror = get_default_mirror(m->ms);
31024
31025 - return !atomic_read(&default_mirror->error_count);
31026 + return !atomic_read_unchecked(&default_mirror->error_count);
31027 }
31028
31029 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31030 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
31031 */
31032 if (likely(region_in_sync(ms, region, 1)))
31033 m = choose_mirror(ms, bio->bi_sector);
31034 - else if (m && atomic_read(&m->error_count))
31035 + else if (m && atomic_read_unchecked(&m->error_count))
31036 m = NULL;
31037
31038 if (likely(m))
31039 @@ -937,7 +937,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
31040 }
31041
31042 ms->mirror[mirror].ms = ms;
31043 - atomic_set(&(ms->mirror[mirror].error_count), 0);
31044 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31045 ms->mirror[mirror].error_type = 0;
31046 ms->mirror[mirror].offset = offset;
31047
31048 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_target *ti)
31049 */
31050 static char device_status_char(struct mirror *m)
31051 {
31052 - if (!atomic_read(&(m->error_count)))
31053 + if (!atomic_read_unchecked(&(m->error_count)))
31054 return 'A';
31055
31056 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
31057 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
31058 index 3d80cf0..b77cc47 100644
31059 --- a/drivers/md/dm-stripe.c
31060 +++ b/drivers/md/dm-stripe.c
31061 @@ -20,7 +20,7 @@ struct stripe {
31062 struct dm_dev *dev;
31063 sector_t physical_start;
31064
31065 - atomic_t error_count;
31066 + atomic_unchecked_t error_count;
31067 };
31068
31069 struct stripe_c {
31070 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
31071 kfree(sc);
31072 return r;
31073 }
31074 - atomic_set(&(sc->stripe[i].error_count), 0);
31075 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31076 }
31077
31078 ti->private = sc;
31079 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_target *ti,
31080 DMEMIT("%d ", sc->stripes);
31081 for (i = 0; i < sc->stripes; i++) {
31082 DMEMIT("%s ", sc->stripe[i].dev->name);
31083 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31084 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31085 'D' : 'A';
31086 }
31087 buffer[i] = '\0';
31088 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
31089 */
31090 for (i = 0; i < sc->stripes; i++)
31091 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31092 - atomic_inc(&(sc->stripe[i].error_count));
31093 - if (atomic_read(&(sc->stripe[i].error_count)) <
31094 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
31095 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31096 DM_IO_ERROR_THRESHOLD)
31097 schedule_work(&sc->trigger_event);
31098 }
31099 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
31100 index bc04518..7a83b81 100644
31101 --- a/drivers/md/dm-table.c
31102 +++ b/drivers/md/dm-table.c
31103 @@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
31104 if (!dev_size)
31105 return 0;
31106
31107 - if ((start >= dev_size) || (start + len > dev_size)) {
31108 + if ((start >= dev_size) || (len > dev_size - start)) {
31109 DMWARN("%s: %s too small for target: "
31110 "start=%llu, len=%llu, dev_size=%llu",
31111 dm_device_name(ti->table->md), bdevname(bdev, b),
31112 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
31113 index 52b39f3..83a8b6b 100644
31114 --- a/drivers/md/dm.c
31115 +++ b/drivers/md/dm.c
31116 @@ -165,9 +165,9 @@ struct mapped_device {
31117 /*
31118 * Event handling.
31119 */
31120 - atomic_t event_nr;
31121 + atomic_unchecked_t event_nr;
31122 wait_queue_head_t eventq;
31123 - atomic_t uevent_seq;
31124 + atomic_unchecked_t uevent_seq;
31125 struct list_head uevent_list;
31126 spinlock_t uevent_lock; /* Protect access to uevent_list */
31127
31128 @@ -1843,8 +1843,8 @@ static struct mapped_device *alloc_dev(int minor)
31129 rwlock_init(&md->map_lock);
31130 atomic_set(&md->holders, 1);
31131 atomic_set(&md->open_count, 0);
31132 - atomic_set(&md->event_nr, 0);
31133 - atomic_set(&md->uevent_seq, 0);
31134 + atomic_set_unchecked(&md->event_nr, 0);
31135 + atomic_set_unchecked(&md->uevent_seq, 0);
31136 INIT_LIST_HEAD(&md->uevent_list);
31137 spin_lock_init(&md->uevent_lock);
31138
31139 @@ -1978,7 +1978,7 @@ static void event_callback(void *context)
31140
31141 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31142
31143 - atomic_inc(&md->event_nr);
31144 + atomic_inc_unchecked(&md->event_nr);
31145 wake_up(&md->eventq);
31146 }
31147
31148 @@ -2614,18 +2614,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
31149
31150 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31151 {
31152 - return atomic_add_return(1, &md->uevent_seq);
31153 + return atomic_add_return_unchecked(1, &md->uevent_seq);
31154 }
31155
31156 uint32_t dm_get_event_nr(struct mapped_device *md)
31157 {
31158 - return atomic_read(&md->event_nr);
31159 + return atomic_read_unchecked(&md->event_nr);
31160 }
31161
31162 int dm_wait_event(struct mapped_device *md, int event_nr)
31163 {
31164 return wait_event_interruptible(md->eventq,
31165 - (event_nr != atomic_read(&md->event_nr)));
31166 + (event_nr != atomic_read_unchecked(&md->event_nr)));
31167 }
31168
31169 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
31170 diff --git a/drivers/md/md.c b/drivers/md/md.c
31171 index 5c95ccb..217fa57 100644
31172 --- a/drivers/md/md.c
31173 +++ b/drivers/md/md.c
31174 @@ -280,10 +280,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
31175 * start build, activate spare
31176 */
31177 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31178 -static atomic_t md_event_count;
31179 +static atomic_unchecked_t md_event_count;
31180 void md_new_event(mddev_t *mddev)
31181 {
31182 - atomic_inc(&md_event_count);
31183 + atomic_inc_unchecked(&md_event_count);
31184 wake_up(&md_event_waiters);
31185 }
31186 EXPORT_SYMBOL_GPL(md_new_event);
31187 @@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31188 */
31189 static void md_new_event_inintr(mddev_t *mddev)
31190 {
31191 - atomic_inc(&md_event_count);
31192 + atomic_inc_unchecked(&md_event_count);
31193 wake_up(&md_event_waiters);
31194 }
31195
31196 @@ -1531,7 +1531,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
31197
31198 rdev->preferred_minor = 0xffff;
31199 rdev->data_offset = le64_to_cpu(sb->data_offset);
31200 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31201 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31202
31203 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31204 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31205 @@ -1748,7 +1748,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
31206 else
31207 sb->resync_offset = cpu_to_le64(0);
31208
31209 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31210 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31211
31212 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31213 sb->size = cpu_to_le64(mddev->dev_sectors);
31214 @@ -2643,7 +2643,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
31215 static ssize_t
31216 errors_show(mdk_rdev_t *rdev, char *page)
31217 {
31218 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31219 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31220 }
31221
31222 static ssize_t
31223 @@ -2652,7 +2652,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
31224 char *e;
31225 unsigned long n = simple_strtoul(buf, &e, 10);
31226 if (*buf && (*e == 0 || *e == '\n')) {
31227 - atomic_set(&rdev->corrected_errors, n);
31228 + atomic_set_unchecked(&rdev->corrected_errors, n);
31229 return len;
31230 }
31231 return -EINVAL;
31232 @@ -3042,8 +3042,8 @@ int md_rdev_init(mdk_rdev_t *rdev)
31233 rdev->sb_loaded = 0;
31234 rdev->bb_page = NULL;
31235 atomic_set(&rdev->nr_pending, 0);
31236 - atomic_set(&rdev->read_errors, 0);
31237 - atomic_set(&rdev->corrected_errors, 0);
31238 + atomic_set_unchecked(&rdev->read_errors, 0);
31239 + atomic_set_unchecked(&rdev->corrected_errors, 0);
31240
31241 INIT_LIST_HEAD(&rdev->same_set);
31242 init_waitqueue_head(&rdev->blocked_wait);
31243 @@ -6667,7 +6667,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31244
31245 spin_unlock(&pers_lock);
31246 seq_printf(seq, "\n");
31247 - seq->poll_event = atomic_read(&md_event_count);
31248 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31249 return 0;
31250 }
31251 if (v == (void*)2) {
31252 @@ -6756,7 +6756,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
31253 chunk_kb ? "KB" : "B");
31254 if (bitmap->file) {
31255 seq_printf(seq, ", file: ");
31256 - seq_path(seq, &bitmap->file->f_path, " \t\n");
31257 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31258 }
31259
31260 seq_printf(seq, "\n");
31261 @@ -6787,7 +6787,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
31262 return error;
31263
31264 seq = file->private_data;
31265 - seq->poll_event = atomic_read(&md_event_count);
31266 + seq->poll_event = atomic_read_unchecked(&md_event_count);
31267 return error;
31268 }
31269
31270 @@ -6801,7 +6801,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
31271 /* always allow read */
31272 mask = POLLIN | POLLRDNORM;
31273
31274 - if (seq->poll_event != atomic_read(&md_event_count))
31275 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
31276 mask |= POLLERR | POLLPRI;
31277 return mask;
31278 }
31279 @@ -6845,7 +6845,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
31280 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31281 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31282 (int)part_stat_read(&disk->part0, sectors[1]) -
31283 - atomic_read(&disk->sync_io);
31284 + atomic_read_unchecked(&disk->sync_io);
31285 /* sync IO will cause sync_io to increase before the disk_stats
31286 * as sync_io is counted when a request starts, and
31287 * disk_stats is counted when it completes.
31288 diff --git a/drivers/md/md.h b/drivers/md/md.h
31289 index 0a309dc..7e01d7f 100644
31290 --- a/drivers/md/md.h
31291 +++ b/drivers/md/md.h
31292 @@ -124,13 +124,13 @@ struct mdk_rdev_s
31293 * only maintained for arrays that
31294 * support hot removal
31295 */
31296 - atomic_t read_errors; /* number of consecutive read errors that
31297 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
31298 * we have tried to ignore.
31299 */
31300 struct timespec last_read_error; /* monotonic time since our
31301 * last read error
31302 */
31303 - atomic_t corrected_errors; /* number of corrected read errors,
31304 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31305 * for reporting to userspace and storing
31306 * in superblock.
31307 */
31308 @@ -415,7 +415,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
31309
31310 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31311 {
31312 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31313 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31314 }
31315
31316 struct mdk_personality
31317 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
31318 index d9587df..83a0dc3 100644
31319 --- a/drivers/md/raid1.c
31320 +++ b/drivers/md/raid1.c
31321 @@ -1541,7 +1541,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
31322 if (r1_sync_page_io(rdev, sect, s,
31323 bio->bi_io_vec[idx].bv_page,
31324 READ) != 0)
31325 - atomic_add(s, &rdev->corrected_errors);
31326 + atomic_add_unchecked(s, &rdev->corrected_errors);
31327 }
31328 sectors -= s;
31329 sect += s;
31330 @@ -1754,7 +1754,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
31331 test_bit(In_sync, &rdev->flags)) {
31332 if (r1_sync_page_io(rdev, sect, s,
31333 conf->tmppage, READ)) {
31334 - atomic_add(s, &rdev->corrected_errors);
31335 + atomic_add_unchecked(s, &rdev->corrected_errors);
31336 printk(KERN_INFO
31337 "md/raid1:%s: read error corrected "
31338 "(%d sectors at %llu on %s)\n",
31339 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
31340 index 1d44228..98db57d 100644
31341 --- a/drivers/md/raid10.c
31342 +++ b/drivers/md/raid10.c
31343 @@ -1423,7 +1423,7 @@ static void end_sync_read(struct bio *bio, int error)
31344 /* The write handler will notice the lack of
31345 * R10BIO_Uptodate and record any errors etc
31346 */
31347 - atomic_add(r10_bio->sectors,
31348 + atomic_add_unchecked(r10_bio->sectors,
31349 &conf->mirrors[d].rdev->corrected_errors);
31350
31351 /* for reconstruct, we always reschedule after a read.
31352 @@ -1723,7 +1723,7 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31353 {
31354 struct timespec cur_time_mon;
31355 unsigned long hours_since_last;
31356 - unsigned int read_errors = atomic_read(&rdev->read_errors);
31357 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
31358
31359 ktime_get_ts(&cur_time_mon);
31360
31361 @@ -1745,9 +1745,9 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
31362 * overflowing the shift of read_errors by hours_since_last.
31363 */
31364 if (hours_since_last >= 8 * sizeof(read_errors))
31365 - atomic_set(&rdev->read_errors, 0);
31366 + atomic_set_unchecked(&rdev->read_errors, 0);
31367 else
31368 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
31369 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
31370 }
31371
31372 static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
31373 @@ -1797,8 +1797,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31374 return;
31375
31376 check_decay_read_errors(mddev, rdev);
31377 - atomic_inc(&rdev->read_errors);
31378 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
31379 + atomic_inc_unchecked(&rdev->read_errors);
31380 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
31381 char b[BDEVNAME_SIZE];
31382 bdevname(rdev->bdev, b);
31383
31384 @@ -1806,7 +1806,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31385 "md/raid10:%s: %s: Raid device exceeded "
31386 "read_error threshold [cur %d:max %d]\n",
31387 mdname(mddev), b,
31388 - atomic_read(&rdev->read_errors), max_read_errors);
31389 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
31390 printk(KERN_NOTICE
31391 "md/raid10:%s: %s: Failing raid device\n",
31392 mdname(mddev), b);
31393 @@ -1951,7 +1951,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
31394 (unsigned long long)(
31395 sect + rdev->data_offset),
31396 bdevname(rdev->bdev, b));
31397 - atomic_add(s, &rdev->corrected_errors);
31398 + atomic_add_unchecked(s, &rdev->corrected_errors);
31399 }
31400
31401 rdev_dec_pending(rdev, mddev);
31402 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
31403 index b6200c3..02e8702 100644
31404 --- a/drivers/md/raid5.c
31405 +++ b/drivers/md/raid5.c
31406 @@ -1616,19 +1616,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
31407 (unsigned long long)(sh->sector
31408 + rdev->data_offset),
31409 bdevname(rdev->bdev, b));
31410 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
31411 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
31412 clear_bit(R5_ReadError, &sh->dev[i].flags);
31413 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31414 }
31415 - if (atomic_read(&conf->disks[i].rdev->read_errors))
31416 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
31417 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31418 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31419 } else {
31420 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31421 int retry = 0;
31422 rdev = conf->disks[i].rdev;
31423
31424 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31425 - atomic_inc(&rdev->read_errors);
31426 + atomic_inc_unchecked(&rdev->read_errors);
31427 if (conf->mddev->degraded >= conf->max_degraded)
31428 printk_ratelimited(
31429 KERN_WARNING
31430 @@ -1648,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
31431 (unsigned long long)(sh->sector
31432 + rdev->data_offset),
31433 bdn);
31434 - else if (atomic_read(&rdev->read_errors)
31435 + else if (atomic_read_unchecked(&rdev->read_errors)
31436 > conf->max_nr_stripes)
31437 printk(KERN_WARNING
31438 "md/raid:%s: Too many read errors, failing device %s.\n",
31439 @@ -1978,6 +1978,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
31440 sector_t r_sector;
31441 struct stripe_head sh2;
31442
31443 + pax_track_stack();
31444
31445 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31446 stripe = new_sector;
31447 diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
31448 index 1d1d8d2..6c6837a 100644
31449 --- a/drivers/media/common/saa7146_hlp.c
31450 +++ b/drivers/media/common/saa7146_hlp.c
31451 @@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
31452
31453 int x[32], y[32], w[32], h[32];
31454
31455 + pax_track_stack();
31456 +
31457 /* clear out memory */
31458 memset(&line_list[0], 0x00, sizeof(u32)*32);
31459 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31460 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
31461 index 573d540..16f78f3 100644
31462 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
31463 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
31464 @@ -1675,7 +1675,7 @@ static struct ddb_info ddb_v6 = {
31465 .subvendor = _subvend, .subdevice = _subdev, \
31466 .driver_data = (unsigned long)&_driverdata }
31467
31468 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
31469 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
31470 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
31471 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
31472 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
31473 diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31474 index 7ea517b..252fe54 100644
31475 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31476 +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31477 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
31478 u8 buf[HOST_LINK_BUF_SIZE];
31479 int i;
31480
31481 + pax_track_stack();
31482 +
31483 dprintk("%s\n", __func__);
31484
31485 /* check if we have space for a link buf in the rx_buffer */
31486 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
31487 unsigned long timeout;
31488 int written;
31489
31490 + pax_track_stack();
31491 +
31492 dprintk("%s\n", __func__);
31493
31494 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31495 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
31496 index a7d876f..8c21b61 100644
31497 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
31498 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
31499 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
31500 union {
31501 dmx_ts_cb ts;
31502 dmx_section_cb sec;
31503 - } cb;
31504 + } __no_const cb;
31505
31506 struct dvb_demux *demux;
31507 void *priv;
31508 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
31509 index f732877..d38c35a 100644
31510 --- a/drivers/media/dvb/dvb-core/dvbdev.c
31511 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
31512 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
31513 const struct dvb_device *template, void *priv, int type)
31514 {
31515 struct dvb_device *dvbdev;
31516 - struct file_operations *dvbdevfops;
31517 + file_operations_no_const *dvbdevfops;
31518 struct device *clsdev;
31519 int minor;
31520 int id;
31521 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
31522 index acb5fb2..2413f1d 100644
31523 --- a/drivers/media/dvb/dvb-usb/cxusb.c
31524 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
31525 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
31526 struct dib0700_adapter_state {
31527 int (*set_param_save) (struct dvb_frontend *,
31528 struct dvb_frontend_parameters *);
31529 -};
31530 +} __no_const;
31531
31532 static int dib7070_set_param_override(struct dvb_frontend *fe,
31533 struct dvb_frontend_parameters *fep)
31534 diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
31535 index a224e94..503b76a 100644
31536 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c
31537 +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
31538 @@ -478,6 +478,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
31539 if (!buf)
31540 return -ENOMEM;
31541
31542 + pax_track_stack();
31543 +
31544 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31545 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
31546 hx.addr, hx.len, hx.chk);
31547 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
31548 index 058b231..183d2b3 100644
31549 --- a/drivers/media/dvb/dvb-usb/dw2102.c
31550 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
31551 @@ -95,7 +95,7 @@ struct su3000_state {
31552
31553 struct s6x0_state {
31554 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
31555 -};
31556 +} __no_const;
31557
31558 /* debug */
31559 static int dvb_usb_dw2102_debug;
31560 diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
31561 index 37b1469..28a6f6f 100644
31562 --- a/drivers/media/dvb/dvb-usb/lmedm04.c
31563 +++ b/drivers/media/dvb/dvb-usb/lmedm04.c
31564 @@ -742,6 +742,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
31565 usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
31566 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000);
31567
31568 + pax_track_stack();
31569
31570 data[0] = 0x8a;
31571 len_in = 1;
31572 @@ -764,6 +765,8 @@ static void lme_coldreset(struct usb_device *dev)
31573 int ret = 0, len_in;
31574 u8 data[512] = {0};
31575
31576 + pax_track_stack();
31577 +
31578 data[0] = 0x0a;
31579 len_in = 1;
31580 info("FRM Firmware Cold Reset");
31581 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
31582 index ba91735..4261d84 100644
31583 --- a/drivers/media/dvb/frontends/dib3000.h
31584 +++ b/drivers/media/dvb/frontends/dib3000.h
31585 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
31586 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
31587 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
31588 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
31589 -};
31590 +} __no_const;
31591
31592 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
31593 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
31594 diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
31595 index c283112..7f367a7 100644
31596 --- a/drivers/media/dvb/frontends/mb86a16.c
31597 +++ b/drivers/media/dvb/frontends/mb86a16.c
31598 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16_state *state)
31599 int ret = -1;
31600 int sync;
31601
31602 + pax_track_stack();
31603 +
31604 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
31605
31606 fcp = 3000;
31607 diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
31608 index c709ce6..b3fe620 100644
31609 --- a/drivers/media/dvb/frontends/or51211.c
31610 +++ b/drivers/media/dvb/frontends/or51211.c
31611 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
31612 u8 tudata[585];
31613 int i;
31614
31615 + pax_track_stack();
31616 +
31617 dprintk("Firmware is %zd bytes\n",fw->size);
31618
31619 /* Get eprom data */
31620 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
31621 index 0564192..75b16f5 100644
31622 --- a/drivers/media/dvb/ngene/ngene-cards.c
31623 +++ b/drivers/media/dvb/ngene/ngene-cards.c
31624 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
31625
31626 /****************************************************************************/
31627
31628 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
31629 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
31630 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
31631 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
31632 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
31633 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
31634 index 16a089f..ab1667d 100644
31635 --- a/drivers/media/radio/radio-cadet.c
31636 +++ b/drivers/media/radio/radio-cadet.c
31637 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
31638 unsigned char readbuf[RDS_BUFFER];
31639 int i = 0;
31640
31641 + if (count > RDS_BUFFER)
31642 + return -EFAULT;
31643 mutex_lock(&dev->lock);
31644 if (dev->rdsstat == 0) {
31645 dev->rdsstat = 1;
31646 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
31647 index 9cde353..8c6a1c3 100644
31648 --- a/drivers/media/video/au0828/au0828.h
31649 +++ b/drivers/media/video/au0828/au0828.h
31650 @@ -191,7 +191,7 @@ struct au0828_dev {
31651
31652 /* I2C */
31653 struct i2c_adapter i2c_adap;
31654 - struct i2c_algorithm i2c_algo;
31655 + i2c_algorithm_no_const i2c_algo;
31656 struct i2c_client i2c_client;
31657 u32 i2c_rc;
31658
31659 diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
31660 index 9e2f870..22e3a08 100644
31661 --- a/drivers/media/video/cx18/cx18-driver.c
31662 +++ b/drivers/media/video/cx18/cx18-driver.c
31663 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
31664 struct i2c_client c;
31665 u8 eedata[256];
31666
31667 + pax_track_stack();
31668 +
31669 memset(&c, 0, sizeof(c));
31670 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31671 c.adapter = &cx->i2c_adap[0];
31672 diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
31673 index ce765e3..f9e1b04 100644
31674 --- a/drivers/media/video/cx23885/cx23885-input.c
31675 +++ b/drivers/media/video/cx23885/cx23885-input.c
31676 @@ -53,6 +53,8 @@ static void cx23885_input_process_measurements(struct cx23885_dev *dev,
31677 bool handle = false;
31678 struct ir_raw_event ir_core_event[64];
31679
31680 + pax_track_stack();
31681 +
31682 do {
31683 num = 0;
31684 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
31685 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
31686 index 68d1240..46b32eb 100644
31687 --- a/drivers/media/video/cx88/cx88-alsa.c
31688 +++ b/drivers/media/video/cx88/cx88-alsa.c
31689 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
31690 * Only boards with eeprom and byte 1 at eeprom=1 have it
31691 */
31692
31693 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
31694 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
31695 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31696 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
31697 {0, }
31698 diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31699 index 9515f3a..c9ecb85 100644
31700 --- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31701 +++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31702 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
31703 u8 *eeprom;
31704 struct tveeprom tvdata;
31705
31706 + pax_track_stack();
31707 +
31708 memset(&tvdata,0,sizeof(tvdata));
31709
31710 eeprom = pvr2_eeprom_fetch(hdw);
31711 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31712 index 305e6aa..0143317 100644
31713 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31714 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
31715 @@ -196,7 +196,7 @@ struct pvr2_hdw {
31716
31717 /* I2C stuff */
31718 struct i2c_adapter i2c_adap;
31719 - struct i2c_algorithm i2c_algo;
31720 + i2c_algorithm_no_const i2c_algo;
31721 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
31722 int i2c_cx25840_hack_state;
31723 int i2c_linked;
31724 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
31725 index f9f29cc..5a2e330 100644
31726 --- a/drivers/media/video/saa7134/saa6752hs.c
31727 +++ b/drivers/media/video/saa7134/saa6752hs.c
31728 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
31729 unsigned char localPAT[256];
31730 unsigned char localPMT[256];
31731
31732 + pax_track_stack();
31733 +
31734 /* Set video format - must be done first as it resets other settings */
31735 set_reg8(client, 0x41, h->video_format);
31736
31737 diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
31738 index 62fac7f..f29e0b9 100644
31739 --- a/drivers/media/video/saa7164/saa7164-cmd.c
31740 +++ b/drivers/media/video/saa7164/saa7164-cmd.c
31741 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
31742 u8 tmp[512];
31743 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31744
31745 + pax_track_stack();
31746 +
31747 /* While any outstand message on the bus exists... */
31748 do {
31749
31750 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
31751 u8 tmp[512];
31752 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31753
31754 + pax_track_stack();
31755 +
31756 while (loop) {
31757
31758 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
31759 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
31760 index 84cd1b6..f741e07 100644
31761 --- a/drivers/media/video/timblogiw.c
31762 +++ b/drivers/media/video/timblogiw.c
31763 @@ -744,7 +744,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
31764
31765 /* Platform device functions */
31766
31767 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31768 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
31769 .vidioc_querycap = timblogiw_querycap,
31770 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
31771 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
31772 @@ -766,7 +766,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
31773 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
31774 };
31775
31776 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
31777 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
31778 .owner = THIS_MODULE,
31779 .open = timblogiw_open,
31780 .release = timblogiw_close,
31781 diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
31782 index f344411..6ae9974 100644
31783 --- a/drivers/media/video/usbvision/usbvision-core.c
31784 +++ b/drivers/media/video/usbvision/usbvision-core.c
31785 @@ -707,6 +707,8 @@ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision
31786 unsigned char rv, gv, bv;
31787 static unsigned char *Y, *U, *V;
31788
31789 + pax_track_stack();
31790 +
31791 frame = usbvision->cur_frame;
31792 image_size = frame->frmwidth * frame->frmheight;
31793 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31794 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
31795 index f300dea..04834ba 100644
31796 --- a/drivers/media/video/videobuf-dma-sg.c
31797 +++ b/drivers/media/video/videobuf-dma-sg.c
31798 @@ -607,6 +607,8 @@ void *videobuf_sg_alloc(size_t size)
31799 {
31800 struct videobuf_queue q;
31801
31802 + pax_track_stack();
31803 +
31804 /* Required to make generic handler to call __videobuf_alloc */
31805 q.int_ops = &sg_ops;
31806
31807 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
31808 index 7956a10..f39232f 100644
31809 --- a/drivers/message/fusion/mptbase.c
31810 +++ b/drivers/message/fusion/mptbase.c
31811 @@ -6681,8 +6681,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
31812 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31813 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31814
31815 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31816 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
31817 +#else
31818 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31819 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31820 +#endif
31821 +
31822 /*
31823 * Rounding UP to nearest 4-kB boundary here...
31824 */
31825 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
31826 index 7596aec..f7ae9aa 100644
31827 --- a/drivers/message/fusion/mptsas.c
31828 +++ b/drivers/message/fusion/mptsas.c
31829 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
31830 return 0;
31831 }
31832
31833 +static inline void
31834 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31835 +{
31836 + if (phy_info->port_details) {
31837 + phy_info->port_details->rphy = rphy;
31838 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31839 + ioc->name, rphy));
31840 + }
31841 +
31842 + if (rphy) {
31843 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31844 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31845 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31846 + ioc->name, rphy, rphy->dev.release));
31847 + }
31848 +}
31849 +
31850 /* no mutex */
31851 static void
31852 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31853 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
31854 return NULL;
31855 }
31856
31857 -static inline void
31858 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31859 -{
31860 - if (phy_info->port_details) {
31861 - phy_info->port_details->rphy = rphy;
31862 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31863 - ioc->name, rphy));
31864 - }
31865 -
31866 - if (rphy) {
31867 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31868 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31869 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31870 - ioc->name, rphy, rphy->dev.release));
31871 - }
31872 -}
31873 -
31874 static inline struct sas_port *
31875 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31876 {
31877 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
31878 index ce61a57..3da8862 100644
31879 --- a/drivers/message/fusion/mptscsih.c
31880 +++ b/drivers/message/fusion/mptscsih.c
31881 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31882
31883 h = shost_priv(SChost);
31884
31885 - if (h) {
31886 - if (h->info_kbuf == NULL)
31887 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31888 - return h->info_kbuf;
31889 - h->info_kbuf[0] = '\0';
31890 + if (!h)
31891 + return NULL;
31892
31893 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31894 - h->info_kbuf[size-1] = '\0';
31895 - }
31896 + if (h->info_kbuf == NULL)
31897 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31898 + return h->info_kbuf;
31899 + h->info_kbuf[0] = '\0';
31900 +
31901 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31902 + h->info_kbuf[size-1] = '\0';
31903
31904 return h->info_kbuf;
31905 }
31906 diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
31907 index 098de2b..fbb922c 100644
31908 --- a/drivers/message/i2o/i2o_config.c
31909 +++ b/drivers/message/i2o/i2o_config.c
31910 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned long arg)
31911 struct i2o_message *msg;
31912 unsigned int iop;
31913
31914 + pax_track_stack();
31915 +
31916 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31917 return -EFAULT;
31918
31919 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
31920 index 07dbeaf..5533142 100644
31921 --- a/drivers/message/i2o/i2o_proc.c
31922 +++ b/drivers/message/i2o/i2o_proc.c
31923 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
31924 "Array Controller Device"
31925 };
31926
31927 -static char *chtostr(u8 * chars, int n)
31928 -{
31929 - char tmp[256];
31930 - tmp[0] = 0;
31931 - return strncat(tmp, (char *)chars, n);
31932 -}
31933 -
31934 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31935 char *group)
31936 {
31937 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
31938
31939 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31940 seq_printf(seq, "%-#8x", ddm_table.module_id);
31941 - seq_printf(seq, "%-29s",
31942 - chtostr(ddm_table.module_name_version, 28));
31943 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31944 seq_printf(seq, "%9d ", ddm_table.data_size);
31945 seq_printf(seq, "%8d", ddm_table.code_size);
31946
31947 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
31948
31949 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31950 seq_printf(seq, "%-#8x", dst->module_id);
31951 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31952 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31953 + seq_printf(seq, "%-.28s", dst->module_name_version);
31954 + seq_printf(seq, "%-.8s", dst->date);
31955 seq_printf(seq, "%8d ", dst->module_size);
31956 seq_printf(seq, "%8d ", dst->mpb_size);
31957 seq_printf(seq, "0x%04x", dst->module_flags);
31958 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
31959 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31960 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31961 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31962 - seq_printf(seq, "Vendor info : %s\n",
31963 - chtostr((u8 *) (work32 + 2), 16));
31964 - seq_printf(seq, "Product info : %s\n",
31965 - chtostr((u8 *) (work32 + 6), 16));
31966 - seq_printf(seq, "Description : %s\n",
31967 - chtostr((u8 *) (work32 + 10), 16));
31968 - seq_printf(seq, "Product rev. : %s\n",
31969 - chtostr((u8 *) (work32 + 14), 8));
31970 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31971 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31972 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31973 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31974
31975 seq_printf(seq, "Serial number : ");
31976 print_serial_number(seq, (u8 *) (work32 + 16),
31977 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
31978 }
31979
31980 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31981 - seq_printf(seq, "Module name : %s\n",
31982 - chtostr(result.module_name, 24));
31983 - seq_printf(seq, "Module revision : %s\n",
31984 - chtostr(result.module_rev, 8));
31985 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31986 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31987
31988 seq_printf(seq, "Serial number : ");
31989 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31990 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
31991 return 0;
31992 }
31993
31994 - seq_printf(seq, "Device name : %s\n",
31995 - chtostr(result.device_name, 64));
31996 - seq_printf(seq, "Service name : %s\n",
31997 - chtostr(result.service_name, 64));
31998 - seq_printf(seq, "Physical name : %s\n",
31999 - chtostr(result.physical_location, 64));
32000 - seq_printf(seq, "Instance number : %s\n",
32001 - chtostr(result.instance_number, 4));
32002 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
32003 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
32004 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
32005 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
32006
32007 return 0;
32008 }
32009 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
32010 index a8c08f3..155fe3d 100644
32011 --- a/drivers/message/i2o/iop.c
32012 +++ b/drivers/message/i2o/iop.c
32013 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
32014
32015 spin_lock_irqsave(&c->context_list_lock, flags);
32016
32017 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
32018 - atomic_inc(&c->context_list_counter);
32019 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
32020 + atomic_inc_unchecked(&c->context_list_counter);
32021
32022 - entry->context = atomic_read(&c->context_list_counter);
32023 + entry->context = atomic_read_unchecked(&c->context_list_counter);
32024
32025 list_add(&entry->list, &c->context_list);
32026
32027 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
32028
32029 #if BITS_PER_LONG == 64
32030 spin_lock_init(&c->context_list_lock);
32031 - atomic_set(&c->context_list_counter, 0);
32032 + atomic_set_unchecked(&c->context_list_counter, 0);
32033 INIT_LIST_HEAD(&c->context_list);
32034 #endif
32035
32036 diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
32037 index a20e1c4..4f57255 100644
32038 --- a/drivers/mfd/ab3100-core.c
32039 +++ b/drivers/mfd/ab3100-core.c
32040 @@ -809,7 +809,7 @@ struct ab_family_id {
32041 char *name;
32042 };
32043
32044 -static const struct ab_family_id ids[] __devinitdata = {
32045 +static const struct ab_family_id ids[] __devinitconst = {
32046 /* AB3100 */
32047 {
32048 .id = 0xc0,
32049 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
32050 index f12720d..3c251fd 100644
32051 --- a/drivers/mfd/abx500-core.c
32052 +++ b/drivers/mfd/abx500-core.c
32053 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
32054
32055 struct abx500_device_entry {
32056 struct list_head list;
32057 - struct abx500_ops ops;
32058 + abx500_ops_no_const ops;
32059 struct device *dev;
32060 };
32061
32062 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
32063 index 5c2a06a..8fa077c 100644
32064 --- a/drivers/mfd/janz-cmodio.c
32065 +++ b/drivers/mfd/janz-cmodio.c
32066 @@ -13,6 +13,7 @@
32067
32068 #include <linux/kernel.h>
32069 #include <linux/module.h>
32070 +#include <linux/slab.h>
32071 #include <linux/init.h>
32072 #include <linux/pci.h>
32073 #include <linux/interrupt.h>
32074 diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
32075 index 5fe5de1..af64f53 100644
32076 --- a/drivers/mfd/wm8350-i2c.c
32077 +++ b/drivers/mfd/wm8350-i2c.c
32078 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
32079 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32080 int ret;
32081
32082 + pax_track_stack();
32083 +
32084 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32085 return -EINVAL;
32086
32087 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
32088 index 8b51cd6..f628f8d 100644
32089 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
32090 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
32091 @@ -437,7 +437,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
32092 * the lid is closed. This leads to interrupts as soon as a little move
32093 * is done.
32094 */
32095 - atomic_inc(&lis3_dev.count);
32096 + atomic_inc_unchecked(&lis3_dev.count);
32097
32098 wake_up_interruptible(&lis3_dev.misc_wait);
32099 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
32100 @@ -520,7 +520,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
32101 if (lis3_dev.pm_dev)
32102 pm_runtime_get_sync(lis3_dev.pm_dev);
32103
32104 - atomic_set(&lis3_dev.count, 0);
32105 + atomic_set_unchecked(&lis3_dev.count, 0);
32106 return 0;
32107 }
32108
32109 @@ -547,7 +547,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
32110 add_wait_queue(&lis3_dev.misc_wait, &wait);
32111 while (true) {
32112 set_current_state(TASK_INTERRUPTIBLE);
32113 - data = atomic_xchg(&lis3_dev.count, 0);
32114 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
32115 if (data)
32116 break;
32117
32118 @@ -585,7 +585,7 @@ out:
32119 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
32120 {
32121 poll_wait(file, &lis3_dev.misc_wait, wait);
32122 - if (atomic_read(&lis3_dev.count))
32123 + if (atomic_read_unchecked(&lis3_dev.count))
32124 return POLLIN | POLLRDNORM;
32125 return 0;
32126 }
32127 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
32128 index a193958..4d7ecd2 100644
32129 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
32130 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
32131 @@ -265,7 +265,7 @@ struct lis3lv02d {
32132 struct input_polled_dev *idev; /* input device */
32133 struct platform_device *pdev; /* platform device */
32134 struct regulator_bulk_data regulators[2];
32135 - atomic_t count; /* interrupt count after last read */
32136 + atomic_unchecked_t count; /* interrupt count after last read */
32137 union axis_conversion ac; /* hw -> logical axis */
32138 int mapped_btns[3];
32139
32140 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
32141 index 2f30bad..c4c13d0 100644
32142 --- a/drivers/misc/sgi-gru/gruhandles.c
32143 +++ b/drivers/misc/sgi-gru/gruhandles.c
32144 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32145 unsigned long nsec;
32146
32147 nsec = CLKS2NSEC(clks);
32148 - atomic_long_inc(&mcs_op_statistics[op].count);
32149 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
32150 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32151 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
32152 if (mcs_op_statistics[op].max < nsec)
32153 mcs_op_statistics[op].max = nsec;
32154 }
32155 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
32156 index 7768b87..f8aac38 100644
32157 --- a/drivers/misc/sgi-gru/gruprocfs.c
32158 +++ b/drivers/misc/sgi-gru/gruprocfs.c
32159 @@ -32,9 +32,9 @@
32160
32161 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32162
32163 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32164 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32165 {
32166 - unsigned long val = atomic_long_read(v);
32167 + unsigned long val = atomic_long_read_unchecked(v);
32168
32169 seq_printf(s, "%16lu %s\n", val, id);
32170 }
32171 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
32172
32173 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
32174 for (op = 0; op < mcsop_last; op++) {
32175 - count = atomic_long_read(&mcs_op_statistics[op].count);
32176 - total = atomic_long_read(&mcs_op_statistics[op].total);
32177 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32178 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32179 max = mcs_op_statistics[op].max;
32180 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32181 count ? total / count : 0, max);
32182 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
32183 index 5c3ce24..4915ccb 100644
32184 --- a/drivers/misc/sgi-gru/grutables.h
32185 +++ b/drivers/misc/sgi-gru/grutables.h
32186 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
32187 * GRU statistics.
32188 */
32189 struct gru_stats_s {
32190 - atomic_long_t vdata_alloc;
32191 - atomic_long_t vdata_free;
32192 - atomic_long_t gts_alloc;
32193 - atomic_long_t gts_free;
32194 - atomic_long_t gms_alloc;
32195 - atomic_long_t gms_free;
32196 - atomic_long_t gts_double_allocate;
32197 - atomic_long_t assign_context;
32198 - atomic_long_t assign_context_failed;
32199 - atomic_long_t free_context;
32200 - atomic_long_t load_user_context;
32201 - atomic_long_t load_kernel_context;
32202 - atomic_long_t lock_kernel_context;
32203 - atomic_long_t unlock_kernel_context;
32204 - atomic_long_t steal_user_context;
32205 - atomic_long_t steal_kernel_context;
32206 - atomic_long_t steal_context_failed;
32207 - atomic_long_t nopfn;
32208 - atomic_long_t asid_new;
32209 - atomic_long_t asid_next;
32210 - atomic_long_t asid_wrap;
32211 - atomic_long_t asid_reuse;
32212 - atomic_long_t intr;
32213 - atomic_long_t intr_cbr;
32214 - atomic_long_t intr_tfh;
32215 - atomic_long_t intr_spurious;
32216 - atomic_long_t intr_mm_lock_failed;
32217 - atomic_long_t call_os;
32218 - atomic_long_t call_os_wait_queue;
32219 - atomic_long_t user_flush_tlb;
32220 - atomic_long_t user_unload_context;
32221 - atomic_long_t user_exception;
32222 - atomic_long_t set_context_option;
32223 - atomic_long_t check_context_retarget_intr;
32224 - atomic_long_t check_context_unload;
32225 - atomic_long_t tlb_dropin;
32226 - atomic_long_t tlb_preload_page;
32227 - atomic_long_t tlb_dropin_fail_no_asid;
32228 - atomic_long_t tlb_dropin_fail_upm;
32229 - atomic_long_t tlb_dropin_fail_invalid;
32230 - atomic_long_t tlb_dropin_fail_range_active;
32231 - atomic_long_t tlb_dropin_fail_idle;
32232 - atomic_long_t tlb_dropin_fail_fmm;
32233 - atomic_long_t tlb_dropin_fail_no_exception;
32234 - atomic_long_t tfh_stale_on_fault;
32235 - atomic_long_t mmu_invalidate_range;
32236 - atomic_long_t mmu_invalidate_page;
32237 - atomic_long_t flush_tlb;
32238 - atomic_long_t flush_tlb_gru;
32239 - atomic_long_t flush_tlb_gru_tgh;
32240 - atomic_long_t flush_tlb_gru_zero_asid;
32241 -
32242 - atomic_long_t copy_gpa;
32243 - atomic_long_t read_gpa;
32244 -
32245 - atomic_long_t mesq_receive;
32246 - atomic_long_t mesq_receive_none;
32247 - atomic_long_t mesq_send;
32248 - atomic_long_t mesq_send_failed;
32249 - atomic_long_t mesq_noop;
32250 - atomic_long_t mesq_send_unexpected_error;
32251 - atomic_long_t mesq_send_lb_overflow;
32252 - atomic_long_t mesq_send_qlimit_reached;
32253 - atomic_long_t mesq_send_amo_nacked;
32254 - atomic_long_t mesq_send_put_nacked;
32255 - atomic_long_t mesq_page_overflow;
32256 - atomic_long_t mesq_qf_locked;
32257 - atomic_long_t mesq_qf_noop_not_full;
32258 - atomic_long_t mesq_qf_switch_head_failed;
32259 - atomic_long_t mesq_qf_unexpected_error;
32260 - atomic_long_t mesq_noop_unexpected_error;
32261 - atomic_long_t mesq_noop_lb_overflow;
32262 - atomic_long_t mesq_noop_qlimit_reached;
32263 - atomic_long_t mesq_noop_amo_nacked;
32264 - atomic_long_t mesq_noop_put_nacked;
32265 - atomic_long_t mesq_noop_page_overflow;
32266 + atomic_long_unchecked_t vdata_alloc;
32267 + atomic_long_unchecked_t vdata_free;
32268 + atomic_long_unchecked_t gts_alloc;
32269 + atomic_long_unchecked_t gts_free;
32270 + atomic_long_unchecked_t gms_alloc;
32271 + atomic_long_unchecked_t gms_free;
32272 + atomic_long_unchecked_t gts_double_allocate;
32273 + atomic_long_unchecked_t assign_context;
32274 + atomic_long_unchecked_t assign_context_failed;
32275 + atomic_long_unchecked_t free_context;
32276 + atomic_long_unchecked_t load_user_context;
32277 + atomic_long_unchecked_t load_kernel_context;
32278 + atomic_long_unchecked_t lock_kernel_context;
32279 + atomic_long_unchecked_t unlock_kernel_context;
32280 + atomic_long_unchecked_t steal_user_context;
32281 + atomic_long_unchecked_t steal_kernel_context;
32282 + atomic_long_unchecked_t steal_context_failed;
32283 + atomic_long_unchecked_t nopfn;
32284 + atomic_long_unchecked_t asid_new;
32285 + atomic_long_unchecked_t asid_next;
32286 + atomic_long_unchecked_t asid_wrap;
32287 + atomic_long_unchecked_t asid_reuse;
32288 + atomic_long_unchecked_t intr;
32289 + atomic_long_unchecked_t intr_cbr;
32290 + atomic_long_unchecked_t intr_tfh;
32291 + atomic_long_unchecked_t intr_spurious;
32292 + atomic_long_unchecked_t intr_mm_lock_failed;
32293 + atomic_long_unchecked_t call_os;
32294 + atomic_long_unchecked_t call_os_wait_queue;
32295 + atomic_long_unchecked_t user_flush_tlb;
32296 + atomic_long_unchecked_t user_unload_context;
32297 + atomic_long_unchecked_t user_exception;
32298 + atomic_long_unchecked_t set_context_option;
32299 + atomic_long_unchecked_t check_context_retarget_intr;
32300 + atomic_long_unchecked_t check_context_unload;
32301 + atomic_long_unchecked_t tlb_dropin;
32302 + atomic_long_unchecked_t tlb_preload_page;
32303 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32304 + atomic_long_unchecked_t tlb_dropin_fail_upm;
32305 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
32306 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
32307 + atomic_long_unchecked_t tlb_dropin_fail_idle;
32308 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
32309 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32310 + atomic_long_unchecked_t tfh_stale_on_fault;
32311 + atomic_long_unchecked_t mmu_invalidate_range;
32312 + atomic_long_unchecked_t mmu_invalidate_page;
32313 + atomic_long_unchecked_t flush_tlb;
32314 + atomic_long_unchecked_t flush_tlb_gru;
32315 + atomic_long_unchecked_t flush_tlb_gru_tgh;
32316 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32317 +
32318 + atomic_long_unchecked_t copy_gpa;
32319 + atomic_long_unchecked_t read_gpa;
32320 +
32321 + atomic_long_unchecked_t mesq_receive;
32322 + atomic_long_unchecked_t mesq_receive_none;
32323 + atomic_long_unchecked_t mesq_send;
32324 + atomic_long_unchecked_t mesq_send_failed;
32325 + atomic_long_unchecked_t mesq_noop;
32326 + atomic_long_unchecked_t mesq_send_unexpected_error;
32327 + atomic_long_unchecked_t mesq_send_lb_overflow;
32328 + atomic_long_unchecked_t mesq_send_qlimit_reached;
32329 + atomic_long_unchecked_t mesq_send_amo_nacked;
32330 + atomic_long_unchecked_t mesq_send_put_nacked;
32331 + atomic_long_unchecked_t mesq_page_overflow;
32332 + atomic_long_unchecked_t mesq_qf_locked;
32333 + atomic_long_unchecked_t mesq_qf_noop_not_full;
32334 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
32335 + atomic_long_unchecked_t mesq_qf_unexpected_error;
32336 + atomic_long_unchecked_t mesq_noop_unexpected_error;
32337 + atomic_long_unchecked_t mesq_noop_lb_overflow;
32338 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
32339 + atomic_long_unchecked_t mesq_noop_amo_nacked;
32340 + atomic_long_unchecked_t mesq_noop_put_nacked;
32341 + atomic_long_unchecked_t mesq_noop_page_overflow;
32342
32343 };
32344
32345 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
32346 tghop_invalidate, mcsop_last};
32347
32348 struct mcs_op_statistic {
32349 - atomic_long_t count;
32350 - atomic_long_t total;
32351 + atomic_long_unchecked_t count;
32352 + atomic_long_unchecked_t total;
32353 unsigned long max;
32354 };
32355
32356 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
32357
32358 #define STAT(id) do { \
32359 if (gru_options & OPT_STATS) \
32360 - atomic_long_inc(&gru_stats.id); \
32361 + atomic_long_inc_unchecked(&gru_stats.id); \
32362 } while (0)
32363
32364 #ifdef CONFIG_SGI_GRU_DEBUG
32365 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
32366 index 851b2f2..a4ec097 100644
32367 --- a/drivers/misc/sgi-xp/xp.h
32368 +++ b/drivers/misc/sgi-xp/xp.h
32369 @@ -289,7 +289,7 @@ struct xpc_interface {
32370 xpc_notify_func, void *);
32371 void (*received) (short, int, void *);
32372 enum xp_retval (*partid_to_nasids) (short, void *);
32373 -};
32374 +} __no_const;
32375
32376 extern struct xpc_interface xpc_interface;
32377
32378 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
32379 index b94d5f7..7f494c5 100644
32380 --- a/drivers/misc/sgi-xp/xpc.h
32381 +++ b/drivers/misc/sgi-xp/xpc.h
32382 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
32383 void (*received_payload) (struct xpc_channel *, void *);
32384 void (*notify_senders_of_disconnect) (struct xpc_channel *);
32385 };
32386 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
32387
32388 /* struct xpc_partition act_state values (for XPC HB) */
32389
32390 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
32391 /* found in xpc_main.c */
32392 extern struct device *xpc_part;
32393 extern struct device *xpc_chan;
32394 -extern struct xpc_arch_operations xpc_arch_ops;
32395 +extern xpc_arch_operations_no_const xpc_arch_ops;
32396 extern int xpc_disengage_timelimit;
32397 extern int xpc_disengage_timedout;
32398 extern int xpc_activate_IRQ_rcvd;
32399 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
32400 index 8d082b4..aa749ae 100644
32401 --- a/drivers/misc/sgi-xp/xpc_main.c
32402 +++ b/drivers/misc/sgi-xp/xpc_main.c
32403 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
32404 .notifier_call = xpc_system_die,
32405 };
32406
32407 -struct xpc_arch_operations xpc_arch_ops;
32408 +xpc_arch_operations_no_const xpc_arch_ops;
32409
32410 /*
32411 * Timer function to enforce the timelimit on the partition disengage.
32412 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
32413 index 26c5286..292d261 100644
32414 --- a/drivers/mmc/host/sdhci-pci.c
32415 +++ b/drivers/mmc/host/sdhci-pci.c
32416 @@ -542,7 +542,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
32417 .probe = via_probe,
32418 };
32419
32420 -static const struct pci_device_id pci_ids[] __devinitdata = {
32421 +static const struct pci_device_id pci_ids[] __devinitconst = {
32422 {
32423 .vendor = PCI_VENDOR_ID_RICOH,
32424 .device = PCI_DEVICE_ID_RICOH_R5C822,
32425 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
32426 index e1e122f..d99a6ea 100644
32427 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
32428 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
32429 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
32430 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32431 unsigned long timeo = jiffies + HZ;
32432
32433 + pax_track_stack();
32434 +
32435 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32436 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32437 goto sleep;
32438 @@ -1653,6 +1655,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
32439 unsigned long initial_adr;
32440 int initial_len = len;
32441
32442 + pax_track_stack();
32443 +
32444 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32445 adr += chip->start;
32446 initial_adr = adr;
32447 @@ -1871,6 +1875,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
32448 int retries = 3;
32449 int ret;
32450
32451 + pax_track_stack();
32452 +
32453 adr += chip->start;
32454
32455 retry:
32456 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
32457 index 179814a..abe9d60 100644
32458 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
32459 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
32460 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
32461 unsigned long cmd_addr;
32462 struct cfi_private *cfi = map->fldrv_priv;
32463
32464 + pax_track_stack();
32465 +
32466 adr += chip->start;
32467
32468 /* Ensure cmd read/writes are aligned. */
32469 @@ -429,6 +431,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
32470 DECLARE_WAITQUEUE(wait, current);
32471 int wbufsize, z;
32472
32473 + pax_track_stack();
32474 +
32475 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32476 if (adr & (map_bankwidth(map)-1))
32477 return -EINVAL;
32478 @@ -743,6 +747,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
32479 DECLARE_WAITQUEUE(wait, current);
32480 int ret = 0;
32481
32482 + pax_track_stack();
32483 +
32484 adr += chip->start;
32485
32486 /* Let's determine this according to the interleave only once */
32487 @@ -1048,6 +1054,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
32488 unsigned long timeo = jiffies + HZ;
32489 DECLARE_WAITQUEUE(wait, current);
32490
32491 + pax_track_stack();
32492 +
32493 adr += chip->start;
32494
32495 /* Let's determine this according to the interleave only once */
32496 @@ -1197,6 +1205,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
32497 unsigned long timeo = jiffies + HZ;
32498 DECLARE_WAITQUEUE(wait, current);
32499
32500 + pax_track_stack();
32501 +
32502 adr += chip->start;
32503
32504 /* Let's determine this according to the interleave only once */
32505 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
32506 index f7fbf60..9866457 100644
32507 --- a/drivers/mtd/devices/doc2000.c
32508 +++ b/drivers/mtd/devices/doc2000.c
32509 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
32510
32511 /* The ECC will not be calculated correctly if less than 512 is written */
32512 /* DBB-
32513 - if (len != 0x200 && eccbuf)
32514 + if (len != 0x200)
32515 printk(KERN_WARNING
32516 "ECC needs a full sector write (adr: %lx size %lx)\n",
32517 (long) to, (long) len);
32518 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
32519 index 241192f..d0c35a3 100644
32520 --- a/drivers/mtd/devices/doc2001.c
32521 +++ b/drivers/mtd/devices/doc2001.c
32522 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
32523 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32524
32525 /* Don't allow read past end of device */
32526 - if (from >= this->totlen)
32527 + if (from >= this->totlen || !len)
32528 return -EINVAL;
32529
32530 /* Don't allow a single read to cross a 512-byte block boundary */
32531 diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
32532 index 037b399..225a71d 100644
32533 --- a/drivers/mtd/ftl.c
32534 +++ b/drivers/mtd/ftl.c
32535 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
32536 loff_t offset;
32537 uint16_t srcunitswap = cpu_to_le16(srcunit);
32538
32539 + pax_track_stack();
32540 +
32541 eun = &part->EUNInfo[srcunit];
32542 xfer = &part->XferInfo[xferunit];
32543 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32544 diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
32545 index d7592e6..31c505c 100644
32546 --- a/drivers/mtd/inftlcore.c
32547 +++ b/drivers/mtd/inftlcore.c
32548 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
32549 struct inftl_oob oob;
32550 size_t retlen;
32551
32552 + pax_track_stack();
32553 +
32554 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32555 "pending=%d)\n", inftl, thisVUC, pendingblock);
32556
32557 diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
32558 index 104052e..6232be5 100644
32559 --- a/drivers/mtd/inftlmount.c
32560 +++ b/drivers/mtd/inftlmount.c
32561 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
32562 struct INFTLPartition *ip;
32563 size_t retlen;
32564
32565 + pax_track_stack();
32566 +
32567 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32568
32569 /*
32570 diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
32571 index dbfe17b..c7b0918 100644
32572 --- a/drivers/mtd/lpddr/qinfo_probe.c
32573 +++ b/drivers/mtd/lpddr/qinfo_probe.c
32574 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
32575 {
32576 map_word pfow_val[4];
32577
32578 + pax_track_stack();
32579 +
32580 /* Check identification string */
32581 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32582 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
32583 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
32584 index 49e20a4..60fbfa5 100644
32585 --- a/drivers/mtd/mtdchar.c
32586 +++ b/drivers/mtd/mtdchar.c
32587 @@ -554,6 +554,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
32588 u_long size;
32589 struct mtd_info_user info;
32590
32591 + pax_track_stack();
32592 +
32593 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32594
32595 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
32596 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
32597 index d527621..2491fab 100644
32598 --- a/drivers/mtd/nand/denali.c
32599 +++ b/drivers/mtd/nand/denali.c
32600 @@ -26,6 +26,7 @@
32601 #include <linux/pci.h>
32602 #include <linux/mtd/mtd.h>
32603 #include <linux/module.h>
32604 +#include <linux/slab.h>
32605
32606 #include "denali.h"
32607
32608 diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
32609 index b155666..611b801 100644
32610 --- a/drivers/mtd/nftlcore.c
32611 +++ b/drivers/mtd/nftlcore.c
32612 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
32613 int inplace = 1;
32614 size_t retlen;
32615
32616 + pax_track_stack();
32617 +
32618 memset(BlockMap, 0xff, sizeof(BlockMap));
32619 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32620
32621 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
32622 index e3cd1ff..0ea79a3 100644
32623 --- a/drivers/mtd/nftlmount.c
32624 +++ b/drivers/mtd/nftlmount.c
32625 @@ -24,6 +24,7 @@
32626 #include <asm/errno.h>
32627 #include <linux/delay.h>
32628 #include <linux/slab.h>
32629 +#include <linux/sched.h>
32630 #include <linux/mtd/mtd.h>
32631 #include <linux/mtd/nand.h>
32632 #include <linux/mtd/nftl.h>
32633 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
32634 struct mtd_info *mtd = nftl->mbd.mtd;
32635 unsigned int i;
32636
32637 + pax_track_stack();
32638 +
32639 /* Assume logical EraseSize == physical erasesize for starting the scan.
32640 We'll sort it out later if we find a MediaHeader which says otherwise */
32641 /* Actually, we won't. The new DiskOnChip driver has already scanned
32642 diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
32643 index 6c3fb5a..c542a81 100644
32644 --- a/drivers/mtd/ubi/build.c
32645 +++ b/drivers/mtd/ubi/build.c
32646 @@ -1311,7 +1311,7 @@ module_exit(ubi_exit);
32647 static int __init bytes_str_to_int(const char *str)
32648 {
32649 char *endp;
32650 - unsigned long result;
32651 + unsigned long result, scale = 1;
32652
32653 result = simple_strtoul(str, &endp, 0);
32654 if (str == endp || result >= INT_MAX) {
32655 @@ -1322,11 +1322,11 @@ static int __init bytes_str_to_int(const char *str)
32656
32657 switch (*endp) {
32658 case 'G':
32659 - result *= 1024;
32660 + scale *= 1024;
32661 case 'M':
32662 - result *= 1024;
32663 + scale *= 1024;
32664 case 'K':
32665 - result *= 1024;
32666 + scale *= 1024;
32667 if (endp[1] == 'i' && endp[2] == 'B')
32668 endp += 2;
32669 case '\0':
32670 @@ -1337,7 +1337,13 @@ static int __init bytes_str_to_int(const char *str)
32671 return -EINVAL;
32672 }
32673
32674 - return result;
32675 + if ((intoverflow_t)result*scale >= INT_MAX) {
32676 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32677 + str);
32678 + return -EINVAL;
32679 + }
32680 +
32681 + return result*scale;
32682 }
32683
32684 /**
32685 diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
32686 index d4f7dda..d627d46 100644
32687 --- a/drivers/net/atlx/atl2.c
32688 +++ b/drivers/net/atlx/atl2.c
32689 @@ -2857,7 +2857,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
32690 */
32691
32692 #define ATL2_PARAM(X, desc) \
32693 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32694 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
32695 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
32696 MODULE_PARM_DESC(X, desc);
32697 #else
32698 diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
32699 index 87aecdf..ec23470 100644
32700 --- a/drivers/net/bna/bfa_ioc_ct.c
32701 +++ b/drivers/net/bna/bfa_ioc_ct.c
32702 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
32703 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
32704 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
32705
32706 -static struct bfa_ioc_hwif nw_hwif_ct;
32707 +static struct bfa_ioc_hwif nw_hwif_ct = {
32708 + .ioc_pll_init = bfa_ioc_ct_pll_init,
32709 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
32710 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
32711 + .ioc_reg_init = bfa_ioc_ct_reg_init,
32712 + .ioc_map_port = bfa_ioc_ct_map_port,
32713 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
32714 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
32715 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
32716 + .ioc_sync_start = bfa_ioc_ct_sync_start,
32717 + .ioc_sync_join = bfa_ioc_ct_sync_join,
32718 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
32719 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
32720 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
32721 +};
32722
32723 /**
32724 * Called from bfa_ioc_attach() to map asic specific calls.
32725 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
32726 void
32727 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
32728 {
32729 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
32730 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
32731 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
32732 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
32733 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
32734 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
32735 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
32736 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
32737 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
32738 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
32739 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
32740 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
32741 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
32742 -
32743 ioc->ioc_hwif = &nw_hwif_ct;
32744 }
32745
32746 diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
32747 index 8e35b25..c39f205 100644
32748 --- a/drivers/net/bna/bnad.c
32749 +++ b/drivers/net/bna/bnad.c
32750 @@ -1673,7 +1673,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
32751 struct bna_intr_info *intr_info =
32752 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
32753 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
32754 - struct bna_tx_event_cbfn tx_cbfn;
32755 + static struct bna_tx_event_cbfn tx_cbfn = {
32756 + /* Initialize the tx event handlers */
32757 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
32758 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
32759 + .tx_stall_cbfn = bnad_cb_tx_stall,
32760 + .tx_resume_cbfn = bnad_cb_tx_resume,
32761 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
32762 + };
32763 struct bna_tx *tx;
32764 unsigned long flags;
32765
32766 @@ -1682,13 +1689,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx_id)
32767 tx_config->txq_depth = bnad->txq_depth;
32768 tx_config->tx_type = BNA_TX_T_REGULAR;
32769
32770 - /* Initialize the tx event handlers */
32771 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
32772 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
32773 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
32774 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
32775 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
32776 -
32777 /* Get BNA's resource requirement for one tx object */
32778 spin_lock_irqsave(&bnad->bna_lock, flags);
32779 bna_tx_res_req(bnad->num_txq_per_tx,
32780 @@ -1819,21 +1819,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
32781 struct bna_intr_info *intr_info =
32782 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
32783 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
32784 - struct bna_rx_event_cbfn rx_cbfn;
32785 + static struct bna_rx_event_cbfn rx_cbfn = {
32786 + /* Initialize the Rx event handlers */
32787 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
32788 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
32789 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
32790 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
32791 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
32792 + .rx_post_cbfn = bnad_cb_rx_post
32793 + };
32794 struct bna_rx *rx;
32795 unsigned long flags;
32796
32797 /* Initialize the Rx object configuration */
32798 bnad_init_rx_config(bnad, rx_config);
32799
32800 - /* Initialize the Rx event handlers */
32801 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
32802 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
32803 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
32804 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
32805 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
32806 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
32807 -
32808 /* Get BNA's resource requirement for one Rx object */
32809 spin_lock_irqsave(&bnad->bna_lock, flags);
32810 bna_rx_res_req(rx_config, res_info);
32811 diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
32812 index 4b2b570..31033f4 100644
32813 --- a/drivers/net/bnx2.c
32814 +++ b/drivers/net/bnx2.c
32815 @@ -5877,6 +5877,8 @@ bnx2_test_nvram(struct bnx2 *bp)
32816 int rc = 0;
32817 u32 magic, csum;
32818
32819 + pax_track_stack();
32820 +
32821 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
32822 goto test_nvram_done;
32823
32824 diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
32825 index cf3e479..5dc0ecc 100644
32826 --- a/drivers/net/bnx2x/bnx2x_ethtool.c
32827 +++ b/drivers/net/bnx2x/bnx2x_ethtool.c
32828 @@ -1943,6 +1943,8 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
32829 int i, rc;
32830 u32 magic, crc;
32831
32832 + pax_track_stack();
32833 +
32834 if (BP_NOMCP(bp))
32835 return 0;
32836
32837 diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
32838 index 9a517c2..a50cfcb 100644
32839 --- a/drivers/net/bnx2x/bnx2x_sp.h
32840 +++ b/drivers/net/bnx2x/bnx2x_sp.h
32841 @@ -449,7 +449,7 @@ struct bnx2x_rx_mode_obj {
32842
32843 int (*wait_comp)(struct bnx2x *bp,
32844 struct bnx2x_rx_mode_ramrod_params *p);
32845 -};
32846 +} __no_const;
32847
32848 /********************** Set multicast group ***********************************/
32849
32850 diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
32851 index c5f5479..2e8c260 100644
32852 --- a/drivers/net/cxgb3/l2t.h
32853 +++ b/drivers/net/cxgb3/l2t.h
32854 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
32855 */
32856 struct l2t_skb_cb {
32857 arp_failure_handler_func arp_failure_handler;
32858 -};
32859 +} __no_const;
32860
32861 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
32862
32863 diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
32864 index b4efa29..c5f2703 100644
32865 --- a/drivers/net/cxgb4/cxgb4_main.c
32866 +++ b/drivers/net/cxgb4/cxgb4_main.c
32867 @@ -3396,6 +3396,8 @@ static int __devinit enable_msix(struct adapter *adap)
32868 unsigned int nchan = adap->params.nports;
32869 struct msix_entry entries[MAX_INGQ + 1];
32870
32871 + pax_track_stack();
32872 +
32873 for (i = 0; i < ARRAY_SIZE(entries); ++i)
32874 entries[i].entry = i;
32875
32876 diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
32877 index d1ec111..12735bc 100644
32878 --- a/drivers/net/cxgb4/t4_hw.c
32879 +++ b/drivers/net/cxgb4/t4_hw.c
32880 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
32881 u8 vpd[VPD_LEN], csum;
32882 unsigned int vpdr_len, kw_offset, id_len;
32883
32884 + pax_track_stack();
32885 +
32886 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
32887 if (ret < 0)
32888 return ret;
32889 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
32890 index 536b3a5..e6f8dcc 100644
32891 --- a/drivers/net/e1000e/82571.c
32892 +++ b/drivers/net/e1000e/82571.c
32893 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
32894 {
32895 struct e1000_hw *hw = &adapter->hw;
32896 struct e1000_mac_info *mac = &hw->mac;
32897 - struct e1000_mac_operations *func = &mac->ops;
32898 + e1000_mac_operations_no_const *func = &mac->ops;
32899 u32 swsm = 0;
32900 u32 swsm2 = 0;
32901 bool force_clear_smbi = false;
32902 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
32903 index e4f4225..24da2ea 100644
32904 --- a/drivers/net/e1000e/es2lan.c
32905 +++ b/drivers/net/e1000e/es2lan.c
32906 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
32907 {
32908 struct e1000_hw *hw = &adapter->hw;
32909 struct e1000_mac_info *mac = &hw->mac;
32910 - struct e1000_mac_operations *func = &mac->ops;
32911 + e1000_mac_operations_no_const *func = &mac->ops;
32912
32913 /* Set media type */
32914 switch (adapter->pdev->device) {
32915 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
32916 index 2967039..ca8c40c 100644
32917 --- a/drivers/net/e1000e/hw.h
32918 +++ b/drivers/net/e1000e/hw.h
32919 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
32920 void (*write_vfta)(struct e1000_hw *, u32, u32);
32921 s32 (*read_mac_addr)(struct e1000_hw *);
32922 };
32923 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
32924
32925 /*
32926 * When to use various PHY register access functions:
32927 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
32928 void (*power_up)(struct e1000_hw *);
32929 void (*power_down)(struct e1000_hw *);
32930 };
32931 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
32932
32933 /* Function pointers for the NVM. */
32934 struct e1000_nvm_operations {
32935 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
32936 s32 (*validate)(struct e1000_hw *);
32937 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32938 };
32939 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
32940
32941 struct e1000_mac_info {
32942 - struct e1000_mac_operations ops;
32943 + e1000_mac_operations_no_const ops;
32944 u8 addr[ETH_ALEN];
32945 u8 perm_addr[ETH_ALEN];
32946
32947 @@ -872,7 +875,7 @@ struct e1000_mac_info {
32948 };
32949
32950 struct e1000_phy_info {
32951 - struct e1000_phy_operations ops;
32952 + e1000_phy_operations_no_const ops;
32953
32954 enum e1000_phy_type type;
32955
32956 @@ -906,7 +909,7 @@ struct e1000_phy_info {
32957 };
32958
32959 struct e1000_nvm_info {
32960 - struct e1000_nvm_operations ops;
32961 + e1000_nvm_operations_no_const ops;
32962
32963 enum e1000_nvm_type type;
32964 enum e1000_nvm_override override;
32965 diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
32966 index fa8677c..196356f 100644
32967 --- a/drivers/net/fealnx.c
32968 +++ b/drivers/net/fealnx.c
32969 @@ -150,7 +150,7 @@ struct chip_info {
32970 int flags;
32971 };
32972
32973 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
32974 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
32975 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32976 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
32977 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
32978 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
32979 index 2a5a34d..be871cc 100644
32980 --- a/drivers/net/hamradio/6pack.c
32981 +++ b/drivers/net/hamradio/6pack.c
32982 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
32983 unsigned char buf[512];
32984 int count1;
32985
32986 + pax_track_stack();
32987 +
32988 if (!count)
32989 return;
32990
32991 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
32992 index 4519a13..f97fcd0 100644
32993 --- a/drivers/net/igb/e1000_hw.h
32994 +++ b/drivers/net/igb/e1000_hw.h
32995 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
32996 s32 (*read_mac_addr)(struct e1000_hw *);
32997 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
32998 };
32999 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33000
33001 struct e1000_phy_operations {
33002 s32 (*acquire)(struct e1000_hw *);
33003 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
33004 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
33005 s32 (*write_reg)(struct e1000_hw *, u32, u16);
33006 };
33007 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
33008
33009 struct e1000_nvm_operations {
33010 s32 (*acquire)(struct e1000_hw *);
33011 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
33012 s32 (*update)(struct e1000_hw *);
33013 s32 (*validate)(struct e1000_hw *);
33014 };
33015 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
33016
33017 struct e1000_info {
33018 s32 (*get_invariants)(struct e1000_hw *);
33019 @@ -350,7 +353,7 @@ struct e1000_info {
33020 extern const struct e1000_info e1000_82575_info;
33021
33022 struct e1000_mac_info {
33023 - struct e1000_mac_operations ops;
33024 + e1000_mac_operations_no_const ops;
33025
33026 u8 addr[6];
33027 u8 perm_addr[6];
33028 @@ -388,7 +391,7 @@ struct e1000_mac_info {
33029 };
33030
33031 struct e1000_phy_info {
33032 - struct e1000_phy_operations ops;
33033 + e1000_phy_operations_no_const ops;
33034
33035 enum e1000_phy_type type;
33036
33037 @@ -423,7 +426,7 @@ struct e1000_phy_info {
33038 };
33039
33040 struct e1000_nvm_info {
33041 - struct e1000_nvm_operations ops;
33042 + e1000_nvm_operations_no_const ops;
33043 enum e1000_nvm_type type;
33044 enum e1000_nvm_override override;
33045
33046 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
33047 s32 (*check_for_ack)(struct e1000_hw *, u16);
33048 s32 (*check_for_rst)(struct e1000_hw *, u16);
33049 };
33050 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33051
33052 struct e1000_mbx_stats {
33053 u32 msgs_tx;
33054 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
33055 };
33056
33057 struct e1000_mbx_info {
33058 - struct e1000_mbx_operations ops;
33059 + e1000_mbx_operations_no_const ops;
33060 struct e1000_mbx_stats stats;
33061 u32 timeout;
33062 u32 usec_delay;
33063 diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
33064 index d7ed58f..64cde36 100644
33065 --- a/drivers/net/igbvf/vf.h
33066 +++ b/drivers/net/igbvf/vf.h
33067 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
33068 s32 (*read_mac_addr)(struct e1000_hw *);
33069 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
33070 };
33071 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
33072
33073 struct e1000_mac_info {
33074 - struct e1000_mac_operations ops;
33075 + e1000_mac_operations_no_const ops;
33076 u8 addr[6];
33077 u8 perm_addr[6];
33078
33079 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
33080 s32 (*check_for_ack)(struct e1000_hw *);
33081 s32 (*check_for_rst)(struct e1000_hw *);
33082 };
33083 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
33084
33085 struct e1000_mbx_stats {
33086 u32 msgs_tx;
33087 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
33088 };
33089
33090 struct e1000_mbx_info {
33091 - struct e1000_mbx_operations ops;
33092 + e1000_mbx_operations_no_const ops;
33093 struct e1000_mbx_stats stats;
33094 u32 timeout;
33095 u32 usec_delay;
33096 diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
33097 index 6a130eb..1aeb9e4 100644
33098 --- a/drivers/net/ixgb/ixgb_main.c
33099 +++ b/drivers/net/ixgb/ixgb_main.c
33100 @@ -1070,6 +1070,8 @@ ixgb_set_multi(struct net_device *netdev)
33101 u32 rctl;
33102 int i;
33103
33104 + pax_track_stack();
33105 +
33106 /* Check for Promiscuous and All Multicast modes */
33107
33108 rctl = IXGB_READ_REG(hw, RCTL);
33109 diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
33110 index dd7fbeb..44b9bbf 100644
33111 --- a/drivers/net/ixgb/ixgb_param.c
33112 +++ b/drivers/net/ixgb/ixgb_param.c
33113 @@ -261,6 +261,9 @@ void __devinit
33114 ixgb_check_options(struct ixgb_adapter *adapter)
33115 {
33116 int bd = adapter->bd_number;
33117 +
33118 + pax_track_stack();
33119 +
33120 if (bd >= IXGB_MAX_NIC) {
33121 pr_notice("Warning: no configuration for board #%i\n", bd);
33122 pr_notice("Using defaults for all values\n");
33123 diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
33124 index e0d970e..1cfdea5 100644
33125 --- a/drivers/net/ixgbe/ixgbe_type.h
33126 +++ b/drivers/net/ixgbe/ixgbe_type.h
33127 @@ -2642,6 +2642,7 @@ struct ixgbe_eeprom_operations {
33128 s32 (*update_checksum)(struct ixgbe_hw *);
33129 u16 (*calc_checksum)(struct ixgbe_hw *);
33130 };
33131 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
33132
33133 struct ixgbe_mac_operations {
33134 s32 (*init_hw)(struct ixgbe_hw *);
33135 @@ -2703,6 +2704,7 @@ struct ixgbe_mac_operations {
33136 /* Manageability interface */
33137 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
33138 };
33139 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33140
33141 struct ixgbe_phy_operations {
33142 s32 (*identify)(struct ixgbe_hw *);
33143 @@ -2722,9 +2724,10 @@ struct ixgbe_phy_operations {
33144 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
33145 s32 (*check_overtemp)(struct ixgbe_hw *);
33146 };
33147 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
33148
33149 struct ixgbe_eeprom_info {
33150 - struct ixgbe_eeprom_operations ops;
33151 + ixgbe_eeprom_operations_no_const ops;
33152 enum ixgbe_eeprom_type type;
33153 u32 semaphore_delay;
33154 u16 word_size;
33155 @@ -2734,7 +2737,7 @@ struct ixgbe_eeprom_info {
33156
33157 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
33158 struct ixgbe_mac_info {
33159 - struct ixgbe_mac_operations ops;
33160 + ixgbe_mac_operations_no_const ops;
33161 enum ixgbe_mac_type type;
33162 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33163 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
33164 @@ -2762,7 +2765,7 @@ struct ixgbe_mac_info {
33165 };
33166
33167 struct ixgbe_phy_info {
33168 - struct ixgbe_phy_operations ops;
33169 + ixgbe_phy_operations_no_const ops;
33170 struct mdio_if_info mdio;
33171 enum ixgbe_phy_type type;
33172 u32 id;
33173 @@ -2790,6 +2793,7 @@ struct ixgbe_mbx_operations {
33174 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
33175 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
33176 };
33177 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33178
33179 struct ixgbe_mbx_stats {
33180 u32 msgs_tx;
33181 @@ -2801,7 +2805,7 @@ struct ixgbe_mbx_stats {
33182 };
33183
33184 struct ixgbe_mbx_info {
33185 - struct ixgbe_mbx_operations ops;
33186 + ixgbe_mbx_operations_no_const ops;
33187 struct ixgbe_mbx_stats stats;
33188 u32 timeout;
33189 u32 usec_delay;
33190 diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
33191 index 10306b4..28df758 100644
33192 --- a/drivers/net/ixgbevf/vf.h
33193 +++ b/drivers/net/ixgbevf/vf.h
33194 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
33195 s32 (*clear_vfta)(struct ixgbe_hw *);
33196 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
33197 };
33198 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
33199
33200 enum ixgbe_mac_type {
33201 ixgbe_mac_unknown = 0,
33202 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
33203 };
33204
33205 struct ixgbe_mac_info {
33206 - struct ixgbe_mac_operations ops;
33207 + ixgbe_mac_operations_no_const ops;
33208 u8 addr[6];
33209 u8 perm_addr[6];
33210
33211 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
33212 s32 (*check_for_ack)(struct ixgbe_hw *);
33213 s32 (*check_for_rst)(struct ixgbe_hw *);
33214 };
33215 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
33216
33217 struct ixgbe_mbx_stats {
33218 u32 msgs_tx;
33219 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
33220 };
33221
33222 struct ixgbe_mbx_info {
33223 - struct ixgbe_mbx_operations ops;
33224 + ixgbe_mbx_operations_no_const ops;
33225 struct ixgbe_mbx_stats stats;
33226 u32 timeout;
33227 u32 udelay;
33228 diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
33229 index 27418d3..adf15bb 100644
33230 --- a/drivers/net/ksz884x.c
33231 +++ b/drivers/net/ksz884x.c
33232 @@ -6533,6 +6533,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
33233 int rc;
33234 u64 counter[TOTAL_PORT_COUNTER_NUM];
33235
33236 + pax_track_stack();
33237 +
33238 mutex_lock(&hw_priv->lock);
33239 n = SWITCH_PORT_NUM;
33240 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
33241 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
33242 index f0ee35d..3831c8a 100644
33243 --- a/drivers/net/mlx4/main.c
33244 +++ b/drivers/net/mlx4/main.c
33245 @@ -40,6 +40,7 @@
33246 #include <linux/dma-mapping.h>
33247 #include <linux/slab.h>
33248 #include <linux/io-mapping.h>
33249 +#include <linux/sched.h>
33250
33251 #include <linux/mlx4/device.h>
33252 #include <linux/mlx4/doorbell.h>
33253 @@ -762,6 +763,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
33254 u64 icm_size;
33255 int err;
33256
33257 + pax_track_stack();
33258 +
33259 err = mlx4_QUERY_FW(dev);
33260 if (err) {
33261 if (err == -EACCES)
33262 diff --git a/drivers/net/niu.c b/drivers/net/niu.c
33263 index ed47585..5e5be8f 100644
33264 --- a/drivers/net/niu.c
33265 +++ b/drivers/net/niu.c
33266 @@ -9061,6 +9061,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
33267 int i, num_irqs, err;
33268 u8 first_ldg;
33269
33270 + pax_track_stack();
33271 +
33272 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
33273 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
33274 ldg_num_map[i] = first_ldg + i;
33275 diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
33276 index 80b6f36..5cd8938 100644
33277 --- a/drivers/net/pcnet32.c
33278 +++ b/drivers/net/pcnet32.c
33279 @@ -270,7 +270,7 @@ struct pcnet32_private {
33280 struct sk_buff **rx_skbuff;
33281 dma_addr_t *tx_dma_addr;
33282 dma_addr_t *rx_dma_addr;
33283 - struct pcnet32_access a;
33284 + struct pcnet32_access *a;
33285 spinlock_t lock; /* Guard lock */
33286 unsigned int cur_rx, cur_tx; /* The next free ring entry */
33287 unsigned int rx_ring_size; /* current rx ring size */
33288 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct net_device *dev)
33289 u16 val;
33290
33291 netif_wake_queue(dev);
33292 - val = lp->a.read_csr(ioaddr, CSR3);
33293 + val = lp->a->read_csr(ioaddr, CSR3);
33294 val &= 0x00ff;
33295 - lp->a.write_csr(ioaddr, CSR3, val);
33296 + lp->a->write_csr(ioaddr, CSR3, val);
33297 napi_enable(&lp->napi);
33298 }
33299
33300 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
33301 r = mii_link_ok(&lp->mii_if);
33302 } else if (lp->chip_version >= PCNET32_79C970A) {
33303 ulong ioaddr = dev->base_addr; /* card base I/O address */
33304 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
33305 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
33306 } else { /* can not detect link on really old chips */
33307 r = 1;
33308 }
33309 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
33310 pcnet32_netif_stop(dev);
33311
33312 spin_lock_irqsave(&lp->lock, flags);
33313 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33314 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33315
33316 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
33317
33318 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
33319 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33320 {
33321 struct pcnet32_private *lp = netdev_priv(dev);
33322 - struct pcnet32_access *a = &lp->a; /* access to registers */
33323 + struct pcnet32_access *a = lp->a; /* access to registers */
33324 ulong ioaddr = dev->base_addr; /* card base I/O address */
33325 struct sk_buff *skb; /* sk buff */
33326 int x, i; /* counters */
33327 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33328 pcnet32_netif_stop(dev);
33329
33330 spin_lock_irqsave(&lp->lock, flags);
33331 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33332 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
33333
33334 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
33335
33336 /* Reset the PCNET32 */
33337 - lp->a.reset(ioaddr);
33338 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33339 + lp->a->reset(ioaddr);
33340 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33341
33342 /* switch pcnet32 to 32bit mode */
33343 - lp->a.write_bcr(ioaddr, 20, 2);
33344 + lp->a->write_bcr(ioaddr, 20, 2);
33345
33346 /* purge & init rings but don't actually restart */
33347 pcnet32_restart(dev, 0x0000);
33348
33349 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33350 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33351
33352 /* Initialize Transmit buffers. */
33353 size = data_len + 15;
33354 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33355
33356 /* set int loopback in CSR15 */
33357 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
33358 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
33359 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
33360
33361 teststatus = cpu_to_le16(0x8000);
33362 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33363 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
33364
33365 /* Check status of descriptors */
33366 for (x = 0; x < numbuffs; x++) {
33367 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
33368 }
33369 }
33370
33371 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33372 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
33373 wmb();
33374 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
33375 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
33376 @@ -1015,7 +1015,7 @@ clean_up:
33377 pcnet32_restart(dev, CSR0_NORMAL);
33378 } else {
33379 pcnet32_purge_rx_ring(dev);
33380 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33381 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
33382 }
33383 spin_unlock_irqrestore(&lp->lock, flags);
33384
33385 @@ -1026,7 +1026,7 @@ static int pcnet32_set_phys_id(struct net_device *dev,
33386 enum ethtool_phys_id_state state)
33387 {
33388 struct pcnet32_private *lp = netdev_priv(dev);
33389 - struct pcnet32_access *a = &lp->a;
33390 + struct pcnet32_access *a = lp->a;
33391 ulong ioaddr = dev->base_addr;
33392 unsigned long flags;
33393 int i;
33394 @@ -1067,7 +1067,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
33395 {
33396 int csr5;
33397 struct pcnet32_private *lp = netdev_priv(dev);
33398 - struct pcnet32_access *a = &lp->a;
33399 + struct pcnet32_access *a = lp->a;
33400 ulong ioaddr = dev->base_addr;
33401 int ticks;
33402
33403 @@ -1324,8 +1324,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33404 spin_lock_irqsave(&lp->lock, flags);
33405 if (pcnet32_tx(dev)) {
33406 /* reset the chip to clear the error condition, then restart */
33407 - lp->a.reset(ioaddr);
33408 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33409 + lp->a->reset(ioaddr);
33410 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33411 pcnet32_restart(dev, CSR0_START);
33412 netif_wake_queue(dev);
33413 }
33414 @@ -1337,12 +1337,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
33415 __napi_complete(napi);
33416
33417 /* clear interrupt masks */
33418 - val = lp->a.read_csr(ioaddr, CSR3);
33419 + val = lp->a->read_csr(ioaddr, CSR3);
33420 val &= 0x00ff;
33421 - lp->a.write_csr(ioaddr, CSR3, val);
33422 + lp->a->write_csr(ioaddr, CSR3, val);
33423
33424 /* Set interrupt enable. */
33425 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
33426 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
33427
33428 spin_unlock_irqrestore(&lp->lock, flags);
33429 }
33430 @@ -1365,7 +1365,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33431 int i, csr0;
33432 u16 *buff = ptr;
33433 struct pcnet32_private *lp = netdev_priv(dev);
33434 - struct pcnet32_access *a = &lp->a;
33435 + struct pcnet32_access *a = lp->a;
33436 ulong ioaddr = dev->base_addr;
33437 unsigned long flags;
33438
33439 @@ -1401,9 +1401,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
33440 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
33441 if (lp->phymask & (1 << j)) {
33442 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
33443 - lp->a.write_bcr(ioaddr, 33,
33444 + lp->a->write_bcr(ioaddr, 33,
33445 (j << 5) | i);
33446 - *buff++ = lp->a.read_bcr(ioaddr, 34);
33447 + *buff++ = lp->a->read_bcr(ioaddr, 34);
33448 }
33449 }
33450 }
33451 @@ -1785,7 +1785,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33452 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
33453 lp->options |= PCNET32_PORT_FD;
33454
33455 - lp->a = *a;
33456 + lp->a = a;
33457
33458 /* prior to register_netdev, dev->name is not yet correct */
33459 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
33460 @@ -1844,7 +1844,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33461 if (lp->mii) {
33462 /* lp->phycount and lp->phymask are set to 0 by memset above */
33463
33464 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33465 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
33466 /* scan for PHYs */
33467 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33468 unsigned short id1, id2;
33469 @@ -1864,7 +1864,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
33470 pr_info("Found PHY %04x:%04x at address %d\n",
33471 id1, id2, i);
33472 }
33473 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33474 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
33475 if (lp->phycount > 1)
33476 lp->options |= PCNET32_PORT_MII;
33477 }
33478 @@ -2020,10 +2020,10 @@ static int pcnet32_open(struct net_device *dev)
33479 }
33480
33481 /* Reset the PCNET32 */
33482 - lp->a.reset(ioaddr);
33483 + lp->a->reset(ioaddr);
33484
33485 /* switch pcnet32 to 32bit mode */
33486 - lp->a.write_bcr(ioaddr, 20, 2);
33487 + lp->a->write_bcr(ioaddr, 20, 2);
33488
33489 netif_printk(lp, ifup, KERN_DEBUG, dev,
33490 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
33491 @@ -2032,14 +2032,14 @@ static int pcnet32_open(struct net_device *dev)
33492 (u32) (lp->init_dma_addr));
33493
33494 /* set/reset autoselect bit */
33495 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
33496 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
33497 if (lp->options & PCNET32_PORT_ASEL)
33498 val |= 2;
33499 - lp->a.write_bcr(ioaddr, 2, val);
33500 + lp->a->write_bcr(ioaddr, 2, val);
33501
33502 /* handle full duplex setting */
33503 if (lp->mii_if.full_duplex) {
33504 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
33505 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
33506 if (lp->options & PCNET32_PORT_FD) {
33507 val |= 1;
33508 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
33509 @@ -2049,14 +2049,14 @@ static int pcnet32_open(struct net_device *dev)
33510 if (lp->chip_version == 0x2627)
33511 val |= 3;
33512 }
33513 - lp->a.write_bcr(ioaddr, 9, val);
33514 + lp->a->write_bcr(ioaddr, 9, val);
33515 }
33516
33517 /* set/reset GPSI bit in test register */
33518 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
33519 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
33520 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
33521 val |= 0x10;
33522 - lp->a.write_csr(ioaddr, 124, val);
33523 + lp->a->write_csr(ioaddr, 124, val);
33524
33525 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
33526 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
33527 @@ -2075,24 +2075,24 @@ static int pcnet32_open(struct net_device *dev)
33528 * duplex, and/or enable auto negotiation, and clear DANAS
33529 */
33530 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
33531 - lp->a.write_bcr(ioaddr, 32,
33532 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
33533 + lp->a->write_bcr(ioaddr, 32,
33534 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
33535 /* disable Auto Negotiation, set 10Mpbs, HD */
33536 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
33537 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
33538 if (lp->options & PCNET32_PORT_FD)
33539 val |= 0x10;
33540 if (lp->options & PCNET32_PORT_100)
33541 val |= 0x08;
33542 - lp->a.write_bcr(ioaddr, 32, val);
33543 + lp->a->write_bcr(ioaddr, 32, val);
33544 } else {
33545 if (lp->options & PCNET32_PORT_ASEL) {
33546 - lp->a.write_bcr(ioaddr, 32,
33547 - lp->a.read_bcr(ioaddr,
33548 + lp->a->write_bcr(ioaddr, 32,
33549 + lp->a->read_bcr(ioaddr,
33550 32) | 0x0080);
33551 /* enable auto negotiate, setup, disable fd */
33552 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
33553 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
33554 val |= 0x20;
33555 - lp->a.write_bcr(ioaddr, 32, val);
33556 + lp->a->write_bcr(ioaddr, 32, val);
33557 }
33558 }
33559 } else {
33560 @@ -2105,10 +2105,10 @@ static int pcnet32_open(struct net_device *dev)
33561 * There is really no good other way to handle multiple PHYs
33562 * other than turning off all automatics
33563 */
33564 - val = lp->a.read_bcr(ioaddr, 2);
33565 - lp->a.write_bcr(ioaddr, 2, val & ~2);
33566 - val = lp->a.read_bcr(ioaddr, 32);
33567 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33568 + val = lp->a->read_bcr(ioaddr, 2);
33569 + lp->a->write_bcr(ioaddr, 2, val & ~2);
33570 + val = lp->a->read_bcr(ioaddr, 32);
33571 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
33572
33573 if (!(lp->options & PCNET32_PORT_ASEL)) {
33574 /* setup ecmd */
33575 @@ -2118,7 +2118,7 @@ static int pcnet32_open(struct net_device *dev)
33576 ethtool_cmd_speed_set(&ecmd,
33577 (lp->options & PCNET32_PORT_100) ?
33578 SPEED_100 : SPEED_10);
33579 - bcr9 = lp->a.read_bcr(ioaddr, 9);
33580 + bcr9 = lp->a->read_bcr(ioaddr, 9);
33581
33582 if (lp->options & PCNET32_PORT_FD) {
33583 ecmd.duplex = DUPLEX_FULL;
33584 @@ -2127,7 +2127,7 @@ static int pcnet32_open(struct net_device *dev)
33585 ecmd.duplex = DUPLEX_HALF;
33586 bcr9 |= ~(1 << 0);
33587 }
33588 - lp->a.write_bcr(ioaddr, 9, bcr9);
33589 + lp->a->write_bcr(ioaddr, 9, bcr9);
33590 }
33591
33592 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
33593 @@ -2158,9 +2158,9 @@ static int pcnet32_open(struct net_device *dev)
33594
33595 #ifdef DO_DXSUFLO
33596 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
33597 - val = lp->a.read_csr(ioaddr, CSR3);
33598 + val = lp->a->read_csr(ioaddr, CSR3);
33599 val |= 0x40;
33600 - lp->a.write_csr(ioaddr, CSR3, val);
33601 + lp->a->write_csr(ioaddr, CSR3, val);
33602 }
33603 #endif
33604
33605 @@ -2176,11 +2176,11 @@ static int pcnet32_open(struct net_device *dev)
33606 napi_enable(&lp->napi);
33607
33608 /* Re-initialize the PCNET32, and start it when done. */
33609 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33610 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
33611 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
33612 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
33613
33614 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33615 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33616 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
33617 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
33618
33619 netif_start_queue(dev);
33620
33621 @@ -2192,19 +2192,19 @@ static int pcnet32_open(struct net_device *dev)
33622
33623 i = 0;
33624 while (i++ < 100)
33625 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33626 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33627 break;
33628 /*
33629 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
33630 * reports that doing so triggers a bug in the '974.
33631 */
33632 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
33633 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
33634
33635 netif_printk(lp, ifup, KERN_DEBUG, dev,
33636 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
33637 i,
33638 (u32) (lp->init_dma_addr),
33639 - lp->a.read_csr(ioaddr, CSR0));
33640 + lp->a->read_csr(ioaddr, CSR0));
33641
33642 spin_unlock_irqrestore(&lp->lock, flags);
33643
33644 @@ -2218,7 +2218,7 @@ err_free_ring:
33645 * Switch back to 16bit mode to avoid problems with dumb
33646 * DOS packet driver after a warm reboot
33647 */
33648 - lp->a.write_bcr(ioaddr, 20, 4);
33649 + lp->a->write_bcr(ioaddr, 20, 4);
33650
33651 err_free_irq:
33652 spin_unlock_irqrestore(&lp->lock, flags);
33653 @@ -2323,7 +2323,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
33654
33655 /* wait for stop */
33656 for (i = 0; i < 100; i++)
33657 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
33658 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
33659 break;
33660
33661 if (i >= 100)
33662 @@ -2335,13 +2335,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
33663 return;
33664
33665 /* ReInit Ring */
33666 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
33667 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
33668 i = 0;
33669 while (i++ < 1000)
33670 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
33671 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
33672 break;
33673
33674 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
33675 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
33676 }
33677
33678 static void pcnet32_tx_timeout(struct net_device *dev)
33679 @@ -2353,8 +2353,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
33680 /* Transmitter timeout, serious problems. */
33681 if (pcnet32_debug & NETIF_MSG_DRV)
33682 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
33683 - dev->name, lp->a.read_csr(ioaddr, CSR0));
33684 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33685 + dev->name, lp->a->read_csr(ioaddr, CSR0));
33686 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33687 dev->stats.tx_errors++;
33688 if (netif_msg_tx_err(lp)) {
33689 int i;
33690 @@ -2397,7 +2397,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
33691
33692 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
33693 "%s() called, csr0 %4.4x\n",
33694 - __func__, lp->a.read_csr(ioaddr, CSR0));
33695 + __func__, lp->a->read_csr(ioaddr, CSR0));
33696
33697 /* Default status -- will not enable Successful-TxDone
33698 * interrupt when that option is available to us.
33699 @@ -2427,7 +2427,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
33700 dev->stats.tx_bytes += skb->len;
33701
33702 /* Trigger an immediate send poll. */
33703 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33704 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
33705
33706 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
33707 lp->tx_full = 1;
33708 @@ -2452,16 +2452,16 @@ pcnet32_interrupt(int irq, void *dev_id)
33709
33710 spin_lock(&lp->lock);
33711
33712 - csr0 = lp->a.read_csr(ioaddr, CSR0);
33713 + csr0 = lp->a->read_csr(ioaddr, CSR0);
33714 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
33715 if (csr0 == 0xffff)
33716 break; /* PCMCIA remove happened */
33717 /* Acknowledge all of the current interrupt sources ASAP. */
33718 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33719 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
33720
33721 netif_printk(lp, intr, KERN_DEBUG, dev,
33722 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
33723 - csr0, lp->a.read_csr(ioaddr, CSR0));
33724 + csr0, lp->a->read_csr(ioaddr, CSR0));
33725
33726 /* Log misc errors. */
33727 if (csr0 & 0x4000)
33728 @@ -2488,19 +2488,19 @@ pcnet32_interrupt(int irq, void *dev_id)
33729 if (napi_schedule_prep(&lp->napi)) {
33730 u16 val;
33731 /* set interrupt masks */
33732 - val = lp->a.read_csr(ioaddr, CSR3);
33733 + val = lp->a->read_csr(ioaddr, CSR3);
33734 val |= 0x5f00;
33735 - lp->a.write_csr(ioaddr, CSR3, val);
33736 + lp->a->write_csr(ioaddr, CSR3, val);
33737
33738 __napi_schedule(&lp->napi);
33739 break;
33740 }
33741 - csr0 = lp->a.read_csr(ioaddr, CSR0);
33742 + csr0 = lp->a->read_csr(ioaddr, CSR0);
33743 }
33744
33745 netif_printk(lp, intr, KERN_DEBUG, dev,
33746 "exiting interrupt, csr0=%#4.4x\n",
33747 - lp->a.read_csr(ioaddr, CSR0));
33748 + lp->a->read_csr(ioaddr, CSR0));
33749
33750 spin_unlock(&lp->lock);
33751
33752 @@ -2520,20 +2520,20 @@ static int pcnet32_close(struct net_device *dev)
33753
33754 spin_lock_irqsave(&lp->lock, flags);
33755
33756 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33757 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
33758
33759 netif_printk(lp, ifdown, KERN_DEBUG, dev,
33760 "Shutting down ethercard, status was %2.2x\n",
33761 - lp->a.read_csr(ioaddr, CSR0));
33762 + lp->a->read_csr(ioaddr, CSR0));
33763
33764 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
33765 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33766 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33767
33768 /*
33769 * Switch back to 16bit mode to avoid problems with dumb
33770 * DOS packet driver after a warm reboot
33771 */
33772 - lp->a.write_bcr(ioaddr, 20, 4);
33773 + lp->a->write_bcr(ioaddr, 20, 4);
33774
33775 spin_unlock_irqrestore(&lp->lock, flags);
33776
33777 @@ -2556,7 +2556,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
33778 unsigned long flags;
33779
33780 spin_lock_irqsave(&lp->lock, flags);
33781 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
33782 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
33783 spin_unlock_irqrestore(&lp->lock, flags);
33784
33785 return &dev->stats;
33786 @@ -2577,10 +2577,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
33787 if (dev->flags & IFF_ALLMULTI) {
33788 ib->filter[0] = cpu_to_le32(~0U);
33789 ib->filter[1] = cpu_to_le32(~0U);
33790 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
33791 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
33792 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
33793 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
33794 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
33795 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
33796 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
33797 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
33798 return;
33799 }
33800 /* clear the multicast filter */
33801 @@ -2594,7 +2594,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
33802 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
33803 }
33804 for (i = 0; i < 4; i++)
33805 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
33806 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
33807 le16_to_cpu(mcast_table[i]));
33808 }
33809
33810 @@ -2609,28 +2609,28 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
33811
33812 spin_lock_irqsave(&lp->lock, flags);
33813 suspended = pcnet32_suspend(dev, &flags, 0);
33814 - csr15 = lp->a.read_csr(ioaddr, CSR15);
33815 + csr15 = lp->a->read_csr(ioaddr, CSR15);
33816 if (dev->flags & IFF_PROMISC) {
33817 /* Log any net taps. */
33818 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
33819 lp->init_block->mode =
33820 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
33821 7);
33822 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
33823 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
33824 } else {
33825 lp->init_block->mode =
33826 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
33827 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
33828 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
33829 pcnet32_load_multicast(dev);
33830 }
33831
33832 if (suspended) {
33833 int csr5;
33834 /* clear SUSPEND (SPND) - CSR5 bit 0 */
33835 - csr5 = lp->a.read_csr(ioaddr, CSR5);
33836 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
33837 + csr5 = lp->a->read_csr(ioaddr, CSR5);
33838 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
33839 } else {
33840 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
33841 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
33842 pcnet32_restart(dev, CSR0_NORMAL);
33843 netif_wake_queue(dev);
33844 }
33845 @@ -2648,8 +2648,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
33846 if (!lp->mii)
33847 return 0;
33848
33849 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33850 - val_out = lp->a.read_bcr(ioaddr, 34);
33851 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33852 + val_out = lp->a->read_bcr(ioaddr, 34);
33853
33854 return val_out;
33855 }
33856 @@ -2663,8 +2663,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
33857 if (!lp->mii)
33858 return;
33859
33860 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33861 - lp->a.write_bcr(ioaddr, 34, val);
33862 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
33863 + lp->a->write_bcr(ioaddr, 34, val);
33864 }
33865
33866 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
33867 @@ -2741,7 +2741,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
33868 curr_link = mii_link_ok(&lp->mii_if);
33869 } else {
33870 ulong ioaddr = dev->base_addr; /* card base I/O address */
33871 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
33872 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
33873 }
33874 if (!curr_link) {
33875 if (prev_link || verbose) {
33876 @@ -2764,13 +2764,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
33877 (ecmd.duplex == DUPLEX_FULL)
33878 ? "full" : "half");
33879 }
33880 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
33881 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
33882 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
33883 if (lp->mii_if.full_duplex)
33884 bcr9 |= (1 << 0);
33885 else
33886 bcr9 &= ~(1 << 0);
33887 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
33888 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
33889 }
33890 } else {
33891 netif_info(lp, link, dev, "link up\n");
33892 diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
33893 index edfa15d..002bfa9 100644
33894 --- a/drivers/net/ppp_generic.c
33895 +++ b/drivers/net/ppp_generic.c
33896 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33897 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
33898 struct ppp_stats stats;
33899 struct ppp_comp_stats cstats;
33900 - char *vers;
33901
33902 switch (cmd) {
33903 case SIOCGPPPSTATS:
33904 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33905 break;
33906
33907 case SIOCGPPPVER:
33908 - vers = PPP_VERSION;
33909 - if (copy_to_user(addr, vers, strlen(vers) + 1))
33910 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
33911 break;
33912 err = 0;
33913 break;
33914 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
33915 index 6d657ca..d1be94b 100644
33916 --- a/drivers/net/r8169.c
33917 +++ b/drivers/net/r8169.c
33918 @@ -663,12 +663,12 @@ struct rtl8169_private {
33919 struct mdio_ops {
33920 void (*write)(void __iomem *, int, int);
33921 int (*read)(void __iomem *, int);
33922 - } mdio_ops;
33923 + } __no_const mdio_ops;
33924
33925 struct pll_power_ops {
33926 void (*down)(struct rtl8169_private *);
33927 void (*up)(struct rtl8169_private *);
33928 - } pll_power_ops;
33929 + } __no_const pll_power_ops;
33930
33931 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
33932 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
33933 diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
33934 index 3c0f131..17f8b02 100644
33935 --- a/drivers/net/sis190.c
33936 +++ b/drivers/net/sis190.c
33937 @@ -1624,7 +1624,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
33938 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
33939 struct net_device *dev)
33940 {
33941 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
33942 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
33943 struct sis190_private *tp = netdev_priv(dev);
33944 struct pci_dev *isa_bridge;
33945 u8 reg, tmp8;
33946 diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
33947 index 4793df8..44c9849 100644
33948 --- a/drivers/net/sundance.c
33949 +++ b/drivers/net/sundance.c
33950 @@ -218,7 +218,7 @@ enum {
33951 struct pci_id_info {
33952 const char *name;
33953 };
33954 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
33955 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
33956 {"D-Link DFE-550TX FAST Ethernet Adapter"},
33957 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
33958 {"D-Link DFE-580TX 4 port Server Adapter"},
33959 diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
33960 index 2ea456d..3ad9523 100644
33961 --- a/drivers/net/tg3.h
33962 +++ b/drivers/net/tg3.h
33963 @@ -134,6 +134,7 @@
33964 #define CHIPREV_ID_5750_A0 0x4000
33965 #define CHIPREV_ID_5750_A1 0x4001
33966 #define CHIPREV_ID_5750_A3 0x4003
33967 +#define CHIPREV_ID_5750_C1 0x4201
33968 #define CHIPREV_ID_5750_C2 0x4202
33969 #define CHIPREV_ID_5752_A0_HW 0x5000
33970 #define CHIPREV_ID_5752_A0 0x6000
33971 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
33972 index 515f122..41dd273 100644
33973 --- a/drivers/net/tokenring/abyss.c
33974 +++ b/drivers/net/tokenring/abyss.c
33975 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
33976
33977 static int __init abyss_init (void)
33978 {
33979 - abyss_netdev_ops = tms380tr_netdev_ops;
33980 + pax_open_kernel();
33981 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
33982
33983 - abyss_netdev_ops.ndo_open = abyss_open;
33984 - abyss_netdev_ops.ndo_stop = abyss_close;
33985 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
33986 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
33987 + pax_close_kernel();
33988
33989 return pci_register_driver(&abyss_driver);
33990 }
33991 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
33992 index 6153cfd..cf69c1c 100644
33993 --- a/drivers/net/tokenring/madgemc.c
33994 +++ b/drivers/net/tokenring/madgemc.c
33995 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
33996
33997 static int __init madgemc_init (void)
33998 {
33999 - madgemc_netdev_ops = tms380tr_netdev_ops;
34000 - madgemc_netdev_ops.ndo_open = madgemc_open;
34001 - madgemc_netdev_ops.ndo_stop = madgemc_close;
34002 + pax_open_kernel();
34003 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34004 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
34005 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
34006 + pax_close_kernel();
34007
34008 return mca_register_driver (&madgemc_driver);
34009 }
34010 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
34011 index 8d362e6..f91cc52 100644
34012 --- a/drivers/net/tokenring/proteon.c
34013 +++ b/drivers/net/tokenring/proteon.c
34014 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
34015 struct platform_device *pdev;
34016 int i, num = 0, err = 0;
34017
34018 - proteon_netdev_ops = tms380tr_netdev_ops;
34019 - proteon_netdev_ops.ndo_open = proteon_open;
34020 - proteon_netdev_ops.ndo_stop = tms380tr_close;
34021 + pax_open_kernel();
34022 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34023 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
34024 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
34025 + pax_close_kernel();
34026
34027 err = platform_driver_register(&proteon_driver);
34028 if (err)
34029 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
34030 index 46db5c5..37c1536 100644
34031 --- a/drivers/net/tokenring/skisa.c
34032 +++ b/drivers/net/tokenring/skisa.c
34033 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
34034 struct platform_device *pdev;
34035 int i, num = 0, err = 0;
34036
34037 - sk_isa_netdev_ops = tms380tr_netdev_ops;
34038 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
34039 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34040 + pax_open_kernel();
34041 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
34042 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
34043 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
34044 + pax_close_kernel();
34045
34046 err = platform_driver_register(&sk_isa_driver);
34047 if (err)
34048 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
34049 index ce90efc..2676f89 100644
34050 --- a/drivers/net/tulip/de2104x.c
34051 +++ b/drivers/net/tulip/de2104x.c
34052 @@ -1795,6 +1795,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
34053 struct de_srom_info_leaf *il;
34054 void *bufp;
34055
34056 + pax_track_stack();
34057 +
34058 /* download entire eeprom */
34059 for (i = 0; i < DE_EEPROM_WORDS; i++)
34060 ((__le16 *)ee_data)[i] =
34061 diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
34062 index 959b410..c97fac2 100644
34063 --- a/drivers/net/tulip/de4x5.c
34064 +++ b/drivers/net/tulip/de4x5.c
34065 @@ -5397,7 +5397,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34066 for (i=0; i<ETH_ALEN; i++) {
34067 tmp.addr[i] = dev->dev_addr[i];
34068 }
34069 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34070 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34071 break;
34072
34073 case DE4X5_SET_HWADDR: /* Set the hardware address */
34074 @@ -5437,7 +5437,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34075 spin_lock_irqsave(&lp->lock, flags);
34076 memcpy(&statbuf, &lp->pktStats, ioc->len);
34077 spin_unlock_irqrestore(&lp->lock, flags);
34078 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34079 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34080 return -EFAULT;
34081 break;
34082 }
34083 diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
34084 index fa5eee9..e074432 100644
34085 --- a/drivers/net/tulip/eeprom.c
34086 +++ b/drivers/net/tulip/eeprom.c
34087 @@ -81,7 +81,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34088 {NULL}};
34089
34090
34091 -static const char *block_name[] __devinitdata = {
34092 +static const char *block_name[] __devinitconst = {
34093 "21140 non-MII",
34094 "21140 MII PHY",
34095 "21142 Serial PHY",
34096 diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
34097 index 862eadf..3eee1e6 100644
34098 --- a/drivers/net/tulip/winbond-840.c
34099 +++ b/drivers/net/tulip/winbond-840.c
34100 @@ -236,7 +236,7 @@ struct pci_id_info {
34101 int drv_flags; /* Driver use, intended as capability flags. */
34102 };
34103
34104 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34105 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34106 { /* Sometime a Level-One switch card. */
34107 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34108 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34109 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
34110 index 304fe78..db112fa 100644
34111 --- a/drivers/net/usb/hso.c
34112 +++ b/drivers/net/usb/hso.c
34113 @@ -71,7 +71,7 @@
34114 #include <asm/byteorder.h>
34115 #include <linux/serial_core.h>
34116 #include <linux/serial.h>
34117 -
34118 +#include <asm/local.h>
34119
34120 #define MOD_AUTHOR "Option Wireless"
34121 #define MOD_DESCRIPTION "USB High Speed Option driver"
34122 @@ -257,7 +257,7 @@ struct hso_serial {
34123
34124 /* from usb_serial_port */
34125 struct tty_struct *tty;
34126 - int open_count;
34127 + local_t open_count;
34128 spinlock_t serial_lock;
34129
34130 int (*write_data) (struct hso_serial *serial);
34131 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
34132 struct urb *urb;
34133
34134 urb = serial->rx_urb[0];
34135 - if (serial->open_count > 0) {
34136 + if (local_read(&serial->open_count) > 0) {
34137 count = put_rxbuf_data(urb, serial);
34138 if (count == -1)
34139 return;
34140 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
34141 DUMP1(urb->transfer_buffer, urb->actual_length);
34142
34143 /* Anyone listening? */
34144 - if (serial->open_count == 0)
34145 + if (local_read(&serial->open_count) == 0)
34146 return;
34147
34148 if (status == 0) {
34149 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34150 spin_unlock_irq(&serial->serial_lock);
34151
34152 /* check for port already opened, if not set the termios */
34153 - serial->open_count++;
34154 - if (serial->open_count == 1) {
34155 + if (local_inc_return(&serial->open_count) == 1) {
34156 serial->rx_state = RX_IDLE;
34157 /* Force default termio settings */
34158 _hso_serial_set_termios(tty, NULL);
34159 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
34160 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
34161 if (result) {
34162 hso_stop_serial_device(serial->parent);
34163 - serial->open_count--;
34164 + local_dec(&serial->open_count);
34165 kref_put(&serial->parent->ref, hso_serial_ref_free);
34166 }
34167 } else {
34168 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
34169
34170 /* reset the rts and dtr */
34171 /* do the actual close */
34172 - serial->open_count--;
34173 + local_dec(&serial->open_count);
34174
34175 - if (serial->open_count <= 0) {
34176 - serial->open_count = 0;
34177 + if (local_read(&serial->open_count) <= 0) {
34178 + local_set(&serial->open_count, 0);
34179 spin_lock_irq(&serial->serial_lock);
34180 if (serial->tty == tty) {
34181 serial->tty->driver_data = NULL;
34182 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
34183
34184 /* the actual setup */
34185 spin_lock_irqsave(&serial->serial_lock, flags);
34186 - if (serial->open_count)
34187 + if (local_read(&serial->open_count))
34188 _hso_serial_set_termios(tty, old);
34189 else
34190 tty->termios = old;
34191 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
34192 D1("Pending read interrupt on port %d\n", i);
34193 spin_lock(&serial->serial_lock);
34194 if (serial->rx_state == RX_IDLE &&
34195 - serial->open_count > 0) {
34196 + local_read(&serial->open_count) > 0) {
34197 /* Setup and send a ctrl req read on
34198 * port i */
34199 if (!serial->rx_urb_filled[0]) {
34200 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
34201 /* Start all serial ports */
34202 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
34203 if (serial_table[i] && (serial_table[i]->interface == iface)) {
34204 - if (dev2ser(serial_table[i])->open_count) {
34205 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
34206 result =
34207 hso_start_serial_device(serial_table[i], GFP_NOIO);
34208 hso_kick_transmit(dev2ser(serial_table[i]));
34209 diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34210 index 27400ed..c796e05 100644
34211 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
34212 +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
34213 @@ -601,8 +601,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
34214 * Return with error code if any of the queue indices
34215 * is out of range
34216 */
34217 - if (p->ring_index[i] < 0 ||
34218 - p->ring_index[i] >= adapter->num_rx_queues)
34219 + if (p->ring_index[i] >= adapter->num_rx_queues)
34220 return -EINVAL;
34221 }
34222
34223 diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
34224 index dd36258..e47fd31 100644
34225 --- a/drivers/net/vxge/vxge-config.h
34226 +++ b/drivers/net/vxge/vxge-config.h
34227 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
34228 void (*link_down)(struct __vxge_hw_device *devh);
34229 void (*crit_err)(struct __vxge_hw_device *devh,
34230 enum vxge_hw_event type, u64 ext_data);
34231 -};
34232 +} __no_const;
34233
34234 /*
34235 * struct __vxge_hw_blockpool_entry - Block private data structure
34236 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
34237 index 178348a2..18bb433 100644
34238 --- a/drivers/net/vxge/vxge-main.c
34239 +++ b/drivers/net/vxge/vxge-main.c
34240 @@ -100,6 +100,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
34241 struct sk_buff *completed[NR_SKB_COMPLETED];
34242 int more;
34243
34244 + pax_track_stack();
34245 +
34246 do {
34247 more = 0;
34248 skb_ptr = completed;
34249 @@ -1915,6 +1917,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
34250 u8 mtable[256] = {0}; /* CPU to vpath mapping */
34251 int index;
34252
34253 + pax_track_stack();
34254 +
34255 /*
34256 * Filling
34257 * - itable with bucket numbers
34258 diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
34259 index 4a518a3..936b334 100644
34260 --- a/drivers/net/vxge/vxge-traffic.h
34261 +++ b/drivers/net/vxge/vxge-traffic.h
34262 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
34263 struct vxge_hw_mempool_dma *dma_object,
34264 u32 index,
34265 u32 is_last);
34266 -};
34267 +} __no_const;
34268
34269 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
34270 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
34271 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
34272 index 56aeb01..547f71f 100644
34273 --- a/drivers/net/wan/hdlc_x25.c
34274 +++ b/drivers/net/wan/hdlc_x25.c
34275 @@ -134,16 +134,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
34276
34277 static int x25_open(struct net_device *dev)
34278 {
34279 - struct lapb_register_struct cb;
34280 + static struct lapb_register_struct cb = {
34281 + .connect_confirmation = x25_connected,
34282 + .connect_indication = x25_connected,
34283 + .disconnect_confirmation = x25_disconnected,
34284 + .disconnect_indication = x25_disconnected,
34285 + .data_indication = x25_data_indication,
34286 + .data_transmit = x25_data_transmit
34287 + };
34288 int result;
34289
34290 - cb.connect_confirmation = x25_connected;
34291 - cb.connect_indication = x25_connected;
34292 - cb.disconnect_confirmation = x25_disconnected;
34293 - cb.disconnect_indication = x25_disconnected;
34294 - cb.data_indication = x25_data_indication;
34295 - cb.data_transmit = x25_data_transmit;
34296 -
34297 result = lapb_register(dev, &cb);
34298 if (result != LAPB_OK)
34299 return result;
34300 diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
34301 index 1fda46c..f2858f2 100644
34302 --- a/drivers/net/wimax/i2400m/usb-fw.c
34303 +++ b/drivers/net/wimax/i2400m/usb-fw.c
34304 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
34305 int do_autopm = 1;
34306 DECLARE_COMPLETION_ONSTACK(notif_completion);
34307
34308 + pax_track_stack();
34309 +
34310 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
34311 i2400m, ack, ack_size);
34312 BUG_ON(_ack == i2400m->bm_ack_buf);
34313 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
34314 index e1b3e3c..e413f18 100644
34315 --- a/drivers/net/wireless/airo.c
34316 +++ b/drivers/net/wireless/airo.c
34317 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
34318 BSSListElement * loop_net;
34319 BSSListElement * tmp_net;
34320
34321 + pax_track_stack();
34322 +
34323 /* Blow away current list of scan results */
34324 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
34325 list_move_tail (&loop_net->list, &ai->network_free_list);
34326 @@ -3794,6 +3796,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
34327 WepKeyRid wkr;
34328 int rc;
34329
34330 + pax_track_stack();
34331 +
34332 memset( &mySsid, 0, sizeof( mySsid ) );
34333 kfree (ai->flash);
34334 ai->flash = NULL;
34335 @@ -4753,6 +4757,8 @@ static int proc_stats_rid_open( struct inode *inode,
34336 __le32 *vals = stats.vals;
34337 int len;
34338
34339 + pax_track_stack();
34340 +
34341 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34342 return -ENOMEM;
34343 data = file->private_data;
34344 @@ -5476,6 +5482,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
34345 /* If doLoseSync is not 1, we won't do a Lose Sync */
34346 int doLoseSync = -1;
34347
34348 + pax_track_stack();
34349 +
34350 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
34351 return -ENOMEM;
34352 data = file->private_data;
34353 @@ -7181,6 +7189,8 @@ static int airo_get_aplist(struct net_device *dev,
34354 int i;
34355 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
34356
34357 + pax_track_stack();
34358 +
34359 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
34360 if (!qual)
34361 return -ENOMEM;
34362 @@ -7741,6 +7751,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
34363 CapabilityRid cap_rid;
34364 __le32 *vals = stats_rid.vals;
34365
34366 + pax_track_stack();
34367 +
34368 /* Get stats out of the card */
34369 clear_bit(JOB_WSTATS, &local->jobs);
34370 if (local->power.event) {
34371 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
34372 index 17c4b56..00d836f 100644
34373 --- a/drivers/net/wireless/ath/ath.h
34374 +++ b/drivers/net/wireless/ath/ath.h
34375 @@ -121,6 +121,7 @@ struct ath_ops {
34376 void (*write_flush) (void *);
34377 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
34378 };
34379 +typedef struct ath_ops __no_const ath_ops_no_const;
34380
34381 struct ath_common;
34382 struct ath_bus_ops;
34383 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
34384 index ccca724..7afbadc 100644
34385 --- a/drivers/net/wireless/ath/ath5k/debug.c
34386 +++ b/drivers/net/wireless/ath/ath5k/debug.c
34387 @@ -203,6 +203,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
34388 unsigned int v;
34389 u64 tsf;
34390
34391 + pax_track_stack();
34392 +
34393 v = ath5k_hw_reg_read(ah, AR5K_BEACON);
34394 len += snprintf(buf + len, sizeof(buf) - len,
34395 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
34396 @@ -321,6 +323,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
34397 unsigned int len = 0;
34398 unsigned int i;
34399
34400 + pax_track_stack();
34401 +
34402 len += snprintf(buf + len, sizeof(buf) - len,
34403 "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
34404
34405 @@ -492,6 +496,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
34406 unsigned int len = 0;
34407 u32 filt = ath5k_hw_get_rx_filter(ah);
34408
34409 + pax_track_stack();
34410 +
34411 len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
34412 ah->bssidmask);
34413 len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
34414 @@ -548,6 +554,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
34415 unsigned int len = 0;
34416 int i;
34417
34418 + pax_track_stack();
34419 +
34420 len += snprintf(buf + len, sizeof(buf) - len,
34421 "RX\n---------------------\n");
34422 len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
34423 @@ -665,6 +673,8 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
34424 char buf[700];
34425 unsigned int len = 0;
34426
34427 + pax_track_stack();
34428 +
34429 len += snprintf(buf + len, sizeof(buf) - len,
34430 "HW has PHY error counters:\t%s\n",
34431 ah->ah_capabilities.cap_has_phyerr_counters ?
34432 @@ -829,6 +839,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34433 struct ath5k_buf *bf, *bf0;
34434 int i, n;
34435
34436 + pax_track_stack();
34437 +
34438 len += snprintf(buf + len, sizeof(buf) - len,
34439 "available txbuffers: %d\n", ah->txbuf_len);
34440
34441 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34442 index 7c2aaad..ad14dee 100644
34443 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34444 +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
34445 @@ -758,6 +758,8 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
34446 int i, im, j;
34447 int nmeasurement;
34448
34449 + pax_track_stack();
34450 +
34451 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
34452 if (ah->txchainmask & (1 << i))
34453 num_chains++;
34454 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34455 index f80d1d6..08b773d 100644
34456 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34457 +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
34458 @@ -406,6 +406,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
34459 int theta_low_bin = 0;
34460 int i;
34461
34462 + pax_track_stack();
34463 +
34464 /* disregard any bin that contains <= 16 samples */
34465 thresh_accum_cnt = 16;
34466 scale_factor = 5;
34467 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
34468 index d1eb896..8b67cd4 100644
34469 --- a/drivers/net/wireless/ath/ath9k/debug.c
34470 +++ b/drivers/net/wireless/ath/ath9k/debug.c
34471 @@ -387,6 +387,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
34472 char buf[512];
34473 unsigned int len = 0;
34474
34475 + pax_track_stack();
34476 +
34477 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
34478 len += snprintf(buf + len, sizeof(buf) - len,
34479 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
34480 @@ -477,6 +479,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
34481 u8 addr[ETH_ALEN];
34482 u32 tmp;
34483
34484 + pax_track_stack();
34485 +
34486 len += snprintf(buf + len, sizeof(buf) - len,
34487 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
34488 wiphy_name(sc->hw->wiphy),
34489 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34490 index d3ff33c..309398e 100644
34491 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34492 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
34493 @@ -31,6 +31,8 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
34494 unsigned int len = 0;
34495 int ret = 0;
34496
34497 + pax_track_stack();
34498 +
34499 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34500
34501 ath9k_htc_ps_wakeup(priv);
34502 @@ -89,6 +91,8 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
34503 unsigned int len = 0;
34504 int ret = 0;
34505
34506 + pax_track_stack();
34507 +
34508 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34509
34510 ath9k_htc_ps_wakeup(priv);
34511 @@ -159,6 +163,8 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
34512 unsigned int len = 0;
34513 int ret = 0;
34514
34515 + pax_track_stack();
34516 +
34517 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
34518
34519 ath9k_htc_ps_wakeup(priv);
34520 @@ -203,6 +209,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
34521 char buf[512];
34522 unsigned int len = 0;
34523
34524 + pax_track_stack();
34525 +
34526 len += snprintf(buf + len, sizeof(buf) - len,
34527 "%20s : %10u\n", "Buffers queued",
34528 priv->debug.tx_stats.buf_queued);
34529 @@ -376,6 +384,8 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
34530 char buf[512];
34531 unsigned int len = 0;
34532
34533 + pax_track_stack();
34534 +
34535 spin_lock_bh(&priv->tx.tx_lock);
34536
34537 len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
34538 @@ -411,6 +421,8 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
34539 char buf[512];
34540 unsigned int len = 0;
34541
34542 + pax_track_stack();
34543 +
34544 len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
34545 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
34546
34547 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
34548 index c798890..c19a8fb 100644
34549 --- a/drivers/net/wireless/ath/ath9k/hw.h
34550 +++ b/drivers/net/wireless/ath/ath9k/hw.h
34551 @@ -588,7 +588,7 @@ struct ath_hw_private_ops {
34552
34553 /* ANI */
34554 void (*ani_cache_ini_regs)(struct ath_hw *ah);
34555 -};
34556 +} __no_const;
34557
34558 /**
34559 * struct ath_hw_ops - callbacks used by hardware code and driver code
34560 @@ -639,7 +639,7 @@ struct ath_hw_ops {
34561 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
34562 struct ath_hw_antcomb_conf *antconf);
34563
34564 -};
34565 +} __no_const;
34566
34567 struct ath_nf_limits {
34568 s16 max;
34569 @@ -652,7 +652,7 @@ struct ath_nf_limits {
34570 #define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
34571
34572 struct ath_hw {
34573 - struct ath_ops reg_ops;
34574 + ath_ops_no_const reg_ops;
34575
34576 struct ieee80211_hw *hw;
34577 struct ath_common common;
34578 diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
34579 index ef9ad79..f5f8d80 100644
34580 --- a/drivers/net/wireless/ipw2x00/ipw2100.c
34581 +++ b/drivers/net/wireless/ipw2x00/ipw2100.c
34582 @@ -2102,6 +2102,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
34583 int err;
34584 DECLARE_SSID_BUF(ssid);
34585
34586 + pax_track_stack();
34587 +
34588 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
34589
34590 if (ssid_len)
34591 @@ -5451,6 +5453,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
34592 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
34593 int err;
34594
34595 + pax_track_stack();
34596 +
34597 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
34598 idx, keylen, len);
34599
34600 diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
34601 index 32a9966..de69787 100644
34602 --- a/drivers/net/wireless/ipw2x00/libipw_rx.c
34603 +++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
34604 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_response(struct libipw_device
34605 unsigned long flags;
34606 DECLARE_SSID_BUF(ssid);
34607
34608 + pax_track_stack();
34609 +
34610 LIBIPW_DEBUG_SCAN("'%s' (%pM"
34611 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
34612 print_ssid(ssid, info_element->data, info_element->len),
34613 diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34614 index 66ee1562..b90412b 100644
34615 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
34616 +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
34617 @@ -3687,7 +3687,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
34618 */
34619 if (iwl3945_mod_params.disable_hw_scan) {
34620 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
34621 - iwl3945_hw_ops.hw_scan = NULL;
34622 + pax_open_kernel();
34623 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
34624 + pax_close_kernel();
34625 }
34626
34627 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
34628 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34629 index 3789ff4..22ab151 100644
34630 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34631 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
34632 @@ -920,6 +920,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
34633 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
34634 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
34635
34636 + pax_track_stack();
34637 +
34638 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
34639
34640 /* Treat uninitialized rate scaling data same as non-existing. */
34641 @@ -2931,6 +2933,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
34642 container_of(lq_sta, struct iwl_station_priv, lq_sta);
34643 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
34644
34645 + pax_track_stack();
34646 +
34647 /* Override starting rate (index 0) if needed for debug purposes */
34648 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
34649
34650 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
34651 index f9a407e..a6f2bb7 100644
34652 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
34653 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
34654 @@ -68,8 +68,8 @@ do { \
34655 } while (0)
34656
34657 #else
34658 -#define IWL_DEBUG(__priv, level, fmt, args...)
34659 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
34660 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
34661 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
34662 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
34663 const void *p, u32 len)
34664 {}
34665 diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34666 index ec1485b..900c3bd 100644
34667 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34668 +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
34669 @@ -561,6 +561,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
34670 int pos = 0;
34671 const size_t bufsz = sizeof(buf);
34672
34673 + pax_track_stack();
34674 +
34675 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
34676 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
34677 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
34678 @@ -693,6 +695,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
34679 char buf[256 * NUM_IWL_RXON_CTX];
34680 const size_t bufsz = sizeof(buf);
34681
34682 + pax_track_stack();
34683 +
34684 for_each_context(priv, ctx) {
34685 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
34686 ctx->ctxid);
34687 diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34688 index 0a0cc96..fd49ad8 100644
34689 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
34690 +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
34691 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
34692 int buf_len = 512;
34693 size_t len = 0;
34694
34695 + pax_track_stack();
34696 +
34697 if (*ppos != 0)
34698 return 0;
34699 if (count < sizeof(buf))
34700 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
34701 index 031cd89..bdc8435 100644
34702 --- a/drivers/net/wireless/mac80211_hwsim.c
34703 +++ b/drivers/net/wireless/mac80211_hwsim.c
34704 @@ -1670,9 +1670,11 @@ static int __init init_mac80211_hwsim(void)
34705 return -EINVAL;
34706
34707 if (fake_hw_scan) {
34708 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34709 - mac80211_hwsim_ops.sw_scan_start = NULL;
34710 - mac80211_hwsim_ops.sw_scan_complete = NULL;
34711 + pax_open_kernel();
34712 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
34713 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
34714 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
34715 + pax_close_kernel();
34716 }
34717
34718 spin_lock_init(&hwsim_radio_lock);
34719 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
34720 index 2215c3c..64e6a47 100644
34721 --- a/drivers/net/wireless/mwifiex/main.h
34722 +++ b/drivers/net/wireless/mwifiex/main.h
34723 @@ -560,7 +560,7 @@ struct mwifiex_if_ops {
34724
34725 void (*update_mp_end_port) (struct mwifiex_adapter *, u16);
34726 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
34727 -};
34728 +} __no_const;
34729
34730 struct mwifiex_adapter {
34731 struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
34732 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
34733 index 29f9389..f6d2ce0 100644
34734 --- a/drivers/net/wireless/rndis_wlan.c
34735 +++ b/drivers/net/wireless/rndis_wlan.c
34736 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
34737
34738 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
34739
34740 - if (rts_threshold < 0 || rts_threshold > 2347)
34741 + if (rts_threshold > 2347)
34742 rts_threshold = 2347;
34743
34744 tmp = cpu_to_le32(rts_threshold);
34745 diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34746 index 3b11642..d6bb049 100644
34747 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34748 +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
34749 @@ -837,6 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
34750 u8 rfpath;
34751 u8 num_total_rfpath = rtlphy->num_total_rfpath;
34752
34753 + pax_track_stack();
34754 +
34755 precommoncmdcnt = 0;
34756 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
34757 MAX_PRECMD_CNT,
34758 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
34759 index a77f1bb..c608b2b 100644
34760 --- a/drivers/net/wireless/wl1251/wl1251.h
34761 +++ b/drivers/net/wireless/wl1251/wl1251.h
34762 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
34763 void (*reset)(struct wl1251 *wl);
34764 void (*enable_irq)(struct wl1251 *wl);
34765 void (*disable_irq)(struct wl1251 *wl);
34766 -};
34767 +} __no_const;
34768
34769 struct wl1251 {
34770 struct ieee80211_hw *hw;
34771 diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
34772 index e0b3736..4b466e6 100644
34773 --- a/drivers/net/wireless/wl12xx/spi.c
34774 +++ b/drivers/net/wireless/wl12xx/spi.c
34775 @@ -281,6 +281,8 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
34776 u32 chunk_len;
34777 int i;
34778
34779 + pax_track_stack();
34780 +
34781 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
34782
34783 spi_message_init(&m);
34784 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
34785 index f34b5b2..b5abb9f 100644
34786 --- a/drivers/oprofile/buffer_sync.c
34787 +++ b/drivers/oprofile/buffer_sync.c
34788 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
34789 if (cookie == NO_COOKIE)
34790 offset = pc;
34791 if (cookie == INVALID_COOKIE) {
34792 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34793 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34794 offset = pc;
34795 }
34796 if (cookie != last_cookie) {
34797 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
34798 /* add userspace sample */
34799
34800 if (!mm) {
34801 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
34802 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
34803 return 0;
34804 }
34805
34806 cookie = lookup_dcookie(mm, s->eip, &offset);
34807
34808 if (cookie == INVALID_COOKIE) {
34809 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
34810 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
34811 return 0;
34812 }
34813
34814 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
34815 /* ignore backtraces if failed to add a sample */
34816 if (state == sb_bt_start) {
34817 state = sb_bt_ignore;
34818 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
34819 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
34820 }
34821 }
34822 release_mm(mm);
34823 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
34824 index dd87e86..bc0148c 100644
34825 --- a/drivers/oprofile/event_buffer.c
34826 +++ b/drivers/oprofile/event_buffer.c
34827 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
34828 }
34829
34830 if (buffer_pos == buffer_size) {
34831 - atomic_inc(&oprofile_stats.event_lost_overflow);
34832 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
34833 return;
34834 }
34835
34836 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
34837 index dccd863..8d35669 100644
34838 --- a/drivers/oprofile/oprof.c
34839 +++ b/drivers/oprofile/oprof.c
34840 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
34841 if (oprofile_ops.switch_events())
34842 return;
34843
34844 - atomic_inc(&oprofile_stats.multiplex_counter);
34845 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
34846 start_switch_worker();
34847 }
34848
34849 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
34850 index 917d28e..d62d981 100644
34851 --- a/drivers/oprofile/oprofile_stats.c
34852 +++ b/drivers/oprofile/oprofile_stats.c
34853 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
34854 cpu_buf->sample_invalid_eip = 0;
34855 }
34856
34857 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
34858 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
34859 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
34860 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
34861 - atomic_set(&oprofile_stats.multiplex_counter, 0);
34862 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
34863 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
34864 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
34865 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
34866 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
34867 }
34868
34869
34870 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
34871 index 38b6fc0..b5cbfce 100644
34872 --- a/drivers/oprofile/oprofile_stats.h
34873 +++ b/drivers/oprofile/oprofile_stats.h
34874 @@ -13,11 +13,11 @@
34875 #include <linux/atomic.h>
34876
34877 struct oprofile_stat_struct {
34878 - atomic_t sample_lost_no_mm;
34879 - atomic_t sample_lost_no_mapping;
34880 - atomic_t bt_lost_no_mapping;
34881 - atomic_t event_lost_overflow;
34882 - atomic_t multiplex_counter;
34883 + atomic_unchecked_t sample_lost_no_mm;
34884 + atomic_unchecked_t sample_lost_no_mapping;
34885 + atomic_unchecked_t bt_lost_no_mapping;
34886 + atomic_unchecked_t event_lost_overflow;
34887 + atomic_unchecked_t multiplex_counter;
34888 };
34889
34890 extern struct oprofile_stat_struct oprofile_stats;
34891 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
34892 index e9ff6f7..28e259a 100644
34893 --- a/drivers/oprofile/oprofilefs.c
34894 +++ b/drivers/oprofile/oprofilefs.c
34895 @@ -186,7 +186,7 @@ static const struct file_operations atomic_ro_fops = {
34896
34897
34898 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
34899 - char const *name, atomic_t *val)
34900 + char const *name, atomic_unchecked_t *val)
34901 {
34902 return __oprofilefs_create_file(sb, root, name,
34903 &atomic_ro_fops, 0444, val);
34904 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
34905 index 3f56bc0..707d642 100644
34906 --- a/drivers/parport/procfs.c
34907 +++ b/drivers/parport/procfs.c
34908 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
34909
34910 *ppos += len;
34911
34912 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
34913 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
34914 }
34915
34916 #ifdef CONFIG_PARPORT_1284
34917 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
34918
34919 *ppos += len;
34920
34921 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
34922 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
34923 }
34924 #endif /* IEEE1284.3 support. */
34925
34926 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
34927 index 9fff878..ad0ad53 100644
34928 --- a/drivers/pci/hotplug/cpci_hotplug.h
34929 +++ b/drivers/pci/hotplug/cpci_hotplug.h
34930 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
34931 int (*hardware_test) (struct slot* slot, u32 value);
34932 u8 (*get_power) (struct slot* slot);
34933 int (*set_power) (struct slot* slot, int value);
34934 -};
34935 +} __no_const;
34936
34937 struct cpci_hp_controller {
34938 unsigned int irq;
34939 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
34940 index 76ba8a1..20ca857 100644
34941 --- a/drivers/pci/hotplug/cpqphp_nvram.c
34942 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
34943 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
34944
34945 void compaq_nvram_init (void __iomem *rom_start)
34946 {
34947 +
34948 +#ifndef CONFIG_PAX_KERNEXEC
34949 if (rom_start) {
34950 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
34951 }
34952 +#endif
34953 +
34954 dbg("int15 entry = %p\n", compaq_int15_entry_point);
34955
34956 /* initialize our int15 lock */
34957 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
34958 index cbfbab1..6a9fced 100644
34959 --- a/drivers/pci/pcie/aspm.c
34960 +++ b/drivers/pci/pcie/aspm.c
34961 @@ -27,9 +27,9 @@
34962 #define MODULE_PARAM_PREFIX "pcie_aspm."
34963
34964 /* Note: those are not register definitions */
34965 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
34966 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
34967 -#define ASPM_STATE_L1 (4) /* L1 state */
34968 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
34969 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
34970 +#define ASPM_STATE_L1 (4U) /* L1 state */
34971 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
34972 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
34973
34974 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
34975 index 6ab6bd3..72bdc69 100644
34976 --- a/drivers/pci/probe.c
34977 +++ b/drivers/pci/probe.c
34978 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
34979 u32 l, sz, mask;
34980 u16 orig_cmd;
34981
34982 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
34983 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
34984
34985 if (!dev->mmio_always_on) {
34986 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
34987 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
34988 index 27911b5..5b6db88 100644
34989 --- a/drivers/pci/proc.c
34990 +++ b/drivers/pci/proc.c
34991 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
34992 static int __init pci_proc_init(void)
34993 {
34994 struct pci_dev *dev = NULL;
34995 +
34996 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
34997 +#ifdef CONFIG_GRKERNSEC_PROC_USER
34998 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
34999 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35000 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35001 +#endif
35002 +#else
35003 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35004 +#endif
35005 proc_create("devices", 0, proc_bus_pci_dir,
35006 &proc_bus_pci_dev_operations);
35007 proc_initialized = 1;
35008 diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
35009 index 90832a9..419089a 100644
35010 --- a/drivers/pci/xen-pcifront.c
35011 +++ b/drivers/pci/xen-pcifront.c
35012 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
35013 struct pcifront_sd *sd = bus->sysdata;
35014 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35015
35016 + pax_track_stack();
35017 +
35018 if (verbose_request)
35019 dev_info(&pdev->xdev->dev,
35020 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
35021 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
35022 struct pcifront_sd *sd = bus->sysdata;
35023 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35024
35025 + pax_track_stack();
35026 +
35027 if (verbose_request)
35028 dev_info(&pdev->xdev->dev,
35029 "write dev=%04x:%02x:%02x.%01x - "
35030 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
35031 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35032 struct msi_desc *entry;
35033
35034 + pax_track_stack();
35035 +
35036 if (nvec > SH_INFO_MAX_VEC) {
35037 dev_err(&dev->dev, "too much vector for pci frontend: %x."
35038 " Increase SH_INFO_MAX_VEC.\n", nvec);
35039 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
35040 struct pcifront_sd *sd = dev->bus->sysdata;
35041 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35042
35043 + pax_track_stack();
35044 +
35045 err = do_pci_op(pdev, &op);
35046
35047 /* What should do for error ? */
35048 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
35049 struct pcifront_sd *sd = dev->bus->sysdata;
35050 struct pcifront_device *pdev = pcifront_get_pdev(sd);
35051
35052 + pax_track_stack();
35053 +
35054 err = do_pci_op(pdev, &op);
35055 if (likely(!err)) {
35056 vector[0] = op.value;
35057 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35058 index 7bd829f..a3237ad 100644
35059 --- a/drivers/platform/x86/thinkpad_acpi.c
35060 +++ b/drivers/platform/x86/thinkpad_acpi.c
35061 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35062 return 0;
35063 }
35064
35065 -void static hotkey_mask_warn_incomplete_mask(void)
35066 +static void hotkey_mask_warn_incomplete_mask(void)
35067 {
35068 /* log only what the user can fix... */
35069 const u32 wantedmask = hotkey_driver_mask &
35070 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
35071 }
35072 }
35073
35074 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35075 - struct tp_nvram_state *newn,
35076 - const u32 event_mask)
35077 -{
35078 -
35079 #define TPACPI_COMPARE_KEY(__scancode, __member) \
35080 do { \
35081 if ((event_mask & (1 << __scancode)) && \
35082 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35083 tpacpi_hotkey_send_key(__scancode); \
35084 } while (0)
35085
35086 - void issue_volchange(const unsigned int oldvol,
35087 - const unsigned int newvol)
35088 - {
35089 - unsigned int i = oldvol;
35090 +static void issue_volchange(const unsigned int oldvol,
35091 + const unsigned int newvol,
35092 + const u32 event_mask)
35093 +{
35094 + unsigned int i = oldvol;
35095
35096 - while (i > newvol) {
35097 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35098 - i--;
35099 - }
35100 - while (i < newvol) {
35101 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35102 - i++;
35103 - }
35104 + while (i > newvol) {
35105 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
35106 + i--;
35107 }
35108 + while (i < newvol) {
35109 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35110 + i++;
35111 + }
35112 +}
35113
35114 - void issue_brightnesschange(const unsigned int oldbrt,
35115 - const unsigned int newbrt)
35116 - {
35117 - unsigned int i = oldbrt;
35118 +static void issue_brightnesschange(const unsigned int oldbrt,
35119 + const unsigned int newbrt,
35120 + const u32 event_mask)
35121 +{
35122 + unsigned int i = oldbrt;
35123
35124 - while (i > newbrt) {
35125 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35126 - i--;
35127 - }
35128 - while (i < newbrt) {
35129 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35130 - i++;
35131 - }
35132 + while (i > newbrt) {
35133 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
35134 + i--;
35135 + }
35136 + while (i < newbrt) {
35137 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35138 + i++;
35139 }
35140 +}
35141
35142 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35143 + struct tp_nvram_state *newn,
35144 + const u32 event_mask)
35145 +{
35146 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
35147 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
35148 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
35149 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35150 oldn->volume_level != newn->volume_level) {
35151 /* recently muted, or repeated mute keypress, or
35152 * multiple presses ending in mute */
35153 - issue_volchange(oldn->volume_level, newn->volume_level);
35154 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35155 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
35156 }
35157 } else {
35158 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35159 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
35160 }
35161 if (oldn->volume_level != newn->volume_level) {
35162 - issue_volchange(oldn->volume_level, newn->volume_level);
35163 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
35164 } else if (oldn->volume_toggle != newn->volume_toggle) {
35165 /* repeated vol up/down keypress at end of scale ? */
35166 if (newn->volume_level == 0)
35167 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35168 /* handle brightness */
35169 if (oldn->brightness_level != newn->brightness_level) {
35170 issue_brightnesschange(oldn->brightness_level,
35171 - newn->brightness_level);
35172 + newn->brightness_level,
35173 + event_mask);
35174 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
35175 /* repeated key presses that didn't change state */
35176 if (newn->brightness_level == 0)
35177 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
35178 && !tp_features.bright_unkfw)
35179 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
35180 }
35181 +}
35182
35183 #undef TPACPI_COMPARE_KEY
35184 #undef TPACPI_MAY_SEND_KEY
35185 -}
35186
35187 /*
35188 * Polling driver
35189 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
35190 index b859d16..5cc6b1a 100644
35191 --- a/drivers/pnp/pnpbios/bioscalls.c
35192 +++ b/drivers/pnp/pnpbios/bioscalls.c
35193 @@ -59,7 +59,7 @@ do { \
35194 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
35195 } while(0)
35196
35197 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
35198 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
35199 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
35200
35201 /*
35202 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35203
35204 cpu = get_cpu();
35205 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
35206 +
35207 + pax_open_kernel();
35208 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
35209 + pax_close_kernel();
35210
35211 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
35212 spin_lock_irqsave(&pnp_bios_lock, flags);
35213 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
35214 :"memory");
35215 spin_unlock_irqrestore(&pnp_bios_lock, flags);
35216
35217 + pax_open_kernel();
35218 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
35219 + pax_close_kernel();
35220 +
35221 put_cpu();
35222
35223 /* If we get here and this is set then the PnP BIOS faulted on us. */
35224 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
35225 return status;
35226 }
35227
35228 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
35229 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
35230 {
35231 int i;
35232
35233 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35234 pnp_bios_callpoint.offset = header->fields.pm16offset;
35235 pnp_bios_callpoint.segment = PNP_CS16;
35236
35237 + pax_open_kernel();
35238 +
35239 for_each_possible_cpu(i) {
35240 struct desc_struct *gdt = get_cpu_gdt_table(i);
35241 if (!gdt)
35242 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
35243 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
35244 (unsigned long)__va(header->fields.pm16dseg));
35245 }
35246 +
35247 + pax_close_kernel();
35248 }
35249 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
35250 index b0ecacb..7c9da2e 100644
35251 --- a/drivers/pnp/resource.c
35252 +++ b/drivers/pnp/resource.c
35253 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
35254 return 1;
35255
35256 /* check if the resource is valid */
35257 - if (*irq < 0 || *irq > 15)
35258 + if (*irq > 15)
35259 return 0;
35260
35261 /* check if the resource is reserved */
35262 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
35263 return 1;
35264
35265 /* check if the resource is valid */
35266 - if (*dma < 0 || *dma == 4 || *dma > 7)
35267 + if (*dma == 4 || *dma > 7)
35268 return 0;
35269
35270 /* check if the resource is reserved */
35271 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
35272 index bb16f5b..c751eef 100644
35273 --- a/drivers/power/bq27x00_battery.c
35274 +++ b/drivers/power/bq27x00_battery.c
35275 @@ -67,7 +67,7 @@
35276 struct bq27x00_device_info;
35277 struct bq27x00_access_methods {
35278 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
35279 -};
35280 +} __no_const;
35281
35282 enum bq27x00_chip { BQ27000, BQ27500 };
35283
35284 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
35285 index 33f5d9a..d957d3f 100644
35286 --- a/drivers/regulator/max8660.c
35287 +++ b/drivers/regulator/max8660.c
35288 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
35289 max8660->shadow_regs[MAX8660_OVER1] = 5;
35290 } else {
35291 /* Otherwise devices can be toggled via software */
35292 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
35293 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
35294 + pax_open_kernel();
35295 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
35296 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
35297 + pax_close_kernel();
35298 }
35299
35300 /*
35301 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
35302 index 3285d41..ab7c22a 100644
35303 --- a/drivers/regulator/mc13892-regulator.c
35304 +++ b/drivers/regulator/mc13892-regulator.c
35305 @@ -564,10 +564,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
35306 }
35307 mc13xxx_unlock(mc13892);
35308
35309 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35310 + pax_open_kernel();
35311 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
35312 = mc13892_vcam_set_mode;
35313 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35314 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
35315 = mc13892_vcam_get_mode;
35316 + pax_close_kernel();
35317 for (i = 0; i < pdata->num_regulators; i++) {
35318 init_data = &pdata->regulators[i];
35319 priv->regulators[i] = regulator_register(
35320 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
35321 index cace6d3..f623fda 100644
35322 --- a/drivers/rtc/rtc-dev.c
35323 +++ b/drivers/rtc/rtc-dev.c
35324 @@ -14,6 +14,7 @@
35325 #include <linux/module.h>
35326 #include <linux/rtc.h>
35327 #include <linux/sched.h>
35328 +#include <linux/grsecurity.h>
35329 #include "rtc-core.h"
35330
35331 static dev_t rtc_devt;
35332 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
35333 if (copy_from_user(&tm, uarg, sizeof(tm)))
35334 return -EFAULT;
35335
35336 + gr_log_timechange();
35337 +
35338 return rtc_set_time(rtc, &tm);
35339
35340 case RTC_PIE_ON:
35341 diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
35342 index f66c33b..7ae5823 100644
35343 --- a/drivers/scsi/BusLogic.c
35344 +++ b/drivers/scsi/BusLogic.c
35345 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
35346 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
35347 *PrototypeHostAdapter)
35348 {
35349 + pax_track_stack();
35350 +
35351 /*
35352 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
35353 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
35354 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
35355 index ffb5878..e6d785c 100644
35356 --- a/drivers/scsi/aacraid/aacraid.h
35357 +++ b/drivers/scsi/aacraid/aacraid.h
35358 @@ -492,7 +492,7 @@ struct adapter_ops
35359 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
35360 /* Administrative operations */
35361 int (*adapter_comm)(struct aac_dev * dev, int comm);
35362 -};
35363 +} __no_const;
35364
35365 /*
35366 * Define which interrupt handler needs to be installed
35367 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
35368 index 8a0b330..b4286de 100644
35369 --- a/drivers/scsi/aacraid/commctrl.c
35370 +++ b/drivers/scsi/aacraid/commctrl.c
35371 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
35372 u32 actual_fibsize64, actual_fibsize = 0;
35373 int i;
35374
35375 + pax_track_stack();
35376
35377 if (dev->in_reset) {
35378 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
35379 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
35380 index c7b6fed..4db0569 100644
35381 --- a/drivers/scsi/aacraid/linit.c
35382 +++ b/drivers/scsi/aacraid/linit.c
35383 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
35384 #elif defined(__devinitconst)
35385 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35386 #else
35387 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
35388 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
35389 #endif
35390 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
35391 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
35392 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
35393 index d5ff142..49c0ebb 100644
35394 --- a/drivers/scsi/aic94xx/aic94xx_init.c
35395 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
35396 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
35397 .lldd_control_phy = asd_control_phy,
35398 };
35399
35400 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
35401 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
35402 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
35403 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
35404 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
35405 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
35406 index a796de9..1ef20e1 100644
35407 --- a/drivers/scsi/bfa/bfa.h
35408 +++ b/drivers/scsi/bfa/bfa.h
35409 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
35410 u32 *end);
35411 int cpe_vec_q0;
35412 int rme_vec_q0;
35413 -};
35414 +} __no_const;
35415 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
35416
35417 struct bfa_faa_cbfn_s {
35418 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
35419 index e07bd47..dbd260a 100644
35420 --- a/drivers/scsi/bfa/bfa_fcpim.c
35421 +++ b/drivers/scsi/bfa/bfa_fcpim.c
35422 @@ -4179,7 +4179,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35423 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
35424 {
35425 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
35426 - struct bfa_itn_s *itn;
35427 + bfa_itn_s_no_const *itn;
35428
35429 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
35430 itn->isr = isr;
35431 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
35432 index 1080bcb..a3b39e3 100644
35433 --- a/drivers/scsi/bfa/bfa_fcpim.h
35434 +++ b/drivers/scsi/bfa/bfa_fcpim.h
35435 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
35436 struct bfa_itn_s {
35437 bfa_isr_func_t isr;
35438 };
35439 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
35440
35441 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
35442 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
35443 @@ -149,7 +150,7 @@ struct bfa_fcp_mod_s {
35444 struct list_head iotag_tio_free_q; /* free IO resources */
35445 struct list_head iotag_unused_q; /* unused IO resources*/
35446 struct bfa_iotag_s *iotag_arr;
35447 - struct bfa_itn_s *itn_arr;
35448 + bfa_itn_s_no_const *itn_arr;
35449 int num_ioim_reqs;
35450 int num_fwtio_reqs;
35451 int num_itns;
35452 diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
35453 index d4f951f..197c350 100644
35454 --- a/drivers/scsi/bfa/bfa_fcs_lport.c
35455 +++ b/drivers/scsi/bfa/bfa_fcs_lport.c
35456 @@ -1700,6 +1700,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
35457 u16 len, count;
35458 u16 templen;
35459
35460 + pax_track_stack();
35461 +
35462 /*
35463 * get hba attributes
35464 */
35465 @@ -1977,6 +1979,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
35466 u8 count = 0;
35467 u16 templen;
35468
35469 + pax_track_stack();
35470 +
35471 /*
35472 * get port attributes
35473 */
35474 diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
35475 index 52628d5..f89d033 100644
35476 --- a/drivers/scsi/bfa/bfa_fcs_rport.c
35477 +++ b/drivers/scsi/bfa/bfa_fcs_rport.c
35478 @@ -1871,6 +1871,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
35479 struct fc_rpsc_speed_info_s speeds;
35480 struct bfa_port_attr_s pport_attr;
35481
35482 + pax_track_stack();
35483 +
35484 bfa_trc(port->fcs, rx_fchs->s_id);
35485 bfa_trc(port->fcs, rx_fchs->d_id);
35486
35487 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
35488 index 546d46b..642fa5b 100644
35489 --- a/drivers/scsi/bfa/bfa_ioc.h
35490 +++ b/drivers/scsi/bfa/bfa_ioc.h
35491 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
35492 bfa_ioc_disable_cbfn_t disable_cbfn;
35493 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
35494 bfa_ioc_reset_cbfn_t reset_cbfn;
35495 -};
35496 +} __no_const;
35497
35498 /*
35499 * IOC event notification mechanism.
35500 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
35501 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
35502 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
35503 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
35504 -};
35505 +} __no_const;
35506
35507 /*
35508 * Queue element to wait for room in request queue. FIFO order is
35509 diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
35510 index 66fb725..0fe05ab 100644
35511 --- a/drivers/scsi/bfa/bfad.c
35512 +++ b/drivers/scsi/bfa/bfad.c
35513 @@ -1019,6 +1019,8 @@ bfad_start_ops(struct bfad_s *bfad) {
35514 struct bfad_vport_s *vport, *vport_new;
35515 struct bfa_fcs_driver_info_s driver_info;
35516
35517 + pax_track_stack();
35518 +
35519 /* Limit min/max. xfer size to [64k-32MB] */
35520 if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
35521 max_xfer_size = BFAD_MIN_SECTORS >> 1;
35522 diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
35523 index b4f6c9a..0eb1938 100644
35524 --- a/drivers/scsi/dpt_i2o.c
35525 +++ b/drivers/scsi/dpt_i2o.c
35526 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
35527 dma_addr_t addr;
35528 ulong flags = 0;
35529
35530 + pax_track_stack();
35531 +
35532 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
35533 // get user msg size in u32s
35534 if(get_user(size, &user_msg[0])){
35535 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
35536 s32 rcode;
35537 dma_addr_t addr;
35538
35539 + pax_track_stack();
35540 +
35541 memset(msg, 0 , sizeof(msg));
35542 len = scsi_bufflen(cmd);
35543 direction = 0x00000000;
35544 diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
35545 index 94de889..ca4f0cf 100644
35546 --- a/drivers/scsi/eata.c
35547 +++ b/drivers/scsi/eata.c
35548 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
35549 struct hostdata *ha;
35550 char name[16];
35551
35552 + pax_track_stack();
35553 +
35554 sprintf(name, "%s%d", driver_name, j);
35555
35556 if (!request_region(port_base, REGION_SIZE, driver_name)) {
35557 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
35558 index c74c4b8..c41ca3f 100644
35559 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
35560 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
35561 @@ -2503,6 +2503,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
35562 } buf;
35563 int rc;
35564
35565 + pax_track_stack();
35566 +
35567 fiph = (struct fip_header *)skb->data;
35568 sub = fiph->fip_subcode;
35569
35570 diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
35571 index 3242bca..45a83e7 100644
35572 --- a/drivers/scsi/gdth.c
35573 +++ b/drivers/scsi/gdth.c
35574 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
35575 unsigned long flags;
35576 gdth_ha_str *ha;
35577
35578 + pax_track_stack();
35579 +
35580 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
35581 return -EFAULT;
35582 ha = gdth_find_ha(ldrv.ionode);
35583 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
35584 gdth_ha_str *ha;
35585 int rval;
35586
35587 + pax_track_stack();
35588 +
35589 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
35590 res.number >= MAX_HDRIVES)
35591 return -EFAULT;
35592 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg, char *cmnd)
35593 gdth_ha_str *ha;
35594 int rval;
35595
35596 + pax_track_stack();
35597 +
35598 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
35599 return -EFAULT;
35600 ha = gdth_find_ha(gen.ionode);
35601 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
35602 int i;
35603 gdth_cmd_str gdtcmd;
35604 char cmnd[MAX_COMMAND_SIZE];
35605 +
35606 + pax_track_stack();
35607 +
35608 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
35609
35610 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
35611 diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
35612 index 6527543..81e4fe2 100644
35613 --- a/drivers/scsi/gdth_proc.c
35614 +++ b/drivers/scsi/gdth_proc.c
35615 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
35616 u64 paddr;
35617
35618 char cmnd[MAX_COMMAND_SIZE];
35619 +
35620 + pax_track_stack();
35621 +
35622 memset(cmnd, 0xff, 12);
35623 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
35624
35625 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
35626 gdth_hget_str *phg;
35627 char cmnd[MAX_COMMAND_SIZE];
35628
35629 + pax_track_stack();
35630 +
35631 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
35632 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
35633 if (!gdtcmd || !estr)
35634 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
35635 index 351dc0b..951dc32 100644
35636 --- a/drivers/scsi/hosts.c
35637 +++ b/drivers/scsi/hosts.c
35638 @@ -42,7 +42,7 @@
35639 #include "scsi_logging.h"
35640
35641
35642 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
35643 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
35644
35645
35646 static void scsi_host_cls_release(struct device *dev)
35647 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
35648 * subtract one because we increment first then return, but we need to
35649 * know what the next host number was before increment
35650 */
35651 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
35652 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
35653 shost->dma_channel = 0xff;
35654
35655 /* These three are default values which can be overridden */
35656 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
35657 index 418ce83..7ee1225 100644
35658 --- a/drivers/scsi/hpsa.c
35659 +++ b/drivers/scsi/hpsa.c
35660 @@ -499,7 +499,7 @@ static inline u32 next_command(struct ctlr_info *h)
35661 u32 a;
35662
35663 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
35664 - return h->access.command_completed(h);
35665 + return h->access->command_completed(h);
35666
35667 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
35668 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
35669 @@ -2956,7 +2956,7 @@ static void start_io(struct ctlr_info *h)
35670 while (!list_empty(&h->reqQ)) {
35671 c = list_entry(h->reqQ.next, struct CommandList, list);
35672 /* can't do anything if fifo is full */
35673 - if ((h->access.fifo_full(h))) {
35674 + if ((h->access->fifo_full(h))) {
35675 dev_warn(&h->pdev->dev, "fifo full\n");
35676 break;
35677 }
35678 @@ -2966,7 +2966,7 @@ static void start_io(struct ctlr_info *h)
35679 h->Qdepth--;
35680
35681 /* Tell the controller execute command */
35682 - h->access.submit_command(h, c);
35683 + h->access->submit_command(h, c);
35684
35685 /* Put job onto the completed Q */
35686 addQ(&h->cmpQ, c);
35687 @@ -2975,17 +2975,17 @@ static void start_io(struct ctlr_info *h)
35688
35689 static inline unsigned long get_next_completion(struct ctlr_info *h)
35690 {
35691 - return h->access.command_completed(h);
35692 + return h->access->command_completed(h);
35693 }
35694
35695 static inline bool interrupt_pending(struct ctlr_info *h)
35696 {
35697 - return h->access.intr_pending(h);
35698 + return h->access->intr_pending(h);
35699 }
35700
35701 static inline long interrupt_not_for_us(struct ctlr_info *h)
35702 {
35703 - return (h->access.intr_pending(h) == 0) ||
35704 + return (h->access->intr_pending(h) == 0) ||
35705 (h->interrupts_enabled == 0);
35706 }
35707
35708 @@ -3882,7 +3882,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
35709 if (prod_index < 0)
35710 return -ENODEV;
35711 h->product_name = products[prod_index].product_name;
35712 - h->access = *(products[prod_index].access);
35713 + h->access = products[prod_index].access;
35714
35715 if (hpsa_board_disabled(h->pdev)) {
35716 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
35717 @@ -4163,7 +4163,7 @@ reinit_after_soft_reset:
35718 }
35719
35720 /* make sure the board interrupts are off */
35721 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35722 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35723
35724 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
35725 goto clean2;
35726 @@ -4197,7 +4197,7 @@ reinit_after_soft_reset:
35727 * fake ones to scoop up any residual completions.
35728 */
35729 spin_lock_irqsave(&h->lock, flags);
35730 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35731 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35732 spin_unlock_irqrestore(&h->lock, flags);
35733 free_irq(h->intr[h->intr_mode], h);
35734 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
35735 @@ -4216,9 +4216,9 @@ reinit_after_soft_reset:
35736 dev_info(&h->pdev->dev, "Board READY.\n");
35737 dev_info(&h->pdev->dev,
35738 "Waiting for stale completions to drain.\n");
35739 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35740 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35741 msleep(10000);
35742 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35743 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35744
35745 rc = controller_reset_failed(h->cfgtable);
35746 if (rc)
35747 @@ -4239,7 +4239,7 @@ reinit_after_soft_reset:
35748 }
35749
35750 /* Turn the interrupts on so we can service requests */
35751 - h->access.set_intr_mask(h, HPSA_INTR_ON);
35752 + h->access->set_intr_mask(h, HPSA_INTR_ON);
35753
35754 hpsa_hba_inquiry(h);
35755 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
35756 @@ -4292,7 +4292,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
35757 * To write all data in the battery backed cache to disks
35758 */
35759 hpsa_flush_cache(h);
35760 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
35761 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
35762 free_irq(h->intr[h->intr_mode], h);
35763 #ifdef CONFIG_PCI_MSI
35764 if (h->msix_vector)
35765 @@ -4455,7 +4455,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
35766 return;
35767 }
35768 /* Change the access methods to the performant access methods */
35769 - h->access = SA5_performant_access;
35770 + h->access = &SA5_performant_access;
35771 h->transMethod = CFGTBL_Trans_Performant;
35772 }
35773
35774 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
35775 index 7f53cea..a8c7188 100644
35776 --- a/drivers/scsi/hpsa.h
35777 +++ b/drivers/scsi/hpsa.h
35778 @@ -73,7 +73,7 @@ struct ctlr_info {
35779 unsigned int msix_vector;
35780 unsigned int msi_vector;
35781 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
35782 - struct access_method access;
35783 + struct access_method *access;
35784
35785 /* queue and queue Info */
35786 struct list_head reqQ;
35787 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
35788 index f2df059..a3a9930 100644
35789 --- a/drivers/scsi/ips.h
35790 +++ b/drivers/scsi/ips.h
35791 @@ -1027,7 +1027,7 @@ typedef struct {
35792 int (*intr)(struct ips_ha *);
35793 void (*enableint)(struct ips_ha *);
35794 uint32_t (*statupd)(struct ips_ha *);
35795 -} ips_hw_func_t;
35796 +} __no_const ips_hw_func_t;
35797
35798 typedef struct ips_ha {
35799 uint8_t ha_id[IPS_MAX_CHANNELS+1];
35800 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
35801 index d261e98..1e00f35 100644
35802 --- a/drivers/scsi/libfc/fc_exch.c
35803 +++ b/drivers/scsi/libfc/fc_exch.c
35804 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
35805 * all together if not used XXX
35806 */
35807 struct {
35808 - atomic_t no_free_exch;
35809 - atomic_t no_free_exch_xid;
35810 - atomic_t xid_not_found;
35811 - atomic_t xid_busy;
35812 - atomic_t seq_not_found;
35813 - atomic_t non_bls_resp;
35814 + atomic_unchecked_t no_free_exch;
35815 + atomic_unchecked_t no_free_exch_xid;
35816 + atomic_unchecked_t xid_not_found;
35817 + atomic_unchecked_t xid_busy;
35818 + atomic_unchecked_t seq_not_found;
35819 + atomic_unchecked_t non_bls_resp;
35820 } stats;
35821 };
35822
35823 @@ -718,7 +718,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
35824 /* allocate memory for exchange */
35825 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
35826 if (!ep) {
35827 - atomic_inc(&mp->stats.no_free_exch);
35828 + atomic_inc_unchecked(&mp->stats.no_free_exch);
35829 goto out;
35830 }
35831 memset(ep, 0, sizeof(*ep));
35832 @@ -779,7 +779,7 @@ out:
35833 return ep;
35834 err:
35835 spin_unlock_bh(&pool->lock);
35836 - atomic_inc(&mp->stats.no_free_exch_xid);
35837 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
35838 mempool_free(ep, mp->ep_pool);
35839 return NULL;
35840 }
35841 @@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35842 xid = ntohs(fh->fh_ox_id); /* we originated exch */
35843 ep = fc_exch_find(mp, xid);
35844 if (!ep) {
35845 - atomic_inc(&mp->stats.xid_not_found);
35846 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35847 reject = FC_RJT_OX_ID;
35848 goto out;
35849 }
35850 @@ -952,7 +952,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35851 ep = fc_exch_find(mp, xid);
35852 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
35853 if (ep) {
35854 - atomic_inc(&mp->stats.xid_busy);
35855 + atomic_inc_unchecked(&mp->stats.xid_busy);
35856 reject = FC_RJT_RX_ID;
35857 goto rel;
35858 }
35859 @@ -963,7 +963,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35860 }
35861 xid = ep->xid; /* get our XID */
35862 } else if (!ep) {
35863 - atomic_inc(&mp->stats.xid_not_found);
35864 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35865 reject = FC_RJT_RX_ID; /* XID not found */
35866 goto out;
35867 }
35868 @@ -980,7 +980,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
35869 } else {
35870 sp = &ep->seq;
35871 if (sp->id != fh->fh_seq_id) {
35872 - atomic_inc(&mp->stats.seq_not_found);
35873 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35874 if (f_ctl & FC_FC_END_SEQ) {
35875 /*
35876 * Update sequence_id based on incoming last
35877 @@ -1430,22 +1430,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35878
35879 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
35880 if (!ep) {
35881 - atomic_inc(&mp->stats.xid_not_found);
35882 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35883 goto out;
35884 }
35885 if (ep->esb_stat & ESB_ST_COMPLETE) {
35886 - atomic_inc(&mp->stats.xid_not_found);
35887 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35888 goto rel;
35889 }
35890 if (ep->rxid == FC_XID_UNKNOWN)
35891 ep->rxid = ntohs(fh->fh_rx_id);
35892 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
35893 - atomic_inc(&mp->stats.xid_not_found);
35894 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35895 goto rel;
35896 }
35897 if (ep->did != ntoh24(fh->fh_s_id) &&
35898 ep->did != FC_FID_FLOGI) {
35899 - atomic_inc(&mp->stats.xid_not_found);
35900 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35901 goto rel;
35902 }
35903 sof = fr_sof(fp);
35904 @@ -1454,7 +1454,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35905 sp->ssb_stat |= SSB_ST_RESP;
35906 sp->id = fh->fh_seq_id;
35907 } else if (sp->id != fh->fh_seq_id) {
35908 - atomic_inc(&mp->stats.seq_not_found);
35909 + atomic_inc_unchecked(&mp->stats.seq_not_found);
35910 goto rel;
35911 }
35912
35913 @@ -1518,9 +1518,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
35914 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
35915
35916 if (!sp)
35917 - atomic_inc(&mp->stats.xid_not_found);
35918 + atomic_inc_unchecked(&mp->stats.xid_not_found);
35919 else
35920 - atomic_inc(&mp->stats.non_bls_resp);
35921 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
35922
35923 fc_frame_free(fp);
35924 }
35925 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
35926 index db9238f..4378ed2 100644
35927 --- a/drivers/scsi/libsas/sas_ata.c
35928 +++ b/drivers/scsi/libsas/sas_ata.c
35929 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
35930 .postreset = ata_std_postreset,
35931 .error_handler = ata_std_error_handler,
35932 .post_internal_cmd = sas_ata_post_internal,
35933 - .qc_defer = ata_std_qc_defer,
35934 + .qc_defer = ata_std_qc_defer,
35935 .qc_prep = ata_noop_qc_prep,
35936 .qc_issue = sas_ata_qc_issue,
35937 .qc_fill_rtf = sas_ata_qc_fill_rtf,
35938 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
35939 index c088a36..01c73b0 100644
35940 --- a/drivers/scsi/lpfc/lpfc.h
35941 +++ b/drivers/scsi/lpfc/lpfc.h
35942 @@ -425,7 +425,7 @@ struct lpfc_vport {
35943 struct dentry *debug_nodelist;
35944 struct dentry *vport_debugfs_root;
35945 struct lpfc_debugfs_trc *disc_trc;
35946 - atomic_t disc_trc_cnt;
35947 + atomic_unchecked_t disc_trc_cnt;
35948 #endif
35949 uint8_t stat_data_enabled;
35950 uint8_t stat_data_blocked;
35951 @@ -835,8 +835,8 @@ struct lpfc_hba {
35952 struct timer_list fabric_block_timer;
35953 unsigned long bit_flags;
35954 #define FABRIC_COMANDS_BLOCKED 0
35955 - atomic_t num_rsrc_err;
35956 - atomic_t num_cmd_success;
35957 + atomic_unchecked_t num_rsrc_err;
35958 + atomic_unchecked_t num_cmd_success;
35959 unsigned long last_rsrc_error_time;
35960 unsigned long last_ramp_down_time;
35961 unsigned long last_ramp_up_time;
35962 @@ -850,7 +850,7 @@ struct lpfc_hba {
35963 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
35964 struct dentry *debug_slow_ring_trc;
35965 struct lpfc_debugfs_trc *slow_ring_trc;
35966 - atomic_t slow_ring_trc_cnt;
35967 + atomic_unchecked_t slow_ring_trc_cnt;
35968 /* iDiag debugfs sub-directory */
35969 struct dentry *idiag_root;
35970 struct dentry *idiag_pci_cfg;
35971 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
35972 index a0424dd..2499b6b 100644
35973 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
35974 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
35975 @@ -105,7 +105,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
35976
35977 #include <linux/debugfs.h>
35978
35979 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35980 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
35981 static unsigned long lpfc_debugfs_start_time = 0L;
35982
35983 /* iDiag */
35984 @@ -146,7 +146,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
35985 lpfc_debugfs_enable = 0;
35986
35987 len = 0;
35988 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
35989 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
35990 (lpfc_debugfs_max_disc_trc - 1);
35991 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
35992 dtp = vport->disc_trc + i;
35993 @@ -212,7 +212,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
35994 lpfc_debugfs_enable = 0;
35995
35996 len = 0;
35997 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
35998 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
35999 (lpfc_debugfs_max_slow_ring_trc - 1);
36000 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36001 dtp = phba->slow_ring_trc + i;
36002 @@ -635,14 +635,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36003 !vport || !vport->disc_trc)
36004 return;
36005
36006 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36007 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36008 (lpfc_debugfs_max_disc_trc - 1);
36009 dtp = vport->disc_trc + index;
36010 dtp->fmt = fmt;
36011 dtp->data1 = data1;
36012 dtp->data2 = data2;
36013 dtp->data3 = data3;
36014 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36015 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36016 dtp->jif = jiffies;
36017 #endif
36018 return;
36019 @@ -673,14 +673,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36020 !phba || !phba->slow_ring_trc)
36021 return;
36022
36023 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36024 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36025 (lpfc_debugfs_max_slow_ring_trc - 1);
36026 dtp = phba->slow_ring_trc + index;
36027 dtp->fmt = fmt;
36028 dtp->data1 = data1;
36029 dtp->data2 = data2;
36030 dtp->data3 = data3;
36031 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36032 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36033 dtp->jif = jiffies;
36034 #endif
36035 return;
36036 @@ -3828,7 +3828,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36037 "slow_ring buffer\n");
36038 goto debug_failed;
36039 }
36040 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36041 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36042 memset(phba->slow_ring_trc, 0,
36043 (sizeof(struct lpfc_debugfs_trc) *
36044 lpfc_debugfs_max_slow_ring_trc));
36045 @@ -3874,7 +3874,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36046 "buffer\n");
36047 goto debug_failed;
36048 }
36049 - atomic_set(&vport->disc_trc_cnt, 0);
36050 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36051
36052 snprintf(name, sizeof(name), "discovery_trace");
36053 vport->debug_disc_trc =
36054 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36055 index a3c8200..31e562e 100644
36056 --- a/drivers/scsi/lpfc/lpfc_init.c
36057 +++ b/drivers/scsi/lpfc/lpfc_init.c
36058 @@ -9969,8 +9969,10 @@ lpfc_init(void)
36059 printk(LPFC_COPYRIGHT "\n");
36060
36061 if (lpfc_enable_npiv) {
36062 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36063 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36064 + pax_open_kernel();
36065 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36066 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36067 + pax_close_kernel();
36068 }
36069 lpfc_transport_template =
36070 fc_attach_transport(&lpfc_transport_functions);
36071 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36072 index eadd241..26c8e0f 100644
36073 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36074 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36075 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36076 uint32_t evt_posted;
36077
36078 spin_lock_irqsave(&phba->hbalock, flags);
36079 - atomic_inc(&phba->num_rsrc_err);
36080 + atomic_inc_unchecked(&phba->num_rsrc_err);
36081 phba->last_rsrc_error_time = jiffies;
36082
36083 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36084 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36085 unsigned long flags;
36086 struct lpfc_hba *phba = vport->phba;
36087 uint32_t evt_posted;
36088 - atomic_inc(&phba->num_cmd_success);
36089 + atomic_inc_unchecked(&phba->num_cmd_success);
36090
36091 if (vport->cfg_lun_queue_depth <= queue_depth)
36092 return;
36093 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36094 unsigned long num_rsrc_err, num_cmd_success;
36095 int i;
36096
36097 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36098 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36099 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36100 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36101
36102 vports = lpfc_create_vport_work_array(phba);
36103 if (vports != NULL)
36104 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36105 }
36106 }
36107 lpfc_destroy_vport_work_array(phba, vports);
36108 - atomic_set(&phba->num_rsrc_err, 0);
36109 - atomic_set(&phba->num_cmd_success, 0);
36110 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36111 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36112 }
36113
36114 /**
36115 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36116 }
36117 }
36118 lpfc_destroy_vport_work_array(phba, vports);
36119 - atomic_set(&phba->num_rsrc_err, 0);
36120 - atomic_set(&phba->num_cmd_success, 0);
36121 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36122 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36123 }
36124
36125 /**
36126 diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
36127 index 2e6619e..fa64494 100644
36128 --- a/drivers/scsi/megaraid/megaraid_mbox.c
36129 +++ b/drivers/scsi/megaraid/megaraid_mbox.c
36130 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
36131 int rval;
36132 int i;
36133
36134 + pax_track_stack();
36135 +
36136 // Allocate memory for the base list of scb for management module.
36137 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
36138
36139 diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
36140 index 86afb13f..c912398 100644
36141 --- a/drivers/scsi/osd/osd_initiator.c
36142 +++ b/drivers/scsi/osd/osd_initiator.c
36143 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(struct osd_dev *od,
36144 int nelem = ARRAY_SIZE(get_attrs), a = 0;
36145 int ret;
36146
36147 + pax_track_stack();
36148 +
36149 or = osd_start_request(od, GFP_KERNEL);
36150 if (!or)
36151 return -ENOMEM;
36152 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36153 index d079f9a..d26072c 100644
36154 --- a/drivers/scsi/pmcraid.c
36155 +++ b/drivers/scsi/pmcraid.c
36156 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36157 res->scsi_dev = scsi_dev;
36158 scsi_dev->hostdata = res;
36159 res->change_detected = 0;
36160 - atomic_set(&res->read_failures, 0);
36161 - atomic_set(&res->write_failures, 0);
36162 + atomic_set_unchecked(&res->read_failures, 0);
36163 + atomic_set_unchecked(&res->write_failures, 0);
36164 rc = 0;
36165 }
36166 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36167 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36168
36169 /* If this was a SCSI read/write command keep count of errors */
36170 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36171 - atomic_inc(&res->read_failures);
36172 + atomic_inc_unchecked(&res->read_failures);
36173 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36174 - atomic_inc(&res->write_failures);
36175 + atomic_inc_unchecked(&res->write_failures);
36176
36177 if (!RES_IS_GSCSI(res->cfg_entry) &&
36178 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36179 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
36180 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36181 * hrrq_id assigned here in queuecommand
36182 */
36183 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36184 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36185 pinstance->num_hrrq;
36186 cmd->cmd_done = pmcraid_io_done;
36187
36188 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
36189 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36190 * hrrq_id assigned here in queuecommand
36191 */
36192 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36193 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36194 pinstance->num_hrrq;
36195
36196 if (request_size) {
36197 @@ -4498,7 +4498,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36198
36199 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36200 /* add resources only after host is added into system */
36201 - if (!atomic_read(&pinstance->expose_resources))
36202 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36203 return;
36204
36205 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36206 @@ -5332,8 +5332,8 @@ static int __devinit pmcraid_init_instance(
36207 init_waitqueue_head(&pinstance->reset_wait_q);
36208
36209 atomic_set(&pinstance->outstanding_cmds, 0);
36210 - atomic_set(&pinstance->last_message_id, 0);
36211 - atomic_set(&pinstance->expose_resources, 0);
36212 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36213 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36214
36215 INIT_LIST_HEAD(&pinstance->free_res_q);
36216 INIT_LIST_HEAD(&pinstance->used_res_q);
36217 @@ -6048,7 +6048,7 @@ static int __devinit pmcraid_probe(
36218 /* Schedule worker thread to handle CCN and take care of adding and
36219 * removing devices to OS
36220 */
36221 - atomic_set(&pinstance->expose_resources, 1);
36222 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36223 schedule_work(&pinstance->worker_q);
36224 return rc;
36225
36226 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36227 index f920baf..4417389 100644
36228 --- a/drivers/scsi/pmcraid.h
36229 +++ b/drivers/scsi/pmcraid.h
36230 @@ -749,7 +749,7 @@ struct pmcraid_instance {
36231 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36232
36233 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36234 - atomic_t last_message_id;
36235 + atomic_unchecked_t last_message_id;
36236
36237 /* configuration table */
36238 struct pmcraid_config_table *cfg_table;
36239 @@ -778,7 +778,7 @@ struct pmcraid_instance {
36240 atomic_t outstanding_cmds;
36241
36242 /* should add/delete resources to mid-layer now ?*/
36243 - atomic_t expose_resources;
36244 + atomic_unchecked_t expose_resources;
36245
36246
36247
36248 @@ -814,8 +814,8 @@ struct pmcraid_resource_entry {
36249 struct pmcraid_config_table_entry_ext cfg_entry_ext;
36250 };
36251 struct scsi_device *scsi_dev; /* Link scsi_device structure */
36252 - atomic_t read_failures; /* count of failed READ commands */
36253 - atomic_t write_failures; /* count of failed WRITE commands */
36254 + atomic_unchecked_t read_failures; /* count of failed READ commands */
36255 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
36256
36257 /* To indicate add/delete/modify during CCN */
36258 u8 change_detected;
36259 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
36260 index a03eaf4..a6b3fd9 100644
36261 --- a/drivers/scsi/qla2xxx/qla_def.h
36262 +++ b/drivers/scsi/qla2xxx/qla_def.h
36263 @@ -2244,7 +2244,7 @@ struct isp_operations {
36264 int (*get_flash_version) (struct scsi_qla_host *, void *);
36265 int (*start_scsi) (srb_t *);
36266 int (*abort_isp) (struct scsi_qla_host *);
36267 -};
36268 +} __no_const;
36269
36270 /* MSI-X Support *************************************************************/
36271
36272 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
36273 index 473c5c8..4e2f24a 100644
36274 --- a/drivers/scsi/qla4xxx/ql4_def.h
36275 +++ b/drivers/scsi/qla4xxx/ql4_def.h
36276 @@ -256,7 +256,7 @@ struct ddb_entry {
36277 atomic_t retry_relogin_timer; /* Min Time between relogins
36278 * (4000 only) */
36279 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
36280 - atomic_t relogin_retry_count; /* Num of times relogin has been
36281 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
36282 * retried */
36283
36284 uint16_t port;
36285 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
36286 index 42ed5db..0262f9e 100644
36287 --- a/drivers/scsi/qla4xxx/ql4_init.c
36288 +++ b/drivers/scsi/qla4xxx/ql4_init.c
36289 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
36290 ddb_entry->fw_ddb_index = fw_ddb_index;
36291 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
36292 atomic_set(&ddb_entry->relogin_timer, 0);
36293 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36294 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36295 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36296 list_add_tail(&ddb_entry->list, &ha->ddb_list);
36297 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
36298 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
36299 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
36300 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
36301 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
36302 - atomic_set(&ddb_entry->relogin_retry_count, 0);
36303 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
36304 atomic_set(&ddb_entry->relogin_timer, 0);
36305 clear_bit(DF_RELOGIN, &ddb_entry->flags);
36306 iscsi_unblock_session(ddb_entry->sess);
36307 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
36308 index f2364ec..44c42b1 100644
36309 --- a/drivers/scsi/qla4xxx/ql4_os.c
36310 +++ b/drivers/scsi/qla4xxx/ql4_os.c
36311 @@ -811,13 +811,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
36312 ddb_entry->fw_ddb_device_state ==
36313 DDB_DS_SESSION_FAILED) {
36314 /* Reset retry relogin timer */
36315 - atomic_inc(&ddb_entry->relogin_retry_count);
36316 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
36317 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
36318 " timed out-retrying"
36319 " relogin (%d)\n",
36320 ha->host_no,
36321 ddb_entry->fw_ddb_index,
36322 - atomic_read(&ddb_entry->
36323 + atomic_read_unchecked(&ddb_entry->
36324 relogin_retry_count))
36325 );
36326 start_dpc++;
36327 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
36328 index 2aeb2e9..46e3925 100644
36329 --- a/drivers/scsi/scsi.c
36330 +++ b/drivers/scsi/scsi.c
36331 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
36332 unsigned long timeout;
36333 int rtn = 0;
36334
36335 - atomic_inc(&cmd->device->iorequest_cnt);
36336 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36337
36338 /* check if the device is still usable */
36339 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
36340 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
36341 index 6888b2c..45befa1 100644
36342 --- a/drivers/scsi/scsi_debug.c
36343 +++ b/drivers/scsi/scsi_debug.c
36344 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
36345 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
36346 unsigned char *cmd = (unsigned char *)scp->cmnd;
36347
36348 + pax_track_stack();
36349 +
36350 if ((errsts = check_readiness(scp, 1, devip)))
36351 return errsts;
36352 memset(arr, 0, sizeof(arr));
36353 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
36354 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
36355 unsigned char *cmd = (unsigned char *)scp->cmnd;
36356
36357 + pax_track_stack();
36358 +
36359 if ((errsts = check_readiness(scp, 1, devip)))
36360 return errsts;
36361 memset(arr, 0, sizeof(arr));
36362 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
36363 index b4d43ae..26edd69 100644
36364 --- a/drivers/scsi/scsi_lib.c
36365 +++ b/drivers/scsi/scsi_lib.c
36366 @@ -1413,7 +1413,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
36367 shost = sdev->host;
36368 scsi_init_cmd_errh(cmd);
36369 cmd->result = DID_NO_CONNECT << 16;
36370 - atomic_inc(&cmd->device->iorequest_cnt);
36371 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
36372
36373 /*
36374 * SCSI request completion path will do scsi_device_unbusy(),
36375 @@ -1439,9 +1439,9 @@ static void scsi_softirq_done(struct request *rq)
36376
36377 INIT_LIST_HEAD(&cmd->eh_entry);
36378
36379 - atomic_inc(&cmd->device->iodone_cnt);
36380 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
36381 if (cmd->result)
36382 - atomic_inc(&cmd->device->ioerr_cnt);
36383 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
36384
36385 disposition = scsi_decide_disposition(cmd);
36386 if (disposition != SUCCESS &&
36387 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
36388 index e0bd3f7..816b8a6 100644
36389 --- a/drivers/scsi/scsi_sysfs.c
36390 +++ b/drivers/scsi/scsi_sysfs.c
36391 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
36392 char *buf) \
36393 { \
36394 struct scsi_device *sdev = to_scsi_device(dev); \
36395 - unsigned long long count = atomic_read(&sdev->field); \
36396 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
36397 return snprintf(buf, 20, "0x%llx\n", count); \
36398 } \
36399 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
36400 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
36401 index 84a1fdf..693b0d6 100644
36402 --- a/drivers/scsi/scsi_tgt_lib.c
36403 +++ b/drivers/scsi/scsi_tgt_lib.c
36404 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
36405 int err;
36406
36407 dprintk("%lx %u\n", uaddr, len);
36408 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
36409 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
36410 if (err) {
36411 /*
36412 * TODO: need to fixup sg_tablesize, max_segment_size,
36413 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
36414 index 1b21491..1b7f60e 100644
36415 --- a/drivers/scsi/scsi_transport_fc.c
36416 +++ b/drivers/scsi/scsi_transport_fc.c
36417 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
36418 * Netlink Infrastructure
36419 */
36420
36421 -static atomic_t fc_event_seq;
36422 +static atomic_unchecked_t fc_event_seq;
36423
36424 /**
36425 * fc_get_event_number - Obtain the next sequential FC event number
36426 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
36427 u32
36428 fc_get_event_number(void)
36429 {
36430 - return atomic_add_return(1, &fc_event_seq);
36431 + return atomic_add_return_unchecked(1, &fc_event_seq);
36432 }
36433 EXPORT_SYMBOL(fc_get_event_number);
36434
36435 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
36436 {
36437 int error;
36438
36439 - atomic_set(&fc_event_seq, 0);
36440 + atomic_set_unchecked(&fc_event_seq, 0);
36441
36442 error = transport_class_register(&fc_host_class);
36443 if (error)
36444 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
36445 char *cp;
36446
36447 *val = simple_strtoul(buf, &cp, 0);
36448 - if ((*cp && (*cp != '\n')) || (*val < 0))
36449 + if (*cp && (*cp != '\n'))
36450 return -EINVAL;
36451 /*
36452 * Check for overflow; dev_loss_tmo is u32
36453 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
36454 index 3fd16d7..ba0871f 100644
36455 --- a/drivers/scsi/scsi_transport_iscsi.c
36456 +++ b/drivers/scsi/scsi_transport_iscsi.c
36457 @@ -83,7 +83,7 @@ struct iscsi_internal {
36458 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
36459 };
36460
36461 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
36462 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
36463 static struct workqueue_struct *iscsi_eh_timer_workq;
36464
36465 /*
36466 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
36467 int err;
36468
36469 ihost = shost->shost_data;
36470 - session->sid = atomic_add_return(1, &iscsi_session_nr);
36471 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
36472
36473 if (id == ISCSI_MAX_TARGET) {
36474 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
36475 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(void)
36476 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
36477 ISCSI_TRANSPORT_VERSION);
36478
36479 - atomic_set(&iscsi_session_nr, 0);
36480 + atomic_set_unchecked(&iscsi_session_nr, 0);
36481
36482 err = class_register(&iscsi_transport_class);
36483 if (err)
36484 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
36485 index 21a045e..ec89e03 100644
36486 --- a/drivers/scsi/scsi_transport_srp.c
36487 +++ b/drivers/scsi/scsi_transport_srp.c
36488 @@ -33,7 +33,7 @@
36489 #include "scsi_transport_srp_internal.h"
36490
36491 struct srp_host_attrs {
36492 - atomic_t next_port_id;
36493 + atomic_unchecked_t next_port_id;
36494 };
36495 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
36496
36497 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
36498 struct Scsi_Host *shost = dev_to_shost(dev);
36499 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
36500
36501 - atomic_set(&srp_host->next_port_id, 0);
36502 + atomic_set_unchecked(&srp_host->next_port_id, 0);
36503 return 0;
36504 }
36505
36506 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
36507 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
36508 rport->roles = ids->roles;
36509
36510 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
36511 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
36512 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
36513
36514 transport_setup_device(&rport->dev);
36515 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
36516 index 909ed9e..1ae290a 100644
36517 --- a/drivers/scsi/sg.c
36518 +++ b/drivers/scsi/sg.c
36519 @@ -1075,7 +1075,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
36520 sdp->disk->disk_name,
36521 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
36522 NULL,
36523 - (char *)arg);
36524 + (char __user *)arg);
36525 case BLKTRACESTART:
36526 return blk_trace_startstop(sdp->device->request_queue, 1);
36527 case BLKTRACESTOP:
36528 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
36529 const struct file_operations * fops;
36530 };
36531
36532 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
36533 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
36534 {"allow_dio", &adio_fops},
36535 {"debug", &debug_fops},
36536 {"def_reserved_size", &dressz_fops},
36537 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
36538 {
36539 int k, mask;
36540 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
36541 - struct sg_proc_leaf * leaf;
36542 + const struct sg_proc_leaf * leaf;
36543
36544 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
36545 if (!sg_proc_sgp)
36546 diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
36547 index b4543f5..e1b34b8 100644
36548 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
36549 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
36550 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
36551 int do_iounmap = 0;
36552 int do_disable_device = 1;
36553
36554 + pax_track_stack();
36555 +
36556 memset(&sym_dev, 0, sizeof(sym_dev));
36557 memset(&nvram, 0, sizeof(nvram));
36558 sym_dev.pdev = pdev;
36559 diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
36560 index a18996d..fe993cb 100644
36561 --- a/drivers/scsi/vmw_pvscsi.c
36562 +++ b/drivers/scsi/vmw_pvscsi.c
36563 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
36564 dma_addr_t base;
36565 unsigned i;
36566
36567 + pax_track_stack();
36568 +
36569 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
36570 cmd.reqRingNumPages = adapter->req_pages;
36571 cmd.cmpRingNumPages = adapter->cmp_pages;
36572 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
36573 index c5f37f0..898d202 100644
36574 --- a/drivers/spi/spi-dw-pci.c
36575 +++ b/drivers/spi/spi-dw-pci.c
36576 @@ -148,7 +148,7 @@ static int spi_resume(struct pci_dev *pdev)
36577 #define spi_resume NULL
36578 #endif
36579
36580 -static const struct pci_device_id pci_ids[] __devinitdata = {
36581 +static const struct pci_device_id pci_ids[] __devinitconst = {
36582 /* Intel MID platform SPI controller 0 */
36583 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
36584 {},
36585 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
36586 index 4d1b9f5..8408fe3 100644
36587 --- a/drivers/spi/spi.c
36588 +++ b/drivers/spi/spi.c
36589 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *master)
36590 EXPORT_SYMBOL_GPL(spi_bus_unlock);
36591
36592 /* portable code must never pass more than 32 bytes */
36593 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
36594 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
36595
36596 static u8 *buf;
36597
36598 diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36599 index 32ee39a..3004c3d 100644
36600 --- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36601 +++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
36602 @@ -362,7 +362,7 @@ static struct ar_cookie s_ar_cookie_mem[MAX_COOKIE_NUM];
36603 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
36604
36605
36606 -static struct net_device_ops ar6000_netdev_ops = {
36607 +static net_device_ops_no_const ar6000_netdev_ops = {
36608 .ndo_init = NULL,
36609 .ndo_open = ar6000_open,
36610 .ndo_stop = ar6000_close,
36611 diff --git a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36612 index 39e0873..0925710 100644
36613 --- a/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36614 +++ b/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
36615 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void *pHciPalInfo, void *skb);
36616 typedef struct ar6k_pal_config_s
36617 {
36618 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
36619 -}ar6k_pal_config_t;
36620 +} __no_const ar6k_pal_config_t;
36621
36622 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
36623 #endif /* _AR6K_PAL_H_ */
36624 diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36625 index 05dada9..96171c6 100644
36626 --- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36627 +++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
36628 @@ -451,14 +451,14 @@ static void brcmf_op_if(struct brcmf_if *ifp)
36629 free_netdev(ifp->net);
36630 }
36631 /* Allocate etherdev, including space for private structure */
36632 - ifp->net = alloc_etherdev(sizeof(drvr_priv));
36633 + ifp->net = alloc_etherdev(sizeof(*drvr_priv));
36634 if (!ifp->net) {
36635 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36636 ret = -ENOMEM;
36637 }
36638 if (ret == 0) {
36639 strcpy(ifp->net->name, ifp->name);
36640 - memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
36641 + memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(*drvr_priv));
36642 err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
36643 if (err != 0) {
36644 BRCMF_ERROR(("%s: brcmf_net_attach failed, "
36645 @@ -1279,7 +1279,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36646 BRCMF_TRACE(("%s: Enter\n", __func__));
36647
36648 /* Allocate etherdev, including space for private structure */
36649 - net = alloc_etherdev(sizeof(drvr_priv));
36650 + net = alloc_etherdev(sizeof(*drvr_priv));
36651 if (!net) {
36652 BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
36653 goto fail;
36654 @@ -1295,7 +1295,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36655 /*
36656 * Save the brcmf_info into the priv
36657 */
36658 - memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36659 + memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36660
36661 /* Set network interface name if it was provided as module parameter */
36662 if (iface_name[0]) {
36663 @@ -1352,7 +1352,7 @@ struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
36664 /*
36665 * Save the brcmf_info into the priv
36666 */
36667 - memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
36668 + memcpy(netdev_priv(net), &drvr_priv, sizeof(*drvr_priv));
36669
36670 #if defined(CONFIG_PM_SLEEP)
36671 atomic_set(&brcmf_mmc_suspend, false);
36672 diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36673 index d345472..cedb19e 100644
36674 --- a/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36675 +++ b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
36676 @@ -263,7 +263,7 @@ struct brcmf_sdioh_driver {
36677 u16 func, uint bustype, u32 regsva, void *param);
36678 /* detach from device */
36679 void (*detach) (void *ch);
36680 -};
36681 +} __no_const;
36682
36683 struct sdioh_info;
36684
36685 diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36686 index a01b01c..b3f721c 100644
36687 --- a/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36688 +++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
36689 @@ -591,7 +591,7 @@ struct phy_func_ptr {
36690 initfn_t carrsuppr;
36691 rxsigpwrfn_t rxsigpwr;
36692 detachfn_t detach;
36693 -};
36694 +} __no_const;
36695
36696 struct brcms_phy {
36697 struct brcms_phy_pub pubpi_ro;
36698 diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
36699 index 8fb3051..a8b6c67 100644
36700 --- a/drivers/staging/et131x/et1310_tx.c
36701 +++ b/drivers/staging/et131x/et1310_tx.c
36702 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
36703 struct net_device_stats *stats = &etdev->net_stats;
36704
36705 if (tcb->flags & fMP_DEST_BROAD)
36706 - atomic_inc(&etdev->stats.brdcstxmt);
36707 + atomic_inc_unchecked(&etdev->stats.brdcstxmt);
36708 else if (tcb->flags & fMP_DEST_MULTI)
36709 - atomic_inc(&etdev->stats.multixmt);
36710 + atomic_inc_unchecked(&etdev->stats.multixmt);
36711 else
36712 - atomic_inc(&etdev->stats.unixmt);
36713 + atomic_inc_unchecked(&etdev->stats.unixmt);
36714
36715 if (tcb->skb) {
36716 stats->tx_bytes += tcb->skb->len;
36717 diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
36718 index 408c50b..fd65e9f 100644
36719 --- a/drivers/staging/et131x/et131x_adapter.h
36720 +++ b/drivers/staging/et131x/et131x_adapter.h
36721 @@ -106,11 +106,11 @@ struct ce_stats {
36722 * operations
36723 */
36724 u32 unircv; /* # multicast packets received */
36725 - atomic_t unixmt; /* # multicast packets for Tx */
36726 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
36727 u32 multircv; /* # multicast packets received */
36728 - atomic_t multixmt; /* # multicast packets for Tx */
36729 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
36730 u32 brdcstrcv; /* # broadcast packets received */
36731 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
36732 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
36733 u32 norcvbuf; /* # Rx packets discarded */
36734 u32 noxmtbuf; /* # Tx packets discarded */
36735
36736 diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
36737 index 455f47a..86205ff 100644
36738 --- a/drivers/staging/hv/channel.c
36739 +++ b/drivers/staging/hv/channel.c
36740 @@ -447,8 +447,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36741 int ret = 0;
36742 int t;
36743
36744 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36745 - atomic_inc(&vmbus_connection.next_gpadl_handle);
36746 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36747 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36748
36749 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36750 if (ret)
36751 diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
36752 index 824f816..a800af7 100644
36753 --- a/drivers/staging/hv/hv.c
36754 +++ b/drivers/staging/hv/hv.c
36755 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36756 u64 output_address = (output) ? virt_to_phys(output) : 0;
36757 u32 output_address_hi = output_address >> 32;
36758 u32 output_address_lo = output_address & 0xFFFFFFFF;
36759 - volatile void *hypercall_page = hv_context.hypercall_page;
36760 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36761
36762 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36763 "=a"(hv_status_lo) : "d" (control_hi),
36764 diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
36765 index d957fc2..43cedd9 100644
36766 --- a/drivers/staging/hv/hv_mouse.c
36767 +++ b/drivers/staging/hv/hv_mouse.c
36768 @@ -878,8 +878,10 @@ static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
36769 if (hid_dev) {
36770 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
36771
36772 - hid_dev->ll_driver->open = mousevsc_hid_open;
36773 - hid_dev->ll_driver->close = mousevsc_hid_close;
36774 + pax_open_kernel();
36775 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
36776 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
36777 + pax_close_kernel();
36778
36779 hid_dev->bus = BUS_VIRTUAL;
36780 hid_dev->vendor = input_device_ctx->device_info.vendor;
36781 diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/staging/hv/hyperv_vmbus.h
36782 index 349ad80..3f75719 100644
36783 --- a/drivers/staging/hv/hyperv_vmbus.h
36784 +++ b/drivers/staging/hv/hyperv_vmbus.h
36785 @@ -559,7 +559,7 @@ enum vmbus_connect_state {
36786 struct vmbus_connection {
36787 enum vmbus_connect_state conn_state;
36788
36789 - atomic_t next_gpadl_handle;
36790 + atomic_unchecked_t next_gpadl_handle;
36791
36792 /*
36793 * Represents channel interrupts. Each bit position represents a
36794 diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
36795 index dbb5201..d6047c6 100644
36796 --- a/drivers/staging/hv/rndis_filter.c
36797 +++ b/drivers/staging/hv/rndis_filter.c
36798 @@ -43,7 +43,7 @@ struct rndis_device {
36799
36800 enum rndis_device_state state;
36801 u32 link_stat;
36802 - atomic_t new_req_id;
36803 + atomic_unchecked_t new_req_id;
36804
36805 spinlock_t request_lock;
36806 struct list_head req_list;
36807 @@ -117,7 +117,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
36808 * template
36809 */
36810 set = &rndis_msg->msg.set_req;
36811 - set->req_id = atomic_inc_return(&dev->new_req_id);
36812 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36813
36814 /* Add to the request list */
36815 spin_lock_irqsave(&dev->request_lock, flags);
36816 @@ -622,7 +622,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
36817
36818 /* Setup the rndis set */
36819 halt = &request->request_msg.msg.halt_req;
36820 - halt->req_id = atomic_inc_return(&dev->new_req_id);
36821 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
36822
36823 /* Ignore return since this msg is optional. */
36824 rndis_filter_send_request(dev, request);
36825 diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
36826 index 1c949f5..7a8b104 100644
36827 --- a/drivers/staging/hv/vmbus_drv.c
36828 +++ b/drivers/staging/hv/vmbus_drv.c
36829 @@ -660,11 +660,11 @@ int vmbus_child_device_register(struct hv_device *child_device_obj)
36830 {
36831 int ret = 0;
36832
36833 - static atomic_t device_num = ATOMIC_INIT(0);
36834 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36835
36836 /* Set the device name. Otherwise, device_register() will fail. */
36837 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36838 - atomic_inc_return(&device_num));
36839 + atomic_inc_return_unchecked(&device_num));
36840
36841 /* The new device belongs to this bus */
36842 child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
36843 diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
36844 index 3f26f71..fb5c787 100644
36845 --- a/drivers/staging/iio/ring_generic.h
36846 +++ b/drivers/staging/iio/ring_generic.h
36847 @@ -62,7 +62,7 @@ struct iio_ring_access_funcs {
36848
36849 int (*is_enabled)(struct iio_ring_buffer *ring);
36850 int (*enable)(struct iio_ring_buffer *ring);
36851 -};
36852 +} __no_const;
36853
36854 struct iio_ring_setup_ops {
36855 int (*preenable)(struct iio_dev *);
36856 diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
36857 index cfec92d..a65dacf 100644
36858 --- a/drivers/staging/mei/interface.c
36859 +++ b/drivers/staging/mei/interface.c
36860 @@ -332,7 +332,7 @@ int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
36861 mei_hdr->reserved = 0;
36862
36863 mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
36864 - memset(mei_flow_control, 0, sizeof(mei_flow_control));
36865 + memset(mei_flow_control, 0, sizeof(*mei_flow_control));
36866 mei_flow_control->host_addr = cl->host_client_id;
36867 mei_flow_control->me_addr = cl->me_client_id;
36868 mei_flow_control->cmd.cmd = MEI_FLOW_CONTROL_CMD;
36869 @@ -396,7 +396,7 @@ int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
36870
36871 mei_cli_disconnect =
36872 (struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
36873 - memset(mei_cli_disconnect, 0, sizeof(mei_cli_disconnect));
36874 + memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
36875 mei_cli_disconnect->host_addr = cl->host_client_id;
36876 mei_cli_disconnect->me_addr = cl->me_client_id;
36877 mei_cli_disconnect->cmd.cmd = CLIENT_DISCONNECT_REQ_CMD;
36878 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
36879 index 8b307b4..a97ac91 100644
36880 --- a/drivers/staging/octeon/ethernet-rx.c
36881 +++ b/drivers/staging/octeon/ethernet-rx.c
36882 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36883 /* Increment RX stats for virtual ports */
36884 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
36885 #ifdef CONFIG_64BIT
36886 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
36887 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
36888 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
36889 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
36890 #else
36891 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
36892 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
36893 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
36894 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
36895 #endif
36896 }
36897 netif_receive_skb(skb);
36898 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
36899 dev->name);
36900 */
36901 #ifdef CONFIG_64BIT
36902 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
36903 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36904 #else
36905 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
36906 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
36907 #endif
36908 dev_kfree_skb_irq(skb);
36909 }
36910 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
36911 index a8f780e..aef1098 100644
36912 --- a/drivers/staging/octeon/ethernet.c
36913 +++ b/drivers/staging/octeon/ethernet.c
36914 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
36915 * since the RX tasklet also increments it.
36916 */
36917 #ifdef CONFIG_64BIT
36918 - atomic64_add(rx_status.dropped_packets,
36919 - (atomic64_t *)&priv->stats.rx_dropped);
36920 + atomic64_add_unchecked(rx_status.dropped_packets,
36921 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
36922 #else
36923 - atomic_add(rx_status.dropped_packets,
36924 - (atomic_t *)&priv->stats.rx_dropped);
36925 + atomic_add_unchecked(rx_status.dropped_packets,
36926 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
36927 #endif
36928 }
36929
36930 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
36931 index f3c6060..56bf826 100644
36932 --- a/drivers/staging/pohmelfs/inode.c
36933 +++ b/drivers/staging/pohmelfs/inode.c
36934 @@ -1861,7 +1861,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
36935 mutex_init(&psb->mcache_lock);
36936 psb->mcache_root = RB_ROOT;
36937 psb->mcache_timeout = msecs_to_jiffies(5000);
36938 - atomic_long_set(&psb->mcache_gen, 0);
36939 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
36940
36941 psb->trans_max_pages = 100;
36942
36943 @@ -1876,7 +1876,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
36944 INIT_LIST_HEAD(&psb->crypto_ready_list);
36945 INIT_LIST_HEAD(&psb->crypto_active_list);
36946
36947 - atomic_set(&psb->trans_gen, 1);
36948 + atomic_set_unchecked(&psb->trans_gen, 1);
36949 atomic_long_set(&psb->total_inodes, 0);
36950
36951 mutex_init(&psb->state_lock);
36952 diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
36953 index e22665c..a2a9390 100644
36954 --- a/drivers/staging/pohmelfs/mcache.c
36955 +++ b/drivers/staging/pohmelfs/mcache.c
36956 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
36957 m->data = data;
36958 m->start = start;
36959 m->size = size;
36960 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
36961 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
36962
36963 mutex_lock(&psb->mcache_lock);
36964 err = pohmelfs_mcache_insert(psb, m);
36965 diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
36966 index 985b6b7..7699e05 100644
36967 --- a/drivers/staging/pohmelfs/netfs.h
36968 +++ b/drivers/staging/pohmelfs/netfs.h
36969 @@ -571,14 +571,14 @@ struct pohmelfs_config;
36970 struct pohmelfs_sb {
36971 struct rb_root mcache_root;
36972 struct mutex mcache_lock;
36973 - atomic_long_t mcache_gen;
36974 + atomic_long_unchecked_t mcache_gen;
36975 unsigned long mcache_timeout;
36976
36977 unsigned int idx;
36978
36979 unsigned int trans_retries;
36980
36981 - atomic_t trans_gen;
36982 + atomic_unchecked_t trans_gen;
36983
36984 unsigned int crypto_attached_size;
36985 unsigned int crypto_align_size;
36986 diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
36987 index 36a2535..0591bf4 100644
36988 --- a/drivers/staging/pohmelfs/trans.c
36989 +++ b/drivers/staging/pohmelfs/trans.c
36990 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
36991 int err;
36992 struct netfs_cmd *cmd = t->iovec.iov_base;
36993
36994 - t->gen = atomic_inc_return(&psb->trans_gen);
36995 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
36996
36997 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
36998 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
36999 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37000 index b70cb2b..4db41a7 100644
37001 --- a/drivers/staging/rtl8712/rtl871x_io.h
37002 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37003 @@ -83,7 +83,7 @@ struct _io_ops {
37004 u8 *pmem);
37005 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37006 u8 *pmem);
37007 -};
37008 +} __no_const;
37009
37010 struct io_req {
37011 struct list_head list;
37012 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37013 index c7b5e8b..783d6cb 100644
37014 --- a/drivers/staging/sbe-2t3e3/netdev.c
37015 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37016 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37017 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37018
37019 if (rlen)
37020 - if (copy_to_user(data, &resp, rlen))
37021 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37022 return -EFAULT;
37023
37024 return 0;
37025 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37026 index be21617..0954e45 100644
37027 --- a/drivers/staging/usbip/usbip_common.h
37028 +++ b/drivers/staging/usbip/usbip_common.h
37029 @@ -289,7 +289,7 @@ struct usbip_device {
37030 void (*shutdown)(struct usbip_device *);
37031 void (*reset)(struct usbip_device *);
37032 void (*unusable)(struct usbip_device *);
37033 - } eh_ops;
37034 + } __no_const eh_ops;
37035 };
37036
37037 #if 0
37038 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37039 index 71a586e..4d8a91a 100644
37040 --- a/drivers/staging/usbip/vhci.h
37041 +++ b/drivers/staging/usbip/vhci.h
37042 @@ -85,7 +85,7 @@ struct vhci_hcd {
37043 unsigned resuming:1;
37044 unsigned long re_timeout;
37045
37046 - atomic_t seqnum;
37047 + atomic_unchecked_t seqnum;
37048
37049 /*
37050 * NOTE:
37051 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37052 index 2ee97e2..0420b86 100644
37053 --- a/drivers/staging/usbip/vhci_hcd.c
37054 +++ b/drivers/staging/usbip/vhci_hcd.c
37055 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37056 return;
37057 }
37058
37059 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37060 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37061 if (priv->seqnum == 0xffff)
37062 dev_info(&urb->dev->dev, "seqnum max\n");
37063
37064 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37065 return -ENOMEM;
37066 }
37067
37068 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37069 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37070 if (unlink->seqnum == 0xffff)
37071 pr_info("seqnum max\n");
37072
37073 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37074 vdev->rhport = rhport;
37075 }
37076
37077 - atomic_set(&vhci->seqnum, 0);
37078 + atomic_set_unchecked(&vhci->seqnum, 0);
37079 spin_lock_init(&vhci->lock);
37080
37081 hcd->power_budget = 0; /* no limit */
37082 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37083 index 09c44ab..6692d83 100644
37084 --- a/drivers/staging/usbip/vhci_rx.c
37085 +++ b/drivers/staging/usbip/vhci_rx.c
37086 @@ -76,7 +76,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37087 if (!urb) {
37088 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37089 pr_info("max seqnum %d\n",
37090 - atomic_read(&the_controller->seqnum));
37091 + atomic_read_unchecked(&the_controller->seqnum));
37092 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37093 return;
37094 }
37095 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37096 index 7735027..30eed13 100644
37097 --- a/drivers/staging/vt6655/hostap.c
37098 +++ b/drivers/staging/vt6655/hostap.c
37099 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37100 *
37101 */
37102
37103 +static net_device_ops_no_const apdev_netdev_ops;
37104 +
37105 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37106 {
37107 PSDevice apdev_priv;
37108 struct net_device *dev = pDevice->dev;
37109 int ret;
37110 - const struct net_device_ops apdev_netdev_ops = {
37111 - .ndo_start_xmit = pDevice->tx_80211,
37112 - };
37113
37114 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37115
37116 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37117 *apdev_priv = *pDevice;
37118 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37119
37120 + /* only half broken now */
37121 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37122 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37123
37124 pDevice->apdev->type = ARPHRD_IEEE80211;
37125 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37126 index 51b5adf..098e320 100644
37127 --- a/drivers/staging/vt6656/hostap.c
37128 +++ b/drivers/staging/vt6656/hostap.c
37129 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37130 *
37131 */
37132
37133 +static net_device_ops_no_const apdev_netdev_ops;
37134 +
37135 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37136 {
37137 PSDevice apdev_priv;
37138 struct net_device *dev = pDevice->dev;
37139 int ret;
37140 - const struct net_device_ops apdev_netdev_ops = {
37141 - .ndo_start_xmit = pDevice->tx_80211,
37142 - };
37143
37144 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37145
37146 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37147 *apdev_priv = *pDevice;
37148 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37149
37150 + /* only half broken now */
37151 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37152 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37153
37154 pDevice->apdev->type = ARPHRD_IEEE80211;
37155 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37156 index 7843dfd..3db105f 100644
37157 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37158 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37159 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37160
37161 struct usbctlx_completor {
37162 int (*complete) (struct usbctlx_completor *);
37163 -};
37164 +} __no_const;
37165
37166 static int
37167 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37168 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37169 index 1ca66ea..76f1343 100644
37170 --- a/drivers/staging/zcache/tmem.c
37171 +++ b/drivers/staging/zcache/tmem.c
37172 @@ -39,7 +39,7 @@
37173 * A tmem host implementation must use this function to register callbacks
37174 * for memory allocation.
37175 */
37176 -static struct tmem_hostops tmem_hostops;
37177 +static tmem_hostops_no_const tmem_hostops;
37178
37179 static void tmem_objnode_tree_init(void);
37180
37181 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37182 * A tmem host implementation must use this function to register
37183 * callbacks for a page-accessible memory (PAM) implementation
37184 */
37185 -static struct tmem_pamops tmem_pamops;
37186 +static tmem_pamops_no_const tmem_pamops;
37187
37188 void tmem_register_pamops(struct tmem_pamops *m)
37189 {
37190 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37191 index ed147c4..94fc3c6 100644
37192 --- a/drivers/staging/zcache/tmem.h
37193 +++ b/drivers/staging/zcache/tmem.h
37194 @@ -180,6 +180,7 @@ struct tmem_pamops {
37195 void (*new_obj)(struct tmem_obj *);
37196 int (*replace_in_obj)(void *, struct tmem_obj *);
37197 };
37198 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37199 extern void tmem_register_pamops(struct tmem_pamops *m);
37200
37201 /* memory allocation methods provided by the host implementation */
37202 @@ -189,6 +190,7 @@ struct tmem_hostops {
37203 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37204 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37205 };
37206 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37207 extern void tmem_register_hostops(struct tmem_hostops *m);
37208
37209 /* core tmem accessor functions */
37210 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37211 index 26a5d8b..74434f8 100644
37212 --- a/drivers/target/iscsi/iscsi_target.c
37213 +++ b/drivers/target/iscsi/iscsi_target.c
37214 @@ -1368,7 +1368,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37215 * outstanding_r2ts reaches zero, go ahead and send the delayed
37216 * TASK_ABORTED status.
37217 */
37218 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37219 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37220 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37221 if (--cmd->outstanding_r2ts < 1) {
37222 iscsit_stop_dataout_timer(cmd);
37223 diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
37224 index 8badcb4..94c9ac6 100644
37225 --- a/drivers/target/target_core_alua.c
37226 +++ b/drivers/target/target_core_alua.c
37227 @@ -723,6 +723,8 @@ static int core_alua_update_tpg_primary_metadata(
37228 char path[ALUA_METADATA_PATH_LEN];
37229 int len;
37230
37231 + pax_track_stack();
37232 +
37233 memset(path, 0, ALUA_METADATA_PATH_LEN);
37234
37235 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
37236 @@ -986,6 +988,8 @@ static int core_alua_update_tpg_secondary_metadata(
37237 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
37238 int len;
37239
37240 + pax_track_stack();
37241 +
37242 memset(path, 0, ALUA_METADATA_PATH_LEN);
37243 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
37244
37245 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37246 index f04d4ef..7de212b 100644
37247 --- a/drivers/target/target_core_cdb.c
37248 +++ b/drivers/target/target_core_cdb.c
37249 @@ -933,6 +933,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
37250 int length = 0;
37251 unsigned char buf[SE_MODE_PAGE_BUF];
37252
37253 + pax_track_stack();
37254 +
37255 memset(buf, 0, SE_MODE_PAGE_BUF);
37256
37257 switch (cdb[2] & 0x3f) {
37258 diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
37259 index b2575d8..b6b28fd 100644
37260 --- a/drivers/target/target_core_configfs.c
37261 +++ b/drivers/target/target_core_configfs.c
37262 @@ -1267,6 +1267,8 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
37263 ssize_t len = 0;
37264 int reg_count = 0, prf_isid;
37265
37266 + pax_track_stack();
37267 +
37268 if (!su_dev->se_dev_ptr)
37269 return -ENODEV;
37270
37271 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37272 index 7fd3a16..bc2fb3e 100644
37273 --- a/drivers/target/target_core_pr.c
37274 +++ b/drivers/target/target_core_pr.c
37275 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_registration(
37276 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
37277 u16 tpgt;
37278
37279 + pax_track_stack();
37280 +
37281 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
37282 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
37283 /*
37284 @@ -1867,6 +1869,8 @@ static int __core_scsi3_update_aptpl_buf(
37285 ssize_t len = 0;
37286 int reg_count = 0;
37287
37288 + pax_track_stack();
37289 +
37290 memset(buf, 0, pr_aptpl_buf_len);
37291 /*
37292 * Called to clear metadata once APTPL has been deactivated.
37293 @@ -1989,6 +1993,8 @@ static int __core_scsi3_write_aptpl_to_file(
37294 char path[512];
37295 int ret;
37296
37297 + pax_track_stack();
37298 +
37299 memset(iov, 0, sizeof(struct iovec));
37300 memset(path, 0, 512);
37301
37302 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37303 index 5c1b8c5..0cb7d0e 100644
37304 --- a/drivers/target/target_core_tmr.c
37305 +++ b/drivers/target/target_core_tmr.c
37306 @@ -255,7 +255,7 @@ static void core_tmr_drain_task_list(
37307 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37308 cmd->t_task_list_num,
37309 atomic_read(&cmd->t_task_cdbs_left),
37310 - atomic_read(&cmd->t_task_cdbs_sent),
37311 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37312 atomic_read(&cmd->t_transport_active),
37313 atomic_read(&cmd->t_transport_stop),
37314 atomic_read(&cmd->t_transport_sent));
37315 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37316 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37317 " task: %p, t_fe_count: %d dev: %p\n", task,
37318 fe_count, dev);
37319 - atomic_set(&cmd->t_transport_aborted, 1);
37320 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37321 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37322
37323 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37324 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
37325 }
37326 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
37327 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
37328 - atomic_set(&cmd->t_transport_aborted, 1);
37329 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37330 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37331
37332 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37333 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37334 index 013c100..8fd2e57 100644
37335 --- a/drivers/target/target_core_transport.c
37336 +++ b/drivers/target/target_core_transport.c
37337 @@ -1445,7 +1445,7 @@ struct se_device *transport_add_device_to_core_hba(
37338
37339 dev->queue_depth = dev_limits->queue_depth;
37340 atomic_set(&dev->depth_left, dev->queue_depth);
37341 - atomic_set(&dev->dev_ordered_id, 0);
37342 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37343
37344 se_dev_set_default_attribs(dev, dev_limits);
37345
37346 @@ -1633,7 +1633,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37347 * Used to determine when ORDERED commands should go from
37348 * Dormant to Active status.
37349 */
37350 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37351 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37352 smp_mb__after_atomic_inc();
37353 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37354 cmd->se_ordered_id, cmd->sam_task_attr,
37355 @@ -1960,7 +1960,7 @@ static void transport_generic_request_failure(
37356 " t_transport_active: %d t_transport_stop: %d"
37357 " t_transport_sent: %d\n", cmd->t_task_list_num,
37358 atomic_read(&cmd->t_task_cdbs_left),
37359 - atomic_read(&cmd->t_task_cdbs_sent),
37360 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37361 atomic_read(&cmd->t_task_cdbs_ex_left),
37362 atomic_read(&cmd->t_transport_active),
37363 atomic_read(&cmd->t_transport_stop),
37364 @@ -2460,9 +2460,9 @@ check_depth:
37365 spin_lock_irqsave(&cmd->t_state_lock, flags);
37366 atomic_set(&task->task_active, 1);
37367 atomic_set(&task->task_sent, 1);
37368 - atomic_inc(&cmd->t_task_cdbs_sent);
37369 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37370
37371 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37372 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37373 cmd->t_task_list_num)
37374 atomic_set(&cmd->transport_sent, 1);
37375
37376 @@ -4665,7 +4665,7 @@ static void transport_generic_wait_for_tasks(
37377 atomic_set(&cmd->transport_lun_stop, 0);
37378 }
37379 if (!atomic_read(&cmd->t_transport_active) ||
37380 - atomic_read(&cmd->t_transport_aborted))
37381 + atomic_read_unchecked(&cmd->t_transport_aborted))
37382 goto remove;
37383
37384 atomic_set(&cmd->t_transport_stop, 1);
37385 @@ -4900,7 +4900,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37386 {
37387 int ret = 0;
37388
37389 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
37390 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37391 if (!send_status ||
37392 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37393 return 1;
37394 @@ -4937,7 +4937,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37395 */
37396 if (cmd->data_direction == DMA_TO_DEVICE) {
37397 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37398 - atomic_inc(&cmd->t_transport_aborted);
37399 + atomic_inc_unchecked(&cmd->t_transport_aborted);
37400 smp_mb__after_atomic_inc();
37401 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
37402 transport_new_cmd_failure(cmd);
37403 @@ -5051,7 +5051,7 @@ static void transport_processing_shutdown(struct se_device *dev)
37404 cmd->se_tfo->get_task_tag(cmd),
37405 cmd->t_task_list_num,
37406 atomic_read(&cmd->t_task_cdbs_left),
37407 - atomic_read(&cmd->t_task_cdbs_sent),
37408 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37409 atomic_read(&cmd->t_transport_active),
37410 atomic_read(&cmd->t_transport_stop),
37411 atomic_read(&cmd->t_transport_sent));
37412 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
37413 index d5f923b..9c78228 100644
37414 --- a/drivers/telephony/ixj.c
37415 +++ b/drivers/telephony/ixj.c
37416 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
37417 bool mContinue;
37418 char *pIn, *pOut;
37419
37420 + pax_track_stack();
37421 +
37422 if (!SCI_Prepare(j))
37423 return 0;
37424
37425 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37426 index 4c8b665..1d931eb 100644
37427 --- a/drivers/tty/hvc/hvcs.c
37428 +++ b/drivers/tty/hvc/hvcs.c
37429 @@ -83,6 +83,7 @@
37430 #include <asm/hvcserver.h>
37431 #include <asm/uaccess.h>
37432 #include <asm/vio.h>
37433 +#include <asm/local.h>
37434
37435 /*
37436 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37437 @@ -270,7 +271,7 @@ struct hvcs_struct {
37438 unsigned int index;
37439
37440 struct tty_struct *tty;
37441 - int open_count;
37442 + local_t open_count;
37443
37444 /*
37445 * Used to tell the driver kernel_thread what operations need to take
37446 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37447
37448 spin_lock_irqsave(&hvcsd->lock, flags);
37449
37450 - if (hvcsd->open_count > 0) {
37451 + if (local_read(&hvcsd->open_count) > 0) {
37452 spin_unlock_irqrestore(&hvcsd->lock, flags);
37453 printk(KERN_INFO "HVCS: vterm state unchanged. "
37454 "The hvcs device node is still in use.\n");
37455 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37456 if ((retval = hvcs_partner_connect(hvcsd)))
37457 goto error_release;
37458
37459 - hvcsd->open_count = 1;
37460 + local_set(&hvcsd->open_count, 1);
37461 hvcsd->tty = tty;
37462 tty->driver_data = hvcsd;
37463
37464 @@ -1179,7 +1180,7 @@ fast_open:
37465
37466 spin_lock_irqsave(&hvcsd->lock, flags);
37467 kref_get(&hvcsd->kref);
37468 - hvcsd->open_count++;
37469 + local_inc(&hvcsd->open_count);
37470 hvcsd->todo_mask |= HVCS_SCHED_READ;
37471 spin_unlock_irqrestore(&hvcsd->lock, flags);
37472
37473 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37474 hvcsd = tty->driver_data;
37475
37476 spin_lock_irqsave(&hvcsd->lock, flags);
37477 - if (--hvcsd->open_count == 0) {
37478 + if (local_dec_and_test(&hvcsd->open_count)) {
37479
37480 vio_disable_interrupts(hvcsd->vdev);
37481
37482 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37483 free_irq(irq, hvcsd);
37484 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37485 return;
37486 - } else if (hvcsd->open_count < 0) {
37487 + } else if (local_read(&hvcsd->open_count) < 0) {
37488 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37489 " is missmanaged.\n",
37490 - hvcsd->vdev->unit_address, hvcsd->open_count);
37491 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37492 }
37493
37494 spin_unlock_irqrestore(&hvcsd->lock, flags);
37495 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37496
37497 spin_lock_irqsave(&hvcsd->lock, flags);
37498 /* Preserve this so that we know how many kref refs to put */
37499 - temp_open_count = hvcsd->open_count;
37500 + temp_open_count = local_read(&hvcsd->open_count);
37501
37502 /*
37503 * Don't kref put inside the spinlock because the destruction
37504 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37505 hvcsd->tty->driver_data = NULL;
37506 hvcsd->tty = NULL;
37507
37508 - hvcsd->open_count = 0;
37509 + local_set(&hvcsd->open_count, 0);
37510
37511 /* This will drop any buffered data on the floor which is OK in a hangup
37512 * scenario. */
37513 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37514 * the middle of a write operation? This is a crummy place to do this
37515 * but we want to keep it all in the spinlock.
37516 */
37517 - if (hvcsd->open_count <= 0) {
37518 + if (local_read(&hvcsd->open_count) <= 0) {
37519 spin_unlock_irqrestore(&hvcsd->lock, flags);
37520 return -ENODEV;
37521 }
37522 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37523 {
37524 struct hvcs_struct *hvcsd = tty->driver_data;
37525
37526 - if (!hvcsd || hvcsd->open_count <= 0)
37527 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37528 return 0;
37529
37530 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37531 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37532 index ef92869..f4ebd88 100644
37533 --- a/drivers/tty/ipwireless/tty.c
37534 +++ b/drivers/tty/ipwireless/tty.c
37535 @@ -29,6 +29,7 @@
37536 #include <linux/tty_driver.h>
37537 #include <linux/tty_flip.h>
37538 #include <linux/uaccess.h>
37539 +#include <asm/local.h>
37540
37541 #include "tty.h"
37542 #include "network.h"
37543 @@ -51,7 +52,7 @@ struct ipw_tty {
37544 int tty_type;
37545 struct ipw_network *network;
37546 struct tty_struct *linux_tty;
37547 - int open_count;
37548 + local_t open_count;
37549 unsigned int control_lines;
37550 struct mutex ipw_tty_mutex;
37551 int tx_bytes_queued;
37552 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37553 mutex_unlock(&tty->ipw_tty_mutex);
37554 return -ENODEV;
37555 }
37556 - if (tty->open_count == 0)
37557 + if (local_read(&tty->open_count) == 0)
37558 tty->tx_bytes_queued = 0;
37559
37560 - tty->open_count++;
37561 + local_inc(&tty->open_count);
37562
37563 tty->linux_tty = linux_tty;
37564 linux_tty->driver_data = tty;
37565 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37566
37567 static void do_ipw_close(struct ipw_tty *tty)
37568 {
37569 - tty->open_count--;
37570 -
37571 - if (tty->open_count == 0) {
37572 + if (local_dec_return(&tty->open_count) == 0) {
37573 struct tty_struct *linux_tty = tty->linux_tty;
37574
37575 if (linux_tty != NULL) {
37576 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37577 return;
37578
37579 mutex_lock(&tty->ipw_tty_mutex);
37580 - if (tty->open_count == 0) {
37581 + if (local_read(&tty->open_count) == 0) {
37582 mutex_unlock(&tty->ipw_tty_mutex);
37583 return;
37584 }
37585 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37586 return;
37587 }
37588
37589 - if (!tty->open_count) {
37590 + if (!local_read(&tty->open_count)) {
37591 mutex_unlock(&tty->ipw_tty_mutex);
37592 return;
37593 }
37594 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37595 return -ENODEV;
37596
37597 mutex_lock(&tty->ipw_tty_mutex);
37598 - if (!tty->open_count) {
37599 + if (!local_read(&tty->open_count)) {
37600 mutex_unlock(&tty->ipw_tty_mutex);
37601 return -EINVAL;
37602 }
37603 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37604 if (!tty)
37605 return -ENODEV;
37606
37607 - if (!tty->open_count)
37608 + if (!local_read(&tty->open_count))
37609 return -EINVAL;
37610
37611 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37612 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37613 if (!tty)
37614 return 0;
37615
37616 - if (!tty->open_count)
37617 + if (!local_read(&tty->open_count))
37618 return 0;
37619
37620 return tty->tx_bytes_queued;
37621 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37622 if (!tty)
37623 return -ENODEV;
37624
37625 - if (!tty->open_count)
37626 + if (!local_read(&tty->open_count))
37627 return -EINVAL;
37628
37629 return get_control_lines(tty);
37630 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37631 if (!tty)
37632 return -ENODEV;
37633
37634 - if (!tty->open_count)
37635 + if (!local_read(&tty->open_count))
37636 return -EINVAL;
37637
37638 return set_control_lines(tty, set, clear);
37639 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37640 if (!tty)
37641 return -ENODEV;
37642
37643 - if (!tty->open_count)
37644 + if (!local_read(&tty->open_count))
37645 return -EINVAL;
37646
37647 /* FIXME: Exactly how is the tty object locked here .. */
37648 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37649 against a parallel ioctl etc */
37650 mutex_lock(&ttyj->ipw_tty_mutex);
37651 }
37652 - while (ttyj->open_count)
37653 + while (local_read(&ttyj->open_count))
37654 do_ipw_close(ttyj);
37655 ipwireless_disassociate_network_ttys(network,
37656 ttyj->channel_idx);
37657 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37658 index 8a50e4e..7d9ca3d 100644
37659 --- a/drivers/tty/n_gsm.c
37660 +++ b/drivers/tty/n_gsm.c
37661 @@ -1625,7 +1625,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37662 kref_init(&dlci->ref);
37663 mutex_init(&dlci->mutex);
37664 dlci->fifo = &dlci->_fifo;
37665 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37666 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37667 kfree(dlci);
37668 return NULL;
37669 }
37670 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37671 index 39d6ab6..eb97f41 100644
37672 --- a/drivers/tty/n_tty.c
37673 +++ b/drivers/tty/n_tty.c
37674 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37675 {
37676 *ops = tty_ldisc_N_TTY;
37677 ops->owner = NULL;
37678 - ops->refcount = ops->flags = 0;
37679 + atomic_set(&ops->refcount, 0);
37680 + ops->flags = 0;
37681 }
37682 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37683 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37684 index e18604b..a7d5a11 100644
37685 --- a/drivers/tty/pty.c
37686 +++ b/drivers/tty/pty.c
37687 @@ -773,8 +773,10 @@ static void __init unix98_pty_init(void)
37688 register_sysctl_table(pty_root_table);
37689
37690 /* Now create the /dev/ptmx special device */
37691 + pax_open_kernel();
37692 tty_default_fops(&ptmx_fops);
37693 - ptmx_fops.open = ptmx_open;
37694 + *(void **)&ptmx_fops.open = ptmx_open;
37695 + pax_close_kernel();
37696
37697 cdev_init(&ptmx_cdev, &ptmx_fops);
37698 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37699 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
37700 index 6a1241c..d04ab0d 100644
37701 --- a/drivers/tty/rocket.c
37702 +++ b/drivers/tty/rocket.c
37703 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
37704 struct rocket_ports tmp;
37705 int board;
37706
37707 + pax_track_stack();
37708 +
37709 if (!retports)
37710 return -EFAULT;
37711 memset(&tmp, 0, sizeof (tmp));
37712 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37713 index 87e7e6c..89744e0 100644
37714 --- a/drivers/tty/serial/kgdboc.c
37715 +++ b/drivers/tty/serial/kgdboc.c
37716 @@ -23,8 +23,9 @@
37717 #define MAX_CONFIG_LEN 40
37718
37719 static struct kgdb_io kgdboc_io_ops;
37720 +static struct kgdb_io kgdboc_io_ops_console;
37721
37722 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37723 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37724 static int configured = -1;
37725
37726 static char config[MAX_CONFIG_LEN];
37727 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
37728 kgdboc_unregister_kbd();
37729 if (configured == 1)
37730 kgdb_unregister_io_module(&kgdboc_io_ops);
37731 + else if (configured == 2)
37732 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37733 }
37734
37735 static int configure_kgdboc(void)
37736 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
37737 int err;
37738 char *cptr = config;
37739 struct console *cons;
37740 + int is_console = 0;
37741
37742 err = kgdboc_option_setup(config);
37743 if (err || !strlen(config) || isspace(config[0]))
37744 goto noconfig;
37745
37746 err = -ENODEV;
37747 - kgdboc_io_ops.is_console = 0;
37748 kgdb_tty_driver = NULL;
37749
37750 kgdboc_use_kms = 0;
37751 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
37752 int idx;
37753 if (cons->device && cons->device(cons, &idx) == p &&
37754 idx == tty_line) {
37755 - kgdboc_io_ops.is_console = 1;
37756 + is_console = 1;
37757 break;
37758 }
37759 cons = cons->next;
37760 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
37761 kgdb_tty_line = tty_line;
37762
37763 do_register:
37764 - err = kgdb_register_io_module(&kgdboc_io_ops);
37765 + if (is_console) {
37766 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
37767 + configured = 2;
37768 + } else {
37769 + err = kgdb_register_io_module(&kgdboc_io_ops);
37770 + configured = 1;
37771 + }
37772 if (err)
37773 goto noconfig;
37774
37775 - configured = 1;
37776 -
37777 return 0;
37778
37779 noconfig:
37780 @@ -212,7 +219,7 @@ noconfig:
37781 static int __init init_kgdboc(void)
37782 {
37783 /* Already configured? */
37784 - if (configured == 1)
37785 + if (configured >= 1)
37786 return 0;
37787
37788 return configure_kgdboc();
37789 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
37790 if (config[len - 1] == '\n')
37791 config[len - 1] = '\0';
37792
37793 - if (configured == 1)
37794 + if (configured >= 1)
37795 cleanup_kgdboc();
37796
37797 /* Go and configure with the new params. */
37798 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
37799 .post_exception = kgdboc_post_exp_handler,
37800 };
37801
37802 +static struct kgdb_io kgdboc_io_ops_console = {
37803 + .name = "kgdboc",
37804 + .read_char = kgdboc_get_char,
37805 + .write_char = kgdboc_put_char,
37806 + .pre_exception = kgdboc_pre_exp_handler,
37807 + .post_exception = kgdboc_post_exp_handler,
37808 + .is_console = 1
37809 +};
37810 +
37811 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
37812 /* This is only available if kgdboc is a built in for early debugging */
37813 static int __init kgdboc_early_init(char *opt)
37814 diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
37815 index cab52f4..29fc6aa 100644
37816 --- a/drivers/tty/serial/mfd.c
37817 +++ b/drivers/tty/serial/mfd.c
37818 @@ -1423,7 +1423,7 @@ static void serial_hsu_remove(struct pci_dev *pdev)
37819 }
37820
37821 /* First 3 are UART ports, and the 4th is the DMA */
37822 -static const struct pci_device_id pci_ids[] __devinitdata = {
37823 +static const struct pci_device_id pci_ids[] __devinitconst = {
37824 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081B) },
37825 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081C) },
37826 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081D) },
37827 diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
37828 index 23bc743..d425c07 100644
37829 --- a/drivers/tty/serial/mrst_max3110.c
37830 +++ b/drivers/tty/serial/mrst_max3110.c
37831 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct uart_max3110 *max)
37832 int loop = 1, num, total = 0;
37833 u8 recv_buf[512], *pbuf;
37834
37835 + pax_track_stack();
37836 +
37837 pbuf = recv_buf;
37838 do {
37839 num = max3110_read_multi(max, pbuf);
37840 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
37841 index 1a890e2..1d8139c 100644
37842 --- a/drivers/tty/tty_io.c
37843 +++ b/drivers/tty/tty_io.c
37844 @@ -3238,7 +3238,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
37845
37846 void tty_default_fops(struct file_operations *fops)
37847 {
37848 - *fops = tty_fops;
37849 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
37850 }
37851
37852 /*
37853 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
37854 index a76c808..ecbc743 100644
37855 --- a/drivers/tty/tty_ldisc.c
37856 +++ b/drivers/tty/tty_ldisc.c
37857 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *ld)
37858 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
37859 struct tty_ldisc_ops *ldo = ld->ops;
37860
37861 - ldo->refcount--;
37862 + atomic_dec(&ldo->refcount);
37863 module_put(ldo->owner);
37864 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37865
37866 @@ -110,7 +110,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
37867 spin_lock_irqsave(&tty_ldisc_lock, flags);
37868 tty_ldiscs[disc] = new_ldisc;
37869 new_ldisc->num = disc;
37870 - new_ldisc->refcount = 0;
37871 + atomic_set(&new_ldisc->refcount, 0);
37872 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37873
37874 return ret;
37875 @@ -138,7 +138,7 @@ int tty_unregister_ldisc(int disc)
37876 return -EINVAL;
37877
37878 spin_lock_irqsave(&tty_ldisc_lock, flags);
37879 - if (tty_ldiscs[disc]->refcount)
37880 + if (atomic_read(&tty_ldiscs[disc]->refcount))
37881 ret = -EBUSY;
37882 else
37883 tty_ldiscs[disc] = NULL;
37884 @@ -159,7 +159,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
37885 if (ldops) {
37886 ret = ERR_PTR(-EAGAIN);
37887 if (try_module_get(ldops->owner)) {
37888 - ldops->refcount++;
37889 + atomic_inc(&ldops->refcount);
37890 ret = ldops;
37891 }
37892 }
37893 @@ -172,7 +172,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
37894 unsigned long flags;
37895
37896 spin_lock_irqsave(&tty_ldisc_lock, flags);
37897 - ldops->refcount--;
37898 + atomic_dec(&ldops->refcount);
37899 module_put(ldops->owner);
37900 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
37901 }
37902 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
37903 index 3761ccf..2c613b3 100644
37904 --- a/drivers/tty/vt/keyboard.c
37905 +++ b/drivers/tty/vt/keyboard.c
37906 @@ -656,6 +656,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
37907 kbd->kbdmode == VC_OFF) &&
37908 value != KVAL(K_SAK))
37909 return; /* SAK is allowed even in raw mode */
37910 +
37911 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
37912 + {
37913 + void *func = fn_handler[value];
37914 + if (func == fn_show_state || func == fn_show_ptregs ||
37915 + func == fn_show_mem)
37916 + return;
37917 + }
37918 +#endif
37919 +
37920 fn_handler[value](vc);
37921 }
37922
37923 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
37924 index b3915b7..e716839 100644
37925 --- a/drivers/tty/vt/vt.c
37926 +++ b/drivers/tty/vt/vt.c
37927 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
37928
37929 static void notify_write(struct vc_data *vc, unsigned int unicode)
37930 {
37931 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
37932 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
37933 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
37934 }
37935
37936 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
37937 index 5e096f4..0da1363 100644
37938 --- a/drivers/tty/vt/vt_ioctl.c
37939 +++ b/drivers/tty/vt/vt_ioctl.c
37940 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37941 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
37942 return -EFAULT;
37943
37944 - if (!capable(CAP_SYS_TTY_CONFIG))
37945 - perm = 0;
37946 -
37947 switch (cmd) {
37948 case KDGKBENT:
37949 key_map = key_maps[s];
37950 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
37951 val = (i ? K_HOLE : K_NOSUCHMAP);
37952 return put_user(val, &user_kbe->kb_value);
37953 case KDSKBENT:
37954 + if (!capable(CAP_SYS_TTY_CONFIG))
37955 + perm = 0;
37956 +
37957 if (!perm)
37958 return -EPERM;
37959 if (!i && v == K_NOSUCHMAP) {
37960 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37961 int i, j, k;
37962 int ret;
37963
37964 - if (!capable(CAP_SYS_TTY_CONFIG))
37965 - perm = 0;
37966 -
37967 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
37968 if (!kbs) {
37969 ret = -ENOMEM;
37970 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
37971 kfree(kbs);
37972 return ((p && *p) ? -EOVERFLOW : 0);
37973 case KDSKBSENT:
37974 + if (!capable(CAP_SYS_TTY_CONFIG))
37975 + perm = 0;
37976 +
37977 if (!perm) {
37978 ret = -EPERM;
37979 goto reterr;
37980 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
37981 index d2efe82..9440ab6 100644
37982 --- a/drivers/uio/uio.c
37983 +++ b/drivers/uio/uio.c
37984 @@ -25,6 +25,7 @@
37985 #include <linux/kobject.h>
37986 #include <linux/cdev.h>
37987 #include <linux/uio_driver.h>
37988 +#include <asm/local.h>
37989
37990 #define UIO_MAX_DEVICES (1U << MINORBITS)
37991
37992 @@ -32,10 +33,10 @@ struct uio_device {
37993 struct module *owner;
37994 struct device *dev;
37995 int minor;
37996 - atomic_t event;
37997 + atomic_unchecked_t event;
37998 struct fasync_struct *async_queue;
37999 wait_queue_head_t wait;
38000 - int vma_count;
38001 + local_t vma_count;
38002 struct uio_info *info;
38003 struct kobject *map_dir;
38004 struct kobject *portio_dir;
38005 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38006 struct device_attribute *attr, char *buf)
38007 {
38008 struct uio_device *idev = dev_get_drvdata(dev);
38009 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38010 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38011 }
38012
38013 static struct device_attribute uio_class_attributes[] = {
38014 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38015 {
38016 struct uio_device *idev = info->uio_dev;
38017
38018 - atomic_inc(&idev->event);
38019 + atomic_inc_unchecked(&idev->event);
38020 wake_up_interruptible(&idev->wait);
38021 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38022 }
38023 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38024 }
38025
38026 listener->dev = idev;
38027 - listener->event_count = atomic_read(&idev->event);
38028 + listener->event_count = atomic_read_unchecked(&idev->event);
38029 filep->private_data = listener;
38030
38031 if (idev->info->open) {
38032 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38033 return -EIO;
38034
38035 poll_wait(filep, &idev->wait, wait);
38036 - if (listener->event_count != atomic_read(&idev->event))
38037 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38038 return POLLIN | POLLRDNORM;
38039 return 0;
38040 }
38041 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38042 do {
38043 set_current_state(TASK_INTERRUPTIBLE);
38044
38045 - event_count = atomic_read(&idev->event);
38046 + event_count = atomic_read_unchecked(&idev->event);
38047 if (event_count != listener->event_count) {
38048 if (copy_to_user(buf, &event_count, count))
38049 retval = -EFAULT;
38050 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38051 static void uio_vma_open(struct vm_area_struct *vma)
38052 {
38053 struct uio_device *idev = vma->vm_private_data;
38054 - idev->vma_count++;
38055 + local_inc(&idev->vma_count);
38056 }
38057
38058 static void uio_vma_close(struct vm_area_struct *vma)
38059 {
38060 struct uio_device *idev = vma->vm_private_data;
38061 - idev->vma_count--;
38062 + local_dec(&idev->vma_count);
38063 }
38064
38065 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38066 @@ -823,7 +824,7 @@ int __uio_register_device(struct module *owner,
38067 idev->owner = owner;
38068 idev->info = info;
38069 init_waitqueue_head(&idev->wait);
38070 - atomic_set(&idev->event, 0);
38071 + atomic_set_unchecked(&idev->event, 0);
38072
38073 ret = uio_get_minor(idev);
38074 if (ret)
38075 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38076 index a845f8b..4f54072 100644
38077 --- a/drivers/usb/atm/cxacru.c
38078 +++ b/drivers/usb/atm/cxacru.c
38079 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38080 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38081 if (ret < 2)
38082 return -EINVAL;
38083 - if (index < 0 || index > 0x7f)
38084 + if (index > 0x7f)
38085 return -EINVAL;
38086 pos += tmp;
38087
38088 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38089 index d3448ca..d2864ca 100644
38090 --- a/drivers/usb/atm/usbatm.c
38091 +++ b/drivers/usb/atm/usbatm.c
38092 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38093 if (printk_ratelimit())
38094 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38095 __func__, vpi, vci);
38096 - atomic_inc(&vcc->stats->rx_err);
38097 + atomic_inc_unchecked(&vcc->stats->rx_err);
38098 return;
38099 }
38100
38101 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38102 if (length > ATM_MAX_AAL5_PDU) {
38103 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38104 __func__, length, vcc);
38105 - atomic_inc(&vcc->stats->rx_err);
38106 + atomic_inc_unchecked(&vcc->stats->rx_err);
38107 goto out;
38108 }
38109
38110 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38111 if (sarb->len < pdu_length) {
38112 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38113 __func__, pdu_length, sarb->len, vcc);
38114 - atomic_inc(&vcc->stats->rx_err);
38115 + atomic_inc_unchecked(&vcc->stats->rx_err);
38116 goto out;
38117 }
38118
38119 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38120 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38121 __func__, vcc);
38122 - atomic_inc(&vcc->stats->rx_err);
38123 + atomic_inc_unchecked(&vcc->stats->rx_err);
38124 goto out;
38125 }
38126
38127 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38128 if (printk_ratelimit())
38129 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38130 __func__, length);
38131 - atomic_inc(&vcc->stats->rx_drop);
38132 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38133 goto out;
38134 }
38135
38136 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38137
38138 vcc->push(vcc, skb);
38139
38140 - atomic_inc(&vcc->stats->rx);
38141 + atomic_inc_unchecked(&vcc->stats->rx);
38142 out:
38143 skb_trim(sarb, 0);
38144 }
38145 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38146 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38147
38148 usbatm_pop(vcc, skb);
38149 - atomic_inc(&vcc->stats->tx);
38150 + atomic_inc_unchecked(&vcc->stats->tx);
38151
38152 skb = skb_dequeue(&instance->sndqueue);
38153 }
38154 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38155 if (!left--)
38156 return sprintf(page,
38157 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38158 - atomic_read(&atm_dev->stats.aal5.tx),
38159 - atomic_read(&atm_dev->stats.aal5.tx_err),
38160 - atomic_read(&atm_dev->stats.aal5.rx),
38161 - atomic_read(&atm_dev->stats.aal5.rx_err),
38162 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38163 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38164 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38165 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38166 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38167 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38168
38169 if (!left--) {
38170 if (instance->disconnected)
38171 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38172 index 0149c09..f108812 100644
38173 --- a/drivers/usb/core/devices.c
38174 +++ b/drivers/usb/core/devices.c
38175 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38176 * time it gets called.
38177 */
38178 static struct device_connect_event {
38179 - atomic_t count;
38180 + atomic_unchecked_t count;
38181 wait_queue_head_t wait;
38182 } device_event = {
38183 .count = ATOMIC_INIT(1),
38184 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38185
38186 void usbfs_conn_disc_event(void)
38187 {
38188 - atomic_add(2, &device_event.count);
38189 + atomic_add_unchecked(2, &device_event.count);
38190 wake_up(&device_event.wait);
38191 }
38192
38193 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38194
38195 poll_wait(file, &device_event.wait, wait);
38196
38197 - event_count = atomic_read(&device_event.count);
38198 + event_count = atomic_read_unchecked(&device_event.count);
38199 if (file->f_version != event_count) {
38200 file->f_version = event_count;
38201 return POLLIN | POLLRDNORM;
38202 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
38203 index 0b5ec23..0da3d76 100644
38204 --- a/drivers/usb/core/message.c
38205 +++ b/drivers/usb/core/message.c
38206 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
38207 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38208 if (buf) {
38209 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38210 - if (len > 0) {
38211 - smallbuf = kmalloc(++len, GFP_NOIO);
38212 + if (len++ > 0) {
38213 + smallbuf = kmalloc(len, GFP_NOIO);
38214 if (!smallbuf)
38215 return buf;
38216 memcpy(smallbuf, buf, len);
38217 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38218 index 1fc8f12..20647c1 100644
38219 --- a/drivers/usb/early/ehci-dbgp.c
38220 +++ b/drivers/usb/early/ehci-dbgp.c
38221 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38222
38223 #ifdef CONFIG_KGDB
38224 static struct kgdb_io kgdbdbgp_io_ops;
38225 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38226 +static struct kgdb_io kgdbdbgp_io_ops_console;
38227 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38228 #else
38229 #define dbgp_kgdb_mode (0)
38230 #endif
38231 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38232 .write_char = kgdbdbgp_write_char,
38233 };
38234
38235 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38236 + .name = "kgdbdbgp",
38237 + .read_char = kgdbdbgp_read_char,
38238 + .write_char = kgdbdbgp_write_char,
38239 + .is_console = 1
38240 +};
38241 +
38242 static int kgdbdbgp_wait_time;
38243
38244 static int __init kgdbdbgp_parse_config(char *str)
38245 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38246 ptr++;
38247 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38248 }
38249 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38250 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38251 + if (early_dbgp_console.index != -1)
38252 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38253 + else
38254 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38255
38256 return 0;
38257 }
38258 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
38259 index d718033..6075579 100644
38260 --- a/drivers/usb/host/xhci-mem.c
38261 +++ b/drivers/usb/host/xhci-mem.c
38262 @@ -1685,6 +1685,8 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
38263 unsigned int num_tests;
38264 int i, ret;
38265
38266 + pax_track_stack();
38267 +
38268 num_tests = ARRAY_SIZE(simple_test_vector);
38269 for (i = 0; i < num_tests; i++) {
38270 ret = xhci_test_trb_in_td(xhci,
38271 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38272 index d6bea3e..60b250e 100644
38273 --- a/drivers/usb/wusbcore/wa-hc.h
38274 +++ b/drivers/usb/wusbcore/wa-hc.h
38275 @@ -192,7 +192,7 @@ struct wahc {
38276 struct list_head xfer_delayed_list;
38277 spinlock_t xfer_list_lock;
38278 struct work_struct xfer_work;
38279 - atomic_t xfer_id_count;
38280 + atomic_unchecked_t xfer_id_count;
38281 };
38282
38283
38284 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38285 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38286 spin_lock_init(&wa->xfer_list_lock);
38287 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38288 - atomic_set(&wa->xfer_id_count, 1);
38289 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38290 }
38291
38292 /**
38293 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38294 index 4193345..49ae93d 100644
38295 --- a/drivers/usb/wusbcore/wa-xfer.c
38296 +++ b/drivers/usb/wusbcore/wa-xfer.c
38297 @@ -295,7 +295,7 @@ out:
38298 */
38299 static void wa_xfer_id_init(struct wa_xfer *xfer)
38300 {
38301 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38302 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38303 }
38304
38305 /*
38306 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38307 index c14c42b..f955cc2 100644
38308 --- a/drivers/vhost/vhost.c
38309 +++ b/drivers/vhost/vhost.c
38310 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38311 return 0;
38312 }
38313
38314 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38315 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38316 {
38317 struct file *eventfp, *filep = NULL,
38318 *pollstart = NULL, *pollstop = NULL;
38319 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38320 index b0b2ac3..89a4399 100644
38321 --- a/drivers/video/aty/aty128fb.c
38322 +++ b/drivers/video/aty/aty128fb.c
38323 @@ -148,7 +148,7 @@ enum {
38324 };
38325
38326 /* Must match above enum */
38327 -static const char *r128_family[] __devinitdata = {
38328 +static const char *r128_family[] __devinitconst = {
38329 "AGP",
38330 "PCI",
38331 "PRO AGP",
38332 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38333 index 5c3960d..15cf8fc 100644
38334 --- a/drivers/video/fbcmap.c
38335 +++ b/drivers/video/fbcmap.c
38336 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38337 rc = -ENODEV;
38338 goto out;
38339 }
38340 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38341 - !info->fbops->fb_setcmap)) {
38342 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38343 rc = -EINVAL;
38344 goto out1;
38345 }
38346 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38347 index ad93629..ca6a218 100644
38348 --- a/drivers/video/fbmem.c
38349 +++ b/drivers/video/fbmem.c
38350 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38351 image->dx += image->width + 8;
38352 }
38353 } else if (rotate == FB_ROTATE_UD) {
38354 - for (x = 0; x < num && image->dx >= 0; x++) {
38355 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38356 info->fbops->fb_imageblit(info, image);
38357 image->dx -= image->width + 8;
38358 }
38359 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38360 image->dy += image->height + 8;
38361 }
38362 } else if (rotate == FB_ROTATE_CCW) {
38363 - for (x = 0; x < num && image->dy >= 0; x++) {
38364 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38365 info->fbops->fb_imageblit(info, image);
38366 image->dy -= image->height + 8;
38367 }
38368 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
38369 int flags = info->flags;
38370 int ret = 0;
38371
38372 + pax_track_stack();
38373 +
38374 if (var->activate & FB_ACTIVATE_INV_MODE) {
38375 struct fb_videomode mode1, mode2;
38376
38377 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38378 void __user *argp = (void __user *)arg;
38379 long ret = 0;
38380
38381 + pax_track_stack();
38382 +
38383 switch (cmd) {
38384 case FBIOGET_VSCREENINFO:
38385 if (!lock_fb_info(info))
38386 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38387 return -EFAULT;
38388 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38389 return -EINVAL;
38390 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38391 + if (con2fb.framebuffer >= FB_MAX)
38392 return -EINVAL;
38393 if (!registered_fb[con2fb.framebuffer])
38394 request_module("fb%d", con2fb.framebuffer);
38395 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38396 index 5a5d092..265c5ed 100644
38397 --- a/drivers/video/geode/gx1fb_core.c
38398 +++ b/drivers/video/geode/gx1fb_core.c
38399 @@ -29,7 +29,7 @@ static int crt_option = 1;
38400 static char panel_option[32] = "";
38401
38402 /* Modes relevant to the GX1 (taken from modedb.c) */
38403 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38404 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38405 /* 640x480-60 VESA */
38406 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38407 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38408 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38409 index 896e53d..4d87d0b 100644
38410 --- a/drivers/video/gxt4500.c
38411 +++ b/drivers/video/gxt4500.c
38412 @@ -156,7 +156,7 @@ struct gxt4500_par {
38413 static char *mode_option;
38414
38415 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38416 -static const struct fb_videomode defaultmode __devinitdata = {
38417 +static const struct fb_videomode defaultmode __devinitconst = {
38418 .refresh = 60,
38419 .xres = 1280,
38420 .yres = 1024,
38421 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38422 return 0;
38423 }
38424
38425 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38426 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38427 .id = "IBM GXT4500P",
38428 .type = FB_TYPE_PACKED_PIXELS,
38429 .visual = FB_VISUAL_PSEUDOCOLOR,
38430 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38431 index 7672d2e..b56437f 100644
38432 --- a/drivers/video/i810/i810_accel.c
38433 +++ b/drivers/video/i810/i810_accel.c
38434 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38435 }
38436 }
38437 printk("ringbuffer lockup!!!\n");
38438 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38439 i810_report_error(mmio);
38440 par->dev_flags |= LOCKUP;
38441 info->pixmap.scan_align = 1;
38442 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38443 index 318f6fb..9a389c1 100644
38444 --- a/drivers/video/i810/i810_main.c
38445 +++ b/drivers/video/i810/i810_main.c
38446 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38447 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38448
38449 /* PCI */
38450 -static const char *i810_pci_list[] __devinitdata = {
38451 +static const char *i810_pci_list[] __devinitconst = {
38452 "Intel(R) 810 Framebuffer Device" ,
38453 "Intel(R) 810-DC100 Framebuffer Device" ,
38454 "Intel(R) 810E Framebuffer Device" ,
38455 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38456 index de36693..3c63fc2 100644
38457 --- a/drivers/video/jz4740_fb.c
38458 +++ b/drivers/video/jz4740_fb.c
38459 @@ -136,7 +136,7 @@ struct jzfb {
38460 uint32_t pseudo_palette[16];
38461 };
38462
38463 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38464 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38465 .id = "JZ4740 FB",
38466 .type = FB_TYPE_PACKED_PIXELS,
38467 .visual = FB_VISUAL_TRUECOLOR,
38468 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38469 index 3c14e43..eafa544 100644
38470 --- a/drivers/video/logo/logo_linux_clut224.ppm
38471 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38472 @@ -1,1604 +1,1123 @@
38473 P3
38474 -# Standard 224-color Linux logo
38475 80 80
38476 255
38477 - 0 0 0 0 0 0 0 0 0 0 0 0
38478 - 0 0 0 0 0 0 0 0 0 0 0 0
38479 - 0 0 0 0 0 0 0 0 0 0 0 0
38480 - 0 0 0 0 0 0 0 0 0 0 0 0
38481 - 0 0 0 0 0 0 0 0 0 0 0 0
38482 - 0 0 0 0 0 0 0 0 0 0 0 0
38483 - 0 0 0 0 0 0 0 0 0 0 0 0
38484 - 0 0 0 0 0 0 0 0 0 0 0 0
38485 - 0 0 0 0 0 0 0 0 0 0 0 0
38486 - 6 6 6 6 6 6 10 10 10 10 10 10
38487 - 10 10 10 6 6 6 6 6 6 6 6 6
38488 - 0 0 0 0 0 0 0 0 0 0 0 0
38489 - 0 0 0 0 0 0 0 0 0 0 0 0
38490 - 0 0 0 0 0 0 0 0 0 0 0 0
38491 - 0 0 0 0 0 0 0 0 0 0 0 0
38492 - 0 0 0 0 0 0 0 0 0 0 0 0
38493 - 0 0 0 0 0 0 0 0 0 0 0 0
38494 - 0 0 0 0 0 0 0 0 0 0 0 0
38495 - 0 0 0 0 0 0 0 0 0 0 0 0
38496 - 0 0 0 0 0 0 0 0 0 0 0 0
38497 - 0 0 0 0 0 0 0 0 0 0 0 0
38498 - 0 0 0 0 0 0 0 0 0 0 0 0
38499 - 0 0 0 0 0 0 0 0 0 0 0 0
38500 - 0 0 0 0 0 0 0 0 0 0 0 0
38501 - 0 0 0 0 0 0 0 0 0 0 0 0
38502 - 0 0 0 0 0 0 0 0 0 0 0 0
38503 - 0 0 0 0 0 0 0 0 0 0 0 0
38504 - 0 0 0 0 0 0 0 0 0 0 0 0
38505 - 0 0 0 6 6 6 10 10 10 14 14 14
38506 - 22 22 22 26 26 26 30 30 30 34 34 34
38507 - 30 30 30 30 30 30 26 26 26 18 18 18
38508 - 14 14 14 10 10 10 6 6 6 0 0 0
38509 - 0 0 0 0 0 0 0 0 0 0 0 0
38510 - 0 0 0 0 0 0 0 0 0 0 0 0
38511 - 0 0 0 0 0 0 0 0 0 0 0 0
38512 - 0 0 0 0 0 0 0 0 0 0 0 0
38513 - 0 0 0 0 0 0 0 0 0 0 0 0
38514 - 0 0 0 0 0 0 0 0 0 0 0 0
38515 - 0 0 0 0 0 0 0 0 0 0 0 0
38516 - 0 0 0 0 0 0 0 0 0 0 0 0
38517 - 0 0 0 0 0 0 0 0 0 0 0 0
38518 - 0 0 0 0 0 1 0 0 1 0 0 0
38519 - 0 0 0 0 0 0 0 0 0 0 0 0
38520 - 0 0 0 0 0 0 0 0 0 0 0 0
38521 - 0 0 0 0 0 0 0 0 0 0 0 0
38522 - 0 0 0 0 0 0 0 0 0 0 0 0
38523 - 0 0 0 0 0 0 0 0 0 0 0 0
38524 - 0 0 0 0 0 0 0 0 0 0 0 0
38525 - 6 6 6 14 14 14 26 26 26 42 42 42
38526 - 54 54 54 66 66 66 78 78 78 78 78 78
38527 - 78 78 78 74 74 74 66 66 66 54 54 54
38528 - 42 42 42 26 26 26 18 18 18 10 10 10
38529 - 6 6 6 0 0 0 0 0 0 0 0 0
38530 - 0 0 0 0 0 0 0 0 0 0 0 0
38531 - 0 0 0 0 0 0 0 0 0 0 0 0
38532 - 0 0 0 0 0 0 0 0 0 0 0 0
38533 - 0 0 0 0 0 0 0 0 0 0 0 0
38534 - 0 0 0 0 0 0 0 0 0 0 0 0
38535 - 0 0 0 0 0 0 0 0 0 0 0 0
38536 - 0 0 0 0 0 0 0 0 0 0 0 0
38537 - 0 0 0 0 0 0 0 0 0 0 0 0
38538 - 0 0 1 0 0 0 0 0 0 0 0 0
38539 - 0 0 0 0 0 0 0 0 0 0 0 0
38540 - 0 0 0 0 0 0 0 0 0 0 0 0
38541 - 0 0 0 0 0 0 0 0 0 0 0 0
38542 - 0 0 0 0 0 0 0 0 0 0 0 0
38543 - 0 0 0 0 0 0 0 0 0 0 0 0
38544 - 0 0 0 0 0 0 0 0 0 10 10 10
38545 - 22 22 22 42 42 42 66 66 66 86 86 86
38546 - 66 66 66 38 38 38 38 38 38 22 22 22
38547 - 26 26 26 34 34 34 54 54 54 66 66 66
38548 - 86 86 86 70 70 70 46 46 46 26 26 26
38549 - 14 14 14 6 6 6 0 0 0 0 0 0
38550 - 0 0 0 0 0 0 0 0 0 0 0 0
38551 - 0 0 0 0 0 0 0 0 0 0 0 0
38552 - 0 0 0 0 0 0 0 0 0 0 0 0
38553 - 0 0 0 0 0 0 0 0 0 0 0 0
38554 - 0 0 0 0 0 0 0 0 0 0 0 0
38555 - 0 0 0 0 0 0 0 0 0 0 0 0
38556 - 0 0 0 0 0 0 0 0 0 0 0 0
38557 - 0 0 0 0 0 0 0 0 0 0 0 0
38558 - 0 0 1 0 0 1 0 0 1 0 0 0
38559 - 0 0 0 0 0 0 0 0 0 0 0 0
38560 - 0 0 0 0 0 0 0 0 0 0 0 0
38561 - 0 0 0 0 0 0 0 0 0 0 0 0
38562 - 0 0 0 0 0 0 0 0 0 0 0 0
38563 - 0 0 0 0 0 0 0 0 0 0 0 0
38564 - 0 0 0 0 0 0 10 10 10 26 26 26
38565 - 50 50 50 82 82 82 58 58 58 6 6 6
38566 - 2 2 6 2 2 6 2 2 6 2 2 6
38567 - 2 2 6 2 2 6 2 2 6 2 2 6
38568 - 6 6 6 54 54 54 86 86 86 66 66 66
38569 - 38 38 38 18 18 18 6 6 6 0 0 0
38570 - 0 0 0 0 0 0 0 0 0 0 0 0
38571 - 0 0 0 0 0 0 0 0 0 0 0 0
38572 - 0 0 0 0 0 0 0 0 0 0 0 0
38573 - 0 0 0 0 0 0 0 0 0 0 0 0
38574 - 0 0 0 0 0 0 0 0 0 0 0 0
38575 - 0 0 0 0 0 0 0 0 0 0 0 0
38576 - 0 0 0 0 0 0 0 0 0 0 0 0
38577 - 0 0 0 0 0 0 0 0 0 0 0 0
38578 - 0 0 0 0 0 0 0 0 0 0 0 0
38579 - 0 0 0 0 0 0 0 0 0 0 0 0
38580 - 0 0 0 0 0 0 0 0 0 0 0 0
38581 - 0 0 0 0 0 0 0 0 0 0 0 0
38582 - 0 0 0 0 0 0 0 0 0 0 0 0
38583 - 0 0 0 0 0 0 0 0 0 0 0 0
38584 - 0 0 0 6 6 6 22 22 22 50 50 50
38585 - 78 78 78 34 34 34 2 2 6 2 2 6
38586 - 2 2 6 2 2 6 2 2 6 2 2 6
38587 - 2 2 6 2 2 6 2 2 6 2 2 6
38588 - 2 2 6 2 2 6 6 6 6 70 70 70
38589 - 78 78 78 46 46 46 22 22 22 6 6 6
38590 - 0 0 0 0 0 0 0 0 0 0 0 0
38591 - 0 0 0 0 0 0 0 0 0 0 0 0
38592 - 0 0 0 0 0 0 0 0 0 0 0 0
38593 - 0 0 0 0 0 0 0 0 0 0 0 0
38594 - 0 0 0 0 0 0 0 0 0 0 0 0
38595 - 0 0 0 0 0 0 0 0 0 0 0 0
38596 - 0 0 0 0 0 0 0 0 0 0 0 0
38597 - 0 0 0 0 0 0 0 0 0 0 0 0
38598 - 0 0 1 0 0 1 0 0 1 0 0 0
38599 - 0 0 0 0 0 0 0 0 0 0 0 0
38600 - 0 0 0 0 0 0 0 0 0 0 0 0
38601 - 0 0 0 0 0 0 0 0 0 0 0 0
38602 - 0 0 0 0 0 0 0 0 0 0 0 0
38603 - 0 0 0 0 0 0 0 0 0 0 0 0
38604 - 6 6 6 18 18 18 42 42 42 82 82 82
38605 - 26 26 26 2 2 6 2 2 6 2 2 6
38606 - 2 2 6 2 2 6 2 2 6 2 2 6
38607 - 2 2 6 2 2 6 2 2 6 14 14 14
38608 - 46 46 46 34 34 34 6 6 6 2 2 6
38609 - 42 42 42 78 78 78 42 42 42 18 18 18
38610 - 6 6 6 0 0 0 0 0 0 0 0 0
38611 - 0 0 0 0 0 0 0 0 0 0 0 0
38612 - 0 0 0 0 0 0 0 0 0 0 0 0
38613 - 0 0 0 0 0 0 0 0 0 0 0 0
38614 - 0 0 0 0 0 0 0 0 0 0 0 0
38615 - 0 0 0 0 0 0 0 0 0 0 0 0
38616 - 0 0 0 0 0 0 0 0 0 0 0 0
38617 - 0 0 0 0 0 0 0 0 0 0 0 0
38618 - 0 0 1 0 0 0 0 0 1 0 0 0
38619 - 0 0 0 0 0 0 0 0 0 0 0 0
38620 - 0 0 0 0 0 0 0 0 0 0 0 0
38621 - 0 0 0 0 0 0 0 0 0 0 0 0
38622 - 0 0 0 0 0 0 0 0 0 0 0 0
38623 - 0 0 0 0 0 0 0 0 0 0 0 0
38624 - 10 10 10 30 30 30 66 66 66 58 58 58
38625 - 2 2 6 2 2 6 2 2 6 2 2 6
38626 - 2 2 6 2 2 6 2 2 6 2 2 6
38627 - 2 2 6 2 2 6 2 2 6 26 26 26
38628 - 86 86 86 101 101 101 46 46 46 10 10 10
38629 - 2 2 6 58 58 58 70 70 70 34 34 34
38630 - 10 10 10 0 0 0 0 0 0 0 0 0
38631 - 0 0 0 0 0 0 0 0 0 0 0 0
38632 - 0 0 0 0 0 0 0 0 0 0 0 0
38633 - 0 0 0 0 0 0 0 0 0 0 0 0
38634 - 0 0 0 0 0 0 0 0 0 0 0 0
38635 - 0 0 0 0 0 0 0 0 0 0 0 0
38636 - 0 0 0 0 0 0 0 0 0 0 0 0
38637 - 0 0 0 0 0 0 0 0 0 0 0 0
38638 - 0 0 1 0 0 1 0 0 1 0 0 0
38639 - 0 0 0 0 0 0 0 0 0 0 0 0
38640 - 0 0 0 0 0 0 0 0 0 0 0 0
38641 - 0 0 0 0 0 0 0 0 0 0 0 0
38642 - 0 0 0 0 0 0 0 0 0 0 0 0
38643 - 0 0 0 0 0 0 0 0 0 0 0 0
38644 - 14 14 14 42 42 42 86 86 86 10 10 10
38645 - 2 2 6 2 2 6 2 2 6 2 2 6
38646 - 2 2 6 2 2 6 2 2 6 2 2 6
38647 - 2 2 6 2 2 6 2 2 6 30 30 30
38648 - 94 94 94 94 94 94 58 58 58 26 26 26
38649 - 2 2 6 6 6 6 78 78 78 54 54 54
38650 - 22 22 22 6 6 6 0 0 0 0 0 0
38651 - 0 0 0 0 0 0 0 0 0 0 0 0
38652 - 0 0 0 0 0 0 0 0 0 0 0 0
38653 - 0 0 0 0 0 0 0 0 0 0 0 0
38654 - 0 0 0 0 0 0 0 0 0 0 0 0
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 0 0 0 0 0 0 0 0 0 0 0 0
38657 - 0 0 0 0 0 0 0 0 0 0 0 0
38658 - 0 0 0 0 0 0 0 0 0 0 0 0
38659 - 0 0 0 0 0 0 0 0 0 0 0 0
38660 - 0 0 0 0 0 0 0 0 0 0 0 0
38661 - 0 0 0 0 0 0 0 0 0 0 0 0
38662 - 0 0 0 0 0 0 0 0 0 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 6 6 6
38664 - 22 22 22 62 62 62 62 62 62 2 2 6
38665 - 2 2 6 2 2 6 2 2 6 2 2 6
38666 - 2 2 6 2 2 6 2 2 6 2 2 6
38667 - 2 2 6 2 2 6 2 2 6 26 26 26
38668 - 54 54 54 38 38 38 18 18 18 10 10 10
38669 - 2 2 6 2 2 6 34 34 34 82 82 82
38670 - 38 38 38 14 14 14 0 0 0 0 0 0
38671 - 0 0 0 0 0 0 0 0 0 0 0 0
38672 - 0 0 0 0 0 0 0 0 0 0 0 0
38673 - 0 0 0 0 0 0 0 0 0 0 0 0
38674 - 0 0 0 0 0 0 0 0 0 0 0 0
38675 - 0 0 0 0 0 0 0 0 0 0 0 0
38676 - 0 0 0 0 0 0 0 0 0 0 0 0
38677 - 0 0 0 0 0 0 0 0 0 0 0 0
38678 - 0 0 0 0 0 1 0 0 1 0 0 0
38679 - 0 0 0 0 0 0 0 0 0 0 0 0
38680 - 0 0 0 0 0 0 0 0 0 0 0 0
38681 - 0 0 0 0 0 0 0 0 0 0 0 0
38682 - 0 0 0 0 0 0 0 0 0 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 6 6 6
38684 - 30 30 30 78 78 78 30 30 30 2 2 6
38685 - 2 2 6 2 2 6 2 2 6 2 2 6
38686 - 2 2 6 2 2 6 2 2 6 2 2 6
38687 - 2 2 6 2 2 6 2 2 6 10 10 10
38688 - 10 10 10 2 2 6 2 2 6 2 2 6
38689 - 2 2 6 2 2 6 2 2 6 78 78 78
38690 - 50 50 50 18 18 18 6 6 6 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 0 0 0 0 0 0 0 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 0 0 0 0 0 0 0 0 0 0 0 0
38696 - 0 0 0 0 0 0 0 0 0 0 0 0
38697 - 0 0 0 0 0 0 0 0 0 0 0 0
38698 - 0 0 1 0 0 0 0 0 0 0 0 0
38699 - 0 0 0 0 0 0 0 0 0 0 0 0
38700 - 0 0 0 0 0 0 0 0 0 0 0 0
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 10 10 10
38704 - 38 38 38 86 86 86 14 14 14 2 2 6
38705 - 2 2 6 2 2 6 2 2 6 2 2 6
38706 - 2 2 6 2 2 6 2 2 6 2 2 6
38707 - 2 2 6 2 2 6 2 2 6 2 2 6
38708 - 2 2 6 2 2 6 2 2 6 2 2 6
38709 - 2 2 6 2 2 6 2 2 6 54 54 54
38710 - 66 66 66 26 26 26 6 6 6 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 0 0 0
38714 - 0 0 0 0 0 0 0 0 0 0 0 0
38715 - 0 0 0 0 0 0 0 0 0 0 0 0
38716 - 0 0 0 0 0 0 0 0 0 0 0 0
38717 - 0 0 0 0 0 0 0 0 0 0 0 0
38718 - 0 0 0 0 0 1 0 0 1 0 0 0
38719 - 0 0 0 0 0 0 0 0 0 0 0 0
38720 - 0 0 0 0 0 0 0 0 0 0 0 0
38721 - 0 0 0 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 14 14 14
38724 - 42 42 42 82 82 82 2 2 6 2 2 6
38725 - 2 2 6 6 6 6 10 10 10 2 2 6
38726 - 2 2 6 2 2 6 2 2 6 2 2 6
38727 - 2 2 6 2 2 6 2 2 6 6 6 6
38728 - 14 14 14 10 10 10 2 2 6 2 2 6
38729 - 2 2 6 2 2 6 2 2 6 18 18 18
38730 - 82 82 82 34 34 34 10 10 10 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 0 0 0 0 0 0 0 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 0 0 0
38734 - 0 0 0 0 0 0 0 0 0 0 0 0
38735 - 0 0 0 0 0 0 0 0 0 0 0 0
38736 - 0 0 0 0 0 0 0 0 0 0 0 0
38737 - 0 0 0 0 0 0 0 0 0 0 0 0
38738 - 0 0 1 0 0 0 0 0 0 0 0 0
38739 - 0 0 0 0 0 0 0 0 0 0 0 0
38740 - 0 0 0 0 0 0 0 0 0 0 0 0
38741 - 0 0 0 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 0 0 0 0 0 0 14 14 14
38744 - 46 46 46 86 86 86 2 2 6 2 2 6
38745 - 6 6 6 6 6 6 22 22 22 34 34 34
38746 - 6 6 6 2 2 6 2 2 6 2 2 6
38747 - 2 2 6 2 2 6 18 18 18 34 34 34
38748 - 10 10 10 50 50 50 22 22 22 2 2 6
38749 - 2 2 6 2 2 6 2 2 6 10 10 10
38750 - 86 86 86 42 42 42 14 14 14 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 0 0 0 0 0 0 0 0 0 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 0 0 0
38754 - 0 0 0 0 0 0 0 0 0 0 0 0
38755 - 0 0 0 0 0 0 0 0 0 0 0 0
38756 - 0 0 0 0 0 0 0 0 0 0 0 0
38757 - 0 0 0 0 0 0 0 0 0 0 0 0
38758 - 0 0 1 0 0 1 0 0 1 0 0 0
38759 - 0 0 0 0 0 0 0 0 0 0 0 0
38760 - 0 0 0 0 0 0 0 0 0 0 0 0
38761 - 0 0 0 0 0 0 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 0 0 0 0 0 0 0 0 0 14 14 14
38764 - 46 46 46 86 86 86 2 2 6 2 2 6
38765 - 38 38 38 116 116 116 94 94 94 22 22 22
38766 - 22 22 22 2 2 6 2 2 6 2 2 6
38767 - 14 14 14 86 86 86 138 138 138 162 162 162
38768 -154 154 154 38 38 38 26 26 26 6 6 6
38769 - 2 2 6 2 2 6 2 2 6 2 2 6
38770 - 86 86 86 46 46 46 14 14 14 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 0 0 0 0 0 0 0 0 0 0 0 0
38773 - 0 0 0 0 0 0 0 0 0 0 0 0
38774 - 0 0 0 0 0 0 0 0 0 0 0 0
38775 - 0 0 0 0 0 0 0 0 0 0 0 0
38776 - 0 0 0 0 0 0 0 0 0 0 0 0
38777 - 0 0 0 0 0 0 0 0 0 0 0 0
38778 - 0 0 0 0 0 0 0 0 0 0 0 0
38779 - 0 0 0 0 0 0 0 0 0 0 0 0
38780 - 0 0 0 0 0 0 0 0 0 0 0 0
38781 - 0 0 0 0 0 0 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 0 0 0 0 0 0 0 0 0 14 14 14
38784 - 46 46 46 86 86 86 2 2 6 14 14 14
38785 -134 134 134 198 198 198 195 195 195 116 116 116
38786 - 10 10 10 2 2 6 2 2 6 6 6 6
38787 -101 98 89 187 187 187 210 210 210 218 218 218
38788 -214 214 214 134 134 134 14 14 14 6 6 6
38789 - 2 2 6 2 2 6 2 2 6 2 2 6
38790 - 86 86 86 50 50 50 18 18 18 6 6 6
38791 - 0 0 0 0 0 0 0 0 0 0 0 0
38792 - 0 0 0 0 0 0 0 0 0 0 0 0
38793 - 0 0 0 0 0 0 0 0 0 0 0 0
38794 - 0 0 0 0 0 0 0 0 0 0 0 0
38795 - 0 0 0 0 0 0 0 0 0 0 0 0
38796 - 0 0 0 0 0 0 0 0 0 0 0 0
38797 - 0 0 0 0 0 0 0 0 1 0 0 0
38798 - 0 0 1 0 0 1 0 0 1 0 0 0
38799 - 0 0 0 0 0 0 0 0 0 0 0 0
38800 - 0 0 0 0 0 0 0 0 0 0 0 0
38801 - 0 0 0 0 0 0 0 0 0 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 0 0 0 0 0 0 0 0 0 14 14 14
38804 - 46 46 46 86 86 86 2 2 6 54 54 54
38805 -218 218 218 195 195 195 226 226 226 246 246 246
38806 - 58 58 58 2 2 6 2 2 6 30 30 30
38807 -210 210 210 253 253 253 174 174 174 123 123 123
38808 -221 221 221 234 234 234 74 74 74 2 2 6
38809 - 2 2 6 2 2 6 2 2 6 2 2 6
38810 - 70 70 70 58 58 58 22 22 22 6 6 6
38811 - 0 0 0 0 0 0 0 0 0 0 0 0
38812 - 0 0 0 0 0 0 0 0 0 0 0 0
38813 - 0 0 0 0 0 0 0 0 0 0 0 0
38814 - 0 0 0 0 0 0 0 0 0 0 0 0
38815 - 0 0 0 0 0 0 0 0 0 0 0 0
38816 - 0 0 0 0 0 0 0 0 0 0 0 0
38817 - 0 0 0 0 0 0 0 0 0 0 0 0
38818 - 0 0 0 0 0 0 0 0 0 0 0 0
38819 - 0 0 0 0 0 0 0 0 0 0 0 0
38820 - 0 0 0 0 0 0 0 0 0 0 0 0
38821 - 0 0 0 0 0 0 0 0 0 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 0 0 0
38823 - 0 0 0 0 0 0 0 0 0 14 14 14
38824 - 46 46 46 82 82 82 2 2 6 106 106 106
38825 -170 170 170 26 26 26 86 86 86 226 226 226
38826 -123 123 123 10 10 10 14 14 14 46 46 46
38827 -231 231 231 190 190 190 6 6 6 70 70 70
38828 - 90 90 90 238 238 238 158 158 158 2 2 6
38829 - 2 2 6 2 2 6 2 2 6 2 2 6
38830 - 70 70 70 58 58 58 22 22 22 6 6 6
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 0 0 0 0 0 0 0 0 0 0
38833 - 0 0 0 0 0 0 0 0 0 0 0 0
38834 - 0 0 0 0 0 0 0 0 0 0 0 0
38835 - 0 0 0 0 0 0 0 0 0 0 0 0
38836 - 0 0 0 0 0 0 0 0 0 0 0 0
38837 - 0 0 0 0 0 0 0 0 1 0 0 0
38838 - 0 0 1 0 0 1 0 0 1 0 0 0
38839 - 0 0 0 0 0 0 0 0 0 0 0 0
38840 - 0 0 0 0 0 0 0 0 0 0 0 0
38841 - 0 0 0 0 0 0 0 0 0 0 0 0
38842 - 0 0 0 0 0 0 0 0 0 0 0 0
38843 - 0 0 0 0 0 0 0 0 0 14 14 14
38844 - 42 42 42 86 86 86 6 6 6 116 116 116
38845 -106 106 106 6 6 6 70 70 70 149 149 149
38846 -128 128 128 18 18 18 38 38 38 54 54 54
38847 -221 221 221 106 106 106 2 2 6 14 14 14
38848 - 46 46 46 190 190 190 198 198 198 2 2 6
38849 - 2 2 6 2 2 6 2 2 6 2 2 6
38850 - 74 74 74 62 62 62 22 22 22 6 6 6
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 0 0 0 0 0 0 0
38853 - 0 0 0 0 0 0 0 0 0 0 0 0
38854 - 0 0 0 0 0 0 0 0 0 0 0 0
38855 - 0 0 0 0 0 0 0 0 0 0 0 0
38856 - 0 0 0 0 0 0 0 0 0 0 0 0
38857 - 0 0 0 0 0 0 0 0 1 0 0 0
38858 - 0 0 1 0 0 0 0 0 1 0 0 0
38859 - 0 0 0 0 0 0 0 0 0 0 0 0
38860 - 0 0 0 0 0 0 0 0 0 0 0 0
38861 - 0 0 0 0 0 0 0 0 0 0 0 0
38862 - 0 0 0 0 0 0 0 0 0 0 0 0
38863 - 0 0 0 0 0 0 0 0 0 14 14 14
38864 - 42 42 42 94 94 94 14 14 14 101 101 101
38865 -128 128 128 2 2 6 18 18 18 116 116 116
38866 -118 98 46 121 92 8 121 92 8 98 78 10
38867 -162 162 162 106 106 106 2 2 6 2 2 6
38868 - 2 2 6 195 195 195 195 195 195 6 6 6
38869 - 2 2 6 2 2 6 2 2 6 2 2 6
38870 - 74 74 74 62 62 62 22 22 22 6 6 6
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 0 0 0 0 0 0 0 0 0 0
38873 - 0 0 0 0 0 0 0 0 0 0 0 0
38874 - 0 0 0 0 0 0 0 0 0 0 0 0
38875 - 0 0 0 0 0 0 0 0 0 0 0 0
38876 - 0 0 0 0 0 0 0 0 0 0 0 0
38877 - 0 0 0 0 0 0 0 0 1 0 0 1
38878 - 0 0 1 0 0 0 0 0 1 0 0 0
38879 - 0 0 0 0 0 0 0 0 0 0 0 0
38880 - 0 0 0 0 0 0 0 0 0 0 0 0
38881 - 0 0 0 0 0 0 0 0 0 0 0 0
38882 - 0 0 0 0 0 0 0 0 0 0 0 0
38883 - 0 0 0 0 0 0 0 0 0 10 10 10
38884 - 38 38 38 90 90 90 14 14 14 58 58 58
38885 -210 210 210 26 26 26 54 38 6 154 114 10
38886 -226 170 11 236 186 11 225 175 15 184 144 12
38887 -215 174 15 175 146 61 37 26 9 2 2 6
38888 - 70 70 70 246 246 246 138 138 138 2 2 6
38889 - 2 2 6 2 2 6 2 2 6 2 2 6
38890 - 70 70 70 66 66 66 26 26 26 6 6 6
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 0 0 0 0 0 0 0 0 0 0
38893 - 0 0 0 0 0 0 0 0 0 0 0 0
38894 - 0 0 0 0 0 0 0 0 0 0 0 0
38895 - 0 0 0 0 0 0 0 0 0 0 0 0
38896 - 0 0 0 0 0 0 0 0 0 0 0 0
38897 - 0 0 0 0 0 0 0 0 0 0 0 0
38898 - 0 0 0 0 0 0 0 0 0 0 0 0
38899 - 0 0 0 0 0 0 0 0 0 0 0 0
38900 - 0 0 0 0 0 0 0 0 0 0 0 0
38901 - 0 0 0 0 0 0 0 0 0 0 0 0
38902 - 0 0 0 0 0 0 0 0 0 0 0 0
38903 - 0 0 0 0 0 0 0 0 0 10 10 10
38904 - 38 38 38 86 86 86 14 14 14 10 10 10
38905 -195 195 195 188 164 115 192 133 9 225 175 15
38906 -239 182 13 234 190 10 232 195 16 232 200 30
38907 -245 207 45 241 208 19 232 195 16 184 144 12
38908 -218 194 134 211 206 186 42 42 42 2 2 6
38909 - 2 2 6 2 2 6 2 2 6 2 2 6
38910 - 50 50 50 74 74 74 30 30 30 6 6 6
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 0 0 0 0 0 0
38913 - 0 0 0 0 0 0 0 0 0 0 0 0
38914 - 0 0 0 0 0 0 0 0 0 0 0 0
38915 - 0 0 0 0 0 0 0 0 0 0 0 0
38916 - 0 0 0 0 0 0 0 0 0 0 0 0
38917 - 0 0 0 0 0 0 0 0 0 0 0 0
38918 - 0 0 0 0 0 0 0 0 0 0 0 0
38919 - 0 0 0 0 0 0 0 0 0 0 0 0
38920 - 0 0 0 0 0 0 0 0 0 0 0 0
38921 - 0 0 0 0 0 0 0 0 0 0 0 0
38922 - 0 0 0 0 0 0 0 0 0 0 0 0
38923 - 0 0 0 0 0 0 0 0 0 10 10 10
38924 - 34 34 34 86 86 86 14 14 14 2 2 6
38925 -121 87 25 192 133 9 219 162 10 239 182 13
38926 -236 186 11 232 195 16 241 208 19 244 214 54
38927 -246 218 60 246 218 38 246 215 20 241 208 19
38928 -241 208 19 226 184 13 121 87 25 2 2 6
38929 - 2 2 6 2 2 6 2 2 6 2 2 6
38930 - 50 50 50 82 82 82 34 34 34 10 10 10
38931 - 0 0 0 0 0 0 0 0 0 0 0 0
38932 - 0 0 0 0 0 0 0 0 0 0 0 0
38933 - 0 0 0 0 0 0 0 0 0 0 0 0
38934 - 0 0 0 0 0 0 0 0 0 0 0 0
38935 - 0 0 0 0 0 0 0 0 0 0 0 0
38936 - 0 0 0 0 0 0 0 0 0 0 0 0
38937 - 0 0 0 0 0 0 0 0 0 0 0 0
38938 - 0 0 0 0 0 0 0 0 0 0 0 0
38939 - 0 0 0 0 0 0 0 0 0 0 0 0
38940 - 0 0 0 0 0 0 0 0 0 0 0 0
38941 - 0 0 0 0 0 0 0 0 0 0 0 0
38942 - 0 0 0 0 0 0 0 0 0 0 0 0
38943 - 0 0 0 0 0 0 0 0 0 10 10 10
38944 - 34 34 34 82 82 82 30 30 30 61 42 6
38945 -180 123 7 206 145 10 230 174 11 239 182 13
38946 -234 190 10 238 202 15 241 208 19 246 218 74
38947 -246 218 38 246 215 20 246 215 20 246 215 20
38948 -226 184 13 215 174 15 184 144 12 6 6 6
38949 - 2 2 6 2 2 6 2 2 6 2 2 6
38950 - 26 26 26 94 94 94 42 42 42 14 14 14
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 0 0 0 0 0 0 0 0 0 0 0 0
38953 - 0 0 0 0 0 0 0 0 0 0 0 0
38954 - 0 0 0 0 0 0 0 0 0 0 0 0
38955 - 0 0 0 0 0 0 0 0 0 0 0 0
38956 - 0 0 0 0 0 0 0 0 0 0 0 0
38957 - 0 0 0 0 0 0 0 0 0 0 0 0
38958 - 0 0 0 0 0 0 0 0 0 0 0 0
38959 - 0 0 0 0 0 0 0 0 0 0 0 0
38960 - 0 0 0 0 0 0 0 0 0 0 0 0
38961 - 0 0 0 0 0 0 0 0 0 0 0 0
38962 - 0 0 0 0 0 0 0 0 0 0 0 0
38963 - 0 0 0 0 0 0 0 0 0 10 10 10
38964 - 30 30 30 78 78 78 50 50 50 104 69 6
38965 -192 133 9 216 158 10 236 178 12 236 186 11
38966 -232 195 16 241 208 19 244 214 54 245 215 43
38967 -246 215 20 246 215 20 241 208 19 198 155 10
38968 -200 144 11 216 158 10 156 118 10 2 2 6
38969 - 2 2 6 2 2 6 2 2 6 2 2 6
38970 - 6 6 6 90 90 90 54 54 54 18 18 18
38971 - 6 6 6 0 0 0 0 0 0 0 0 0
38972 - 0 0 0 0 0 0 0 0 0 0 0 0
38973 - 0 0 0 0 0 0 0 0 0 0 0 0
38974 - 0 0 0 0 0 0 0 0 0 0 0 0
38975 - 0 0 0 0 0 0 0 0 0 0 0 0
38976 - 0 0 0 0 0 0 0 0 0 0 0 0
38977 - 0 0 0 0 0 0 0 0 0 0 0 0
38978 - 0 0 0 0 0 0 0 0 0 0 0 0
38979 - 0 0 0 0 0 0 0 0 0 0 0 0
38980 - 0 0 0 0 0 0 0 0 0 0 0 0
38981 - 0 0 0 0 0 0 0 0 0 0 0 0
38982 - 0 0 0 0 0 0 0 0 0 0 0 0
38983 - 0 0 0 0 0 0 0 0 0 10 10 10
38984 - 30 30 30 78 78 78 46 46 46 22 22 22
38985 -137 92 6 210 162 10 239 182 13 238 190 10
38986 -238 202 15 241 208 19 246 215 20 246 215 20
38987 -241 208 19 203 166 17 185 133 11 210 150 10
38988 -216 158 10 210 150 10 102 78 10 2 2 6
38989 - 6 6 6 54 54 54 14 14 14 2 2 6
38990 - 2 2 6 62 62 62 74 74 74 30 30 30
38991 - 10 10 10 0 0 0 0 0 0 0 0 0
38992 - 0 0 0 0 0 0 0 0 0 0 0 0
38993 - 0 0 0 0 0 0 0 0 0 0 0 0
38994 - 0 0 0 0 0 0 0 0 0 0 0 0
38995 - 0 0 0 0 0 0 0 0 0 0 0 0
38996 - 0 0 0 0 0 0 0 0 0 0 0 0
38997 - 0 0 0 0 0 0 0 0 0 0 0 0
38998 - 0 0 0 0 0 0 0 0 0 0 0 0
38999 - 0 0 0 0 0 0 0 0 0 0 0 0
39000 - 0 0 0 0 0 0 0 0 0 0 0 0
39001 - 0 0 0 0 0 0 0 0 0 0 0 0
39002 - 0 0 0 0 0 0 0 0 0 0 0 0
39003 - 0 0 0 0 0 0 0 0 0 10 10 10
39004 - 34 34 34 78 78 78 50 50 50 6 6 6
39005 - 94 70 30 139 102 15 190 146 13 226 184 13
39006 -232 200 30 232 195 16 215 174 15 190 146 13
39007 -168 122 10 192 133 9 210 150 10 213 154 11
39008 -202 150 34 182 157 106 101 98 89 2 2 6
39009 - 2 2 6 78 78 78 116 116 116 58 58 58
39010 - 2 2 6 22 22 22 90 90 90 46 46 46
39011 - 18 18 18 6 6 6 0 0 0 0 0 0
39012 - 0 0 0 0 0 0 0 0 0 0 0 0
39013 - 0 0 0 0 0 0 0 0 0 0 0 0
39014 - 0 0 0 0 0 0 0 0 0 0 0 0
39015 - 0 0 0 0 0 0 0 0 0 0 0 0
39016 - 0 0 0 0 0 0 0 0 0 0 0 0
39017 - 0 0 0 0 0 0 0 0 0 0 0 0
39018 - 0 0 0 0 0 0 0 0 0 0 0 0
39019 - 0 0 0 0 0 0 0 0 0 0 0 0
39020 - 0 0 0 0 0 0 0 0 0 0 0 0
39021 - 0 0 0 0 0 0 0 0 0 0 0 0
39022 - 0 0 0 0 0 0 0 0 0 0 0 0
39023 - 0 0 0 0 0 0 0 0 0 10 10 10
39024 - 38 38 38 86 86 86 50 50 50 6 6 6
39025 -128 128 128 174 154 114 156 107 11 168 122 10
39026 -198 155 10 184 144 12 197 138 11 200 144 11
39027 -206 145 10 206 145 10 197 138 11 188 164 115
39028 -195 195 195 198 198 198 174 174 174 14 14 14
39029 - 2 2 6 22 22 22 116 116 116 116 116 116
39030 - 22 22 22 2 2 6 74 74 74 70 70 70
39031 - 30 30 30 10 10 10 0 0 0 0 0 0
39032 - 0 0 0 0 0 0 0 0 0 0 0 0
39033 - 0 0 0 0 0 0 0 0 0 0 0 0
39034 - 0 0 0 0 0 0 0 0 0 0 0 0
39035 - 0 0 0 0 0 0 0 0 0 0 0 0
39036 - 0 0 0 0 0 0 0 0 0 0 0 0
39037 - 0 0 0 0 0 0 0 0 0 0 0 0
39038 - 0 0 0 0 0 0 0 0 0 0 0 0
39039 - 0 0 0 0 0 0 0 0 0 0 0 0
39040 - 0 0 0 0 0 0 0 0 0 0 0 0
39041 - 0 0 0 0 0 0 0 0 0 0 0 0
39042 - 0 0 0 0 0 0 0 0 0 0 0 0
39043 - 0 0 0 0 0 0 6 6 6 18 18 18
39044 - 50 50 50 101 101 101 26 26 26 10 10 10
39045 -138 138 138 190 190 190 174 154 114 156 107 11
39046 -197 138 11 200 144 11 197 138 11 192 133 9
39047 -180 123 7 190 142 34 190 178 144 187 187 187
39048 -202 202 202 221 221 221 214 214 214 66 66 66
39049 - 2 2 6 2 2 6 50 50 50 62 62 62
39050 - 6 6 6 2 2 6 10 10 10 90 90 90
39051 - 50 50 50 18 18 18 6 6 6 0 0 0
39052 - 0 0 0 0 0 0 0 0 0 0 0 0
39053 - 0 0 0 0 0 0 0 0 0 0 0 0
39054 - 0 0 0 0 0 0 0 0 0 0 0 0
39055 - 0 0 0 0 0 0 0 0 0 0 0 0
39056 - 0 0 0 0 0 0 0 0 0 0 0 0
39057 - 0 0 0 0 0 0 0 0 0 0 0 0
39058 - 0 0 0 0 0 0 0 0 0 0 0 0
39059 - 0 0 0 0 0 0 0 0 0 0 0 0
39060 - 0 0 0 0 0 0 0 0 0 0 0 0
39061 - 0 0 0 0 0 0 0 0 0 0 0 0
39062 - 0 0 0 0 0 0 0 0 0 0 0 0
39063 - 0 0 0 0 0 0 10 10 10 34 34 34
39064 - 74 74 74 74 74 74 2 2 6 6 6 6
39065 -144 144 144 198 198 198 190 190 190 178 166 146
39066 -154 121 60 156 107 11 156 107 11 168 124 44
39067 -174 154 114 187 187 187 190 190 190 210 210 210
39068 -246 246 246 253 253 253 253 253 253 182 182 182
39069 - 6 6 6 2 2 6 2 2 6 2 2 6
39070 - 2 2 6 2 2 6 2 2 6 62 62 62
39071 - 74 74 74 34 34 34 14 14 14 0 0 0
39072 - 0 0 0 0 0 0 0 0 0 0 0 0
39073 - 0 0 0 0 0 0 0 0 0 0 0 0
39074 - 0 0 0 0 0 0 0 0 0 0 0 0
39075 - 0 0 0 0 0 0 0 0 0 0 0 0
39076 - 0 0 0 0 0 0 0 0 0 0 0 0
39077 - 0 0 0 0 0 0 0 0 0 0 0 0
39078 - 0 0 0 0 0 0 0 0 0 0 0 0
39079 - 0 0 0 0 0 0 0 0 0 0 0 0
39080 - 0 0 0 0 0 0 0 0 0 0 0 0
39081 - 0 0 0 0 0 0 0 0 0 0 0 0
39082 - 0 0 0 0 0 0 0 0 0 0 0 0
39083 - 0 0 0 10 10 10 22 22 22 54 54 54
39084 - 94 94 94 18 18 18 2 2 6 46 46 46
39085 -234 234 234 221 221 221 190 190 190 190 190 190
39086 -190 190 190 187 187 187 187 187 187 190 190 190
39087 -190 190 190 195 195 195 214 214 214 242 242 242
39088 -253 253 253 253 253 253 253 253 253 253 253 253
39089 - 82 82 82 2 2 6 2 2 6 2 2 6
39090 - 2 2 6 2 2 6 2 2 6 14 14 14
39091 - 86 86 86 54 54 54 22 22 22 6 6 6
39092 - 0 0 0 0 0 0 0 0 0 0 0 0
39093 - 0 0 0 0 0 0 0 0 0 0 0 0
39094 - 0 0 0 0 0 0 0 0 0 0 0 0
39095 - 0 0 0 0 0 0 0 0 0 0 0 0
39096 - 0 0 0 0 0 0 0 0 0 0 0 0
39097 - 0 0 0 0 0 0 0 0 0 0 0 0
39098 - 0 0 0 0 0 0 0 0 0 0 0 0
39099 - 0 0 0 0 0 0 0 0 0 0 0 0
39100 - 0 0 0 0 0 0 0 0 0 0 0 0
39101 - 0 0 0 0 0 0 0 0 0 0 0 0
39102 - 0 0 0 0 0 0 0 0 0 0 0 0
39103 - 6 6 6 18 18 18 46 46 46 90 90 90
39104 - 46 46 46 18 18 18 6 6 6 182 182 182
39105 -253 253 253 246 246 246 206 206 206 190 190 190
39106 -190 190 190 190 190 190 190 190 190 190 190 190
39107 -206 206 206 231 231 231 250 250 250 253 253 253
39108 -253 253 253 253 253 253 253 253 253 253 253 253
39109 -202 202 202 14 14 14 2 2 6 2 2 6
39110 - 2 2 6 2 2 6 2 2 6 2 2 6
39111 - 42 42 42 86 86 86 42 42 42 18 18 18
39112 - 6 6 6 0 0 0 0 0 0 0 0 0
39113 - 0 0 0 0 0 0 0 0 0 0 0 0
39114 - 0 0 0 0 0 0 0 0 0 0 0 0
39115 - 0 0 0 0 0 0 0 0 0 0 0 0
39116 - 0 0 0 0 0 0 0 0 0 0 0 0
39117 - 0 0 0 0 0 0 0 0 0 0 0 0
39118 - 0 0 0 0 0 0 0 0 0 0 0 0
39119 - 0 0 0 0 0 0 0 0 0 0 0 0
39120 - 0 0 0 0 0 0 0 0 0 0 0 0
39121 - 0 0 0 0 0 0 0 0 0 0 0 0
39122 - 0 0 0 0 0 0 0 0 0 6 6 6
39123 - 14 14 14 38 38 38 74 74 74 66 66 66
39124 - 2 2 6 6 6 6 90 90 90 250 250 250
39125 -253 253 253 253 253 253 238 238 238 198 198 198
39126 -190 190 190 190 190 190 195 195 195 221 221 221
39127 -246 246 246 253 253 253 253 253 253 253 253 253
39128 -253 253 253 253 253 253 253 253 253 253 253 253
39129 -253 253 253 82 82 82 2 2 6 2 2 6
39130 - 2 2 6 2 2 6 2 2 6 2 2 6
39131 - 2 2 6 78 78 78 70 70 70 34 34 34
39132 - 14 14 14 6 6 6 0 0 0 0 0 0
39133 - 0 0 0 0 0 0 0 0 0 0 0 0
39134 - 0 0 0 0 0 0 0 0 0 0 0 0
39135 - 0 0 0 0 0 0 0 0 0 0 0 0
39136 - 0 0 0 0 0 0 0 0 0 0 0 0
39137 - 0 0 0 0 0 0 0 0 0 0 0 0
39138 - 0 0 0 0 0 0 0 0 0 0 0 0
39139 - 0 0 0 0 0 0 0 0 0 0 0 0
39140 - 0 0 0 0 0 0 0 0 0 0 0 0
39141 - 0 0 0 0 0 0 0 0 0 0 0 0
39142 - 0 0 0 0 0 0 0 0 0 14 14 14
39143 - 34 34 34 66 66 66 78 78 78 6 6 6
39144 - 2 2 6 18 18 18 218 218 218 253 253 253
39145 -253 253 253 253 253 253 253 253 253 246 246 246
39146 -226 226 226 231 231 231 246 246 246 253 253 253
39147 -253 253 253 253 253 253 253 253 253 253 253 253
39148 -253 253 253 253 253 253 253 253 253 253 253 253
39149 -253 253 253 178 178 178 2 2 6 2 2 6
39150 - 2 2 6 2 2 6 2 2 6 2 2 6
39151 - 2 2 6 18 18 18 90 90 90 62 62 62
39152 - 30 30 30 10 10 10 0 0 0 0 0 0
39153 - 0 0 0 0 0 0 0 0 0 0 0 0
39154 - 0 0 0 0 0 0 0 0 0 0 0 0
39155 - 0 0 0 0 0 0 0 0 0 0 0 0
39156 - 0 0 0 0 0 0 0 0 0 0 0 0
39157 - 0 0 0 0 0 0 0 0 0 0 0 0
39158 - 0 0 0 0 0 0 0 0 0 0 0 0
39159 - 0 0 0 0 0 0 0 0 0 0 0 0
39160 - 0 0 0 0 0 0 0 0 0 0 0 0
39161 - 0 0 0 0 0 0 0 0 0 0 0 0
39162 - 0 0 0 0 0 0 10 10 10 26 26 26
39163 - 58 58 58 90 90 90 18 18 18 2 2 6
39164 - 2 2 6 110 110 110 253 253 253 253 253 253
39165 -253 253 253 253 253 253 253 253 253 253 253 253
39166 -250 250 250 253 253 253 253 253 253 253 253 253
39167 -253 253 253 253 253 253 253 253 253 253 253 253
39168 -253 253 253 253 253 253 253 253 253 253 253 253
39169 -253 253 253 231 231 231 18 18 18 2 2 6
39170 - 2 2 6 2 2 6 2 2 6 2 2 6
39171 - 2 2 6 2 2 6 18 18 18 94 94 94
39172 - 54 54 54 26 26 26 10 10 10 0 0 0
39173 - 0 0 0 0 0 0 0 0 0 0 0 0
39174 - 0 0 0 0 0 0 0 0 0 0 0 0
39175 - 0 0 0 0 0 0 0 0 0 0 0 0
39176 - 0 0 0 0 0 0 0 0 0 0 0 0
39177 - 0 0 0 0 0 0 0 0 0 0 0 0
39178 - 0 0 0 0 0 0 0 0 0 0 0 0
39179 - 0 0 0 0 0 0 0 0 0 0 0 0
39180 - 0 0 0 0 0 0 0 0 0 0 0 0
39181 - 0 0 0 0 0 0 0 0 0 0 0 0
39182 - 0 0 0 6 6 6 22 22 22 50 50 50
39183 - 90 90 90 26 26 26 2 2 6 2 2 6
39184 - 14 14 14 195 195 195 250 250 250 253 253 253
39185 -253 253 253 253 253 253 253 253 253 253 253 253
39186 -253 253 253 253 253 253 253 253 253 253 253 253
39187 -253 253 253 253 253 253 253 253 253 253 253 253
39188 -253 253 253 253 253 253 253 253 253 253 253 253
39189 -250 250 250 242 242 242 54 54 54 2 2 6
39190 - 2 2 6 2 2 6 2 2 6 2 2 6
39191 - 2 2 6 2 2 6 2 2 6 38 38 38
39192 - 86 86 86 50 50 50 22 22 22 6 6 6
39193 - 0 0 0 0 0 0 0 0 0 0 0 0
39194 - 0 0 0 0 0 0 0 0 0 0 0 0
39195 - 0 0 0 0 0 0 0 0 0 0 0 0
39196 - 0 0 0 0 0 0 0 0 0 0 0 0
39197 - 0 0 0 0 0 0 0 0 0 0 0 0
39198 - 0 0 0 0 0 0 0 0 0 0 0 0
39199 - 0 0 0 0 0 0 0 0 0 0 0 0
39200 - 0 0 0 0 0 0 0 0 0 0 0 0
39201 - 0 0 0 0 0 0 0 0 0 0 0 0
39202 - 6 6 6 14 14 14 38 38 38 82 82 82
39203 - 34 34 34 2 2 6 2 2 6 2 2 6
39204 - 42 42 42 195 195 195 246 246 246 253 253 253
39205 -253 253 253 253 253 253 253 253 253 250 250 250
39206 -242 242 242 242 242 242 250 250 250 253 253 253
39207 -253 253 253 253 253 253 253 253 253 253 253 253
39208 -253 253 253 250 250 250 246 246 246 238 238 238
39209 -226 226 226 231 231 231 101 101 101 6 6 6
39210 - 2 2 6 2 2 6 2 2 6 2 2 6
39211 - 2 2 6 2 2 6 2 2 6 2 2 6
39212 - 38 38 38 82 82 82 42 42 42 14 14 14
39213 - 6 6 6 0 0 0 0 0 0 0 0 0
39214 - 0 0 0 0 0 0 0 0 0 0 0 0
39215 - 0 0 0 0 0 0 0 0 0 0 0 0
39216 - 0 0 0 0 0 0 0 0 0 0 0 0
39217 - 0 0 0 0 0 0 0 0 0 0 0 0
39218 - 0 0 0 0 0 0 0 0 0 0 0 0
39219 - 0 0 0 0 0 0 0 0 0 0 0 0
39220 - 0 0 0 0 0 0 0 0 0 0 0 0
39221 - 0 0 0 0 0 0 0 0 0 0 0 0
39222 - 10 10 10 26 26 26 62 62 62 66 66 66
39223 - 2 2 6 2 2 6 2 2 6 6 6 6
39224 - 70 70 70 170 170 170 206 206 206 234 234 234
39225 -246 246 246 250 250 250 250 250 250 238 238 238
39226 -226 226 226 231 231 231 238 238 238 250 250 250
39227 -250 250 250 250 250 250 246 246 246 231 231 231
39228 -214 214 214 206 206 206 202 202 202 202 202 202
39229 -198 198 198 202 202 202 182 182 182 18 18 18
39230 - 2 2 6 2 2 6 2 2 6 2 2 6
39231 - 2 2 6 2 2 6 2 2 6 2 2 6
39232 - 2 2 6 62 62 62 66 66 66 30 30 30
39233 - 10 10 10 0 0 0 0 0 0 0 0 0
39234 - 0 0 0 0 0 0 0 0 0 0 0 0
39235 - 0 0 0 0 0 0 0 0 0 0 0 0
39236 - 0 0 0 0 0 0 0 0 0 0 0 0
39237 - 0 0 0 0 0 0 0 0 0 0 0 0
39238 - 0 0 0 0 0 0 0 0 0 0 0 0
39239 - 0 0 0 0 0 0 0 0 0 0 0 0
39240 - 0 0 0 0 0 0 0 0 0 0 0 0
39241 - 0 0 0 0 0 0 0 0 0 0 0 0
39242 - 14 14 14 42 42 42 82 82 82 18 18 18
39243 - 2 2 6 2 2 6 2 2 6 10 10 10
39244 - 94 94 94 182 182 182 218 218 218 242 242 242
39245 -250 250 250 253 253 253 253 253 253 250 250 250
39246 -234 234 234 253 253 253 253 253 253 253 253 253
39247 -253 253 253 253 253 253 253 253 253 246 246 246
39248 -238 238 238 226 226 226 210 210 210 202 202 202
39249 -195 195 195 195 195 195 210 210 210 158 158 158
39250 - 6 6 6 14 14 14 50 50 50 14 14 14
39251 - 2 2 6 2 2 6 2 2 6 2 2 6
39252 - 2 2 6 6 6 6 86 86 86 46 46 46
39253 - 18 18 18 6 6 6 0 0 0 0 0 0
39254 - 0 0 0 0 0 0 0 0 0 0 0 0
39255 - 0 0 0 0 0 0 0 0 0 0 0 0
39256 - 0 0 0 0 0 0 0 0 0 0 0 0
39257 - 0 0 0 0 0 0 0 0 0 0 0 0
39258 - 0 0 0 0 0 0 0 0 0 0 0 0
39259 - 0 0 0 0 0 0 0 0 0 0 0 0
39260 - 0 0 0 0 0 0 0 0 0 0 0 0
39261 - 0 0 0 0 0 0 0 0 0 6 6 6
39262 - 22 22 22 54 54 54 70 70 70 2 2 6
39263 - 2 2 6 10 10 10 2 2 6 22 22 22
39264 -166 166 166 231 231 231 250 250 250 253 253 253
39265 -253 253 253 253 253 253 253 253 253 250 250 250
39266 -242 242 242 253 253 253 253 253 253 253 253 253
39267 -253 253 253 253 253 253 253 253 253 253 253 253
39268 -253 253 253 253 253 253 253 253 253 246 246 246
39269 -231 231 231 206 206 206 198 198 198 226 226 226
39270 - 94 94 94 2 2 6 6 6 6 38 38 38
39271 - 30 30 30 2 2 6 2 2 6 2 2 6
39272 - 2 2 6 2 2 6 62 62 62 66 66 66
39273 - 26 26 26 10 10 10 0 0 0 0 0 0
39274 - 0 0 0 0 0 0 0 0 0 0 0 0
39275 - 0 0 0 0 0 0 0 0 0 0 0 0
39276 - 0 0 0 0 0 0 0 0 0 0 0 0
39277 - 0 0 0 0 0 0 0 0 0 0 0 0
39278 - 0 0 0 0 0 0 0 0 0 0 0 0
39279 - 0 0 0 0 0 0 0 0 0 0 0 0
39280 - 0 0 0 0 0 0 0 0 0 0 0 0
39281 - 0 0 0 0 0 0 0 0 0 10 10 10
39282 - 30 30 30 74 74 74 50 50 50 2 2 6
39283 - 26 26 26 26 26 26 2 2 6 106 106 106
39284 -238 238 238 253 253 253 253 253 253 253 253 253
39285 -253 253 253 253 253 253 253 253 253 253 253 253
39286 -253 253 253 253 253 253 253 253 253 253 253 253
39287 -253 253 253 253 253 253 253 253 253 253 253 253
39288 -253 253 253 253 253 253 253 253 253 253 253 253
39289 -253 253 253 246 246 246 218 218 218 202 202 202
39290 -210 210 210 14 14 14 2 2 6 2 2 6
39291 - 30 30 30 22 22 22 2 2 6 2 2 6
39292 - 2 2 6 2 2 6 18 18 18 86 86 86
39293 - 42 42 42 14 14 14 0 0 0 0 0 0
39294 - 0 0 0 0 0 0 0 0 0 0 0 0
39295 - 0 0 0 0 0 0 0 0 0 0 0 0
39296 - 0 0 0 0 0 0 0 0 0 0 0 0
39297 - 0 0 0 0 0 0 0 0 0 0 0 0
39298 - 0 0 0 0 0 0 0 0 0 0 0 0
39299 - 0 0 0 0 0 0 0 0 0 0 0 0
39300 - 0 0 0 0 0 0 0 0 0 0 0 0
39301 - 0 0 0 0 0 0 0 0 0 14 14 14
39302 - 42 42 42 90 90 90 22 22 22 2 2 6
39303 - 42 42 42 2 2 6 18 18 18 218 218 218
39304 -253 253 253 253 253 253 253 253 253 253 253 253
39305 -253 253 253 253 253 253 253 253 253 253 253 253
39306 -253 253 253 253 253 253 253 253 253 253 253 253
39307 -253 253 253 253 253 253 253 253 253 253 253 253
39308 -253 253 253 253 253 253 253 253 253 253 253 253
39309 -253 253 253 253 253 253 250 250 250 221 221 221
39310 -218 218 218 101 101 101 2 2 6 14 14 14
39311 - 18 18 18 38 38 38 10 10 10 2 2 6
39312 - 2 2 6 2 2 6 2 2 6 78 78 78
39313 - 58 58 58 22 22 22 6 6 6 0 0 0
39314 - 0 0 0 0 0 0 0 0 0 0 0 0
39315 - 0 0 0 0 0 0 0 0 0 0 0 0
39316 - 0 0 0 0 0 0 0 0 0 0 0 0
39317 - 0 0 0 0 0 0 0 0 0 0 0 0
39318 - 0 0 0 0 0 0 0 0 0 0 0 0
39319 - 0 0 0 0 0 0 0 0 0 0 0 0
39320 - 0 0 0 0 0 0 0 0 0 0 0 0
39321 - 0 0 0 0 0 0 6 6 6 18 18 18
39322 - 54 54 54 82 82 82 2 2 6 26 26 26
39323 - 22 22 22 2 2 6 123 123 123 253 253 253
39324 -253 253 253 253 253 253 253 253 253 253 253 253
39325 -253 253 253 253 253 253 253 253 253 253 253 253
39326 -253 253 253 253 253 253 253 253 253 253 253 253
39327 -253 253 253 253 253 253 253 253 253 253 253 253
39328 -253 253 253 253 253 253 253 253 253 253 253 253
39329 -253 253 253 253 253 253 253 253 253 250 250 250
39330 -238 238 238 198 198 198 6 6 6 38 38 38
39331 - 58 58 58 26 26 26 38 38 38 2 2 6
39332 - 2 2 6 2 2 6 2 2 6 46 46 46
39333 - 78 78 78 30 30 30 10 10 10 0 0 0
39334 - 0 0 0 0 0 0 0 0 0 0 0 0
39335 - 0 0 0 0 0 0 0 0 0 0 0 0
39336 - 0 0 0 0 0 0 0 0 0 0 0 0
39337 - 0 0 0 0 0 0 0 0 0 0 0 0
39338 - 0 0 0 0 0 0 0 0 0 0 0 0
39339 - 0 0 0 0 0 0 0 0 0 0 0 0
39340 - 0 0 0 0 0 0 0 0 0 0 0 0
39341 - 0 0 0 0 0 0 10 10 10 30 30 30
39342 - 74 74 74 58 58 58 2 2 6 42 42 42
39343 - 2 2 6 22 22 22 231 231 231 253 253 253
39344 -253 253 253 253 253 253 253 253 253 253 253 253
39345 -253 253 253 253 253 253 253 253 253 250 250 250
39346 -253 253 253 253 253 253 253 253 253 253 253 253
39347 -253 253 253 253 253 253 253 253 253 253 253 253
39348 -253 253 253 253 253 253 253 253 253 253 253 253
39349 -253 253 253 253 253 253 253 253 253 253 253 253
39350 -253 253 253 246 246 246 46 46 46 38 38 38
39351 - 42 42 42 14 14 14 38 38 38 14 14 14
39352 - 2 2 6 2 2 6 2 2 6 6 6 6
39353 - 86 86 86 46 46 46 14 14 14 0 0 0
39354 - 0 0 0 0 0 0 0 0 0 0 0 0
39355 - 0 0 0 0 0 0 0 0 0 0 0 0
39356 - 0 0 0 0 0 0 0 0 0 0 0 0
39357 - 0 0 0 0 0 0 0 0 0 0 0 0
39358 - 0 0 0 0 0 0 0 0 0 0 0 0
39359 - 0 0 0 0 0 0 0 0 0 0 0 0
39360 - 0 0 0 0 0 0 0 0 0 0 0 0
39361 - 0 0 0 6 6 6 14 14 14 42 42 42
39362 - 90 90 90 18 18 18 18 18 18 26 26 26
39363 - 2 2 6 116 116 116 253 253 253 253 253 253
39364 -253 253 253 253 253 253 253 253 253 253 253 253
39365 -253 253 253 253 253 253 250 250 250 238 238 238
39366 -253 253 253 253 253 253 253 253 253 253 253 253
39367 -253 253 253 253 253 253 253 253 253 253 253 253
39368 -253 253 253 253 253 253 253 253 253 253 253 253
39369 -253 253 253 253 253 253 253 253 253 253 253 253
39370 -253 253 253 253 253 253 94 94 94 6 6 6
39371 - 2 2 6 2 2 6 10 10 10 34 34 34
39372 - 2 2 6 2 2 6 2 2 6 2 2 6
39373 - 74 74 74 58 58 58 22 22 22 6 6 6
39374 - 0 0 0 0 0 0 0 0 0 0 0 0
39375 - 0 0 0 0 0 0 0 0 0 0 0 0
39376 - 0 0 0 0 0 0 0 0 0 0 0 0
39377 - 0 0 0 0 0 0 0 0 0 0 0 0
39378 - 0 0 0 0 0 0 0 0 0 0 0 0
39379 - 0 0 0 0 0 0 0 0 0 0 0 0
39380 - 0 0 0 0 0 0 0 0 0 0 0 0
39381 - 0 0 0 10 10 10 26 26 26 66 66 66
39382 - 82 82 82 2 2 6 38 38 38 6 6 6
39383 - 14 14 14 210 210 210 253 253 253 253 253 253
39384 -253 253 253 253 253 253 253 253 253 253 253 253
39385 -253 253 253 253 253 253 246 246 246 242 242 242
39386 -253 253 253 253 253 253 253 253 253 253 253 253
39387 -253 253 253 253 253 253 253 253 253 253 253 253
39388 -253 253 253 253 253 253 253 253 253 253 253 253
39389 -253 253 253 253 253 253 253 253 253 253 253 253
39390 -253 253 253 253 253 253 144 144 144 2 2 6
39391 - 2 2 6 2 2 6 2 2 6 46 46 46
39392 - 2 2 6 2 2 6 2 2 6 2 2 6
39393 - 42 42 42 74 74 74 30 30 30 10 10 10
39394 - 0 0 0 0 0 0 0 0 0 0 0 0
39395 - 0 0 0 0 0 0 0 0 0 0 0 0
39396 - 0 0 0 0 0 0 0 0 0 0 0 0
39397 - 0 0 0 0 0 0 0 0 0 0 0 0
39398 - 0 0 0 0 0 0 0 0 0 0 0 0
39399 - 0 0 0 0 0 0 0 0 0 0 0 0
39400 - 0 0 0 0 0 0 0 0 0 0 0 0
39401 - 6 6 6 14 14 14 42 42 42 90 90 90
39402 - 26 26 26 6 6 6 42 42 42 2 2 6
39403 - 74 74 74 250 250 250 253 253 253 253 253 253
39404 -253 253 253 253 253 253 253 253 253 253 253 253
39405 -253 253 253 253 253 253 242 242 242 242 242 242
39406 -253 253 253 253 253 253 253 253 253 253 253 253
39407 -253 253 253 253 253 253 253 253 253 253 253 253
39408 -253 253 253 253 253 253 253 253 253 253 253 253
39409 -253 253 253 253 253 253 253 253 253 253 253 253
39410 -253 253 253 253 253 253 182 182 182 2 2 6
39411 - 2 2 6 2 2 6 2 2 6 46 46 46
39412 - 2 2 6 2 2 6 2 2 6 2 2 6
39413 - 10 10 10 86 86 86 38 38 38 10 10 10
39414 - 0 0 0 0 0 0 0 0 0 0 0 0
39415 - 0 0 0 0 0 0 0 0 0 0 0 0
39416 - 0 0 0 0 0 0 0 0 0 0 0 0
39417 - 0 0 0 0 0 0 0 0 0 0 0 0
39418 - 0 0 0 0 0 0 0 0 0 0 0 0
39419 - 0 0 0 0 0 0 0 0 0 0 0 0
39420 - 0 0 0 0 0 0 0 0 0 0 0 0
39421 - 10 10 10 26 26 26 66 66 66 82 82 82
39422 - 2 2 6 22 22 22 18 18 18 2 2 6
39423 -149 149 149 253 253 253 253 253 253 253 253 253
39424 -253 253 253 253 253 253 253 253 253 253 253 253
39425 -253 253 253 253 253 253 234 234 234 242 242 242
39426 -253 253 253 253 253 253 253 253 253 253 253 253
39427 -253 253 253 253 253 253 253 253 253 253 253 253
39428 -253 253 253 253 253 253 253 253 253 253 253 253
39429 -253 253 253 253 253 253 253 253 253 253 253 253
39430 -253 253 253 253 253 253 206 206 206 2 2 6
39431 - 2 2 6 2 2 6 2 2 6 38 38 38
39432 - 2 2 6 2 2 6 2 2 6 2 2 6
39433 - 6 6 6 86 86 86 46 46 46 14 14 14
39434 - 0 0 0 0 0 0 0 0 0 0 0 0
39435 - 0 0 0 0 0 0 0 0 0 0 0 0
39436 - 0 0 0 0 0 0 0 0 0 0 0 0
39437 - 0 0 0 0 0 0 0 0 0 0 0 0
39438 - 0 0 0 0 0 0 0 0 0 0 0 0
39439 - 0 0 0 0 0 0 0 0 0 0 0 0
39440 - 0 0 0 0 0 0 0 0 0 6 6 6
39441 - 18 18 18 46 46 46 86 86 86 18 18 18
39442 - 2 2 6 34 34 34 10 10 10 6 6 6
39443 -210 210 210 253 253 253 253 253 253 253 253 253
39444 -253 253 253 253 253 253 253 253 253 253 253 253
39445 -253 253 253 253 253 253 234 234 234 242 242 242
39446 -253 253 253 253 253 253 253 253 253 253 253 253
39447 -253 253 253 253 253 253 253 253 253 253 253 253
39448 -253 253 253 253 253 253 253 253 253 253 253 253
39449 -253 253 253 253 253 253 253 253 253 253 253 253
39450 -253 253 253 253 253 253 221 221 221 6 6 6
39451 - 2 2 6 2 2 6 6 6 6 30 30 30
39452 - 2 2 6 2 2 6 2 2 6 2 2 6
39453 - 2 2 6 82 82 82 54 54 54 18 18 18
39454 - 6 6 6 0 0 0 0 0 0 0 0 0
39455 - 0 0 0 0 0 0 0 0 0 0 0 0
39456 - 0 0 0 0 0 0 0 0 0 0 0 0
39457 - 0 0 0 0 0 0 0 0 0 0 0 0
39458 - 0 0 0 0 0 0 0 0 0 0 0 0
39459 - 0 0 0 0 0 0 0 0 0 0 0 0
39460 - 0 0 0 0 0 0 0 0 0 10 10 10
39461 - 26 26 26 66 66 66 62 62 62 2 2 6
39462 - 2 2 6 38 38 38 10 10 10 26 26 26
39463 -238 238 238 253 253 253 253 253 253 253 253 253
39464 -253 253 253 253 253 253 253 253 253 253 253 253
39465 -253 253 253 253 253 253 231 231 231 238 238 238
39466 -253 253 253 253 253 253 253 253 253 253 253 253
39467 -253 253 253 253 253 253 253 253 253 253 253 253
39468 -253 253 253 253 253 253 253 253 253 253 253 253
39469 -253 253 253 253 253 253 253 253 253 253 253 253
39470 -253 253 253 253 253 253 231 231 231 6 6 6
39471 - 2 2 6 2 2 6 10 10 10 30 30 30
39472 - 2 2 6 2 2 6 2 2 6 2 2 6
39473 - 2 2 6 66 66 66 58 58 58 22 22 22
39474 - 6 6 6 0 0 0 0 0 0 0 0 0
39475 - 0 0 0 0 0 0 0 0 0 0 0 0
39476 - 0 0 0 0 0 0 0 0 0 0 0 0
39477 - 0 0 0 0 0 0 0 0 0 0 0 0
39478 - 0 0 0 0 0 0 0 0 0 0 0 0
39479 - 0 0 0 0 0 0 0 0 0 0 0 0
39480 - 0 0 0 0 0 0 0 0 0 10 10 10
39481 - 38 38 38 78 78 78 6 6 6 2 2 6
39482 - 2 2 6 46 46 46 14 14 14 42 42 42
39483 -246 246 246 253 253 253 253 253 253 253 253 253
39484 -253 253 253 253 253 253 253 253 253 253 253 253
39485 -253 253 253 253 253 253 231 231 231 242 242 242
39486 -253 253 253 253 253 253 253 253 253 253 253 253
39487 -253 253 253 253 253 253 253 253 253 253 253 253
39488 -253 253 253 253 253 253 253 253 253 253 253 253
39489 -253 253 253 253 253 253 253 253 253 253 253 253
39490 -253 253 253 253 253 253 234 234 234 10 10 10
39491 - 2 2 6 2 2 6 22 22 22 14 14 14
39492 - 2 2 6 2 2 6 2 2 6 2 2 6
39493 - 2 2 6 66 66 66 62 62 62 22 22 22
39494 - 6 6 6 0 0 0 0 0 0 0 0 0
39495 - 0 0 0 0 0 0 0 0 0 0 0 0
39496 - 0 0 0 0 0 0 0 0 0 0 0 0
39497 - 0 0 0 0 0 0 0 0 0 0 0 0
39498 - 0 0 0 0 0 0 0 0 0 0 0 0
39499 - 0 0 0 0 0 0 0 0 0 0 0 0
39500 - 0 0 0 0 0 0 6 6 6 18 18 18
39501 - 50 50 50 74 74 74 2 2 6 2 2 6
39502 - 14 14 14 70 70 70 34 34 34 62 62 62
39503 -250 250 250 253 253 253 253 253 253 253 253 253
39504 -253 253 253 253 253 253 253 253 253 253 253 253
39505 -253 253 253 253 253 253 231 231 231 246 246 246
39506 -253 253 253 253 253 253 253 253 253 253 253 253
39507 -253 253 253 253 253 253 253 253 253 253 253 253
39508 -253 253 253 253 253 253 253 253 253 253 253 253
39509 -253 253 253 253 253 253 253 253 253 253 253 253
39510 -253 253 253 253 253 253 234 234 234 14 14 14
39511 - 2 2 6 2 2 6 30 30 30 2 2 6
39512 - 2 2 6 2 2 6 2 2 6 2 2 6
39513 - 2 2 6 66 66 66 62 62 62 22 22 22
39514 - 6 6 6 0 0 0 0 0 0 0 0 0
39515 - 0 0 0 0 0 0 0 0 0 0 0 0
39516 - 0 0 0 0 0 0 0 0 0 0 0 0
39517 - 0 0 0 0 0 0 0 0 0 0 0 0
39518 - 0 0 0 0 0 0 0 0 0 0 0 0
39519 - 0 0 0 0 0 0 0 0 0 0 0 0
39520 - 0 0 0 0 0 0 6 6 6 18 18 18
39521 - 54 54 54 62 62 62 2 2 6 2 2 6
39522 - 2 2 6 30 30 30 46 46 46 70 70 70
39523 -250 250 250 253 253 253 253 253 253 253 253 253
39524 -253 253 253 253 253 253 253 253 253 253 253 253
39525 -253 253 253 253 253 253 231 231 231 246 246 246
39526 -253 253 253 253 253 253 253 253 253 253 253 253
39527 -253 253 253 253 253 253 253 253 253 253 253 253
39528 -253 253 253 253 253 253 253 253 253 253 253 253
39529 -253 253 253 253 253 253 253 253 253 253 253 253
39530 -253 253 253 253 253 253 226 226 226 10 10 10
39531 - 2 2 6 6 6 6 30 30 30 2 2 6
39532 - 2 2 6 2 2 6 2 2 6 2 2 6
39533 - 2 2 6 66 66 66 58 58 58 22 22 22
39534 - 6 6 6 0 0 0 0 0 0 0 0 0
39535 - 0 0 0 0 0 0 0 0 0 0 0 0
39536 - 0 0 0 0 0 0 0 0 0 0 0 0
39537 - 0 0 0 0 0 0 0 0 0 0 0 0
39538 - 0 0 0 0 0 0 0 0 0 0 0 0
39539 - 0 0 0 0 0 0 0 0 0 0 0 0
39540 - 0 0 0 0 0 0 6 6 6 22 22 22
39541 - 58 58 58 62 62 62 2 2 6 2 2 6
39542 - 2 2 6 2 2 6 30 30 30 78 78 78
39543 -250 250 250 253 253 253 253 253 253 253 253 253
39544 -253 253 253 253 253 253 253 253 253 253 253 253
39545 -253 253 253 253 253 253 231 231 231 246 246 246
39546 -253 253 253 253 253 253 253 253 253 253 253 253
39547 -253 253 253 253 253 253 253 253 253 253 253 253
39548 -253 253 253 253 253 253 253 253 253 253 253 253
39549 -253 253 253 253 253 253 253 253 253 253 253 253
39550 -253 253 253 253 253 253 206 206 206 2 2 6
39551 - 22 22 22 34 34 34 18 14 6 22 22 22
39552 - 26 26 26 18 18 18 6 6 6 2 2 6
39553 - 2 2 6 82 82 82 54 54 54 18 18 18
39554 - 6 6 6 0 0 0 0 0 0 0 0 0
39555 - 0 0 0 0 0 0 0 0 0 0 0 0
39556 - 0 0 0 0 0 0 0 0 0 0 0 0
39557 - 0 0 0 0 0 0 0 0 0 0 0 0
39558 - 0 0 0 0 0 0 0 0 0 0 0 0
39559 - 0 0 0 0 0 0 0 0 0 0 0 0
39560 - 0 0 0 0 0 0 6 6 6 26 26 26
39561 - 62 62 62 106 106 106 74 54 14 185 133 11
39562 -210 162 10 121 92 8 6 6 6 62 62 62
39563 -238 238 238 253 253 253 253 253 253 253 253 253
39564 -253 253 253 253 253 253 253 253 253 253 253 253
39565 -253 253 253 253 253 253 231 231 231 246 246 246
39566 -253 253 253 253 253 253 253 253 253 253 253 253
39567 -253 253 253 253 253 253 253 253 253 253 253 253
39568 -253 253 253 253 253 253 253 253 253 253 253 253
39569 -253 253 253 253 253 253 253 253 253 253 253 253
39570 -253 253 253 253 253 253 158 158 158 18 18 18
39571 - 14 14 14 2 2 6 2 2 6 2 2 6
39572 - 6 6 6 18 18 18 66 66 66 38 38 38
39573 - 6 6 6 94 94 94 50 50 50 18 18 18
39574 - 6 6 6 0 0 0 0 0 0 0 0 0
39575 - 0 0 0 0 0 0 0 0 0 0 0 0
39576 - 0 0 0 0 0 0 0 0 0 0 0 0
39577 - 0 0 0 0 0 0 0 0 0 0 0 0
39578 - 0 0 0 0 0 0 0 0 0 0 0 0
39579 - 0 0 0 0 0 0 0 0 0 6 6 6
39580 - 10 10 10 10 10 10 18 18 18 38 38 38
39581 - 78 78 78 142 134 106 216 158 10 242 186 14
39582 -246 190 14 246 190 14 156 118 10 10 10 10
39583 - 90 90 90 238 238 238 253 253 253 253 253 253
39584 -253 253 253 253 253 253 253 253 253 253 253 253
39585 -253 253 253 253 253 253 231 231 231 250 250 250
39586 -253 253 253 253 253 253 253 253 253 253 253 253
39587 -253 253 253 253 253 253 253 253 253 253 253 253
39588 -253 253 253 253 253 253 253 253 253 253 253 253
39589 -253 253 253 253 253 253 253 253 253 246 230 190
39590 -238 204 91 238 204 91 181 142 44 37 26 9
39591 - 2 2 6 2 2 6 2 2 6 2 2 6
39592 - 2 2 6 2 2 6 38 38 38 46 46 46
39593 - 26 26 26 106 106 106 54 54 54 18 18 18
39594 - 6 6 6 0 0 0 0 0 0 0 0 0
39595 - 0 0 0 0 0 0 0 0 0 0 0 0
39596 - 0 0 0 0 0 0 0 0 0 0 0 0
39597 - 0 0 0 0 0 0 0 0 0 0 0 0
39598 - 0 0 0 0 0 0 0 0 0 0 0 0
39599 - 0 0 0 6 6 6 14 14 14 22 22 22
39600 - 30 30 30 38 38 38 50 50 50 70 70 70
39601 -106 106 106 190 142 34 226 170 11 242 186 14
39602 -246 190 14 246 190 14 246 190 14 154 114 10
39603 - 6 6 6 74 74 74 226 226 226 253 253 253
39604 -253 253 253 253 253 253 253 253 253 253 253 253
39605 -253 253 253 253 253 253 231 231 231 250 250 250
39606 -253 253 253 253 253 253 253 253 253 253 253 253
39607 -253 253 253 253 253 253 253 253 253 253 253 253
39608 -253 253 253 253 253 253 253 253 253 253 253 253
39609 -253 253 253 253 253 253 253 253 253 228 184 62
39610 -241 196 14 241 208 19 232 195 16 38 30 10
39611 - 2 2 6 2 2 6 2 2 6 2 2 6
39612 - 2 2 6 6 6 6 30 30 30 26 26 26
39613 -203 166 17 154 142 90 66 66 66 26 26 26
39614 - 6 6 6 0 0 0 0 0 0 0 0 0
39615 - 0 0 0 0 0 0 0 0 0 0 0 0
39616 - 0 0 0 0 0 0 0 0 0 0 0 0
39617 - 0 0 0 0 0 0 0 0 0 0 0 0
39618 - 0 0 0 0 0 0 0 0 0 0 0 0
39619 - 6 6 6 18 18 18 38 38 38 58 58 58
39620 - 78 78 78 86 86 86 101 101 101 123 123 123
39621 -175 146 61 210 150 10 234 174 13 246 186 14
39622 -246 190 14 246 190 14 246 190 14 238 190 10
39623 -102 78 10 2 2 6 46 46 46 198 198 198
39624 -253 253 253 253 253 253 253 253 253 253 253 253
39625 -253 253 253 253 253 253 234 234 234 242 242 242
39626 -253 253 253 253 253 253 253 253 253 253 253 253
39627 -253 253 253 253 253 253 253 253 253 253 253 253
39628 -253 253 253 253 253 253 253 253 253 253 253 253
39629 -253 253 253 253 253 253 253 253 253 224 178 62
39630 -242 186 14 241 196 14 210 166 10 22 18 6
39631 - 2 2 6 2 2 6 2 2 6 2 2 6
39632 - 2 2 6 2 2 6 6 6 6 121 92 8
39633 -238 202 15 232 195 16 82 82 82 34 34 34
39634 - 10 10 10 0 0 0 0 0 0 0 0 0
39635 - 0 0 0 0 0 0 0 0 0 0 0 0
39636 - 0 0 0 0 0 0 0 0 0 0 0 0
39637 - 0 0 0 0 0 0 0 0 0 0 0 0
39638 - 0 0 0 0 0 0 0 0 0 0 0 0
39639 - 14 14 14 38 38 38 70 70 70 154 122 46
39640 -190 142 34 200 144 11 197 138 11 197 138 11
39641 -213 154 11 226 170 11 242 186 14 246 190 14
39642 -246 190 14 246 190 14 246 190 14 246 190 14
39643 -225 175 15 46 32 6 2 2 6 22 22 22
39644 -158 158 158 250 250 250 253 253 253 253 253 253
39645 -253 253 253 253 253 253 253 253 253 253 253 253
39646 -253 253 253 253 253 253 253 253 253 253 253 253
39647 -253 253 253 253 253 253 253 253 253 253 253 253
39648 -253 253 253 253 253 253 253 253 253 253 253 253
39649 -253 253 253 250 250 250 242 242 242 224 178 62
39650 -239 182 13 236 186 11 213 154 11 46 32 6
39651 - 2 2 6 2 2 6 2 2 6 2 2 6
39652 - 2 2 6 2 2 6 61 42 6 225 175 15
39653 -238 190 10 236 186 11 112 100 78 42 42 42
39654 - 14 14 14 0 0 0 0 0 0 0 0 0
39655 - 0 0 0 0 0 0 0 0 0 0 0 0
39656 - 0 0 0 0 0 0 0 0 0 0 0 0
39657 - 0 0 0 0 0 0 0 0 0 0 0 0
39658 - 0 0 0 0 0 0 0 0 0 6 6 6
39659 - 22 22 22 54 54 54 154 122 46 213 154 11
39660 -226 170 11 230 174 11 226 170 11 226 170 11
39661 -236 178 12 242 186 14 246 190 14 246 190 14
39662 -246 190 14 246 190 14 246 190 14 246 190 14
39663 -241 196 14 184 144 12 10 10 10 2 2 6
39664 - 6 6 6 116 116 116 242 242 242 253 253 253
39665 -253 253 253 253 253 253 253 253 253 253 253 253
39666 -253 253 253 253 253 253 253 253 253 253 253 253
39667 -253 253 253 253 253 253 253 253 253 253 253 253
39668 -253 253 253 253 253 253 253 253 253 253 253 253
39669 -253 253 253 231 231 231 198 198 198 214 170 54
39670 -236 178 12 236 178 12 210 150 10 137 92 6
39671 - 18 14 6 2 2 6 2 2 6 2 2 6
39672 - 6 6 6 70 47 6 200 144 11 236 178 12
39673 -239 182 13 239 182 13 124 112 88 58 58 58
39674 - 22 22 22 6 6 6 0 0 0 0 0 0
39675 - 0 0 0 0 0 0 0 0 0 0 0 0
39676 - 0 0 0 0 0 0 0 0 0 0 0 0
39677 - 0 0 0 0 0 0 0 0 0 0 0 0
39678 - 0 0 0 0 0 0 0 0 0 10 10 10
39679 - 30 30 30 70 70 70 180 133 36 226 170 11
39680 -239 182 13 242 186 14 242 186 14 246 186 14
39681 -246 190 14 246 190 14 246 190 14 246 190 14
39682 -246 190 14 246 190 14 246 190 14 246 190 14
39683 -246 190 14 232 195 16 98 70 6 2 2 6
39684 - 2 2 6 2 2 6 66 66 66 221 221 221
39685 -253 253 253 253 253 253 253 253 253 253 253 253
39686 -253 253 253 253 253 253 253 253 253 253 253 253
39687 -253 253 253 253 253 253 253 253 253 253 253 253
39688 -253 253 253 253 253 253 253 253 253 253 253 253
39689 -253 253 253 206 206 206 198 198 198 214 166 58
39690 -230 174 11 230 174 11 216 158 10 192 133 9
39691 -163 110 8 116 81 8 102 78 10 116 81 8
39692 -167 114 7 197 138 11 226 170 11 239 182 13
39693 -242 186 14 242 186 14 162 146 94 78 78 78
39694 - 34 34 34 14 14 14 6 6 6 0 0 0
39695 - 0 0 0 0 0 0 0 0 0 0 0 0
39696 - 0 0 0 0 0 0 0 0 0 0 0 0
39697 - 0 0 0 0 0 0 0 0 0 0 0 0
39698 - 0 0 0 0 0 0 0 0 0 6 6 6
39699 - 30 30 30 78 78 78 190 142 34 226 170 11
39700 -239 182 13 246 190 14 246 190 14 246 190 14
39701 -246 190 14 246 190 14 246 190 14 246 190 14
39702 -246 190 14 246 190 14 246 190 14 246 190 14
39703 -246 190 14 241 196 14 203 166 17 22 18 6
39704 - 2 2 6 2 2 6 2 2 6 38 38 38
39705 -218 218 218 253 253 253 253 253 253 253 253 253
39706 -253 253 253 253 253 253 253 253 253 253 253 253
39707 -253 253 253 253 253 253 253 253 253 253 253 253
39708 -253 253 253 253 253 253 253 253 253 253 253 253
39709 -250 250 250 206 206 206 198 198 198 202 162 69
39710 -226 170 11 236 178 12 224 166 10 210 150 10
39711 -200 144 11 197 138 11 192 133 9 197 138 11
39712 -210 150 10 226 170 11 242 186 14 246 190 14
39713 -246 190 14 246 186 14 225 175 15 124 112 88
39714 - 62 62 62 30 30 30 14 14 14 6 6 6
39715 - 0 0 0 0 0 0 0 0 0 0 0 0
39716 - 0 0 0 0 0 0 0 0 0 0 0 0
39717 - 0 0 0 0 0 0 0 0 0 0 0 0
39718 - 0 0 0 0 0 0 0 0 0 10 10 10
39719 - 30 30 30 78 78 78 174 135 50 224 166 10
39720 -239 182 13 246 190 14 246 190 14 246 190 14
39721 -246 190 14 246 190 14 246 190 14 246 190 14
39722 -246 190 14 246 190 14 246 190 14 246 190 14
39723 -246 190 14 246 190 14 241 196 14 139 102 15
39724 - 2 2 6 2 2 6 2 2 6 2 2 6
39725 - 78 78 78 250 250 250 253 253 253 253 253 253
39726 -253 253 253 253 253 253 253 253 253 253 253 253
39727 -253 253 253 253 253 253 253 253 253 253 253 253
39728 -253 253 253 253 253 253 253 253 253 253 253 253
39729 -250 250 250 214 214 214 198 198 198 190 150 46
39730 -219 162 10 236 178 12 234 174 13 224 166 10
39731 -216 158 10 213 154 11 213 154 11 216 158 10
39732 -226 170 11 239 182 13 246 190 14 246 190 14
39733 -246 190 14 246 190 14 242 186 14 206 162 42
39734 -101 101 101 58 58 58 30 30 30 14 14 14
39735 - 6 6 6 0 0 0 0 0 0 0 0 0
39736 - 0 0 0 0 0 0 0 0 0 0 0 0
39737 - 0 0 0 0 0 0 0 0 0 0 0 0
39738 - 0 0 0 0 0 0 0 0 0 10 10 10
39739 - 30 30 30 74 74 74 174 135 50 216 158 10
39740 -236 178 12 246 190 14 246 190 14 246 190 14
39741 -246 190 14 246 190 14 246 190 14 246 190 14
39742 -246 190 14 246 190 14 246 190 14 246 190 14
39743 -246 190 14 246 190 14 241 196 14 226 184 13
39744 - 61 42 6 2 2 6 2 2 6 2 2 6
39745 - 22 22 22 238 238 238 253 253 253 253 253 253
39746 -253 253 253 253 253 253 253 253 253 253 253 253
39747 -253 253 253 253 253 253 253 253 253 253 253 253
39748 -253 253 253 253 253 253 253 253 253 253 253 253
39749 -253 253 253 226 226 226 187 187 187 180 133 36
39750 -216 158 10 236 178 12 239 182 13 236 178 12
39751 -230 174 11 226 170 11 226 170 11 230 174 11
39752 -236 178 12 242 186 14 246 190 14 246 190 14
39753 -246 190 14 246 190 14 246 186 14 239 182 13
39754 -206 162 42 106 106 106 66 66 66 34 34 34
39755 - 14 14 14 6 6 6 0 0 0 0 0 0
39756 - 0 0 0 0 0 0 0 0 0 0 0 0
39757 - 0 0 0 0 0 0 0 0 0 0 0 0
39758 - 0 0 0 0 0 0 0 0 0 6 6 6
39759 - 26 26 26 70 70 70 163 133 67 213 154 11
39760 -236 178 12 246 190 14 246 190 14 246 190 14
39761 -246 190 14 246 190 14 246 190 14 246 190 14
39762 -246 190 14 246 190 14 246 190 14 246 190 14
39763 -246 190 14 246 190 14 246 190 14 241 196 14
39764 -190 146 13 18 14 6 2 2 6 2 2 6
39765 - 46 46 46 246 246 246 253 253 253 253 253 253
39766 -253 253 253 253 253 253 253 253 253 253 253 253
39767 -253 253 253 253 253 253 253 253 253 253 253 253
39768 -253 253 253 253 253 253 253 253 253 253 253 253
39769 -253 253 253 221 221 221 86 86 86 156 107 11
39770 -216 158 10 236 178 12 242 186 14 246 186 14
39771 -242 186 14 239 182 13 239 182 13 242 186 14
39772 -242 186 14 246 186 14 246 190 14 246 190 14
39773 -246 190 14 246 190 14 246 190 14 246 190 14
39774 -242 186 14 225 175 15 142 122 72 66 66 66
39775 - 30 30 30 10 10 10 0 0 0 0 0 0
39776 - 0 0 0 0 0 0 0 0 0 0 0 0
39777 - 0 0 0 0 0 0 0 0 0 0 0 0
39778 - 0 0 0 0 0 0 0 0 0 6 6 6
39779 - 26 26 26 70 70 70 163 133 67 210 150 10
39780 -236 178 12 246 190 14 246 190 14 246 190 14
39781 -246 190 14 246 190 14 246 190 14 246 190 14
39782 -246 190 14 246 190 14 246 190 14 246 190 14
39783 -246 190 14 246 190 14 246 190 14 246 190 14
39784 -232 195 16 121 92 8 34 34 34 106 106 106
39785 -221 221 221 253 253 253 253 253 253 253 253 253
39786 -253 253 253 253 253 253 253 253 253 253 253 253
39787 -253 253 253 253 253 253 253 253 253 253 253 253
39788 -253 253 253 253 253 253 253 253 253 253 253 253
39789 -242 242 242 82 82 82 18 14 6 163 110 8
39790 -216 158 10 236 178 12 242 186 14 246 190 14
39791 -246 190 14 246 190 14 246 190 14 246 190 14
39792 -246 190 14 246 190 14 246 190 14 246 190 14
39793 -246 190 14 246 190 14 246 190 14 246 190 14
39794 -246 190 14 246 190 14 242 186 14 163 133 67
39795 - 46 46 46 18 18 18 6 6 6 0 0 0
39796 - 0 0 0 0 0 0 0 0 0 0 0 0
39797 - 0 0 0 0 0 0 0 0 0 0 0 0
39798 - 0 0 0 0 0 0 0 0 0 10 10 10
39799 - 30 30 30 78 78 78 163 133 67 210 150 10
39800 -236 178 12 246 186 14 246 190 14 246 190 14
39801 -246 190 14 246 190 14 246 190 14 246 190 14
39802 -246 190 14 246 190 14 246 190 14 246 190 14
39803 -246 190 14 246 190 14 246 190 14 246 190 14
39804 -241 196 14 215 174 15 190 178 144 253 253 253
39805 -253 253 253 253 253 253 253 253 253 253 253 253
39806 -253 253 253 253 253 253 253 253 253 253 253 253
39807 -253 253 253 253 253 253 253 253 253 253 253 253
39808 -253 253 253 253 253 253 253 253 253 218 218 218
39809 - 58 58 58 2 2 6 22 18 6 167 114 7
39810 -216 158 10 236 178 12 246 186 14 246 190 14
39811 -246 190 14 246 190 14 246 190 14 246 190 14
39812 -246 190 14 246 190 14 246 190 14 246 190 14
39813 -246 190 14 246 190 14 246 190 14 246 190 14
39814 -246 190 14 246 186 14 242 186 14 190 150 46
39815 - 54 54 54 22 22 22 6 6 6 0 0 0
39816 - 0 0 0 0 0 0 0 0 0 0 0 0
39817 - 0 0 0 0 0 0 0 0 0 0 0 0
39818 - 0 0 0 0 0 0 0 0 0 14 14 14
39819 - 38 38 38 86 86 86 180 133 36 213 154 11
39820 -236 178 12 246 186 14 246 190 14 246 190 14
39821 -246 190 14 246 190 14 246 190 14 246 190 14
39822 -246 190 14 246 190 14 246 190 14 246 190 14
39823 -246 190 14 246 190 14 246 190 14 246 190 14
39824 -246 190 14 232 195 16 190 146 13 214 214 214
39825 -253 253 253 253 253 253 253 253 253 253 253 253
39826 -253 253 253 253 253 253 253 253 253 253 253 253
39827 -253 253 253 253 253 253 253 253 253 253 253 253
39828 -253 253 253 250 250 250 170 170 170 26 26 26
39829 - 2 2 6 2 2 6 37 26 9 163 110 8
39830 -219 162 10 239 182 13 246 186 14 246 190 14
39831 -246 190 14 246 190 14 246 190 14 246 190 14
39832 -246 190 14 246 190 14 246 190 14 246 190 14
39833 -246 190 14 246 190 14 246 190 14 246 190 14
39834 -246 186 14 236 178 12 224 166 10 142 122 72
39835 - 46 46 46 18 18 18 6 6 6 0 0 0
39836 - 0 0 0 0 0 0 0 0 0 0 0 0
39837 - 0 0 0 0 0 0 0 0 0 0 0 0
39838 - 0 0 0 0 0 0 6 6 6 18 18 18
39839 - 50 50 50 109 106 95 192 133 9 224 166 10
39840 -242 186 14 246 190 14 246 190 14 246 190 14
39841 -246 190 14 246 190 14 246 190 14 246 190 14
39842 -246 190 14 246 190 14 246 190 14 246 190 14
39843 -246 190 14 246 190 14 246 190 14 246 190 14
39844 -242 186 14 226 184 13 210 162 10 142 110 46
39845 -226 226 226 253 253 253 253 253 253 253 253 253
39846 -253 253 253 253 253 253 253 253 253 253 253 253
39847 -253 253 253 253 253 253 253 253 253 253 253 253
39848 -198 198 198 66 66 66 2 2 6 2 2 6
39849 - 2 2 6 2 2 6 50 34 6 156 107 11
39850 -219 162 10 239 182 13 246 186 14 246 190 14
39851 -246 190 14 246 190 14 246 190 14 246 190 14
39852 -246 190 14 246 190 14 246 190 14 246 190 14
39853 -246 190 14 246 190 14 246 190 14 242 186 14
39854 -234 174 13 213 154 11 154 122 46 66 66 66
39855 - 30 30 30 10 10 10 0 0 0 0 0 0
39856 - 0 0 0 0 0 0 0 0 0 0 0 0
39857 - 0 0 0 0 0 0 0 0 0 0 0 0
39858 - 0 0 0 0 0 0 6 6 6 22 22 22
39859 - 58 58 58 154 121 60 206 145 10 234 174 13
39860 -242 186 14 246 186 14 246 190 14 246 190 14
39861 -246 190 14 246 190 14 246 190 14 246 190 14
39862 -246 190 14 246 190 14 246 190 14 246 190 14
39863 -246 190 14 246 190 14 246 190 14 246 190 14
39864 -246 186 14 236 178 12 210 162 10 163 110 8
39865 - 61 42 6 138 138 138 218 218 218 250 250 250
39866 -253 253 253 253 253 253 253 253 253 250 250 250
39867 -242 242 242 210 210 210 144 144 144 66 66 66
39868 - 6 6 6 2 2 6 2 2 6 2 2 6
39869 - 2 2 6 2 2 6 61 42 6 163 110 8
39870 -216 158 10 236 178 12 246 190 14 246 190 14
39871 -246 190 14 246 190 14 246 190 14 246 190 14
39872 -246 190 14 246 190 14 246 190 14 246 190 14
39873 -246 190 14 239 182 13 230 174 11 216 158 10
39874 -190 142 34 124 112 88 70 70 70 38 38 38
39875 - 18 18 18 6 6 6 0 0 0 0 0 0
39876 - 0 0 0 0 0 0 0 0 0 0 0 0
39877 - 0 0 0 0 0 0 0 0 0 0 0 0
39878 - 0 0 0 0 0 0 6 6 6 22 22 22
39879 - 62 62 62 168 124 44 206 145 10 224 166 10
39880 -236 178 12 239 182 13 242 186 14 242 186 14
39881 -246 186 14 246 190 14 246 190 14 246 190 14
39882 -246 190 14 246 190 14 246 190 14 246 190 14
39883 -246 190 14 246 190 14 246 190 14 246 190 14
39884 -246 190 14 236 178 12 216 158 10 175 118 6
39885 - 80 54 7 2 2 6 6 6 6 30 30 30
39886 - 54 54 54 62 62 62 50 50 50 38 38 38
39887 - 14 14 14 2 2 6 2 2 6 2 2 6
39888 - 2 2 6 2 2 6 2 2 6 2 2 6
39889 - 2 2 6 6 6 6 80 54 7 167 114 7
39890 -213 154 11 236 178 12 246 190 14 246 190 14
39891 -246 190 14 246 190 14 246 190 14 246 190 14
39892 -246 190 14 242 186 14 239 182 13 239 182 13
39893 -230 174 11 210 150 10 174 135 50 124 112 88
39894 - 82 82 82 54 54 54 34 34 34 18 18 18
39895 - 6 6 6 0 0 0 0 0 0 0 0 0
39896 - 0 0 0 0 0 0 0 0 0 0 0 0
39897 - 0 0 0 0 0 0 0 0 0 0 0 0
39898 - 0 0 0 0 0 0 6 6 6 18 18 18
39899 - 50 50 50 158 118 36 192 133 9 200 144 11
39900 -216 158 10 219 162 10 224 166 10 226 170 11
39901 -230 174 11 236 178 12 239 182 13 239 182 13
39902 -242 186 14 246 186 14 246 190 14 246 190 14
39903 -246 190 14 246 190 14 246 190 14 246 190 14
39904 -246 186 14 230 174 11 210 150 10 163 110 8
39905 -104 69 6 10 10 10 2 2 6 2 2 6
39906 - 2 2 6 2 2 6 2 2 6 2 2 6
39907 - 2 2 6 2 2 6 2 2 6 2 2 6
39908 - 2 2 6 2 2 6 2 2 6 2 2 6
39909 - 2 2 6 6 6 6 91 60 6 167 114 7
39910 -206 145 10 230 174 11 242 186 14 246 190 14
39911 -246 190 14 246 190 14 246 186 14 242 186 14
39912 -239 182 13 230 174 11 224 166 10 213 154 11
39913 -180 133 36 124 112 88 86 86 86 58 58 58
39914 - 38 38 38 22 22 22 10 10 10 6 6 6
39915 - 0 0 0 0 0 0 0 0 0 0 0 0
39916 - 0 0 0 0 0 0 0 0 0 0 0 0
39917 - 0 0 0 0 0 0 0 0 0 0 0 0
39918 - 0 0 0 0 0 0 0 0 0 14 14 14
39919 - 34 34 34 70 70 70 138 110 50 158 118 36
39920 -167 114 7 180 123 7 192 133 9 197 138 11
39921 -200 144 11 206 145 10 213 154 11 219 162 10
39922 -224 166 10 230 174 11 239 182 13 242 186 14
39923 -246 186 14 246 186 14 246 186 14 246 186 14
39924 -239 182 13 216 158 10 185 133 11 152 99 6
39925 -104 69 6 18 14 6 2 2 6 2 2 6
39926 - 2 2 6 2 2 6 2 2 6 2 2 6
39927 - 2 2 6 2 2 6 2 2 6 2 2 6
39928 - 2 2 6 2 2 6 2 2 6 2 2 6
39929 - 2 2 6 6 6 6 80 54 7 152 99 6
39930 -192 133 9 219 162 10 236 178 12 239 182 13
39931 -246 186 14 242 186 14 239 182 13 236 178 12
39932 -224 166 10 206 145 10 192 133 9 154 121 60
39933 - 94 94 94 62 62 62 42 42 42 22 22 22
39934 - 14 14 14 6 6 6 0 0 0 0 0 0
39935 - 0 0 0 0 0 0 0 0 0 0 0 0
39936 - 0 0 0 0 0 0 0 0 0 0 0 0
39937 - 0 0 0 0 0 0 0 0 0 0 0 0
39938 - 0 0 0 0 0 0 0 0 0 6 6 6
39939 - 18 18 18 34 34 34 58 58 58 78 78 78
39940 -101 98 89 124 112 88 142 110 46 156 107 11
39941 -163 110 8 167 114 7 175 118 6 180 123 7
39942 -185 133 11 197 138 11 210 150 10 219 162 10
39943 -226 170 11 236 178 12 236 178 12 234 174 13
39944 -219 162 10 197 138 11 163 110 8 130 83 6
39945 - 91 60 6 10 10 10 2 2 6 2 2 6
39946 - 18 18 18 38 38 38 38 38 38 38 38 38
39947 - 38 38 38 38 38 38 38 38 38 38 38 38
39948 - 38 38 38 38 38 38 26 26 26 2 2 6
39949 - 2 2 6 6 6 6 70 47 6 137 92 6
39950 -175 118 6 200 144 11 219 162 10 230 174 11
39951 -234 174 13 230 174 11 219 162 10 210 150 10
39952 -192 133 9 163 110 8 124 112 88 82 82 82
39953 - 50 50 50 30 30 30 14 14 14 6 6 6
39954 - 0 0 0 0 0 0 0 0 0 0 0 0
39955 - 0 0 0 0 0 0 0 0 0 0 0 0
39956 - 0 0 0 0 0 0 0 0 0 0 0 0
39957 - 0 0 0 0 0 0 0 0 0 0 0 0
39958 - 0 0 0 0 0 0 0 0 0 0 0 0
39959 - 6 6 6 14 14 14 22 22 22 34 34 34
39960 - 42 42 42 58 58 58 74 74 74 86 86 86
39961 -101 98 89 122 102 70 130 98 46 121 87 25
39962 -137 92 6 152 99 6 163 110 8 180 123 7
39963 -185 133 11 197 138 11 206 145 10 200 144 11
39964 -180 123 7 156 107 11 130 83 6 104 69 6
39965 - 50 34 6 54 54 54 110 110 110 101 98 89
39966 - 86 86 86 82 82 82 78 78 78 78 78 78
39967 - 78 78 78 78 78 78 78 78 78 78 78 78
39968 - 78 78 78 82 82 82 86 86 86 94 94 94
39969 -106 106 106 101 101 101 86 66 34 124 80 6
39970 -156 107 11 180 123 7 192 133 9 200 144 11
39971 -206 145 10 200 144 11 192 133 9 175 118 6
39972 -139 102 15 109 106 95 70 70 70 42 42 42
39973 - 22 22 22 10 10 10 0 0 0 0 0 0
39974 - 0 0 0 0 0 0 0 0 0 0 0 0
39975 - 0 0 0 0 0 0 0 0 0 0 0 0
39976 - 0 0 0 0 0 0 0 0 0 0 0 0
39977 - 0 0 0 0 0 0 0 0 0 0 0 0
39978 - 0 0 0 0 0 0 0 0 0 0 0 0
39979 - 0 0 0 0 0 0 6 6 6 10 10 10
39980 - 14 14 14 22 22 22 30 30 30 38 38 38
39981 - 50 50 50 62 62 62 74 74 74 90 90 90
39982 -101 98 89 112 100 78 121 87 25 124 80 6
39983 -137 92 6 152 99 6 152 99 6 152 99 6
39984 -138 86 6 124 80 6 98 70 6 86 66 30
39985 -101 98 89 82 82 82 58 58 58 46 46 46
39986 - 38 38 38 34 34 34 34 34 34 34 34 34
39987 - 34 34 34 34 34 34 34 34 34 34 34 34
39988 - 34 34 34 34 34 34 38 38 38 42 42 42
39989 - 54 54 54 82 82 82 94 86 76 91 60 6
39990 -134 86 6 156 107 11 167 114 7 175 118 6
39991 -175 118 6 167 114 7 152 99 6 121 87 25
39992 -101 98 89 62 62 62 34 34 34 18 18 18
39993 - 6 6 6 0 0 0 0 0 0 0 0 0
39994 - 0 0 0 0 0 0 0 0 0 0 0 0
39995 - 0 0 0 0 0 0 0 0 0 0 0 0
39996 - 0 0 0 0 0 0 0 0 0 0 0 0
39997 - 0 0 0 0 0 0 0 0 0 0 0 0
39998 - 0 0 0 0 0 0 0 0 0 0 0 0
39999 - 0 0 0 0 0 0 0 0 0 0 0 0
40000 - 0 0 0 6 6 6 6 6 6 10 10 10
40001 - 18 18 18 22 22 22 30 30 30 42 42 42
40002 - 50 50 50 66 66 66 86 86 86 101 98 89
40003 -106 86 58 98 70 6 104 69 6 104 69 6
40004 -104 69 6 91 60 6 82 62 34 90 90 90
40005 - 62 62 62 38 38 38 22 22 22 14 14 14
40006 - 10 10 10 10 10 10 10 10 10 10 10 10
40007 - 10 10 10 10 10 10 6 6 6 10 10 10
40008 - 10 10 10 10 10 10 10 10 10 14 14 14
40009 - 22 22 22 42 42 42 70 70 70 89 81 66
40010 - 80 54 7 104 69 6 124 80 6 137 92 6
40011 -134 86 6 116 81 8 100 82 52 86 86 86
40012 - 58 58 58 30 30 30 14 14 14 6 6 6
40013 - 0 0 0 0 0 0 0 0 0 0 0 0
40014 - 0 0 0 0 0 0 0 0 0 0 0 0
40015 - 0 0 0 0 0 0 0 0 0 0 0 0
40016 - 0 0 0 0 0 0 0 0 0 0 0 0
40017 - 0 0 0 0 0 0 0 0 0 0 0 0
40018 - 0 0 0 0 0 0 0 0 0 0 0 0
40019 - 0 0 0 0 0 0 0 0 0 0 0 0
40020 - 0 0 0 0 0 0 0 0 0 0 0 0
40021 - 0 0 0 6 6 6 10 10 10 14 14 14
40022 - 18 18 18 26 26 26 38 38 38 54 54 54
40023 - 70 70 70 86 86 86 94 86 76 89 81 66
40024 - 89 81 66 86 86 86 74 74 74 50 50 50
40025 - 30 30 30 14 14 14 6 6 6 0 0 0
40026 - 0 0 0 0 0 0 0 0 0 0 0 0
40027 - 0 0 0 0 0 0 0 0 0 0 0 0
40028 - 0 0 0 0 0 0 0 0 0 0 0 0
40029 - 6 6 6 18 18 18 34 34 34 58 58 58
40030 - 82 82 82 89 81 66 89 81 66 89 81 66
40031 - 94 86 66 94 86 76 74 74 74 50 50 50
40032 - 26 26 26 14 14 14 6 6 6 0 0 0
40033 - 0 0 0 0 0 0 0 0 0 0 0 0
40034 - 0 0 0 0 0 0 0 0 0 0 0 0
40035 - 0 0 0 0 0 0 0 0 0 0 0 0
40036 - 0 0 0 0 0 0 0 0 0 0 0 0
40037 - 0 0 0 0 0 0 0 0 0 0 0 0
40038 - 0 0 0 0 0 0 0 0 0 0 0 0
40039 - 0 0 0 0 0 0 0 0 0 0 0 0
40040 - 0 0 0 0 0 0 0 0 0 0 0 0
40041 - 0 0 0 0 0 0 0 0 0 0 0 0
40042 - 6 6 6 6 6 6 14 14 14 18 18 18
40043 - 30 30 30 38 38 38 46 46 46 54 54 54
40044 - 50 50 50 42 42 42 30 30 30 18 18 18
40045 - 10 10 10 0 0 0 0 0 0 0 0 0
40046 - 0 0 0 0 0 0 0 0 0 0 0 0
40047 - 0 0 0 0 0 0 0 0 0 0 0 0
40048 - 0 0 0 0 0 0 0 0 0 0 0 0
40049 - 0 0 0 6 6 6 14 14 14 26 26 26
40050 - 38 38 38 50 50 50 58 58 58 58 58 58
40051 - 54 54 54 42 42 42 30 30 30 18 18 18
40052 - 10 10 10 0 0 0 0 0 0 0 0 0
40053 - 0 0 0 0 0 0 0 0 0 0 0 0
40054 - 0 0 0 0 0 0 0 0 0 0 0 0
40055 - 0 0 0 0 0 0 0 0 0 0 0 0
40056 - 0 0 0 0 0 0 0 0 0 0 0 0
40057 - 0 0 0 0 0 0 0 0 0 0 0 0
40058 - 0 0 0 0 0 0 0 0 0 0 0 0
40059 - 0 0 0 0 0 0 0 0 0 0 0 0
40060 - 0 0 0 0 0 0 0 0 0 0 0 0
40061 - 0 0 0 0 0 0 0 0 0 0 0 0
40062 - 0 0 0 0 0 0 0 0 0 6 6 6
40063 - 6 6 6 10 10 10 14 14 14 18 18 18
40064 - 18 18 18 14 14 14 10 10 10 6 6 6
40065 - 0 0 0 0 0 0 0 0 0 0 0 0
40066 - 0 0 0 0 0 0 0 0 0 0 0 0
40067 - 0 0 0 0 0 0 0 0 0 0 0 0
40068 - 0 0 0 0 0 0 0 0 0 0 0 0
40069 - 0 0 0 0 0 0 0 0 0 6 6 6
40070 - 14 14 14 18 18 18 22 22 22 22 22 22
40071 - 18 18 18 14 14 14 10 10 10 6 6 6
40072 - 0 0 0 0 0 0 0 0 0 0 0 0
40073 - 0 0 0 0 0 0 0 0 0 0 0 0
40074 - 0 0 0 0 0 0 0 0 0 0 0 0
40075 - 0 0 0 0 0 0 0 0 0 0 0 0
40076 - 0 0 0 0 0 0 0 0 0 0 0 0
40077 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40088 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40089 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40090 +4 4 4 4 4 4
40091 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40102 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40103 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40104 +4 4 4 4 4 4
40105 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40116 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40117 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40118 +4 4 4 4 4 4
40119 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40131 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40132 +4 4 4 4 4 4
40133 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40145 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40146 +4 4 4 4 4 4
40147 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40159 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40160 +4 4 4 4 4 4
40161 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40165 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40166 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40170 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40171 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40172 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40173 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40174 +4 4 4 4 4 4
40175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40179 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40180 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40181 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40184 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40185 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40186 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40187 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40188 +4 4 4 4 4 4
40189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40193 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40194 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40195 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40198 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40199 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40200 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40201 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40202 +4 4 4 4 4 4
40203 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40204 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40205 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40206 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40207 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40208 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40209 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40211 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40212 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40213 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40214 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40215 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40216 +4 4 4 4 4 4
40217 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40218 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40219 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40220 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40221 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40222 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40223 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40224 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40225 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40226 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40227 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40228 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40229 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40230 +4 4 4 4 4 4
40231 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40232 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40233 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40234 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40235 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40236 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40237 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40238 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40239 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40240 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40241 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40242 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40243 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40244 +4 4 4 4 4 4
40245 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40246 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40247 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40248 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40249 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40250 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40251 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40252 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40253 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40254 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40255 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40256 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40257 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40258 +4 4 4 4 4 4
40259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40260 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40261 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40262 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40263 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40264 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40265 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40266 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40267 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40268 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40269 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40270 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40271 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40272 +4 4 4 4 4 4
40273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40275 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40276 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40277 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40278 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40279 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40280 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40281 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40282 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40283 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40284 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40285 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40286 +4 4 4 4 4 4
40287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40289 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40290 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40291 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40292 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40293 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40294 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40295 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40296 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40297 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40298 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40299 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40300 +4 4 4 4 4 4
40301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40303 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40304 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40305 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40306 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40307 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40308 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40309 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40310 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40311 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40312 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40313 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40314 +4 4 4 4 4 4
40315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40317 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40318 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40319 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40320 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40321 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40322 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40323 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40324 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40325 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40326 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40327 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40328 +0 0 0 4 4 4
40329 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40330 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40331 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40332 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40333 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40334 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40335 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40336 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40337 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40338 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40339 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40340 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40341 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40342 +2 0 0 0 0 0
40343 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40344 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40345 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40346 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40347 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40348 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40349 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40350 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40351 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40352 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40353 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40354 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40355 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40356 +37 38 37 0 0 0
40357 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40358 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40359 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40360 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40361 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40362 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40363 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40364 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40365 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40366 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40367 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40368 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40369 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40370 +85 115 134 4 0 0
40371 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40372 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40373 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40374 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40375 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40376 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40377 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40378 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40379 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40380 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40381 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40382 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40383 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40384 +60 73 81 4 0 0
40385 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40386 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40387 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40388 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40389 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40390 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40391 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40392 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40393 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40394 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40395 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40396 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40397 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40398 +16 19 21 4 0 0
40399 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40400 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40401 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40402 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40403 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40404 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40405 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40406 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40407 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40408 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40409 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40410 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40411 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40412 +4 0 0 4 3 3
40413 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40414 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40415 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40417 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40418 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40419 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40420 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40421 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40422 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40423 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40424 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40425 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40426 +3 2 2 4 4 4
40427 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40428 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40429 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40430 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40431 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40432 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40433 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40434 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40435 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40436 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40437 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40438 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40439 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40440 +4 4 4 4 4 4
40441 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40442 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40443 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40444 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40445 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40446 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40447 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40448 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40449 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40450 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40451 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40452 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40453 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40454 +4 4 4 4 4 4
40455 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40456 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40457 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40458 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40459 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40460 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40461 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40462 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40463 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40464 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40465 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40466 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40467 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40468 +5 5 5 5 5 5
40469 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40470 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40471 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40472 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40473 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40474 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40475 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40476 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40477 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40478 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40479 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40480 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40481 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40482 +5 5 5 4 4 4
40483 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40484 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40485 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40486 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40487 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40488 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40489 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40490 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40491 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40492 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40493 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40494 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40496 +4 4 4 4 4 4
40497 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40498 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40499 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40500 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40501 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40502 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40503 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40504 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40505 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40506 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40507 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40508 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40510 +4 4 4 4 4 4
40511 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40512 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40513 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40514 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40515 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40516 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40517 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40518 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40519 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40520 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40521 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40524 +4 4 4 4 4 4
40525 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40526 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40527 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40528 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40529 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40530 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40531 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40532 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40533 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40534 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40535 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40538 +4 4 4 4 4 4
40539 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40540 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40541 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40542 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40543 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40544 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40545 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40546 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40547 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40548 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40549 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40552 +4 4 4 4 4 4
40553 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40554 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40555 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40556 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40557 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40558 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40559 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40560 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40561 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40562 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40563 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40566 +4 4 4 4 4 4
40567 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40568 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40569 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40570 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40571 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40572 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40573 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40574 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40575 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40576 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40577 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40580 +4 4 4 4 4 4
40581 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40582 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40583 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40584 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40585 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40586 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40587 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40588 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40589 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40590 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40591 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40594 +4 4 4 4 4 4
40595 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40596 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40597 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40598 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40599 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40600 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40601 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40602 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40603 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40604 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40605 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40608 +4 4 4 4 4 4
40609 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40610 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40611 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40612 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40613 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40614 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40615 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40616 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40617 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40618 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40619 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40622 +4 4 4 4 4 4
40623 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40624 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40625 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40626 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40627 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40628 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40629 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40630 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40631 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40632 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40633 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40636 +4 4 4 4 4 4
40637 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40638 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40639 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40640 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40641 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40642 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40643 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40644 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40645 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40646 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40647 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40650 +4 4 4 4 4 4
40651 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40652 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40653 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40654 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40655 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40656 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40657 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40658 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40659 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40660 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40661 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40664 +4 4 4 4 4 4
40665 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40666 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40667 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40668 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40669 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40670 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40671 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40672 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40673 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40674 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40675 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40678 +4 4 4 4 4 4
40679 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40680 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40681 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40682 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40683 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40684 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40685 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40686 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40687 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40688 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40689 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40692 +4 4 4 4 4 4
40693 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40694 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40695 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40696 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40697 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40698 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40699 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40700 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40701 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40702 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40703 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40706 +4 4 4 4 4 4
40707 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40708 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40709 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40710 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40711 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40712 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40713 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40714 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40715 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40716 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40717 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40720 +4 4 4 4 4 4
40721 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40722 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40723 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40724 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40725 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40726 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40727 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40728 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40729 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40730 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40731 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40734 +4 4 4 4 4 4
40735 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40736 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40737 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40738 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40739 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40740 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40741 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40742 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40743 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40744 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40745 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40748 +4 4 4 4 4 4
40749 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40750 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40751 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40752 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40753 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40754 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40755 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40756 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40757 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40758 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40759 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40762 +4 4 4 4 4 4
40763 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40764 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40765 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40766 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40767 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40768 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40769 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40770 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40771 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40772 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40773 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40776 +4 4 4 4 4 4
40777 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40778 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40779 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40780 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40781 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40782 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40783 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40784 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40785 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40786 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40787 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40790 +4 4 4 4 4 4
40791 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40792 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40793 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40794 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40795 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40796 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40797 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40798 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40799 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40800 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40801 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40804 +4 4 4 4 4 4
40805 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40806 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40807 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40808 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40809 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40810 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40811 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40812 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40813 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40814 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40815 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40818 +4 4 4 4 4 4
40819 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40820 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40821 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40822 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40823 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40824 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40825 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40826 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40827 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40828 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40829 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40832 +4 4 4 4 4 4
40833 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
40834 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
40835 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
40836 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
40837 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
40838 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
40839 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
40840 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
40841 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
40842 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
40843 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40846 +4 4 4 4 4 4
40847 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
40848 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40849 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
40850 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
40851 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
40852 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
40853 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
40854 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
40855 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
40856 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
40857 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40860 +4 4 4 4 4 4
40861 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
40862 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
40863 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
40864 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
40865 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
40866 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
40867 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40868 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
40869 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
40870 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
40871 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40874 +4 4 4 4 4 4
40875 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
40876 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
40877 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
40878 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
40879 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
40880 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
40881 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
40882 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
40883 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
40884 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
40885 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40888 +4 4 4 4 4 4
40889 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
40890 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
40891 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40892 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
40893 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
40894 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
40895 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
40896 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
40897 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
40898 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
40899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40902 +4 4 4 4 4 4
40903 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40904 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
40905 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
40906 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
40907 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
40908 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
40909 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
40910 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
40911 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
40912 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
40913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40916 +4 4 4 4 4 4
40917 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
40918 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
40919 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
40920 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
40921 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
40922 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
40923 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
40924 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
40925 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
40926 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40930 +4 4 4 4 4 4
40931 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
40932 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
40933 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
40934 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
40935 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
40936 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
40937 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
40938 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
40939 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40940 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40944 +4 4 4 4 4 4
40945 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
40946 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
40947 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
40948 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
40949 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
40950 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
40951 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
40952 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
40953 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
40954 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40955 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40956 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40958 +4 4 4 4 4 4
40959 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
40960 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
40961 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
40962 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
40963 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
40964 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
40965 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
40966 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
40967 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
40968 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40969 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40970 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40972 +4 4 4 4 4 4
40973 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
40974 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
40975 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
40976 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
40977 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
40978 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
40979 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
40980 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
40981 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40982 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40983 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40984 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40985 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40986 +4 4 4 4 4 4
40987 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
40988 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
40989 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
40990 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
40991 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
40992 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
40993 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
40994 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
40995 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40996 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40997 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40998 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40999 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41000 +4 4 4 4 4 4
41001 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41002 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41003 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41004 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41005 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41006 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41007 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41008 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41009 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41010 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41011 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41012 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41013 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41014 +4 4 4 4 4 4
41015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41016 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41017 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41018 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41019 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41020 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41021 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41022 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41023 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41024 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41025 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41026 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41027 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41028 +4 4 4 4 4 4
41029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41030 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41031 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41032 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41033 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41034 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41035 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41036 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41037 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41038 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41039 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41040 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41041 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41042 +4 4 4 4 4 4
41043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41044 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41045 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41046 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41047 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41048 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41049 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41050 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41051 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41052 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41053 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41054 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41055 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41056 +4 4 4 4 4 4
41057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41058 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41059 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41060 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41061 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41062 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41063 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41064 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41065 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41066 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41067 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41068 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41070 +4 4 4 4 4 4
41071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41072 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41073 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41074 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41075 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41076 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41077 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41078 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41079 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41080 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41081 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41082 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41084 +4 4 4 4 4 4
41085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41086 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41087 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41088 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41089 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41090 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41091 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41092 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41093 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41094 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41095 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41096 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41098 +4 4 4 4 4 4
41099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41100 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41101 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41102 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41103 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41104 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41105 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41106 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41107 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41108 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41109 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41110 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41112 +4 4 4 4 4 4
41113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41114 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41115 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41116 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41117 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41118 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41119 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41120 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41121 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41122 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41123 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126 +4 4 4 4 4 4
41127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41128 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41129 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41130 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41131 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41132 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41133 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41134 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41135 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41136 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41137 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140 +4 4 4 4 4 4
41141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41142 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41143 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41144 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41145 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41146 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41147 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41148 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41149 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41150 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41151 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154 +4 4 4 4 4 4
41155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41156 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41157 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41158 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41159 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41160 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41161 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41162 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41163 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168 +4 4 4 4 4 4
41169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41170 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41171 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41172 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41173 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41174 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41175 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41176 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41177 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41178 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182 +4 4 4 4 4 4
41183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41184 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41186 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41187 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41188 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41189 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41190 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41191 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41192 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196 +4 4 4 4 4 4
41197 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41198 index 087fc99..f85ed76 100644
41199 --- a/drivers/video/udlfb.c
41200 +++ b/drivers/video/udlfb.c
41201 @@ -585,11 +585,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41202 dlfb_urb_completion(urb);
41203
41204 error:
41205 - atomic_add(bytes_sent, &dev->bytes_sent);
41206 - atomic_add(bytes_identical, &dev->bytes_identical);
41207 - atomic_add(width*height*2, &dev->bytes_rendered);
41208 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41209 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41210 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41211 end_cycles = get_cycles();
41212 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41213 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41214 >> 10)), /* Kcycles */
41215 &dev->cpu_kcycles_used);
41216
41217 @@ -710,11 +710,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41218 dlfb_urb_completion(urb);
41219
41220 error:
41221 - atomic_add(bytes_sent, &dev->bytes_sent);
41222 - atomic_add(bytes_identical, &dev->bytes_identical);
41223 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41224 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41225 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41226 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41227 end_cycles = get_cycles();
41228 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41229 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41230 >> 10)), /* Kcycles */
41231 &dev->cpu_kcycles_used);
41232 }
41233 @@ -1306,7 +1306,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41234 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41235 struct dlfb_data *dev = fb_info->par;
41236 return snprintf(buf, PAGE_SIZE, "%u\n",
41237 - atomic_read(&dev->bytes_rendered));
41238 + atomic_read_unchecked(&dev->bytes_rendered));
41239 }
41240
41241 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41242 @@ -1314,7 +1314,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41243 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41244 struct dlfb_data *dev = fb_info->par;
41245 return snprintf(buf, PAGE_SIZE, "%u\n",
41246 - atomic_read(&dev->bytes_identical));
41247 + atomic_read_unchecked(&dev->bytes_identical));
41248 }
41249
41250 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41251 @@ -1322,7 +1322,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41252 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41253 struct dlfb_data *dev = fb_info->par;
41254 return snprintf(buf, PAGE_SIZE, "%u\n",
41255 - atomic_read(&dev->bytes_sent));
41256 + atomic_read_unchecked(&dev->bytes_sent));
41257 }
41258
41259 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41260 @@ -1330,7 +1330,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41261 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41262 struct dlfb_data *dev = fb_info->par;
41263 return snprintf(buf, PAGE_SIZE, "%u\n",
41264 - atomic_read(&dev->cpu_kcycles_used));
41265 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41266 }
41267
41268 static ssize_t edid_show(
41269 @@ -1387,10 +1387,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41270 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41271 struct dlfb_data *dev = fb_info->par;
41272
41273 - atomic_set(&dev->bytes_rendered, 0);
41274 - atomic_set(&dev->bytes_identical, 0);
41275 - atomic_set(&dev->bytes_sent, 0);
41276 - atomic_set(&dev->cpu_kcycles_used, 0);
41277 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41278 + atomic_set_unchecked(&dev->bytes_identical, 0);
41279 + atomic_set_unchecked(&dev->bytes_sent, 0);
41280 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41281
41282 return count;
41283 }
41284 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41285 index 7f8472c..9842e87 100644
41286 --- a/drivers/video/uvesafb.c
41287 +++ b/drivers/video/uvesafb.c
41288 @@ -19,6 +19,7 @@
41289 #include <linux/io.h>
41290 #include <linux/mutex.h>
41291 #include <linux/slab.h>
41292 +#include <linux/moduleloader.h>
41293 #include <video/edid.h>
41294 #include <video/uvesafb.h>
41295 #ifdef CONFIG_X86
41296 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41297 NULL,
41298 };
41299
41300 - return call_usermodehelper(v86d_path, argv, envp, 1);
41301 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41302 }
41303
41304 /*
41305 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41306 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41307 par->pmi_setpal = par->ypan = 0;
41308 } else {
41309 +
41310 +#ifdef CONFIG_PAX_KERNEXEC
41311 +#ifdef CONFIG_MODULES
41312 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41313 +#endif
41314 + if (!par->pmi_code) {
41315 + par->pmi_setpal = par->ypan = 0;
41316 + return 0;
41317 + }
41318 +#endif
41319 +
41320 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41321 + task->t.regs.edi);
41322 +
41323 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41324 + pax_open_kernel();
41325 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41326 + pax_close_kernel();
41327 +
41328 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41329 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41330 +#else
41331 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41332 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41333 +#endif
41334 +
41335 printk(KERN_INFO "uvesafb: protected mode interface info at "
41336 "%04x:%04x\n",
41337 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41338 @@ -1821,6 +1844,11 @@ out:
41339 if (par->vbe_modes)
41340 kfree(par->vbe_modes);
41341
41342 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41343 + if (par->pmi_code)
41344 + module_free_exec(NULL, par->pmi_code);
41345 +#endif
41346 +
41347 framebuffer_release(info);
41348 return err;
41349 }
41350 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
41351 kfree(par->vbe_state_orig);
41352 if (par->vbe_state_saved)
41353 kfree(par->vbe_state_saved);
41354 +
41355 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41356 + if (par->pmi_code)
41357 + module_free_exec(NULL, par->pmi_code);
41358 +#endif
41359 +
41360 }
41361
41362 framebuffer_release(info);
41363 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41364 index 501b340..86bd4cf 100644
41365 --- a/drivers/video/vesafb.c
41366 +++ b/drivers/video/vesafb.c
41367 @@ -9,6 +9,7 @@
41368 */
41369
41370 #include <linux/module.h>
41371 +#include <linux/moduleloader.h>
41372 #include <linux/kernel.h>
41373 #include <linux/errno.h>
41374 #include <linux/string.h>
41375 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41376 static int vram_total __initdata; /* Set total amount of memory */
41377 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41378 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41379 -static void (*pmi_start)(void) __read_mostly;
41380 -static void (*pmi_pal) (void) __read_mostly;
41381 +static void (*pmi_start)(void) __read_only;
41382 +static void (*pmi_pal) (void) __read_only;
41383 static int depth __read_mostly;
41384 static int vga_compat __read_mostly;
41385 /* --------------------------------------------------------------------- */
41386 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41387 unsigned int size_vmode;
41388 unsigned int size_remap;
41389 unsigned int size_total;
41390 + void *pmi_code = NULL;
41391
41392 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41393 return -ENODEV;
41394 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41395 size_remap = size_total;
41396 vesafb_fix.smem_len = size_remap;
41397
41398 -#ifndef __i386__
41399 - screen_info.vesapm_seg = 0;
41400 -#endif
41401 -
41402 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41403 printk(KERN_WARNING
41404 "vesafb: cannot reserve video memory at 0x%lx\n",
41405 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41406 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41407 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41408
41409 +#ifdef __i386__
41410 +
41411 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41412 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41413 + if (!pmi_code)
41414 +#elif !defined(CONFIG_PAX_KERNEXEC)
41415 + if (0)
41416 +#endif
41417 +
41418 +#endif
41419 + screen_info.vesapm_seg = 0;
41420 +
41421 if (screen_info.vesapm_seg) {
41422 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41423 - screen_info.vesapm_seg,screen_info.vesapm_off);
41424 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41425 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41426 }
41427
41428 if (screen_info.vesapm_seg < 0xc000)
41429 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41430
41431 if (ypan || pmi_setpal) {
41432 unsigned short *pmi_base;
41433 +
41434 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41435 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41436 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41437 +
41438 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41439 + pax_open_kernel();
41440 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41441 +#else
41442 + pmi_code = pmi_base;
41443 +#endif
41444 +
41445 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41446 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41447 +
41448 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41449 + pmi_start = ktva_ktla(pmi_start);
41450 + pmi_pal = ktva_ktla(pmi_pal);
41451 + pax_close_kernel();
41452 +#endif
41453 +
41454 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41455 if (pmi_base[3]) {
41456 printk(KERN_INFO "vesafb: pmi: ports = ");
41457 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41458 info->node, info->fix.id);
41459 return 0;
41460 err:
41461 +
41462 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41463 + module_free_exec(NULL, pmi_code);
41464 +#endif
41465 +
41466 if (info->screen_base)
41467 iounmap(info->screen_base);
41468 framebuffer_release(info);
41469 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41470 index 88714ae..16c2e11 100644
41471 --- a/drivers/video/via/via_clock.h
41472 +++ b/drivers/video/via/via_clock.h
41473 @@ -56,7 +56,7 @@ struct via_clock {
41474
41475 void (*set_engine_pll_state)(u8 state);
41476 void (*set_engine_pll)(struct via_pll_config config);
41477 -};
41478 +} __no_const;
41479
41480
41481 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41482 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
41483 index e058ace..2424d93 100644
41484 --- a/drivers/virtio/virtio_balloon.c
41485 +++ b/drivers/virtio/virtio_balloon.c
41486 @@ -174,6 +174,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
41487 struct sysinfo i;
41488 int idx = 0;
41489
41490 + pax_track_stack();
41491 +
41492 all_vm_events(events);
41493 si_meminfo(&i);
41494
41495 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41496 index e56c934..fc22f4b 100644
41497 --- a/drivers/xen/xen-pciback/conf_space.h
41498 +++ b/drivers/xen/xen-pciback/conf_space.h
41499 @@ -44,15 +44,15 @@ struct config_field {
41500 struct {
41501 conf_dword_write write;
41502 conf_dword_read read;
41503 - } dw;
41504 + } __no_const dw;
41505 struct {
41506 conf_word_write write;
41507 conf_word_read read;
41508 - } w;
41509 + } __no_const w;
41510 struct {
41511 conf_byte_write write;
41512 conf_byte_read read;
41513 - } b;
41514 + } __no_const b;
41515 } u;
41516 struct list_head list;
41517 };
41518 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41519 index e3c03db..93b0172 100644
41520 --- a/fs/9p/vfs_inode.c
41521 +++ b/fs/9p/vfs_inode.c
41522 @@ -1288,7 +1288,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41523 void
41524 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41525 {
41526 - char *s = nd_get_link(nd);
41527 + const char *s = nd_get_link(nd);
41528
41529 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
41530 IS_ERR(s) ? "<error>" : s);
41531 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41532 index 79e2ca7..5828ad1 100644
41533 --- a/fs/Kconfig.binfmt
41534 +++ b/fs/Kconfig.binfmt
41535 @@ -86,7 +86,7 @@ config HAVE_AOUT
41536
41537 config BINFMT_AOUT
41538 tristate "Kernel support for a.out and ECOFF binaries"
41539 - depends on HAVE_AOUT
41540 + depends on HAVE_AOUT && BROKEN
41541 ---help---
41542 A.out (Assembler.OUTput) is a set of formats for libraries and
41543 executables used in the earliest versions of UNIX. Linux used
41544 diff --git a/fs/aio.c b/fs/aio.c
41545 index e29ec48..f083e5e 100644
41546 --- a/fs/aio.c
41547 +++ b/fs/aio.c
41548 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41549 size += sizeof(struct io_event) * nr_events;
41550 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41551
41552 - if (nr_pages < 0)
41553 + if (nr_pages <= 0)
41554 return -EINVAL;
41555
41556 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41557 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ctx,
41558 struct aio_timeout to;
41559 int retry = 0;
41560
41561 + pax_track_stack();
41562 +
41563 /* needed to zero any padding within an entry (there shouldn't be
41564 * any, but C is fun!
41565 */
41566 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41567 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41568 {
41569 ssize_t ret;
41570 + struct iovec iovstack;
41571
41572 #ifdef CONFIG_COMPAT
41573 if (compat)
41574 ret = compat_rw_copy_check_uvector(type,
41575 (struct compat_iovec __user *)kiocb->ki_buf,
41576 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41577 + kiocb->ki_nbytes, 1, &iovstack,
41578 &kiocb->ki_iovec);
41579 else
41580 #endif
41581 ret = rw_copy_check_uvector(type,
41582 (struct iovec __user *)kiocb->ki_buf,
41583 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41584 + kiocb->ki_nbytes, 1, &iovstack,
41585 &kiocb->ki_iovec);
41586 if (ret < 0)
41587 goto out;
41588
41589 + if (kiocb->ki_iovec == &iovstack) {
41590 + kiocb->ki_inline_vec = iovstack;
41591 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41592 + }
41593 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41594 kiocb->ki_cur_seg = 0;
41595 /* ki_nbytes/left now reflect bytes instead of segs */
41596 diff --git a/fs/attr.c b/fs/attr.c
41597 index 538e279..046cc6d 100644
41598 --- a/fs/attr.c
41599 +++ b/fs/attr.c
41600 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41601 unsigned long limit;
41602
41603 limit = rlimit(RLIMIT_FSIZE);
41604 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41605 if (limit != RLIM_INFINITY && offset > limit)
41606 goto out_sig;
41607 if (offset > inode->i_sb->s_maxbytes)
41608 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41609 index e1fbdee..cd5ea56 100644
41610 --- a/fs/autofs4/waitq.c
41611 +++ b/fs/autofs4/waitq.c
41612 @@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
41613 {
41614 unsigned long sigpipe, flags;
41615 mm_segment_t fs;
41616 - const char *data = (const char *)addr;
41617 + const char __user *data = (const char __force_user *)addr;
41618 ssize_t wr = 0;
41619
41620 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
41621 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41622 index 720d885..012e7f0 100644
41623 --- a/fs/befs/linuxvfs.c
41624 +++ b/fs/befs/linuxvfs.c
41625 @@ -503,7 +503,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41626 {
41627 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41628 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41629 - char *link = nd_get_link(nd);
41630 + const char *link = nd_get_link(nd);
41631 if (!IS_ERR(link))
41632 kfree(link);
41633 }
41634 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41635 index a6395bd..a5b24c4 100644
41636 --- a/fs/binfmt_aout.c
41637 +++ b/fs/binfmt_aout.c
41638 @@ -16,6 +16,7 @@
41639 #include <linux/string.h>
41640 #include <linux/fs.h>
41641 #include <linux/file.h>
41642 +#include <linux/security.h>
41643 #include <linux/stat.h>
41644 #include <linux/fcntl.h>
41645 #include <linux/ptrace.h>
41646 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41647 #endif
41648 # define START_STACK(u) ((void __user *)u.start_stack)
41649
41650 + memset(&dump, 0, sizeof(dump));
41651 +
41652 fs = get_fs();
41653 set_fs(KERNEL_DS);
41654 has_dumped = 1;
41655 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41656
41657 /* If the size of the dump file exceeds the rlimit, then see what would happen
41658 if we wrote the stack, but not the data area. */
41659 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41660 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41661 dump.u_dsize = 0;
41662
41663 /* Make sure we have enough room to write the stack and data areas. */
41664 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41665 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41666 dump.u_ssize = 0;
41667
41668 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41669 rlim = rlimit(RLIMIT_DATA);
41670 if (rlim >= RLIM_INFINITY)
41671 rlim = ~0;
41672 +
41673 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41674 if (ex.a_data + ex.a_bss > rlim)
41675 return -ENOMEM;
41676
41677 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41678 install_exec_creds(bprm);
41679 current->flags &= ~PF_FORKNOEXEC;
41680
41681 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41682 + current->mm->pax_flags = 0UL;
41683 +#endif
41684 +
41685 +#ifdef CONFIG_PAX_PAGEEXEC
41686 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41687 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41688 +
41689 +#ifdef CONFIG_PAX_EMUTRAMP
41690 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41691 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41692 +#endif
41693 +
41694 +#ifdef CONFIG_PAX_MPROTECT
41695 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41696 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41697 +#endif
41698 +
41699 + }
41700 +#endif
41701 +
41702 if (N_MAGIC(ex) == OMAGIC) {
41703 unsigned long text_addr, map_size;
41704 loff_t pos;
41705 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41706
41707 down_write(&current->mm->mmap_sem);
41708 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41709 - PROT_READ | PROT_WRITE | PROT_EXEC,
41710 + PROT_READ | PROT_WRITE,
41711 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41712 fd_offset + ex.a_text);
41713 up_write(&current->mm->mmap_sem);
41714 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41715 index 21ac5ee..171b1d0 100644
41716 --- a/fs/binfmt_elf.c
41717 +++ b/fs/binfmt_elf.c
41718 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41719 #define elf_core_dump NULL
41720 #endif
41721
41722 +#ifdef CONFIG_PAX_MPROTECT
41723 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41724 +#endif
41725 +
41726 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41727 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41728 #else
41729 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format = {
41730 .load_binary = load_elf_binary,
41731 .load_shlib = load_elf_library,
41732 .core_dump = elf_core_dump,
41733 +
41734 +#ifdef CONFIG_PAX_MPROTECT
41735 + .handle_mprotect= elf_handle_mprotect,
41736 +#endif
41737 +
41738 .min_coredump = ELF_EXEC_PAGESIZE,
41739 };
41740
41741 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format = {
41742
41743 static int set_brk(unsigned long start, unsigned long end)
41744 {
41745 + unsigned long e = end;
41746 +
41747 start = ELF_PAGEALIGN(start);
41748 end = ELF_PAGEALIGN(end);
41749 if (end > start) {
41750 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41751 if (BAD_ADDR(addr))
41752 return addr;
41753 }
41754 - current->mm->start_brk = current->mm->brk = end;
41755 + current->mm->start_brk = current->mm->brk = e;
41756 return 0;
41757 }
41758
41759 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41760 elf_addr_t __user *u_rand_bytes;
41761 const char *k_platform = ELF_PLATFORM;
41762 const char *k_base_platform = ELF_BASE_PLATFORM;
41763 - unsigned char k_rand_bytes[16];
41764 + u32 k_rand_bytes[4];
41765 int items;
41766 elf_addr_t *elf_info;
41767 int ei_index = 0;
41768 const struct cred *cred = current_cred();
41769 struct vm_area_struct *vma;
41770 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41771 +
41772 + pax_track_stack();
41773
41774 /*
41775 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41776 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41777 * Generate 16 random bytes for userspace PRNG seeding.
41778 */
41779 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41780 - u_rand_bytes = (elf_addr_t __user *)
41781 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41782 + srandom32(k_rand_bytes[0] ^ random32());
41783 + srandom32(k_rand_bytes[1] ^ random32());
41784 + srandom32(k_rand_bytes[2] ^ random32());
41785 + srandom32(k_rand_bytes[3] ^ random32());
41786 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41787 + u_rand_bytes = (elf_addr_t __user *) p;
41788 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41789 return -EFAULT;
41790
41791 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41792 return -EFAULT;
41793 current->mm->env_end = p;
41794
41795 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41796 +
41797 /* Put the elf_info on the stack in the right place. */
41798 sp = (elf_addr_t __user *)envp + 1;
41799 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41800 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41801 return -EFAULT;
41802 return 0;
41803 }
41804 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41805 {
41806 struct elf_phdr *elf_phdata;
41807 struct elf_phdr *eppnt;
41808 - unsigned long load_addr = 0;
41809 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41810 int load_addr_set = 0;
41811 unsigned long last_bss = 0, elf_bss = 0;
41812 - unsigned long error = ~0UL;
41813 + unsigned long error = -EINVAL;
41814 unsigned long total_size;
41815 int retval, i, size;
41816
41817 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41818 goto out_close;
41819 }
41820
41821 +#ifdef CONFIG_PAX_SEGMEXEC
41822 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41823 + pax_task_size = SEGMEXEC_TASK_SIZE;
41824 +#endif
41825 +
41826 eppnt = elf_phdata;
41827 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41828 if (eppnt->p_type == PT_LOAD) {
41829 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41830 k = load_addr + eppnt->p_vaddr;
41831 if (BAD_ADDR(k) ||
41832 eppnt->p_filesz > eppnt->p_memsz ||
41833 - eppnt->p_memsz > TASK_SIZE ||
41834 - TASK_SIZE - eppnt->p_memsz < k) {
41835 + eppnt->p_memsz > pax_task_size ||
41836 + pax_task_size - eppnt->p_memsz < k) {
41837 error = -ENOMEM;
41838 goto out_close;
41839 }
41840 @@ -528,6 +553,193 @@ out:
41841 return error;
41842 }
41843
41844 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
41845 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
41846 +{
41847 + unsigned long pax_flags = 0UL;
41848 +
41849 +#ifdef CONFIG_PAX_PAGEEXEC
41850 + if (elf_phdata->p_flags & PF_PAGEEXEC)
41851 + pax_flags |= MF_PAX_PAGEEXEC;
41852 +#endif
41853 +
41854 +#ifdef CONFIG_PAX_SEGMEXEC
41855 + if (elf_phdata->p_flags & PF_SEGMEXEC)
41856 + pax_flags |= MF_PAX_SEGMEXEC;
41857 +#endif
41858 +
41859 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41860 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41861 + if ((__supported_pte_mask & _PAGE_NX))
41862 + pax_flags &= ~MF_PAX_SEGMEXEC;
41863 + else
41864 + pax_flags &= ~MF_PAX_PAGEEXEC;
41865 + }
41866 +#endif
41867 +
41868 +#ifdef CONFIG_PAX_EMUTRAMP
41869 + if (elf_phdata->p_flags & PF_EMUTRAMP)
41870 + pax_flags |= MF_PAX_EMUTRAMP;
41871 +#endif
41872 +
41873 +#ifdef CONFIG_PAX_MPROTECT
41874 + if (elf_phdata->p_flags & PF_MPROTECT)
41875 + pax_flags |= MF_PAX_MPROTECT;
41876 +#endif
41877 +
41878 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41879 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
41880 + pax_flags |= MF_PAX_RANDMMAP;
41881 +#endif
41882 +
41883 + return pax_flags;
41884 +}
41885 +#endif
41886 +
41887 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41888 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
41889 +{
41890 + unsigned long pax_flags = 0UL;
41891 +
41892 +#ifdef CONFIG_PAX_PAGEEXEC
41893 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
41894 + pax_flags |= MF_PAX_PAGEEXEC;
41895 +#endif
41896 +
41897 +#ifdef CONFIG_PAX_SEGMEXEC
41898 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
41899 + pax_flags |= MF_PAX_SEGMEXEC;
41900 +#endif
41901 +
41902 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41903 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41904 + if ((__supported_pte_mask & _PAGE_NX))
41905 + pax_flags &= ~MF_PAX_SEGMEXEC;
41906 + else
41907 + pax_flags &= ~MF_PAX_PAGEEXEC;
41908 + }
41909 +#endif
41910 +
41911 +#ifdef CONFIG_PAX_EMUTRAMP
41912 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
41913 + pax_flags |= MF_PAX_EMUTRAMP;
41914 +#endif
41915 +
41916 +#ifdef CONFIG_PAX_MPROTECT
41917 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
41918 + pax_flags |= MF_PAX_MPROTECT;
41919 +#endif
41920 +
41921 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
41922 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
41923 + pax_flags |= MF_PAX_RANDMMAP;
41924 +#endif
41925 +
41926 + return pax_flags;
41927 +}
41928 +#endif
41929 +
41930 +#ifdef CONFIG_PAX_EI_PAX
41931 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
41932 +{
41933 + unsigned long pax_flags = 0UL;
41934 +
41935 +#ifdef CONFIG_PAX_PAGEEXEC
41936 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
41937 + pax_flags |= MF_PAX_PAGEEXEC;
41938 +#endif
41939 +
41940 +#ifdef CONFIG_PAX_SEGMEXEC
41941 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
41942 + pax_flags |= MF_PAX_SEGMEXEC;
41943 +#endif
41944 +
41945 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
41946 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41947 + if ((__supported_pte_mask & _PAGE_NX))
41948 + pax_flags &= ~MF_PAX_SEGMEXEC;
41949 + else
41950 + pax_flags &= ~MF_PAX_PAGEEXEC;
41951 + }
41952 +#endif
41953 +
41954 +#ifdef CONFIG_PAX_EMUTRAMP
41955 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
41956 + pax_flags |= MF_PAX_EMUTRAMP;
41957 +#endif
41958 +
41959 +#ifdef CONFIG_PAX_MPROTECT
41960 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
41961 + pax_flags |= MF_PAX_MPROTECT;
41962 +#endif
41963 +
41964 +#ifdef CONFIG_PAX_ASLR
41965 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
41966 + pax_flags |= MF_PAX_RANDMMAP;
41967 +#endif
41968 +
41969 + return pax_flags;
41970 +}
41971 +#endif
41972 +
41973 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
41974 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
41975 +{
41976 + unsigned long pax_flags = 0UL;
41977 +
41978 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41979 + unsigned long i;
41980 + int found_flags = 0;
41981 +#endif
41982 +
41983 +#ifdef CONFIG_PAX_EI_PAX
41984 + pax_flags = pax_parse_ei_pax(elf_ex);
41985 +#endif
41986 +
41987 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
41988 + for (i = 0UL; i < elf_ex->e_phnum; i++)
41989 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
41990 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
41991 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
41992 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
41993 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
41994 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
41995 + return -EINVAL;
41996 +
41997 +#ifdef CONFIG_PAX_SOFTMODE
41998 + if (pax_softmode)
41999 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
42000 + else
42001 +#endif
42002 +
42003 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
42004 + found_flags = 1;
42005 + break;
42006 + }
42007 +#endif
42008 +
42009 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
42010 + if (found_flags == 0) {
42011 + struct elf_phdr phdr;
42012 + memset(&phdr, 0, sizeof(phdr));
42013 + phdr.p_flags = PF_NOEMUTRAMP;
42014 +#ifdef CONFIG_PAX_SOFTMODE
42015 + if (pax_softmode)
42016 + pax_flags = pax_parse_softmode(&phdr);
42017 + else
42018 +#endif
42019 + pax_flags = pax_parse_hardmode(&phdr);
42020 + }
42021 +#endif
42022 +
42023 + if (0 > pax_check_flags(&pax_flags))
42024 + return -EINVAL;
42025 +
42026 + current->mm->pax_flags = pax_flags;
42027 + return 0;
42028 +}
42029 +#endif
42030 +
42031 /*
42032 * These are the functions used to load ELF style executables and shared
42033 * libraries. There is no binary dependent code anywhere else.
42034 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42035 {
42036 unsigned int random_variable = 0;
42037
42038 +#ifdef CONFIG_PAX_RANDUSTACK
42039 + if (randomize_va_space)
42040 + return stack_top - current->mm->delta_stack;
42041 +#endif
42042 +
42043 if ((current->flags & PF_RANDOMIZE) &&
42044 !(current->personality & ADDR_NO_RANDOMIZE)) {
42045 random_variable = get_random_int() & STACK_RND_MASK;
42046 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42047 unsigned long load_addr = 0, load_bias = 0;
42048 int load_addr_set = 0;
42049 char * elf_interpreter = NULL;
42050 - unsigned long error;
42051 + unsigned long error = 0;
42052 struct elf_phdr *elf_ppnt, *elf_phdata;
42053 unsigned long elf_bss, elf_brk;
42054 int retval, i;
42055 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42056 unsigned long start_code, end_code, start_data, end_data;
42057 unsigned long reloc_func_desc __maybe_unused = 0;
42058 int executable_stack = EXSTACK_DEFAULT;
42059 - unsigned long def_flags = 0;
42060 struct {
42061 struct elfhdr elf_ex;
42062 struct elfhdr interp_elf_ex;
42063 } *loc;
42064 + unsigned long pax_task_size = TASK_SIZE;
42065
42066 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42067 if (!loc) {
42068 @@ -713,11 +930,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42069
42070 /* OK, This is the point of no return */
42071 current->flags &= ~PF_FORKNOEXEC;
42072 - current->mm->def_flags = def_flags;
42073 +
42074 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42075 + current->mm->pax_flags = 0UL;
42076 +#endif
42077 +
42078 +#ifdef CONFIG_PAX_DLRESOLVE
42079 + current->mm->call_dl_resolve = 0UL;
42080 +#endif
42081 +
42082 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42083 + current->mm->call_syscall = 0UL;
42084 +#endif
42085 +
42086 +#ifdef CONFIG_PAX_ASLR
42087 + current->mm->delta_mmap = 0UL;
42088 + current->mm->delta_stack = 0UL;
42089 +#endif
42090 +
42091 + current->mm->def_flags = 0;
42092 +
42093 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
42094 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
42095 + send_sig(SIGKILL, current, 0);
42096 + goto out_free_dentry;
42097 + }
42098 +#endif
42099 +
42100 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42101 + pax_set_initial_flags(bprm);
42102 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42103 + if (pax_set_initial_flags_func)
42104 + (pax_set_initial_flags_func)(bprm);
42105 +#endif
42106 +
42107 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42108 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42109 + current->mm->context.user_cs_limit = PAGE_SIZE;
42110 + current->mm->def_flags |= VM_PAGEEXEC;
42111 + }
42112 +#endif
42113 +
42114 +#ifdef CONFIG_PAX_SEGMEXEC
42115 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42116 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42117 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42118 + pax_task_size = SEGMEXEC_TASK_SIZE;
42119 + current->mm->def_flags |= VM_NOHUGEPAGE;
42120 + }
42121 +#endif
42122 +
42123 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42124 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42125 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42126 + put_cpu();
42127 + }
42128 +#endif
42129
42130 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42131 may depend on the personality. */
42132 SET_PERSONALITY(loc->elf_ex);
42133 +
42134 +#ifdef CONFIG_PAX_ASLR
42135 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42136 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42137 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42138 + }
42139 +#endif
42140 +
42141 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42142 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42143 + executable_stack = EXSTACK_DISABLE_X;
42144 + current->personality &= ~READ_IMPLIES_EXEC;
42145 + } else
42146 +#endif
42147 +
42148 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42149 current->personality |= READ_IMPLIES_EXEC;
42150
42151 @@ -808,6 +1095,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42152 #else
42153 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42154 #endif
42155 +
42156 +#ifdef CONFIG_PAX_RANDMMAP
42157 + /* PaX: randomize base address at the default exe base if requested */
42158 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42159 +#ifdef CONFIG_SPARC64
42160 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42161 +#else
42162 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42163 +#endif
42164 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42165 + elf_flags |= MAP_FIXED;
42166 + }
42167 +#endif
42168 +
42169 }
42170
42171 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42172 @@ -840,9 +1141,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42173 * allowed task size. Note that p_filesz must always be
42174 * <= p_memsz so it is only necessary to check p_memsz.
42175 */
42176 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42177 - elf_ppnt->p_memsz > TASK_SIZE ||
42178 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42179 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42180 + elf_ppnt->p_memsz > pax_task_size ||
42181 + pax_task_size - elf_ppnt->p_memsz < k) {
42182 /* set_brk can never work. Avoid overflows. */
42183 send_sig(SIGKILL, current, 0);
42184 retval = -EINVAL;
42185 @@ -870,6 +1171,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42186 start_data += load_bias;
42187 end_data += load_bias;
42188
42189 +#ifdef CONFIG_PAX_RANDMMAP
42190 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
42191 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
42192 +#endif
42193 +
42194 /* Calling set_brk effectively mmaps the pages that we need
42195 * for the bss and break sections. We must do this before
42196 * mapping in the interpreter, to make sure it doesn't wind
42197 @@ -881,9 +1187,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42198 goto out_free_dentry;
42199 }
42200 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42201 - send_sig(SIGSEGV, current, 0);
42202 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42203 - goto out_free_dentry;
42204 + /*
42205 + * This bss-zeroing can fail if the ELF
42206 + * file specifies odd protections. So
42207 + * we don't check the return value
42208 + */
42209 }
42210
42211 if (elf_interpreter) {
42212 @@ -1098,7 +1406,7 @@ out:
42213 * Decide what to dump of a segment, part, all or none.
42214 */
42215 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42216 - unsigned long mm_flags)
42217 + unsigned long mm_flags, long signr)
42218 {
42219 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42220
42221 @@ -1132,7 +1440,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42222 if (vma->vm_file == NULL)
42223 return 0;
42224
42225 - if (FILTER(MAPPED_PRIVATE))
42226 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42227 goto whole;
42228
42229 /*
42230 @@ -1354,9 +1662,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42231 {
42232 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42233 int i = 0;
42234 - do
42235 + do {
42236 i += 2;
42237 - while (auxv[i - 2] != AT_NULL);
42238 + } while (auxv[i - 2] != AT_NULL);
42239 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42240 }
42241
42242 @@ -1862,14 +2170,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42243 }
42244
42245 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42246 - unsigned long mm_flags)
42247 + struct coredump_params *cprm)
42248 {
42249 struct vm_area_struct *vma;
42250 size_t size = 0;
42251
42252 for (vma = first_vma(current, gate_vma); vma != NULL;
42253 vma = next_vma(vma, gate_vma))
42254 - size += vma_dump_size(vma, mm_flags);
42255 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42256 return size;
42257 }
42258
42259 @@ -1963,7 +2271,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42260
42261 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42262
42263 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42264 + offset += elf_core_vma_data_size(gate_vma, cprm);
42265 offset += elf_core_extra_data_size();
42266 e_shoff = offset;
42267
42268 @@ -1977,10 +2285,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42269 offset = dataoff;
42270
42271 size += sizeof(*elf);
42272 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42273 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42274 goto end_coredump;
42275
42276 size += sizeof(*phdr4note);
42277 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42278 if (size > cprm->limit
42279 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42280 goto end_coredump;
42281 @@ -1994,7 +2304,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42282 phdr.p_offset = offset;
42283 phdr.p_vaddr = vma->vm_start;
42284 phdr.p_paddr = 0;
42285 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42286 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42287 phdr.p_memsz = vma->vm_end - vma->vm_start;
42288 offset += phdr.p_filesz;
42289 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42290 @@ -2005,6 +2315,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42291 phdr.p_align = ELF_EXEC_PAGESIZE;
42292
42293 size += sizeof(phdr);
42294 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42295 if (size > cprm->limit
42296 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42297 goto end_coredump;
42298 @@ -2029,7 +2340,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42299 unsigned long addr;
42300 unsigned long end;
42301
42302 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42303 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42304
42305 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42306 struct page *page;
42307 @@ -2038,6 +2349,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42308 page = get_dump_page(addr);
42309 if (page) {
42310 void *kaddr = kmap(page);
42311 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42312 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42313 !dump_write(cprm->file, kaddr,
42314 PAGE_SIZE);
42315 @@ -2055,6 +2367,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42316
42317 if (e_phnum == PN_XNUM) {
42318 size += sizeof(*shdr4extnum);
42319 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42320 if (size > cprm->limit
42321 || !dump_write(cprm->file, shdr4extnum,
42322 sizeof(*shdr4extnum)))
42323 @@ -2075,6 +2388,97 @@ out:
42324
42325 #endif /* CONFIG_ELF_CORE */
42326
42327 +#ifdef CONFIG_PAX_MPROTECT
42328 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42329 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42330 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42331 + *
42332 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42333 + * basis because we want to allow the common case and not the special ones.
42334 + */
42335 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42336 +{
42337 + struct elfhdr elf_h;
42338 + struct elf_phdr elf_p;
42339 + unsigned long i;
42340 + unsigned long oldflags;
42341 + bool is_textrel_rw, is_textrel_rx, is_relro;
42342 +
42343 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42344 + return;
42345 +
42346 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42347 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42348 +
42349 +#ifdef CONFIG_PAX_ELFRELOCS
42350 + /* possible TEXTREL */
42351 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42352 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42353 +#else
42354 + is_textrel_rw = false;
42355 + is_textrel_rx = false;
42356 +#endif
42357 +
42358 + /* possible RELRO */
42359 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42360 +
42361 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42362 + return;
42363 +
42364 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42365 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42366 +
42367 +#ifdef CONFIG_PAX_ETEXECRELOCS
42368 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42369 +#else
42370 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42371 +#endif
42372 +
42373 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42374 + !elf_check_arch(&elf_h) ||
42375 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42376 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42377 + return;
42378 +
42379 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42380 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42381 + return;
42382 + switch (elf_p.p_type) {
42383 + case PT_DYNAMIC:
42384 + if (!is_textrel_rw && !is_textrel_rx)
42385 + continue;
42386 + i = 0UL;
42387 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42388 + elf_dyn dyn;
42389 +
42390 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42391 + return;
42392 + if (dyn.d_tag == DT_NULL)
42393 + return;
42394 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42395 + gr_log_textrel(vma);
42396 + if (is_textrel_rw)
42397 + vma->vm_flags |= VM_MAYWRITE;
42398 + else
42399 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42400 + vma->vm_flags &= ~VM_MAYWRITE;
42401 + return;
42402 + }
42403 + i++;
42404 + }
42405 + return;
42406 +
42407 + case PT_GNU_RELRO:
42408 + if (!is_relro)
42409 + continue;
42410 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42411 + vma->vm_flags &= ~VM_MAYWRITE;
42412 + return;
42413 + }
42414 + }
42415 +}
42416 +#endif
42417 +
42418 static int __init init_elf_binfmt(void)
42419 {
42420 return register_binfmt(&elf_format);
42421 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42422 index 1bffbe0..c8c283e 100644
42423 --- a/fs/binfmt_flat.c
42424 +++ b/fs/binfmt_flat.c
42425 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42426 realdatastart = (unsigned long) -ENOMEM;
42427 printk("Unable to allocate RAM for process data, errno %d\n",
42428 (int)-realdatastart);
42429 + down_write(&current->mm->mmap_sem);
42430 do_munmap(current->mm, textpos, text_len);
42431 + up_write(&current->mm->mmap_sem);
42432 ret = realdatastart;
42433 goto err;
42434 }
42435 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42436 }
42437 if (IS_ERR_VALUE(result)) {
42438 printk("Unable to read data+bss, errno %d\n", (int)-result);
42439 + down_write(&current->mm->mmap_sem);
42440 do_munmap(current->mm, textpos, text_len);
42441 do_munmap(current->mm, realdatastart, len);
42442 + up_write(&current->mm->mmap_sem);
42443 ret = result;
42444 goto err;
42445 }
42446 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42447 }
42448 if (IS_ERR_VALUE(result)) {
42449 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42450 + down_write(&current->mm->mmap_sem);
42451 do_munmap(current->mm, textpos, text_len + data_len + extra +
42452 MAX_SHARED_LIBS * sizeof(unsigned long));
42453 + up_write(&current->mm->mmap_sem);
42454 ret = result;
42455 goto err;
42456 }
42457 diff --git a/fs/bio.c b/fs/bio.c
42458 index 9bfade8..782f3b9 100644
42459 --- a/fs/bio.c
42460 +++ b/fs/bio.c
42461 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42462 const int read = bio_data_dir(bio) == READ;
42463 struct bio_map_data *bmd = bio->bi_private;
42464 int i;
42465 - char *p = bmd->sgvecs[0].iov_base;
42466 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42467
42468 __bio_for_each_segment(bvec, bio, i, 0) {
42469 char *addr = page_address(bvec->bv_page);
42470 diff --git a/fs/block_dev.c b/fs/block_dev.c
42471 index 1c44b8d..e2507b4 100644
42472 --- a/fs/block_dev.c
42473 +++ b/fs/block_dev.c
42474 @@ -681,7 +681,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42475 else if (bdev->bd_contains == bdev)
42476 return true; /* is a whole device which isn't held */
42477
42478 - else if (whole->bd_holder == bd_may_claim)
42479 + else if (whole->bd_holder == (void *)bd_may_claim)
42480 return true; /* is a partition of a device that is being partitioned */
42481 else if (whole->bd_holder != NULL)
42482 return false; /* is a partition of a held device */
42483 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42484 index 011cab3..9ace713 100644
42485 --- a/fs/btrfs/ctree.c
42486 +++ b/fs/btrfs/ctree.c
42487 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42488 free_extent_buffer(buf);
42489 add_root_to_dirty_list(root);
42490 } else {
42491 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42492 - parent_start = parent->start;
42493 - else
42494 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42495 + if (parent)
42496 + parent_start = parent->start;
42497 + else
42498 + parent_start = 0;
42499 + } else
42500 parent_start = 0;
42501
42502 WARN_ON(trans->transid != btrfs_header_generation(parent));
42503 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42504 index b2d004a..6bb543d 100644
42505 --- a/fs/btrfs/inode.c
42506 +++ b/fs/btrfs/inode.c
42507 @@ -6922,7 +6922,7 @@ fail:
42508 return -ENOMEM;
42509 }
42510
42511 -static int btrfs_getattr(struct vfsmount *mnt,
42512 +int btrfs_getattr(struct vfsmount *mnt,
42513 struct dentry *dentry, struct kstat *stat)
42514 {
42515 struct inode *inode = dentry->d_inode;
42516 @@ -6934,6 +6934,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42517 return 0;
42518 }
42519
42520 +EXPORT_SYMBOL(btrfs_getattr);
42521 +
42522 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42523 +{
42524 + return BTRFS_I(inode)->root->anon_dev;
42525 +}
42526 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42527 +
42528 /*
42529 * If a file is moved, it will inherit the cow and compression flags of the new
42530 * directory.
42531 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42532 index dae5dfe..6aa01b1 100644
42533 --- a/fs/btrfs/ioctl.c
42534 +++ b/fs/btrfs/ioctl.c
42535 @@ -2704,9 +2704,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42536 for (i = 0; i < num_types; i++) {
42537 struct btrfs_space_info *tmp;
42538
42539 + /* Don't copy in more than we allocated */
42540 if (!slot_count)
42541 break;
42542
42543 + slot_count--;
42544 +
42545 info = NULL;
42546 rcu_read_lock();
42547 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42548 @@ -2728,15 +2731,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42549 memcpy(dest, &space, sizeof(space));
42550 dest++;
42551 space_args.total_spaces++;
42552 - slot_count--;
42553 }
42554 - if (!slot_count)
42555 - break;
42556 }
42557 up_read(&info->groups_sem);
42558 }
42559
42560 - user_dest = (struct btrfs_ioctl_space_info *)
42561 + user_dest = (struct btrfs_ioctl_space_info __user *)
42562 (arg + sizeof(struct btrfs_ioctl_space_args));
42563
42564 if (copy_to_user(user_dest, dest_orig, alloc_size))
42565 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42566 index 59bb176..be9977d 100644
42567 --- a/fs/btrfs/relocation.c
42568 +++ b/fs/btrfs/relocation.c
42569 @@ -1242,7 +1242,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42570 }
42571 spin_unlock(&rc->reloc_root_tree.lock);
42572
42573 - BUG_ON((struct btrfs_root *)node->data != root);
42574 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42575
42576 if (!del) {
42577 spin_lock(&rc->reloc_root_tree.lock);
42578 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42579 index 622f469..e8d2d55 100644
42580 --- a/fs/cachefiles/bind.c
42581 +++ b/fs/cachefiles/bind.c
42582 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42583 args);
42584
42585 /* start by checking things over */
42586 - ASSERT(cache->fstop_percent >= 0 &&
42587 - cache->fstop_percent < cache->fcull_percent &&
42588 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42589 cache->fcull_percent < cache->frun_percent &&
42590 cache->frun_percent < 100);
42591
42592 - ASSERT(cache->bstop_percent >= 0 &&
42593 - cache->bstop_percent < cache->bcull_percent &&
42594 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42595 cache->bcull_percent < cache->brun_percent &&
42596 cache->brun_percent < 100);
42597
42598 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42599 index 0a1467b..6a53245 100644
42600 --- a/fs/cachefiles/daemon.c
42601 +++ b/fs/cachefiles/daemon.c
42602 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42603 if (n > buflen)
42604 return -EMSGSIZE;
42605
42606 - if (copy_to_user(_buffer, buffer, n) != 0)
42607 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42608 return -EFAULT;
42609
42610 return n;
42611 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42612 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42613 return -EIO;
42614
42615 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42616 + if (datalen > PAGE_SIZE - 1)
42617 return -EOPNOTSUPP;
42618
42619 /* drag the command string into the kernel so we can parse it */
42620 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42621 if (args[0] != '%' || args[1] != '\0')
42622 return -EINVAL;
42623
42624 - if (fstop < 0 || fstop >= cache->fcull_percent)
42625 + if (fstop >= cache->fcull_percent)
42626 return cachefiles_daemon_range_error(cache, args);
42627
42628 cache->fstop_percent = fstop;
42629 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42630 if (args[0] != '%' || args[1] != '\0')
42631 return -EINVAL;
42632
42633 - if (bstop < 0 || bstop >= cache->bcull_percent)
42634 + if (bstop >= cache->bcull_percent)
42635 return cachefiles_daemon_range_error(cache, args);
42636
42637 cache->bstop_percent = bstop;
42638 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42639 index bd6bc1b..b627b53 100644
42640 --- a/fs/cachefiles/internal.h
42641 +++ b/fs/cachefiles/internal.h
42642 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42643 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42644 struct rb_root active_nodes; /* active nodes (can't be culled) */
42645 rwlock_t active_lock; /* lock for active_nodes */
42646 - atomic_t gravecounter; /* graveyard uniquifier */
42647 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42648 unsigned frun_percent; /* when to stop culling (% files) */
42649 unsigned fcull_percent; /* when to start culling (% files) */
42650 unsigned fstop_percent; /* when to stop allocating (% files) */
42651 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42652 * proc.c
42653 */
42654 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42655 -extern atomic_t cachefiles_lookup_histogram[HZ];
42656 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42657 -extern atomic_t cachefiles_create_histogram[HZ];
42658 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42659 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42660 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42661
42662 extern int __init cachefiles_proc_init(void);
42663 extern void cachefiles_proc_cleanup(void);
42664 static inline
42665 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42666 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42667 {
42668 unsigned long jif = jiffies - start_jif;
42669 if (jif >= HZ)
42670 jif = HZ - 1;
42671 - atomic_inc(&histogram[jif]);
42672 + atomic_inc_unchecked(&histogram[jif]);
42673 }
42674
42675 #else
42676 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
42677 index a0358c2..d6137f2 100644
42678 --- a/fs/cachefiles/namei.c
42679 +++ b/fs/cachefiles/namei.c
42680 @@ -318,7 +318,7 @@ try_again:
42681 /* first step is to make up a grave dentry in the graveyard */
42682 sprintf(nbuffer, "%08x%08x",
42683 (uint32_t) get_seconds(),
42684 - (uint32_t) atomic_inc_return(&cache->gravecounter));
42685 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
42686
42687 /* do the multiway lock magic */
42688 trap = lock_rename(cache->graveyard, dir);
42689 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
42690 index eccd339..4c1d995 100644
42691 --- a/fs/cachefiles/proc.c
42692 +++ b/fs/cachefiles/proc.c
42693 @@ -14,9 +14,9 @@
42694 #include <linux/seq_file.h>
42695 #include "internal.h"
42696
42697 -atomic_t cachefiles_lookup_histogram[HZ];
42698 -atomic_t cachefiles_mkdir_histogram[HZ];
42699 -atomic_t cachefiles_create_histogram[HZ];
42700 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42701 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42702 +atomic_unchecked_t cachefiles_create_histogram[HZ];
42703
42704 /*
42705 * display the latency histogram
42706 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
42707 return 0;
42708 default:
42709 index = (unsigned long) v - 3;
42710 - x = atomic_read(&cachefiles_lookup_histogram[index]);
42711 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
42712 - z = atomic_read(&cachefiles_create_histogram[index]);
42713 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
42714 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
42715 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
42716 if (x == 0 && y == 0 && z == 0)
42717 return 0;
42718
42719 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
42720 index 0e3c092..818480e 100644
42721 --- a/fs/cachefiles/rdwr.c
42722 +++ b/fs/cachefiles/rdwr.c
42723 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
42724 old_fs = get_fs();
42725 set_fs(KERNEL_DS);
42726 ret = file->f_op->write(
42727 - file, (const void __user *) data, len, &pos);
42728 + file, (const void __force_user *) data, len, &pos);
42729 set_fs(old_fs);
42730 kunmap(page);
42731 if (ret != len)
42732 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
42733 index 382abc9..bd89646 100644
42734 --- a/fs/ceph/dir.c
42735 +++ b/fs/ceph/dir.c
42736 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
42737 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
42738 struct ceph_mds_client *mdsc = fsc->mdsc;
42739 unsigned frag = fpos_frag(filp->f_pos);
42740 - int off = fpos_off(filp->f_pos);
42741 + unsigned int off = fpos_off(filp->f_pos);
42742 int err;
42743 u32 ftype;
42744 struct ceph_mds_reply_info_parsed *rinfo;
42745 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
42746 index 6d40656..bc1f825 100644
42747 --- a/fs/cifs/cifs_debug.c
42748 +++ b/fs/cifs/cifs_debug.c
42749 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42750
42751 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
42752 #ifdef CONFIG_CIFS_STATS2
42753 - atomic_set(&totBufAllocCount, 0);
42754 - atomic_set(&totSmBufAllocCount, 0);
42755 + atomic_set_unchecked(&totBufAllocCount, 0);
42756 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42757 #endif /* CONFIG_CIFS_STATS2 */
42758 spin_lock(&cifs_tcp_ses_lock);
42759 list_for_each(tmp1, &cifs_tcp_ses_list) {
42760 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
42761 tcon = list_entry(tmp3,
42762 struct cifs_tcon,
42763 tcon_list);
42764 - atomic_set(&tcon->num_smbs_sent, 0);
42765 - atomic_set(&tcon->num_writes, 0);
42766 - atomic_set(&tcon->num_reads, 0);
42767 - atomic_set(&tcon->num_oplock_brks, 0);
42768 - atomic_set(&tcon->num_opens, 0);
42769 - atomic_set(&tcon->num_posixopens, 0);
42770 - atomic_set(&tcon->num_posixmkdirs, 0);
42771 - atomic_set(&tcon->num_closes, 0);
42772 - atomic_set(&tcon->num_deletes, 0);
42773 - atomic_set(&tcon->num_mkdirs, 0);
42774 - atomic_set(&tcon->num_rmdirs, 0);
42775 - atomic_set(&tcon->num_renames, 0);
42776 - atomic_set(&tcon->num_t2renames, 0);
42777 - atomic_set(&tcon->num_ffirst, 0);
42778 - atomic_set(&tcon->num_fnext, 0);
42779 - atomic_set(&tcon->num_fclose, 0);
42780 - atomic_set(&tcon->num_hardlinks, 0);
42781 - atomic_set(&tcon->num_symlinks, 0);
42782 - atomic_set(&tcon->num_locks, 0);
42783 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
42784 + atomic_set_unchecked(&tcon->num_writes, 0);
42785 + atomic_set_unchecked(&tcon->num_reads, 0);
42786 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
42787 + atomic_set_unchecked(&tcon->num_opens, 0);
42788 + atomic_set_unchecked(&tcon->num_posixopens, 0);
42789 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
42790 + atomic_set_unchecked(&tcon->num_closes, 0);
42791 + atomic_set_unchecked(&tcon->num_deletes, 0);
42792 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
42793 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
42794 + atomic_set_unchecked(&tcon->num_renames, 0);
42795 + atomic_set_unchecked(&tcon->num_t2renames, 0);
42796 + atomic_set_unchecked(&tcon->num_ffirst, 0);
42797 + atomic_set_unchecked(&tcon->num_fnext, 0);
42798 + atomic_set_unchecked(&tcon->num_fclose, 0);
42799 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
42800 + atomic_set_unchecked(&tcon->num_symlinks, 0);
42801 + atomic_set_unchecked(&tcon->num_locks, 0);
42802 }
42803 }
42804 }
42805 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42806 smBufAllocCount.counter, cifs_min_small);
42807 #ifdef CONFIG_CIFS_STATS2
42808 seq_printf(m, "Total Large %d Small %d Allocations\n",
42809 - atomic_read(&totBufAllocCount),
42810 - atomic_read(&totSmBufAllocCount));
42811 + atomic_read_unchecked(&totBufAllocCount),
42812 + atomic_read_unchecked(&totSmBufAllocCount));
42813 #endif /* CONFIG_CIFS_STATS2 */
42814
42815 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
42816 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
42817 if (tcon->need_reconnect)
42818 seq_puts(m, "\tDISCONNECTED ");
42819 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
42820 - atomic_read(&tcon->num_smbs_sent),
42821 - atomic_read(&tcon->num_oplock_brks));
42822 + atomic_read_unchecked(&tcon->num_smbs_sent),
42823 + atomic_read_unchecked(&tcon->num_oplock_brks));
42824 seq_printf(m, "\nReads: %d Bytes: %lld",
42825 - atomic_read(&tcon->num_reads),
42826 + atomic_read_unchecked(&tcon->num_reads),
42827 (long long)(tcon->bytes_read));
42828 seq_printf(m, "\nWrites: %d Bytes: %lld",
42829 - atomic_read(&tcon->num_writes),
42830 + atomic_read_unchecked(&tcon->num_writes),
42831 (long long)(tcon->bytes_written));
42832 seq_printf(m, "\nFlushes: %d",
42833 - atomic_read(&tcon->num_flushes));
42834 + atomic_read_unchecked(&tcon->num_flushes));
42835 seq_printf(m, "\nLocks: %d HardLinks: %d "
42836 "Symlinks: %d",
42837 - atomic_read(&tcon->num_locks),
42838 - atomic_read(&tcon->num_hardlinks),
42839 - atomic_read(&tcon->num_symlinks));
42840 + atomic_read_unchecked(&tcon->num_locks),
42841 + atomic_read_unchecked(&tcon->num_hardlinks),
42842 + atomic_read_unchecked(&tcon->num_symlinks));
42843 seq_printf(m, "\nOpens: %d Closes: %d "
42844 "Deletes: %d",
42845 - atomic_read(&tcon->num_opens),
42846 - atomic_read(&tcon->num_closes),
42847 - atomic_read(&tcon->num_deletes));
42848 + atomic_read_unchecked(&tcon->num_opens),
42849 + atomic_read_unchecked(&tcon->num_closes),
42850 + atomic_read_unchecked(&tcon->num_deletes));
42851 seq_printf(m, "\nPosix Opens: %d "
42852 "Posix Mkdirs: %d",
42853 - atomic_read(&tcon->num_posixopens),
42854 - atomic_read(&tcon->num_posixmkdirs));
42855 + atomic_read_unchecked(&tcon->num_posixopens),
42856 + atomic_read_unchecked(&tcon->num_posixmkdirs));
42857 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
42858 - atomic_read(&tcon->num_mkdirs),
42859 - atomic_read(&tcon->num_rmdirs));
42860 + atomic_read_unchecked(&tcon->num_mkdirs),
42861 + atomic_read_unchecked(&tcon->num_rmdirs));
42862 seq_printf(m, "\nRenames: %d T2 Renames %d",
42863 - atomic_read(&tcon->num_renames),
42864 - atomic_read(&tcon->num_t2renames));
42865 + atomic_read_unchecked(&tcon->num_renames),
42866 + atomic_read_unchecked(&tcon->num_t2renames));
42867 seq_printf(m, "\nFindFirst: %d FNext %d "
42868 "FClose %d",
42869 - atomic_read(&tcon->num_ffirst),
42870 - atomic_read(&tcon->num_fnext),
42871 - atomic_read(&tcon->num_fclose));
42872 + atomic_read_unchecked(&tcon->num_ffirst),
42873 + atomic_read_unchecked(&tcon->num_fnext),
42874 + atomic_read_unchecked(&tcon->num_fclose));
42875 }
42876 }
42877 }
42878 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
42879 index 54b8f1e..f6a4c00 100644
42880 --- a/fs/cifs/cifsfs.c
42881 +++ b/fs/cifs/cifsfs.c
42882 @@ -981,7 +981,7 @@ cifs_init_request_bufs(void)
42883 cifs_req_cachep = kmem_cache_create("cifs_request",
42884 CIFSMaxBufSize +
42885 MAX_CIFS_HDR_SIZE, 0,
42886 - SLAB_HWCACHE_ALIGN, NULL);
42887 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
42888 if (cifs_req_cachep == NULL)
42889 return -ENOMEM;
42890
42891 @@ -1008,7 +1008,7 @@ cifs_init_request_bufs(void)
42892 efficient to alloc 1 per page off the slab compared to 17K (5page)
42893 alloc of large cifs buffers even when page debugging is on */
42894 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
42895 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
42896 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
42897 NULL);
42898 if (cifs_sm_req_cachep == NULL) {
42899 mempool_destroy(cifs_req_poolp);
42900 @@ -1093,8 +1093,8 @@ init_cifs(void)
42901 atomic_set(&bufAllocCount, 0);
42902 atomic_set(&smBufAllocCount, 0);
42903 #ifdef CONFIG_CIFS_STATS2
42904 - atomic_set(&totBufAllocCount, 0);
42905 - atomic_set(&totSmBufAllocCount, 0);
42906 + atomic_set_unchecked(&totBufAllocCount, 0);
42907 + atomic_set_unchecked(&totSmBufAllocCount, 0);
42908 #endif /* CONFIG_CIFS_STATS2 */
42909
42910 atomic_set(&midCount, 0);
42911 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
42912 index 95dad9d..fe7af1a 100644
42913 --- a/fs/cifs/cifsglob.h
42914 +++ b/fs/cifs/cifsglob.h
42915 @@ -381,28 +381,28 @@ struct cifs_tcon {
42916 __u16 Flags; /* optional support bits */
42917 enum statusEnum tidStatus;
42918 #ifdef CONFIG_CIFS_STATS
42919 - atomic_t num_smbs_sent;
42920 - atomic_t num_writes;
42921 - atomic_t num_reads;
42922 - atomic_t num_flushes;
42923 - atomic_t num_oplock_brks;
42924 - atomic_t num_opens;
42925 - atomic_t num_closes;
42926 - atomic_t num_deletes;
42927 - atomic_t num_mkdirs;
42928 - atomic_t num_posixopens;
42929 - atomic_t num_posixmkdirs;
42930 - atomic_t num_rmdirs;
42931 - atomic_t num_renames;
42932 - atomic_t num_t2renames;
42933 - atomic_t num_ffirst;
42934 - atomic_t num_fnext;
42935 - atomic_t num_fclose;
42936 - atomic_t num_hardlinks;
42937 - atomic_t num_symlinks;
42938 - atomic_t num_locks;
42939 - atomic_t num_acl_get;
42940 - atomic_t num_acl_set;
42941 + atomic_unchecked_t num_smbs_sent;
42942 + atomic_unchecked_t num_writes;
42943 + atomic_unchecked_t num_reads;
42944 + atomic_unchecked_t num_flushes;
42945 + atomic_unchecked_t num_oplock_brks;
42946 + atomic_unchecked_t num_opens;
42947 + atomic_unchecked_t num_closes;
42948 + atomic_unchecked_t num_deletes;
42949 + atomic_unchecked_t num_mkdirs;
42950 + atomic_unchecked_t num_posixopens;
42951 + atomic_unchecked_t num_posixmkdirs;
42952 + atomic_unchecked_t num_rmdirs;
42953 + atomic_unchecked_t num_renames;
42954 + atomic_unchecked_t num_t2renames;
42955 + atomic_unchecked_t num_ffirst;
42956 + atomic_unchecked_t num_fnext;
42957 + atomic_unchecked_t num_fclose;
42958 + atomic_unchecked_t num_hardlinks;
42959 + atomic_unchecked_t num_symlinks;
42960 + atomic_unchecked_t num_locks;
42961 + atomic_unchecked_t num_acl_get;
42962 + atomic_unchecked_t num_acl_set;
42963 #ifdef CONFIG_CIFS_STATS2
42964 unsigned long long time_writes;
42965 unsigned long long time_reads;
42966 @@ -613,7 +613,7 @@ convert_delimiter(char *path, char delim)
42967 }
42968
42969 #ifdef CONFIG_CIFS_STATS
42970 -#define cifs_stats_inc atomic_inc
42971 +#define cifs_stats_inc atomic_inc_unchecked
42972
42973 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
42974 unsigned int bytes)
42975 @@ -953,8 +953,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
42976 /* Various Debug counters */
42977 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
42978 #ifdef CONFIG_CIFS_STATS2
42979 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
42980 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
42981 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
42982 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
42983 #endif
42984 GLOBAL_EXTERN atomic_t smBufAllocCount;
42985 GLOBAL_EXTERN atomic_t midCount;
42986 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
42987 index db3f18c..1f5955e 100644
42988 --- a/fs/cifs/link.c
42989 +++ b/fs/cifs/link.c
42990 @@ -593,7 +593,7 @@ symlink_exit:
42991
42992 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
42993 {
42994 - char *p = nd_get_link(nd);
42995 + const char *p = nd_get_link(nd);
42996 if (!IS_ERR(p))
42997 kfree(p);
42998 }
42999 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43000 index 7c16933..c8212b5 100644
43001 --- a/fs/cifs/misc.c
43002 +++ b/fs/cifs/misc.c
43003 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43004 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43005 atomic_inc(&bufAllocCount);
43006 #ifdef CONFIG_CIFS_STATS2
43007 - atomic_inc(&totBufAllocCount);
43008 + atomic_inc_unchecked(&totBufAllocCount);
43009 #endif /* CONFIG_CIFS_STATS2 */
43010 }
43011
43012 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43013 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43014 atomic_inc(&smBufAllocCount);
43015 #ifdef CONFIG_CIFS_STATS2
43016 - atomic_inc(&totSmBufAllocCount);
43017 + atomic_inc_unchecked(&totSmBufAllocCount);
43018 #endif /* CONFIG_CIFS_STATS2 */
43019
43020 }
43021 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43022 index 6901578..d402eb5 100644
43023 --- a/fs/coda/cache.c
43024 +++ b/fs/coda/cache.c
43025 @@ -24,7 +24,7 @@
43026 #include "coda_linux.h"
43027 #include "coda_cache.h"
43028
43029 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43030 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43031
43032 /* replace or extend an acl cache hit */
43033 void coda_cache_enter(struct inode *inode, int mask)
43034 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43035 struct coda_inode_info *cii = ITOC(inode);
43036
43037 spin_lock(&cii->c_lock);
43038 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43039 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43040 if (cii->c_uid != current_fsuid()) {
43041 cii->c_uid = current_fsuid();
43042 cii->c_cached_perm = mask;
43043 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43044 {
43045 struct coda_inode_info *cii = ITOC(inode);
43046 spin_lock(&cii->c_lock);
43047 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43048 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43049 spin_unlock(&cii->c_lock);
43050 }
43051
43052 /* remove all acl caches */
43053 void coda_cache_clear_all(struct super_block *sb)
43054 {
43055 - atomic_inc(&permission_epoch);
43056 + atomic_inc_unchecked(&permission_epoch);
43057 }
43058
43059
43060 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43061 spin_lock(&cii->c_lock);
43062 hit = (mask & cii->c_cached_perm) == mask &&
43063 cii->c_uid == current_fsuid() &&
43064 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43065 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43066 spin_unlock(&cii->c_lock);
43067
43068 return hit;
43069 diff --git a/fs/compat.c b/fs/compat.c
43070 index 58b1da4..afcd9b8 100644
43071 --- a/fs/compat.c
43072 +++ b/fs/compat.c
43073 @@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_tim
43074 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
43075 {
43076 compat_ino_t ino = stat->ino;
43077 - typeof(ubuf->st_uid) uid = 0;
43078 - typeof(ubuf->st_gid) gid = 0;
43079 + typeof(((struct compat_stat *)0)->st_uid) uid = 0;
43080 + typeof(((struct compat_stat *)0)->st_gid) gid = 0;
43081 int err;
43082
43083 SET_UID(uid, stat->uid);
43084 @@ -508,7 +508,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43085
43086 set_fs(KERNEL_DS);
43087 /* The __user pointer cast is valid because of the set_fs() */
43088 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43089 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43090 set_fs(oldfs);
43091 /* truncating is ok because it's a user address */
43092 if (!ret)
43093 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43094 goto out;
43095
43096 ret = -EINVAL;
43097 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43098 + if (nr_segs > UIO_MAXIOV)
43099 goto out;
43100 if (nr_segs > fast_segs) {
43101 ret = -ENOMEM;
43102 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
43103
43104 struct compat_readdir_callback {
43105 struct compat_old_linux_dirent __user *dirent;
43106 + struct file * file;
43107 int result;
43108 };
43109
43110 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43111 buf->result = -EOVERFLOW;
43112 return -EOVERFLOW;
43113 }
43114 +
43115 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43116 + return 0;
43117 +
43118 buf->result++;
43119 dirent = buf->dirent;
43120 if (!access_ok(VERIFY_WRITE, dirent,
43121 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43122
43123 buf.result = 0;
43124 buf.dirent = dirent;
43125 + buf.file = file;
43126
43127 error = vfs_readdir(file, compat_fillonedir, &buf);
43128 if (buf.result)
43129 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
43130 struct compat_getdents_callback {
43131 struct compat_linux_dirent __user *current_dir;
43132 struct compat_linux_dirent __user *previous;
43133 + struct file * file;
43134 int count;
43135 int error;
43136 };
43137 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43138 buf->error = -EOVERFLOW;
43139 return -EOVERFLOW;
43140 }
43141 +
43142 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43143 + return 0;
43144 +
43145 dirent = buf->previous;
43146 if (dirent) {
43147 if (__put_user(offset, &dirent->d_off))
43148 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43149 buf.previous = NULL;
43150 buf.count = count;
43151 buf.error = 0;
43152 + buf.file = file;
43153
43154 error = vfs_readdir(file, compat_filldir, &buf);
43155 if (error >= 0)
43156 @@ -1006,6 +1018,7 @@ out:
43157 struct compat_getdents_callback64 {
43158 struct linux_dirent64 __user *current_dir;
43159 struct linux_dirent64 __user *previous;
43160 + struct file * file;
43161 int count;
43162 int error;
43163 };
43164 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43165 buf->error = -EINVAL; /* only used if we fail.. */
43166 if (reclen > buf->count)
43167 return -EINVAL;
43168 +
43169 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43170 + return 0;
43171 +
43172 dirent = buf->previous;
43173
43174 if (dirent) {
43175 @@ -1073,13 +1090,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43176 buf.previous = NULL;
43177 buf.count = count;
43178 buf.error = 0;
43179 + buf.file = file;
43180
43181 error = vfs_readdir(file, compat_filldir64, &buf);
43182 if (error >= 0)
43183 error = buf.error;
43184 lastdirent = buf.previous;
43185 if (lastdirent) {
43186 - typeof(lastdirent->d_off) d_off = file->f_pos;
43187 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43188 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43189 error = -EFAULT;
43190 else
43191 @@ -1446,6 +1464,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
43192 struct fdtable *fdt;
43193 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43194
43195 + pax_track_stack();
43196 +
43197 if (n < 0)
43198 goto out_nofds;
43199
43200 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43201 index 112e45a..b59845b 100644
43202 --- a/fs/compat_binfmt_elf.c
43203 +++ b/fs/compat_binfmt_elf.c
43204 @@ -30,11 +30,13 @@
43205 #undef elf_phdr
43206 #undef elf_shdr
43207 #undef elf_note
43208 +#undef elf_dyn
43209 #undef elf_addr_t
43210 #define elfhdr elf32_hdr
43211 #define elf_phdr elf32_phdr
43212 #define elf_shdr elf32_shdr
43213 #define elf_note elf32_note
43214 +#define elf_dyn Elf32_Dyn
43215 #define elf_addr_t Elf32_Addr
43216
43217 /*
43218 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43219 index 51352de..93292ff 100644
43220 --- a/fs/compat_ioctl.c
43221 +++ b/fs/compat_ioctl.c
43222 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43223
43224 err = get_user(palp, &up->palette);
43225 err |= get_user(length, &up->length);
43226 + if (err)
43227 + return -EFAULT;
43228
43229 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43230 err = put_user(compat_ptr(palp), &up_native->palette);
43231 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43232 return -EFAULT;
43233 if (__get_user(udata, &ss32->iomem_base))
43234 return -EFAULT;
43235 - ss.iomem_base = compat_ptr(udata);
43236 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43237 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43238 __get_user(ss.port_high, &ss32->port_high))
43239 return -EFAULT;
43240 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43241 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43242 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43243 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43244 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43245 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43246 return -EFAULT;
43247
43248 return ioctl_preallocate(file, p);
43249 @@ -1644,8 +1646,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43250 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43251 {
43252 unsigned int a, b;
43253 - a = *(unsigned int *)p;
43254 - b = *(unsigned int *)q;
43255 + a = *(const unsigned int *)p;
43256 + b = *(const unsigned int *)q;
43257 if (a > b)
43258 return 1;
43259 if (a < b)
43260 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43261 index 9a37a9b..35792b6 100644
43262 --- a/fs/configfs/dir.c
43263 +++ b/fs/configfs/dir.c
43264 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43265 }
43266 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43267 struct configfs_dirent *next;
43268 - const char * name;
43269 + const unsigned char * name;
43270 + char d_name[sizeof(next->s_dentry->d_iname)];
43271 int len;
43272 struct inode *inode = NULL;
43273
43274 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43275 continue;
43276
43277 name = configfs_get_name(next);
43278 - len = strlen(name);
43279 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43280 + len = next->s_dentry->d_name.len;
43281 + memcpy(d_name, name, len);
43282 + name = d_name;
43283 + } else
43284 + len = strlen(name);
43285
43286 /*
43287 * We'll have a dentry and an inode for
43288 diff --git a/fs/dcache.c b/fs/dcache.c
43289 index a88948b..1e32160 100644
43290 --- a/fs/dcache.c
43291 +++ b/fs/dcache.c
43292 @@ -2998,7 +2998,7 @@ void __init vfs_caches_init(unsigned long mempages)
43293 mempages -= reserve;
43294
43295 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43296 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43297 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43298
43299 dcache_init();
43300 inode_init();
43301 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43302 index 11f8582..7b633bd 100644
43303 --- a/fs/ecryptfs/inode.c
43304 +++ b/fs/ecryptfs/inode.c
43305 @@ -681,7 +681,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43306 old_fs = get_fs();
43307 set_fs(get_ds());
43308 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43309 - (char __user *)lower_buf,
43310 + (char __force_user *)lower_buf,
43311 lower_bufsiz);
43312 set_fs(old_fs);
43313 if (rc < 0)
43314 @@ -727,7 +727,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43315 }
43316 old_fs = get_fs();
43317 set_fs(get_ds());
43318 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43319 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43320 set_fs(old_fs);
43321 if (rc < 0) {
43322 kfree(buf);
43323 @@ -742,7 +742,7 @@ out:
43324 static void
43325 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43326 {
43327 - char *buf = nd_get_link(nd);
43328 + const char *buf = nd_get_link(nd);
43329 if (!IS_ERR(buf)) {
43330 /* Free the char* */
43331 kfree(buf);
43332 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43333 index 940a82e..63af89e 100644
43334 --- a/fs/ecryptfs/miscdev.c
43335 +++ b/fs/ecryptfs/miscdev.c
43336 @@ -328,7 +328,7 @@ check_list:
43337 goto out_unlock_msg_ctx;
43338 i = 5;
43339 if (msg_ctx->msg) {
43340 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43341 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43342 goto out_unlock_msg_ctx;
43343 i += packet_length_size;
43344 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43345 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43346 index 3745f7c..89cc7a3 100644
43347 --- a/fs/ecryptfs/read_write.c
43348 +++ b/fs/ecryptfs/read_write.c
43349 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43350 return -EIO;
43351 fs_save = get_fs();
43352 set_fs(get_ds());
43353 - rc = vfs_write(lower_file, data, size, &offset);
43354 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43355 set_fs(fs_save);
43356 mark_inode_dirty_sync(ecryptfs_inode);
43357 return rc;
43358 @@ -235,7 +235,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43359 return -EIO;
43360 fs_save = get_fs();
43361 set_fs(get_ds());
43362 - rc = vfs_read(lower_file, data, size, &offset);
43363 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43364 set_fs(fs_save);
43365 return rc;
43366 }
43367 diff --git a/fs/exec.c b/fs/exec.c
43368 index 25dcbe5..4ffaa78 100644
43369 --- a/fs/exec.c
43370 +++ b/fs/exec.c
43371 @@ -55,12 +55,24 @@
43372 #include <linux/pipe_fs_i.h>
43373 #include <linux/oom.h>
43374 #include <linux/compat.h>
43375 +#include <linux/random.h>
43376 +#include <linux/seq_file.h>
43377 +
43378 +#ifdef CONFIG_PAX_REFCOUNT
43379 +#include <linux/kallsyms.h>
43380 +#include <linux/kdebug.h>
43381 +#endif
43382
43383 #include <asm/uaccess.h>
43384 #include <asm/mmu_context.h>
43385 #include <asm/tlb.h>
43386 #include "internal.h"
43387
43388 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43389 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43390 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43391 +#endif
43392 +
43393 int core_uses_pid;
43394 char core_pattern[CORENAME_MAX_SIZE] = "core";
43395 unsigned int core_pipe_limit;
43396 @@ -70,7 +82,7 @@ struct core_name {
43397 char *corename;
43398 int used, size;
43399 };
43400 -static atomic_t call_count = ATOMIC_INIT(1);
43401 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43402
43403 /* The maximal length of core_pattern is also specified in sysctl.c */
43404
43405 @@ -188,18 +200,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43406 int write)
43407 {
43408 struct page *page;
43409 - int ret;
43410
43411 -#ifdef CONFIG_STACK_GROWSUP
43412 - if (write) {
43413 - ret = expand_downwards(bprm->vma, pos);
43414 - if (ret < 0)
43415 - return NULL;
43416 - }
43417 -#endif
43418 - ret = get_user_pages(current, bprm->mm, pos,
43419 - 1, write, 1, &page, NULL);
43420 - if (ret <= 0)
43421 + if (0 > expand_downwards(bprm->vma, pos))
43422 + return NULL;
43423 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43424 return NULL;
43425
43426 if (write) {
43427 @@ -274,6 +278,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43428 vma->vm_end = STACK_TOP_MAX;
43429 vma->vm_start = vma->vm_end - PAGE_SIZE;
43430 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43431 +
43432 +#ifdef CONFIG_PAX_SEGMEXEC
43433 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43434 +#endif
43435 +
43436 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43437 INIT_LIST_HEAD(&vma->anon_vma_chain);
43438
43439 @@ -288,6 +297,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43440 mm->stack_vm = mm->total_vm = 1;
43441 up_write(&mm->mmap_sem);
43442 bprm->p = vma->vm_end - sizeof(void *);
43443 +
43444 +#ifdef CONFIG_PAX_RANDUSTACK
43445 + if (randomize_va_space)
43446 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
43447 +#endif
43448 +
43449 return 0;
43450 err:
43451 up_write(&mm->mmap_sem);
43452 @@ -396,19 +411,7 @@ err:
43453 return err;
43454 }
43455
43456 -struct user_arg_ptr {
43457 -#ifdef CONFIG_COMPAT
43458 - bool is_compat;
43459 -#endif
43460 - union {
43461 - const char __user *const __user *native;
43462 -#ifdef CONFIG_COMPAT
43463 - compat_uptr_t __user *compat;
43464 -#endif
43465 - } ptr;
43466 -};
43467 -
43468 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43469 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43470 {
43471 const char __user *native;
43472
43473 @@ -417,14 +420,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43474 compat_uptr_t compat;
43475
43476 if (get_user(compat, argv.ptr.compat + nr))
43477 - return ERR_PTR(-EFAULT);
43478 + return (const char __force_user *)ERR_PTR(-EFAULT);
43479
43480 return compat_ptr(compat);
43481 }
43482 #endif
43483
43484 if (get_user(native, argv.ptr.native + nr))
43485 - return ERR_PTR(-EFAULT);
43486 + return (const char __force_user *)ERR_PTR(-EFAULT);
43487
43488 return native;
43489 }
43490 @@ -443,7 +446,7 @@ static int count(struct user_arg_ptr argv, int max)
43491 if (!p)
43492 break;
43493
43494 - if (IS_ERR(p))
43495 + if (IS_ERR((const char __force_kernel *)p))
43496 return -EFAULT;
43497
43498 if (i++ >= max)
43499 @@ -477,7 +480,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43500
43501 ret = -EFAULT;
43502 str = get_user_arg_ptr(argv, argc);
43503 - if (IS_ERR(str))
43504 + if (IS_ERR((const char __force_kernel *)str))
43505 goto out;
43506
43507 len = strnlen_user(str, MAX_ARG_STRLEN);
43508 @@ -559,7 +562,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43509 int r;
43510 mm_segment_t oldfs = get_fs();
43511 struct user_arg_ptr argv = {
43512 - .ptr.native = (const char __user *const __user *)__argv,
43513 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43514 };
43515
43516 set_fs(KERNEL_DS);
43517 @@ -594,7 +597,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43518 unsigned long new_end = old_end - shift;
43519 struct mmu_gather tlb;
43520
43521 - BUG_ON(new_start > new_end);
43522 + if (new_start >= new_end || new_start < mmap_min_addr)
43523 + return -ENOMEM;
43524
43525 /*
43526 * ensure there are no vmas between where we want to go
43527 @@ -603,6 +607,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43528 if (vma != find_vma(mm, new_start))
43529 return -EFAULT;
43530
43531 +#ifdef CONFIG_PAX_SEGMEXEC
43532 + BUG_ON(pax_find_mirror_vma(vma));
43533 +#endif
43534 +
43535 /*
43536 * cover the whole range: [new_start, old_end)
43537 */
43538 @@ -683,10 +691,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43539 stack_top = arch_align_stack(stack_top);
43540 stack_top = PAGE_ALIGN(stack_top);
43541
43542 - if (unlikely(stack_top < mmap_min_addr) ||
43543 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43544 - return -ENOMEM;
43545 -
43546 stack_shift = vma->vm_end - stack_top;
43547
43548 bprm->p -= stack_shift;
43549 @@ -698,8 +702,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43550 bprm->exec -= stack_shift;
43551
43552 down_write(&mm->mmap_sem);
43553 +
43554 + /* Move stack pages down in memory. */
43555 + if (stack_shift) {
43556 + ret = shift_arg_pages(vma, stack_shift);
43557 + if (ret)
43558 + goto out_unlock;
43559 + }
43560 +
43561 vm_flags = VM_STACK_FLAGS;
43562
43563 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43564 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43565 + vm_flags &= ~VM_EXEC;
43566 +
43567 +#ifdef CONFIG_PAX_MPROTECT
43568 + if (mm->pax_flags & MF_PAX_MPROTECT)
43569 + vm_flags &= ~VM_MAYEXEC;
43570 +#endif
43571 +
43572 + }
43573 +#endif
43574 +
43575 /*
43576 * Adjust stack execute permissions; explicitly enable for
43577 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43578 @@ -718,13 +742,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43579 goto out_unlock;
43580 BUG_ON(prev != vma);
43581
43582 - /* Move stack pages down in memory. */
43583 - if (stack_shift) {
43584 - ret = shift_arg_pages(vma, stack_shift);
43585 - if (ret)
43586 - goto out_unlock;
43587 - }
43588 -
43589 /* mprotect_fixup is overkill to remove the temporary stack flags */
43590 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43591
43592 @@ -805,7 +822,7 @@ int kernel_read(struct file *file, loff_t offset,
43593 old_fs = get_fs();
43594 set_fs(get_ds());
43595 /* The cast to a user pointer is valid due to the set_fs() */
43596 - result = vfs_read(file, (void __user *)addr, count, &pos);
43597 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43598 set_fs(old_fs);
43599 return result;
43600 }
43601 @@ -1251,7 +1268,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
43602 }
43603 rcu_read_unlock();
43604
43605 - if (p->fs->users > n_fs) {
43606 + if (atomic_read(&p->fs->users) > n_fs) {
43607 bprm->unsafe |= LSM_UNSAFE_SHARE;
43608 } else {
43609 res = -EAGAIN;
43610 @@ -1454,6 +1471,11 @@ static int do_execve_common(const char *filename,
43611 struct user_arg_ptr envp,
43612 struct pt_regs *regs)
43613 {
43614 +#ifdef CONFIG_GRKERNSEC
43615 + struct file *old_exec_file;
43616 + struct acl_subject_label *old_acl;
43617 + struct rlimit old_rlim[RLIM_NLIMITS];
43618 +#endif
43619 struct linux_binprm *bprm;
43620 struct file *file;
43621 struct files_struct *displaced;
43622 @@ -1461,6 +1483,8 @@ static int do_execve_common(const char *filename,
43623 int retval;
43624 const struct cred *cred = current_cred();
43625
43626 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
43627 +
43628 /*
43629 * We move the actual failure in case of RLIMIT_NPROC excess from
43630 * set*uid() to execve() because too many poorly written programs
43631 @@ -1507,6 +1531,16 @@ static int do_execve_common(const char *filename,
43632 bprm->filename = filename;
43633 bprm->interp = filename;
43634
43635 + if (gr_process_user_ban()) {
43636 + retval = -EPERM;
43637 + goto out_file;
43638 + }
43639 +
43640 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
43641 + retval = -EACCES;
43642 + goto out_file;
43643 + }
43644 +
43645 retval = bprm_mm_init(bprm);
43646 if (retval)
43647 goto out_file;
43648 @@ -1536,9 +1570,40 @@ static int do_execve_common(const char *filename,
43649 if (retval < 0)
43650 goto out;
43651
43652 + if (!gr_tpe_allow(file)) {
43653 + retval = -EACCES;
43654 + goto out;
43655 + }
43656 +
43657 + if (gr_check_crash_exec(file)) {
43658 + retval = -EACCES;
43659 + goto out;
43660 + }
43661 +
43662 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
43663 +
43664 + gr_handle_exec_args(bprm, argv);
43665 +
43666 +#ifdef CONFIG_GRKERNSEC
43667 + old_acl = current->acl;
43668 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
43669 + old_exec_file = current->exec_file;
43670 + get_file(file);
43671 + current->exec_file = file;
43672 +#endif
43673 +
43674 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
43675 + bprm->unsafe & LSM_UNSAFE_SHARE);
43676 + if (retval < 0)
43677 + goto out_fail;
43678 +
43679 retval = search_binary_handler(bprm,regs);
43680 if (retval < 0)
43681 - goto out;
43682 + goto out_fail;
43683 +#ifdef CONFIG_GRKERNSEC
43684 + if (old_exec_file)
43685 + fput(old_exec_file);
43686 +#endif
43687
43688 /* execve succeeded */
43689 current->fs->in_exec = 0;
43690 @@ -1549,6 +1614,14 @@ static int do_execve_common(const char *filename,
43691 put_files_struct(displaced);
43692 return retval;
43693
43694 +out_fail:
43695 +#ifdef CONFIG_GRKERNSEC
43696 + current->acl = old_acl;
43697 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
43698 + fput(current->exec_file);
43699 + current->exec_file = old_exec_file;
43700 +#endif
43701 +
43702 out:
43703 if (bprm->mm) {
43704 acct_arg_size(bprm, 0);
43705 @@ -1622,7 +1695,7 @@ static int expand_corename(struct core_name *cn)
43706 {
43707 char *old_corename = cn->corename;
43708
43709 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
43710 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
43711 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
43712
43713 if (!cn->corename) {
43714 @@ -1719,7 +1792,7 @@ static int format_corename(struct core_name *cn, long signr)
43715 int pid_in_pattern = 0;
43716 int err = 0;
43717
43718 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
43719 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
43720 cn->corename = kmalloc(cn->size, GFP_KERNEL);
43721 cn->used = 0;
43722
43723 @@ -1816,6 +1889,218 @@ out:
43724 return ispipe;
43725 }
43726
43727 +int pax_check_flags(unsigned long *flags)
43728 +{
43729 + int retval = 0;
43730 +
43731 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
43732 + if (*flags & MF_PAX_SEGMEXEC)
43733 + {
43734 + *flags &= ~MF_PAX_SEGMEXEC;
43735 + retval = -EINVAL;
43736 + }
43737 +#endif
43738 +
43739 + if ((*flags & MF_PAX_PAGEEXEC)
43740 +
43741 +#ifdef CONFIG_PAX_PAGEEXEC
43742 + && (*flags & MF_PAX_SEGMEXEC)
43743 +#endif
43744 +
43745 + )
43746 + {
43747 + *flags &= ~MF_PAX_PAGEEXEC;
43748 + retval = -EINVAL;
43749 + }
43750 +
43751 + if ((*flags & MF_PAX_MPROTECT)
43752 +
43753 +#ifdef CONFIG_PAX_MPROTECT
43754 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43755 +#endif
43756 +
43757 + )
43758 + {
43759 + *flags &= ~MF_PAX_MPROTECT;
43760 + retval = -EINVAL;
43761 + }
43762 +
43763 + if ((*flags & MF_PAX_EMUTRAMP)
43764 +
43765 +#ifdef CONFIG_PAX_EMUTRAMP
43766 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
43767 +#endif
43768 +
43769 + )
43770 + {
43771 + *flags &= ~MF_PAX_EMUTRAMP;
43772 + retval = -EINVAL;
43773 + }
43774 +
43775 + return retval;
43776 +}
43777 +
43778 +EXPORT_SYMBOL(pax_check_flags);
43779 +
43780 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43781 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
43782 +{
43783 + struct task_struct *tsk = current;
43784 + struct mm_struct *mm = current->mm;
43785 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
43786 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
43787 + char *path_exec = NULL;
43788 + char *path_fault = NULL;
43789 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
43790 +
43791 + if (buffer_exec && buffer_fault) {
43792 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
43793 +
43794 + down_read(&mm->mmap_sem);
43795 + vma = mm->mmap;
43796 + while (vma && (!vma_exec || !vma_fault)) {
43797 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
43798 + vma_exec = vma;
43799 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
43800 + vma_fault = vma;
43801 + vma = vma->vm_next;
43802 + }
43803 + if (vma_exec) {
43804 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
43805 + if (IS_ERR(path_exec))
43806 + path_exec = "<path too long>";
43807 + else {
43808 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
43809 + if (path_exec) {
43810 + *path_exec = 0;
43811 + path_exec = buffer_exec;
43812 + } else
43813 + path_exec = "<path too long>";
43814 + }
43815 + }
43816 + if (vma_fault) {
43817 + start = vma_fault->vm_start;
43818 + end = vma_fault->vm_end;
43819 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
43820 + if (vma_fault->vm_file) {
43821 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
43822 + if (IS_ERR(path_fault))
43823 + path_fault = "<path too long>";
43824 + else {
43825 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
43826 + if (path_fault) {
43827 + *path_fault = 0;
43828 + path_fault = buffer_fault;
43829 + } else
43830 + path_fault = "<path too long>";
43831 + }
43832 + } else
43833 + path_fault = "<anonymous mapping>";
43834 + }
43835 + up_read(&mm->mmap_sem);
43836 + }
43837 + if (tsk->signal->curr_ip)
43838 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
43839 + else
43840 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
43841 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
43842 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
43843 + task_uid(tsk), task_euid(tsk), pc, sp);
43844 + free_page((unsigned long)buffer_exec);
43845 + free_page((unsigned long)buffer_fault);
43846 + pax_report_insns(regs, pc, sp);
43847 + do_coredump(SIGKILL, SIGKILL, regs);
43848 +}
43849 +#endif
43850 +
43851 +#ifdef CONFIG_PAX_REFCOUNT
43852 +void pax_report_refcount_overflow(struct pt_regs *regs)
43853 +{
43854 + if (current->signal->curr_ip)
43855 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43856 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
43857 + else
43858 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
43859 + current->comm, task_pid_nr(current), current_uid(), current_euid());
43860 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
43861 + show_regs(regs);
43862 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
43863 +}
43864 +#endif
43865 +
43866 +#ifdef CONFIG_PAX_USERCOPY
43867 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
43868 +int object_is_on_stack(const void *obj, unsigned long len)
43869 +{
43870 + const void * const stack = task_stack_page(current);
43871 + const void * const stackend = stack + THREAD_SIZE;
43872 +
43873 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43874 + const void *frame = NULL;
43875 + const void *oldframe;
43876 +#endif
43877 +
43878 + if (obj + len < obj)
43879 + return -1;
43880 +
43881 + if (obj + len <= stack || stackend <= obj)
43882 + return 0;
43883 +
43884 + if (obj < stack || stackend < obj + len)
43885 + return -1;
43886 +
43887 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
43888 + oldframe = __builtin_frame_address(1);
43889 + if (oldframe)
43890 + frame = __builtin_frame_address(2);
43891 + /*
43892 + low ----------------------------------------------> high
43893 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
43894 + ^----------------^
43895 + allow copies only within here
43896 + */
43897 + while (stack <= frame && frame < stackend) {
43898 + /* if obj + len extends past the last frame, this
43899 + check won't pass and the next frame will be 0,
43900 + causing us to bail out and correctly report
43901 + the copy as invalid
43902 + */
43903 + if (obj + len <= frame)
43904 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
43905 + oldframe = frame;
43906 + frame = *(const void * const *)frame;
43907 + }
43908 + return -1;
43909 +#else
43910 + return 1;
43911 +#endif
43912 +}
43913 +
43914 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
43915 +{
43916 + if (current->signal->curr_ip)
43917 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43918 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43919 + else
43920 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
43921 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
43922 + dump_stack();
43923 + gr_handle_kernel_exploit();
43924 + do_group_exit(SIGKILL);
43925 +}
43926 +#endif
43927 +
43928 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
43929 +void pax_track_stack(void)
43930 +{
43931 + unsigned long sp = (unsigned long)&sp;
43932 + if (sp < current_thread_info()->lowest_stack &&
43933 + sp > (unsigned long)task_stack_page(current))
43934 + current_thread_info()->lowest_stack = sp;
43935 +}
43936 +EXPORT_SYMBOL(pax_track_stack);
43937 +#endif
43938 +
43939 static int zap_process(struct task_struct *start, int exit_code)
43940 {
43941 struct task_struct *t;
43942 @@ -2027,17 +2312,17 @@ static void wait_for_dump_helpers(struct file *file)
43943 pipe = file->f_path.dentry->d_inode->i_pipe;
43944
43945 pipe_lock(pipe);
43946 - pipe->readers++;
43947 - pipe->writers--;
43948 + atomic_inc(&pipe->readers);
43949 + atomic_dec(&pipe->writers);
43950
43951 - while ((pipe->readers > 1) && (!signal_pending(current))) {
43952 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
43953 wake_up_interruptible_sync(&pipe->wait);
43954 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
43955 pipe_wait(pipe);
43956 }
43957
43958 - pipe->readers--;
43959 - pipe->writers++;
43960 + atomic_dec(&pipe->readers);
43961 + atomic_inc(&pipe->writers);
43962 pipe_unlock(pipe);
43963
43964 }
43965 @@ -2098,7 +2383,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43966 int retval = 0;
43967 int flag = 0;
43968 int ispipe;
43969 - static atomic_t core_dump_count = ATOMIC_INIT(0);
43970 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
43971 struct coredump_params cprm = {
43972 .signr = signr,
43973 .regs = regs,
43974 @@ -2113,6 +2398,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43975
43976 audit_core_dumps(signr);
43977
43978 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
43979 + gr_handle_brute_attach(current, cprm.mm_flags);
43980 +
43981 binfmt = mm->binfmt;
43982 if (!binfmt || !binfmt->core_dump)
43983 goto fail;
43984 @@ -2180,7 +2468,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43985 }
43986 cprm.limit = RLIM_INFINITY;
43987
43988 - dump_count = atomic_inc_return(&core_dump_count);
43989 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
43990 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
43991 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
43992 task_tgid_vnr(current), current->comm);
43993 @@ -2207,6 +2495,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
43994 } else {
43995 struct inode *inode;
43996
43997 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
43998 +
43999 if (cprm.limit < binfmt->min_coredump)
44000 goto fail_unlock;
44001
44002 @@ -2250,7 +2540,7 @@ close_fail:
44003 filp_close(cprm.file, NULL);
44004 fail_dropcount:
44005 if (ispipe)
44006 - atomic_dec(&core_dump_count);
44007 + atomic_dec_unchecked(&core_dump_count);
44008 fail_unlock:
44009 kfree(cn.corename);
44010 fail_corename:
44011 @@ -2269,7 +2559,7 @@ fail:
44012 */
44013 int dump_write(struct file *file, const void *addr, int nr)
44014 {
44015 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44016 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44017 }
44018 EXPORT_SYMBOL(dump_write);
44019
44020 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44021 index 8f44cef..cb07120 100644
44022 --- a/fs/ext2/balloc.c
44023 +++ b/fs/ext2/balloc.c
44024 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44025
44026 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44027 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44028 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44029 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44030 sbi->s_resuid != current_fsuid() &&
44031 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44032 return 0;
44033 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44034 index 6386d76..0a266b1 100644
44035 --- a/fs/ext3/balloc.c
44036 +++ b/fs/ext3/balloc.c
44037 @@ -1446,7 +1446,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
44038
44039 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44040 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44041 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44042 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44043 sbi->s_resuid != current_fsuid() &&
44044 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44045 return 0;
44046 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44047 index f8224ad..fbef97c 100644
44048 --- a/fs/ext4/balloc.c
44049 +++ b/fs/ext4/balloc.c
44050 @@ -394,8 +394,8 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
44051 /* Hm, nope. Are (enough) root reserved blocks available? */
44052 if (sbi->s_resuid == current_fsuid() ||
44053 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44054 - capable(CAP_SYS_RESOURCE) ||
44055 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44056 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44057 + capable_nolog(CAP_SYS_RESOURCE)) {
44058
44059 if (free_blocks >= (nblocks + dirty_blocks))
44060 return 1;
44061 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44062 index 5c38120..2291d18 100644
44063 --- a/fs/ext4/ext4.h
44064 +++ b/fs/ext4/ext4.h
44065 @@ -1180,19 +1180,19 @@ struct ext4_sb_info {
44066 unsigned long s_mb_last_start;
44067
44068 /* stats for buddy allocator */
44069 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44070 - atomic_t s_bal_success; /* we found long enough chunks */
44071 - atomic_t s_bal_allocated; /* in blocks */
44072 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44073 - atomic_t s_bal_goals; /* goal hits */
44074 - atomic_t s_bal_breaks; /* too long searches */
44075 - atomic_t s_bal_2orders; /* 2^order hits */
44076 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44077 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44078 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44079 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44080 + atomic_unchecked_t s_bal_goals; /* goal hits */
44081 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44082 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44083 spinlock_t s_bal_lock;
44084 unsigned long s_mb_buddies_generated;
44085 unsigned long long s_mb_generation_time;
44086 - atomic_t s_mb_lost_chunks;
44087 - atomic_t s_mb_preallocated;
44088 - atomic_t s_mb_discarded;
44089 + atomic_unchecked_t s_mb_lost_chunks;
44090 + atomic_unchecked_t s_mb_preallocated;
44091 + atomic_unchecked_t s_mb_discarded;
44092 atomic_t s_lock_busy;
44093
44094 /* locality groups */
44095 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
44096 index e4095e9..1c006c5 100644
44097 --- a/fs/ext4/file.c
44098 +++ b/fs/ext4/file.c
44099 @@ -181,8 +181,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
44100 path.dentry = mnt->mnt_root;
44101 cp = d_path(&path, buf, sizeof(buf));
44102 if (!IS_ERR(cp)) {
44103 - memcpy(sbi->s_es->s_last_mounted, cp,
44104 - sizeof(sbi->s_es->s_last_mounted));
44105 + strlcpy(sbi->s_es->s_last_mounted, cp,
44106 + sizeof(sbi->s_es->s_last_mounted));
44107 ext4_mark_super_dirty(sb);
44108 }
44109 }
44110 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44111 index f18bfe3..43759b1 100644
44112 --- a/fs/ext4/ioctl.c
44113 +++ b/fs/ext4/ioctl.c
44114 @@ -348,7 +348,7 @@ mext_out:
44115 if (!blk_queue_discard(q))
44116 return -EOPNOTSUPP;
44117
44118 - if (copy_from_user(&range, (struct fstrim_range *)arg,
44119 + if (copy_from_user(&range, (struct fstrim_range __user *)arg,
44120 sizeof(range)))
44121 return -EFAULT;
44122
44123 @@ -358,7 +358,7 @@ mext_out:
44124 if (ret < 0)
44125 return ret;
44126
44127 - if (copy_to_user((struct fstrim_range *)arg, &range,
44128 + if (copy_to_user((struct fstrim_range __user *)arg, &range,
44129 sizeof(range)))
44130 return -EFAULT;
44131
44132 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44133 index 17a5a57..b6be3c5 100644
44134 --- a/fs/ext4/mballoc.c
44135 +++ b/fs/ext4/mballoc.c
44136 @@ -1795,7 +1795,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44137 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44138
44139 if (EXT4_SB(sb)->s_mb_stats)
44140 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44141 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44142
44143 break;
44144 }
44145 @@ -2089,7 +2089,7 @@ repeat:
44146 ac->ac_status = AC_STATUS_CONTINUE;
44147 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44148 cr = 3;
44149 - atomic_inc(&sbi->s_mb_lost_chunks);
44150 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44151 goto repeat;
44152 }
44153 }
44154 @@ -2132,6 +2132,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
44155 ext4_grpblk_t counters[16];
44156 } sg;
44157
44158 + pax_track_stack();
44159 +
44160 group--;
44161 if (group == 0)
44162 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
44163 @@ -2573,25 +2575,25 @@ int ext4_mb_release(struct super_block *sb)
44164 if (sbi->s_mb_stats) {
44165 ext4_msg(sb, KERN_INFO,
44166 "mballoc: %u blocks %u reqs (%u success)",
44167 - atomic_read(&sbi->s_bal_allocated),
44168 - atomic_read(&sbi->s_bal_reqs),
44169 - atomic_read(&sbi->s_bal_success));
44170 + atomic_read_unchecked(&sbi->s_bal_allocated),
44171 + atomic_read_unchecked(&sbi->s_bal_reqs),
44172 + atomic_read_unchecked(&sbi->s_bal_success));
44173 ext4_msg(sb, KERN_INFO,
44174 "mballoc: %u extents scanned, %u goal hits, "
44175 "%u 2^N hits, %u breaks, %u lost",
44176 - atomic_read(&sbi->s_bal_ex_scanned),
44177 - atomic_read(&sbi->s_bal_goals),
44178 - atomic_read(&sbi->s_bal_2orders),
44179 - atomic_read(&sbi->s_bal_breaks),
44180 - atomic_read(&sbi->s_mb_lost_chunks));
44181 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44182 + atomic_read_unchecked(&sbi->s_bal_goals),
44183 + atomic_read_unchecked(&sbi->s_bal_2orders),
44184 + atomic_read_unchecked(&sbi->s_bal_breaks),
44185 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44186 ext4_msg(sb, KERN_INFO,
44187 "mballoc: %lu generated and it took %Lu",
44188 sbi->s_mb_buddies_generated,
44189 sbi->s_mb_generation_time);
44190 ext4_msg(sb, KERN_INFO,
44191 "mballoc: %u preallocated, %u discarded",
44192 - atomic_read(&sbi->s_mb_preallocated),
44193 - atomic_read(&sbi->s_mb_discarded));
44194 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44195 + atomic_read_unchecked(&sbi->s_mb_discarded));
44196 }
44197
44198 free_percpu(sbi->s_locality_groups);
44199 @@ -3070,16 +3072,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44200 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44201
44202 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44203 - atomic_inc(&sbi->s_bal_reqs);
44204 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44205 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44206 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44207 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44208 - atomic_inc(&sbi->s_bal_success);
44209 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44210 + atomic_inc_unchecked(&sbi->s_bal_success);
44211 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44212 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44213 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44214 - atomic_inc(&sbi->s_bal_goals);
44215 + atomic_inc_unchecked(&sbi->s_bal_goals);
44216 if (ac->ac_found > sbi->s_mb_max_to_scan)
44217 - atomic_inc(&sbi->s_bal_breaks);
44218 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44219 }
44220
44221 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44222 @@ -3477,7 +3479,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44223 trace_ext4_mb_new_inode_pa(ac, pa);
44224
44225 ext4_mb_use_inode_pa(ac, pa);
44226 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44227 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44228
44229 ei = EXT4_I(ac->ac_inode);
44230 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44231 @@ -3537,7 +3539,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44232 trace_ext4_mb_new_group_pa(ac, pa);
44233
44234 ext4_mb_use_group_pa(ac, pa);
44235 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44236 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44237
44238 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44239 lg = ac->ac_lg;
44240 @@ -3625,7 +3627,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44241 * from the bitmap and continue.
44242 */
44243 }
44244 - atomic_add(free, &sbi->s_mb_discarded);
44245 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44246
44247 return err;
44248 }
44249 @@ -3643,7 +3645,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44250 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44251 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44252 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44253 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44254 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44255 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44256
44257 return 0;
44258 diff --git a/fs/fcntl.c b/fs/fcntl.c
44259 index 22764c7..86372c9 100644
44260 --- a/fs/fcntl.c
44261 +++ b/fs/fcntl.c
44262 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44263 if (err)
44264 return err;
44265
44266 + if (gr_handle_chroot_fowner(pid, type))
44267 + return -ENOENT;
44268 + if (gr_check_protected_task_fowner(pid, type))
44269 + return -EACCES;
44270 +
44271 f_modown(filp, pid, type, force);
44272 return 0;
44273 }
44274 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44275
44276 static int f_setown_ex(struct file *filp, unsigned long arg)
44277 {
44278 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44279 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44280 struct f_owner_ex owner;
44281 struct pid *pid;
44282 int type;
44283 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44284
44285 static int f_getown_ex(struct file *filp, unsigned long arg)
44286 {
44287 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44288 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44289 struct f_owner_ex owner;
44290 int ret = 0;
44291
44292 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44293 switch (cmd) {
44294 case F_DUPFD:
44295 case F_DUPFD_CLOEXEC:
44296 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44297 if (arg >= rlimit(RLIMIT_NOFILE))
44298 break;
44299 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44300 diff --git a/fs/fifo.c b/fs/fifo.c
44301 index b1a524d..4ee270e 100644
44302 --- a/fs/fifo.c
44303 +++ b/fs/fifo.c
44304 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44305 */
44306 filp->f_op = &read_pipefifo_fops;
44307 pipe->r_counter++;
44308 - if (pipe->readers++ == 0)
44309 + if (atomic_inc_return(&pipe->readers) == 1)
44310 wake_up_partner(inode);
44311
44312 - if (!pipe->writers) {
44313 + if (!atomic_read(&pipe->writers)) {
44314 if ((filp->f_flags & O_NONBLOCK)) {
44315 /* suppress POLLHUP until we have
44316 * seen a writer */
44317 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44318 * errno=ENXIO when there is no process reading the FIFO.
44319 */
44320 ret = -ENXIO;
44321 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44322 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44323 goto err;
44324
44325 filp->f_op = &write_pipefifo_fops;
44326 pipe->w_counter++;
44327 - if (!pipe->writers++)
44328 + if (atomic_inc_return(&pipe->writers) == 1)
44329 wake_up_partner(inode);
44330
44331 - if (!pipe->readers) {
44332 + if (!atomic_read(&pipe->readers)) {
44333 wait_for_partner(inode, &pipe->r_counter);
44334 if (signal_pending(current))
44335 goto err_wr;
44336 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44337 */
44338 filp->f_op = &rdwr_pipefifo_fops;
44339
44340 - pipe->readers++;
44341 - pipe->writers++;
44342 + atomic_inc(&pipe->readers);
44343 + atomic_inc(&pipe->writers);
44344 pipe->r_counter++;
44345 pipe->w_counter++;
44346 - if (pipe->readers == 1 || pipe->writers == 1)
44347 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44348 wake_up_partner(inode);
44349 break;
44350
44351 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44352 return 0;
44353
44354 err_rd:
44355 - if (!--pipe->readers)
44356 + if (atomic_dec_and_test(&pipe->readers))
44357 wake_up_interruptible(&pipe->wait);
44358 ret = -ERESTARTSYS;
44359 goto err;
44360
44361 err_wr:
44362 - if (!--pipe->writers)
44363 + if (atomic_dec_and_test(&pipe->writers))
44364 wake_up_interruptible(&pipe->wait);
44365 ret = -ERESTARTSYS;
44366 goto err;
44367
44368 err:
44369 - if (!pipe->readers && !pipe->writers)
44370 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44371 free_pipe_info(inode);
44372
44373 err_nocleanup:
44374 diff --git a/fs/file.c b/fs/file.c
44375 index 4c6992d..104cdea 100644
44376 --- a/fs/file.c
44377 +++ b/fs/file.c
44378 @@ -15,6 +15,7 @@
44379 #include <linux/slab.h>
44380 #include <linux/vmalloc.h>
44381 #include <linux/file.h>
44382 +#include <linux/security.h>
44383 #include <linux/fdtable.h>
44384 #include <linux/bitops.h>
44385 #include <linux/interrupt.h>
44386 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44387 * N.B. For clone tasks sharing a files structure, this test
44388 * will limit the total number of files that can be opened.
44389 */
44390 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44391 if (nr >= rlimit(RLIMIT_NOFILE))
44392 return -EMFILE;
44393
44394 diff --git a/fs/filesystems.c b/fs/filesystems.c
44395 index 0845f84..7b4ebef 100644
44396 --- a/fs/filesystems.c
44397 +++ b/fs/filesystems.c
44398 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(const char *name)
44399 int len = dot ? dot - name : strlen(name);
44400
44401 fs = __get_fs_type(name, len);
44402 +
44403 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44404 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44405 +#else
44406 if (!fs && (request_module("%.*s", len, name) == 0))
44407 +#endif
44408 fs = __get_fs_type(name, len);
44409
44410 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44411 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44412 index 78b519c..212c0d0 100644
44413 --- a/fs/fs_struct.c
44414 +++ b/fs/fs_struct.c
44415 @@ -4,6 +4,7 @@
44416 #include <linux/path.h>
44417 #include <linux/slab.h>
44418 #include <linux/fs_struct.h>
44419 +#include <linux/grsecurity.h>
44420 #include "internal.h"
44421
44422 static inline void path_get_longterm(struct path *path)
44423 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44424 old_root = fs->root;
44425 fs->root = *path;
44426 path_get_longterm(path);
44427 + gr_set_chroot_entries(current, path);
44428 write_seqcount_end(&fs->seq);
44429 spin_unlock(&fs->lock);
44430 if (old_root.dentry)
44431 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44432 && fs->root.mnt == old_root->mnt) {
44433 path_get_longterm(new_root);
44434 fs->root = *new_root;
44435 + gr_set_chroot_entries(p, new_root);
44436 count++;
44437 }
44438 if (fs->pwd.dentry == old_root->dentry
44439 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
44440 spin_lock(&fs->lock);
44441 write_seqcount_begin(&fs->seq);
44442 tsk->fs = NULL;
44443 - kill = !--fs->users;
44444 + gr_clear_chroot_entries(tsk);
44445 + kill = !atomic_dec_return(&fs->users);
44446 write_seqcount_end(&fs->seq);
44447 spin_unlock(&fs->lock);
44448 task_unlock(tsk);
44449 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44450 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44451 /* We don't need to lock fs - think why ;-) */
44452 if (fs) {
44453 - fs->users = 1;
44454 + atomic_set(&fs->users, 1);
44455 fs->in_exec = 0;
44456 spin_lock_init(&fs->lock);
44457 seqcount_init(&fs->seq);
44458 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44459 spin_lock(&old->lock);
44460 fs->root = old->root;
44461 path_get_longterm(&fs->root);
44462 + /* instead of calling gr_set_chroot_entries here,
44463 + we call it from every caller of this function
44464 + */
44465 fs->pwd = old->pwd;
44466 path_get_longterm(&fs->pwd);
44467 spin_unlock(&old->lock);
44468 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
44469
44470 task_lock(current);
44471 spin_lock(&fs->lock);
44472 - kill = !--fs->users;
44473 + kill = !atomic_dec_return(&fs->users);
44474 current->fs = new_fs;
44475 + gr_set_chroot_entries(current, &new_fs->root);
44476 spin_unlock(&fs->lock);
44477 task_unlock(current);
44478
44479 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
44480
44481 /* to be mentioned only in INIT_TASK */
44482 struct fs_struct init_fs = {
44483 - .users = 1,
44484 + .users = ATOMIC_INIT(1),
44485 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44486 .seq = SEQCNT_ZERO,
44487 .umask = 0022,
44488 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
44489 task_lock(current);
44490
44491 spin_lock(&init_fs.lock);
44492 - init_fs.users++;
44493 + atomic_inc(&init_fs.users);
44494 spin_unlock(&init_fs.lock);
44495
44496 spin_lock(&fs->lock);
44497 current->fs = &init_fs;
44498 - kill = !--fs->users;
44499 + gr_set_chroot_entries(current, &current->fs->root);
44500 + kill = !atomic_dec_return(&fs->users);
44501 spin_unlock(&fs->lock);
44502
44503 task_unlock(current);
44504 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44505 index 9905350..02eaec4 100644
44506 --- a/fs/fscache/cookie.c
44507 +++ b/fs/fscache/cookie.c
44508 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44509 parent ? (char *) parent->def->name : "<no-parent>",
44510 def->name, netfs_data);
44511
44512 - fscache_stat(&fscache_n_acquires);
44513 + fscache_stat_unchecked(&fscache_n_acquires);
44514
44515 /* if there's no parent cookie, then we don't create one here either */
44516 if (!parent) {
44517 - fscache_stat(&fscache_n_acquires_null);
44518 + fscache_stat_unchecked(&fscache_n_acquires_null);
44519 _leave(" [no parent]");
44520 return NULL;
44521 }
44522 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44523 /* allocate and initialise a cookie */
44524 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44525 if (!cookie) {
44526 - fscache_stat(&fscache_n_acquires_oom);
44527 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44528 _leave(" [ENOMEM]");
44529 return NULL;
44530 }
44531 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44532
44533 switch (cookie->def->type) {
44534 case FSCACHE_COOKIE_TYPE_INDEX:
44535 - fscache_stat(&fscache_n_cookie_index);
44536 + fscache_stat_unchecked(&fscache_n_cookie_index);
44537 break;
44538 case FSCACHE_COOKIE_TYPE_DATAFILE:
44539 - fscache_stat(&fscache_n_cookie_data);
44540 + fscache_stat_unchecked(&fscache_n_cookie_data);
44541 break;
44542 default:
44543 - fscache_stat(&fscache_n_cookie_special);
44544 + fscache_stat_unchecked(&fscache_n_cookie_special);
44545 break;
44546 }
44547
44548 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44549 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44550 atomic_dec(&parent->n_children);
44551 __fscache_cookie_put(cookie);
44552 - fscache_stat(&fscache_n_acquires_nobufs);
44553 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
44554 _leave(" = NULL");
44555 return NULL;
44556 }
44557 }
44558
44559 - fscache_stat(&fscache_n_acquires_ok);
44560 + fscache_stat_unchecked(&fscache_n_acquires_ok);
44561 _leave(" = %p", cookie);
44562 return cookie;
44563 }
44564 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
44565 cache = fscache_select_cache_for_object(cookie->parent);
44566 if (!cache) {
44567 up_read(&fscache_addremove_sem);
44568 - fscache_stat(&fscache_n_acquires_no_cache);
44569 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
44570 _leave(" = -ENOMEDIUM [no cache]");
44571 return -ENOMEDIUM;
44572 }
44573 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
44574 object = cache->ops->alloc_object(cache, cookie);
44575 fscache_stat_d(&fscache_n_cop_alloc_object);
44576 if (IS_ERR(object)) {
44577 - fscache_stat(&fscache_n_object_no_alloc);
44578 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
44579 ret = PTR_ERR(object);
44580 goto error;
44581 }
44582
44583 - fscache_stat(&fscache_n_object_alloc);
44584 + fscache_stat_unchecked(&fscache_n_object_alloc);
44585
44586 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
44587
44588 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
44589 struct fscache_object *object;
44590 struct hlist_node *_p;
44591
44592 - fscache_stat(&fscache_n_updates);
44593 + fscache_stat_unchecked(&fscache_n_updates);
44594
44595 if (!cookie) {
44596 - fscache_stat(&fscache_n_updates_null);
44597 + fscache_stat_unchecked(&fscache_n_updates_null);
44598 _leave(" [no cookie]");
44599 return;
44600 }
44601 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44602 struct fscache_object *object;
44603 unsigned long event;
44604
44605 - fscache_stat(&fscache_n_relinquishes);
44606 + fscache_stat_unchecked(&fscache_n_relinquishes);
44607 if (retire)
44608 - fscache_stat(&fscache_n_relinquishes_retire);
44609 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
44610
44611 if (!cookie) {
44612 - fscache_stat(&fscache_n_relinquishes_null);
44613 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
44614 _leave(" [no cookie]");
44615 return;
44616 }
44617 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
44618
44619 /* wait for the cookie to finish being instantiated (or to fail) */
44620 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
44621 - fscache_stat(&fscache_n_relinquishes_waitcrt);
44622 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
44623 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
44624 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
44625 }
44626 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
44627 index f6aad48..88dcf26 100644
44628 --- a/fs/fscache/internal.h
44629 +++ b/fs/fscache/internal.h
44630 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
44631 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
44632 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
44633
44634 -extern atomic_t fscache_n_op_pend;
44635 -extern atomic_t fscache_n_op_run;
44636 -extern atomic_t fscache_n_op_enqueue;
44637 -extern atomic_t fscache_n_op_deferred_release;
44638 -extern atomic_t fscache_n_op_release;
44639 -extern atomic_t fscache_n_op_gc;
44640 -extern atomic_t fscache_n_op_cancelled;
44641 -extern atomic_t fscache_n_op_rejected;
44642 -
44643 -extern atomic_t fscache_n_attr_changed;
44644 -extern atomic_t fscache_n_attr_changed_ok;
44645 -extern atomic_t fscache_n_attr_changed_nobufs;
44646 -extern atomic_t fscache_n_attr_changed_nomem;
44647 -extern atomic_t fscache_n_attr_changed_calls;
44648 -
44649 -extern atomic_t fscache_n_allocs;
44650 -extern atomic_t fscache_n_allocs_ok;
44651 -extern atomic_t fscache_n_allocs_wait;
44652 -extern atomic_t fscache_n_allocs_nobufs;
44653 -extern atomic_t fscache_n_allocs_intr;
44654 -extern atomic_t fscache_n_allocs_object_dead;
44655 -extern atomic_t fscache_n_alloc_ops;
44656 -extern atomic_t fscache_n_alloc_op_waits;
44657 -
44658 -extern atomic_t fscache_n_retrievals;
44659 -extern atomic_t fscache_n_retrievals_ok;
44660 -extern atomic_t fscache_n_retrievals_wait;
44661 -extern atomic_t fscache_n_retrievals_nodata;
44662 -extern atomic_t fscache_n_retrievals_nobufs;
44663 -extern atomic_t fscache_n_retrievals_intr;
44664 -extern atomic_t fscache_n_retrievals_nomem;
44665 -extern atomic_t fscache_n_retrievals_object_dead;
44666 -extern atomic_t fscache_n_retrieval_ops;
44667 -extern atomic_t fscache_n_retrieval_op_waits;
44668 -
44669 -extern atomic_t fscache_n_stores;
44670 -extern atomic_t fscache_n_stores_ok;
44671 -extern atomic_t fscache_n_stores_again;
44672 -extern atomic_t fscache_n_stores_nobufs;
44673 -extern atomic_t fscache_n_stores_oom;
44674 -extern atomic_t fscache_n_store_ops;
44675 -extern atomic_t fscache_n_store_calls;
44676 -extern atomic_t fscache_n_store_pages;
44677 -extern atomic_t fscache_n_store_radix_deletes;
44678 -extern atomic_t fscache_n_store_pages_over_limit;
44679 -
44680 -extern atomic_t fscache_n_store_vmscan_not_storing;
44681 -extern atomic_t fscache_n_store_vmscan_gone;
44682 -extern atomic_t fscache_n_store_vmscan_busy;
44683 -extern atomic_t fscache_n_store_vmscan_cancelled;
44684 -
44685 -extern atomic_t fscache_n_marks;
44686 -extern atomic_t fscache_n_uncaches;
44687 -
44688 -extern atomic_t fscache_n_acquires;
44689 -extern atomic_t fscache_n_acquires_null;
44690 -extern atomic_t fscache_n_acquires_no_cache;
44691 -extern atomic_t fscache_n_acquires_ok;
44692 -extern atomic_t fscache_n_acquires_nobufs;
44693 -extern atomic_t fscache_n_acquires_oom;
44694 -
44695 -extern atomic_t fscache_n_updates;
44696 -extern atomic_t fscache_n_updates_null;
44697 -extern atomic_t fscache_n_updates_run;
44698 -
44699 -extern atomic_t fscache_n_relinquishes;
44700 -extern atomic_t fscache_n_relinquishes_null;
44701 -extern atomic_t fscache_n_relinquishes_waitcrt;
44702 -extern atomic_t fscache_n_relinquishes_retire;
44703 -
44704 -extern atomic_t fscache_n_cookie_index;
44705 -extern atomic_t fscache_n_cookie_data;
44706 -extern atomic_t fscache_n_cookie_special;
44707 -
44708 -extern atomic_t fscache_n_object_alloc;
44709 -extern atomic_t fscache_n_object_no_alloc;
44710 -extern atomic_t fscache_n_object_lookups;
44711 -extern atomic_t fscache_n_object_lookups_negative;
44712 -extern atomic_t fscache_n_object_lookups_positive;
44713 -extern atomic_t fscache_n_object_lookups_timed_out;
44714 -extern atomic_t fscache_n_object_created;
44715 -extern atomic_t fscache_n_object_avail;
44716 -extern atomic_t fscache_n_object_dead;
44717 -
44718 -extern atomic_t fscache_n_checkaux_none;
44719 -extern atomic_t fscache_n_checkaux_okay;
44720 -extern atomic_t fscache_n_checkaux_update;
44721 -extern atomic_t fscache_n_checkaux_obsolete;
44722 +extern atomic_unchecked_t fscache_n_op_pend;
44723 +extern atomic_unchecked_t fscache_n_op_run;
44724 +extern atomic_unchecked_t fscache_n_op_enqueue;
44725 +extern atomic_unchecked_t fscache_n_op_deferred_release;
44726 +extern atomic_unchecked_t fscache_n_op_release;
44727 +extern atomic_unchecked_t fscache_n_op_gc;
44728 +extern atomic_unchecked_t fscache_n_op_cancelled;
44729 +extern atomic_unchecked_t fscache_n_op_rejected;
44730 +
44731 +extern atomic_unchecked_t fscache_n_attr_changed;
44732 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
44733 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
44734 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
44735 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
44736 +
44737 +extern atomic_unchecked_t fscache_n_allocs;
44738 +extern atomic_unchecked_t fscache_n_allocs_ok;
44739 +extern atomic_unchecked_t fscache_n_allocs_wait;
44740 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
44741 +extern atomic_unchecked_t fscache_n_allocs_intr;
44742 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
44743 +extern atomic_unchecked_t fscache_n_alloc_ops;
44744 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
44745 +
44746 +extern atomic_unchecked_t fscache_n_retrievals;
44747 +extern atomic_unchecked_t fscache_n_retrievals_ok;
44748 +extern atomic_unchecked_t fscache_n_retrievals_wait;
44749 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
44750 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
44751 +extern atomic_unchecked_t fscache_n_retrievals_intr;
44752 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
44753 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
44754 +extern atomic_unchecked_t fscache_n_retrieval_ops;
44755 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
44756 +
44757 +extern atomic_unchecked_t fscache_n_stores;
44758 +extern atomic_unchecked_t fscache_n_stores_ok;
44759 +extern atomic_unchecked_t fscache_n_stores_again;
44760 +extern atomic_unchecked_t fscache_n_stores_nobufs;
44761 +extern atomic_unchecked_t fscache_n_stores_oom;
44762 +extern atomic_unchecked_t fscache_n_store_ops;
44763 +extern atomic_unchecked_t fscache_n_store_calls;
44764 +extern atomic_unchecked_t fscache_n_store_pages;
44765 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
44766 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
44767 +
44768 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
44769 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
44770 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
44771 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
44772 +
44773 +extern atomic_unchecked_t fscache_n_marks;
44774 +extern atomic_unchecked_t fscache_n_uncaches;
44775 +
44776 +extern atomic_unchecked_t fscache_n_acquires;
44777 +extern atomic_unchecked_t fscache_n_acquires_null;
44778 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
44779 +extern atomic_unchecked_t fscache_n_acquires_ok;
44780 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
44781 +extern atomic_unchecked_t fscache_n_acquires_oom;
44782 +
44783 +extern atomic_unchecked_t fscache_n_updates;
44784 +extern atomic_unchecked_t fscache_n_updates_null;
44785 +extern atomic_unchecked_t fscache_n_updates_run;
44786 +
44787 +extern atomic_unchecked_t fscache_n_relinquishes;
44788 +extern atomic_unchecked_t fscache_n_relinquishes_null;
44789 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
44790 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
44791 +
44792 +extern atomic_unchecked_t fscache_n_cookie_index;
44793 +extern atomic_unchecked_t fscache_n_cookie_data;
44794 +extern atomic_unchecked_t fscache_n_cookie_special;
44795 +
44796 +extern atomic_unchecked_t fscache_n_object_alloc;
44797 +extern atomic_unchecked_t fscache_n_object_no_alloc;
44798 +extern atomic_unchecked_t fscache_n_object_lookups;
44799 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
44800 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
44801 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
44802 +extern atomic_unchecked_t fscache_n_object_created;
44803 +extern atomic_unchecked_t fscache_n_object_avail;
44804 +extern atomic_unchecked_t fscache_n_object_dead;
44805 +
44806 +extern atomic_unchecked_t fscache_n_checkaux_none;
44807 +extern atomic_unchecked_t fscache_n_checkaux_okay;
44808 +extern atomic_unchecked_t fscache_n_checkaux_update;
44809 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
44810
44811 extern atomic_t fscache_n_cop_alloc_object;
44812 extern atomic_t fscache_n_cop_lookup_object;
44813 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
44814 atomic_inc(stat);
44815 }
44816
44817 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
44818 +{
44819 + atomic_inc_unchecked(stat);
44820 +}
44821 +
44822 static inline void fscache_stat_d(atomic_t *stat)
44823 {
44824 atomic_dec(stat);
44825 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
44826
44827 #define __fscache_stat(stat) (NULL)
44828 #define fscache_stat(stat) do {} while (0)
44829 +#define fscache_stat_unchecked(stat) do {} while (0)
44830 #define fscache_stat_d(stat) do {} while (0)
44831 #endif
44832
44833 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
44834 index b6b897c..0ffff9c 100644
44835 --- a/fs/fscache/object.c
44836 +++ b/fs/fscache/object.c
44837 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44838 /* update the object metadata on disk */
44839 case FSCACHE_OBJECT_UPDATING:
44840 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
44841 - fscache_stat(&fscache_n_updates_run);
44842 + fscache_stat_unchecked(&fscache_n_updates_run);
44843 fscache_stat(&fscache_n_cop_update_object);
44844 object->cache->ops->update_object(object);
44845 fscache_stat_d(&fscache_n_cop_update_object);
44846 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44847 spin_lock(&object->lock);
44848 object->state = FSCACHE_OBJECT_DEAD;
44849 spin_unlock(&object->lock);
44850 - fscache_stat(&fscache_n_object_dead);
44851 + fscache_stat_unchecked(&fscache_n_object_dead);
44852 goto terminal_transit;
44853
44854 /* handle the parent cache of this object being withdrawn from
44855 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
44856 spin_lock(&object->lock);
44857 object->state = FSCACHE_OBJECT_DEAD;
44858 spin_unlock(&object->lock);
44859 - fscache_stat(&fscache_n_object_dead);
44860 + fscache_stat_unchecked(&fscache_n_object_dead);
44861 goto terminal_transit;
44862
44863 /* complain about the object being woken up once it is
44864 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44865 parent->cookie->def->name, cookie->def->name,
44866 object->cache->tag->name);
44867
44868 - fscache_stat(&fscache_n_object_lookups);
44869 + fscache_stat_unchecked(&fscache_n_object_lookups);
44870 fscache_stat(&fscache_n_cop_lookup_object);
44871 ret = object->cache->ops->lookup_object(object);
44872 fscache_stat_d(&fscache_n_cop_lookup_object);
44873 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
44874 if (ret == -ETIMEDOUT) {
44875 /* probably stuck behind another object, so move this one to
44876 * the back of the queue */
44877 - fscache_stat(&fscache_n_object_lookups_timed_out);
44878 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
44879 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44880 }
44881
44882 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
44883
44884 spin_lock(&object->lock);
44885 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44886 - fscache_stat(&fscache_n_object_lookups_negative);
44887 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
44888
44889 /* transit here to allow write requests to begin stacking up
44890 * and read requests to begin returning ENODATA */
44891 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
44892 * result, in which case there may be data available */
44893 spin_lock(&object->lock);
44894 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
44895 - fscache_stat(&fscache_n_object_lookups_positive);
44896 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
44897
44898 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
44899
44900 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
44901 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
44902 } else {
44903 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
44904 - fscache_stat(&fscache_n_object_created);
44905 + fscache_stat_unchecked(&fscache_n_object_created);
44906
44907 object->state = FSCACHE_OBJECT_AVAILABLE;
44908 spin_unlock(&object->lock);
44909 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
44910 fscache_enqueue_dependents(object);
44911
44912 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
44913 - fscache_stat(&fscache_n_object_avail);
44914 + fscache_stat_unchecked(&fscache_n_object_avail);
44915
44916 _leave("");
44917 }
44918 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44919 enum fscache_checkaux result;
44920
44921 if (!object->cookie->def->check_aux) {
44922 - fscache_stat(&fscache_n_checkaux_none);
44923 + fscache_stat_unchecked(&fscache_n_checkaux_none);
44924 return FSCACHE_CHECKAUX_OKAY;
44925 }
44926
44927 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
44928 switch (result) {
44929 /* entry okay as is */
44930 case FSCACHE_CHECKAUX_OKAY:
44931 - fscache_stat(&fscache_n_checkaux_okay);
44932 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
44933 break;
44934
44935 /* entry requires update */
44936 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
44937 - fscache_stat(&fscache_n_checkaux_update);
44938 + fscache_stat_unchecked(&fscache_n_checkaux_update);
44939 break;
44940
44941 /* entry requires deletion */
44942 case FSCACHE_CHECKAUX_OBSOLETE:
44943 - fscache_stat(&fscache_n_checkaux_obsolete);
44944 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
44945 break;
44946
44947 default:
44948 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
44949 index 30afdfa..2256596 100644
44950 --- a/fs/fscache/operation.c
44951 +++ b/fs/fscache/operation.c
44952 @@ -17,7 +17,7 @@
44953 #include <linux/slab.h>
44954 #include "internal.h"
44955
44956 -atomic_t fscache_op_debug_id;
44957 +atomic_unchecked_t fscache_op_debug_id;
44958 EXPORT_SYMBOL(fscache_op_debug_id);
44959
44960 /**
44961 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
44962 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
44963 ASSERTCMP(atomic_read(&op->usage), >, 0);
44964
44965 - fscache_stat(&fscache_n_op_enqueue);
44966 + fscache_stat_unchecked(&fscache_n_op_enqueue);
44967 switch (op->flags & FSCACHE_OP_TYPE) {
44968 case FSCACHE_OP_ASYNC:
44969 _debug("queue async");
44970 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
44971 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
44972 if (op->processor)
44973 fscache_enqueue_operation(op);
44974 - fscache_stat(&fscache_n_op_run);
44975 + fscache_stat_unchecked(&fscache_n_op_run);
44976 }
44977
44978 /*
44979 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44980 if (object->n_ops > 1) {
44981 atomic_inc(&op->usage);
44982 list_add_tail(&op->pend_link, &object->pending_ops);
44983 - fscache_stat(&fscache_n_op_pend);
44984 + fscache_stat_unchecked(&fscache_n_op_pend);
44985 } else if (!list_empty(&object->pending_ops)) {
44986 atomic_inc(&op->usage);
44987 list_add_tail(&op->pend_link, &object->pending_ops);
44988 - fscache_stat(&fscache_n_op_pend);
44989 + fscache_stat_unchecked(&fscache_n_op_pend);
44990 fscache_start_operations(object);
44991 } else {
44992 ASSERTCMP(object->n_in_progress, ==, 0);
44993 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
44994 object->n_exclusive++; /* reads and writes must wait */
44995 atomic_inc(&op->usage);
44996 list_add_tail(&op->pend_link, &object->pending_ops);
44997 - fscache_stat(&fscache_n_op_pend);
44998 + fscache_stat_unchecked(&fscache_n_op_pend);
44999 ret = 0;
45000 } else {
45001 /* not allowed to submit ops in any other state */
45002 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45003 if (object->n_exclusive > 0) {
45004 atomic_inc(&op->usage);
45005 list_add_tail(&op->pend_link, &object->pending_ops);
45006 - fscache_stat(&fscache_n_op_pend);
45007 + fscache_stat_unchecked(&fscache_n_op_pend);
45008 } else if (!list_empty(&object->pending_ops)) {
45009 atomic_inc(&op->usage);
45010 list_add_tail(&op->pend_link, &object->pending_ops);
45011 - fscache_stat(&fscache_n_op_pend);
45012 + fscache_stat_unchecked(&fscache_n_op_pend);
45013 fscache_start_operations(object);
45014 } else {
45015 ASSERTCMP(object->n_exclusive, ==, 0);
45016 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45017 object->n_ops++;
45018 atomic_inc(&op->usage);
45019 list_add_tail(&op->pend_link, &object->pending_ops);
45020 - fscache_stat(&fscache_n_op_pend);
45021 + fscache_stat_unchecked(&fscache_n_op_pend);
45022 ret = 0;
45023 } else if (object->state == FSCACHE_OBJECT_DYING ||
45024 object->state == FSCACHE_OBJECT_LC_DYING ||
45025 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45026 - fscache_stat(&fscache_n_op_rejected);
45027 + fscache_stat_unchecked(&fscache_n_op_rejected);
45028 ret = -ENOBUFS;
45029 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45030 fscache_report_unexpected_submission(object, op, ostate);
45031 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45032
45033 ret = -EBUSY;
45034 if (!list_empty(&op->pend_link)) {
45035 - fscache_stat(&fscache_n_op_cancelled);
45036 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45037 list_del_init(&op->pend_link);
45038 object->n_ops--;
45039 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45040 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45041 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45042 BUG();
45043
45044 - fscache_stat(&fscache_n_op_release);
45045 + fscache_stat_unchecked(&fscache_n_op_release);
45046
45047 if (op->release) {
45048 op->release(op);
45049 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45050 * lock, and defer it otherwise */
45051 if (!spin_trylock(&object->lock)) {
45052 _debug("defer put");
45053 - fscache_stat(&fscache_n_op_deferred_release);
45054 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45055
45056 cache = object->cache;
45057 spin_lock(&cache->op_gc_list_lock);
45058 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45059
45060 _debug("GC DEFERRED REL OBJ%x OP%x",
45061 object->debug_id, op->debug_id);
45062 - fscache_stat(&fscache_n_op_gc);
45063 + fscache_stat_unchecked(&fscache_n_op_gc);
45064
45065 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45066
45067 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45068 index 3f7a59b..cf196cc 100644
45069 --- a/fs/fscache/page.c
45070 +++ b/fs/fscache/page.c
45071 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45072 val = radix_tree_lookup(&cookie->stores, page->index);
45073 if (!val) {
45074 rcu_read_unlock();
45075 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45076 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45077 __fscache_uncache_page(cookie, page);
45078 return true;
45079 }
45080 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45081 spin_unlock(&cookie->stores_lock);
45082
45083 if (xpage) {
45084 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45085 - fscache_stat(&fscache_n_store_radix_deletes);
45086 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45087 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45088 ASSERTCMP(xpage, ==, page);
45089 } else {
45090 - fscache_stat(&fscache_n_store_vmscan_gone);
45091 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45092 }
45093
45094 wake_up_bit(&cookie->flags, 0);
45095 @@ -107,7 +107,7 @@ page_busy:
45096 /* we might want to wait here, but that could deadlock the allocator as
45097 * the work threads writing to the cache may all end up sleeping
45098 * on memory allocation */
45099 - fscache_stat(&fscache_n_store_vmscan_busy);
45100 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45101 return false;
45102 }
45103 EXPORT_SYMBOL(__fscache_maybe_release_page);
45104 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45105 FSCACHE_COOKIE_STORING_TAG);
45106 if (!radix_tree_tag_get(&cookie->stores, page->index,
45107 FSCACHE_COOKIE_PENDING_TAG)) {
45108 - fscache_stat(&fscache_n_store_radix_deletes);
45109 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45110 xpage = radix_tree_delete(&cookie->stores, page->index);
45111 }
45112 spin_unlock(&cookie->stores_lock);
45113 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45114
45115 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45116
45117 - fscache_stat(&fscache_n_attr_changed_calls);
45118 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45119
45120 if (fscache_object_is_active(object)) {
45121 fscache_stat(&fscache_n_cop_attr_changed);
45122 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45123
45124 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45125
45126 - fscache_stat(&fscache_n_attr_changed);
45127 + fscache_stat_unchecked(&fscache_n_attr_changed);
45128
45129 op = kzalloc(sizeof(*op), GFP_KERNEL);
45130 if (!op) {
45131 - fscache_stat(&fscache_n_attr_changed_nomem);
45132 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45133 _leave(" = -ENOMEM");
45134 return -ENOMEM;
45135 }
45136 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45137 if (fscache_submit_exclusive_op(object, op) < 0)
45138 goto nobufs;
45139 spin_unlock(&cookie->lock);
45140 - fscache_stat(&fscache_n_attr_changed_ok);
45141 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45142 fscache_put_operation(op);
45143 _leave(" = 0");
45144 return 0;
45145 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45146 nobufs:
45147 spin_unlock(&cookie->lock);
45148 kfree(op);
45149 - fscache_stat(&fscache_n_attr_changed_nobufs);
45150 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45151 _leave(" = %d", -ENOBUFS);
45152 return -ENOBUFS;
45153 }
45154 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45155 /* allocate a retrieval operation and attempt to submit it */
45156 op = kzalloc(sizeof(*op), GFP_NOIO);
45157 if (!op) {
45158 - fscache_stat(&fscache_n_retrievals_nomem);
45159 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45160 return NULL;
45161 }
45162
45163 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45164 return 0;
45165 }
45166
45167 - fscache_stat(&fscache_n_retrievals_wait);
45168 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45169
45170 jif = jiffies;
45171 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45172 fscache_wait_bit_interruptible,
45173 TASK_INTERRUPTIBLE) != 0) {
45174 - fscache_stat(&fscache_n_retrievals_intr);
45175 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45176 _leave(" = -ERESTARTSYS");
45177 return -ERESTARTSYS;
45178 }
45179 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45180 */
45181 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45182 struct fscache_retrieval *op,
45183 - atomic_t *stat_op_waits,
45184 - atomic_t *stat_object_dead)
45185 + atomic_unchecked_t *stat_op_waits,
45186 + atomic_unchecked_t *stat_object_dead)
45187 {
45188 int ret;
45189
45190 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45191 goto check_if_dead;
45192
45193 _debug(">>> WT");
45194 - fscache_stat(stat_op_waits);
45195 + fscache_stat_unchecked(stat_op_waits);
45196 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45197 fscache_wait_bit_interruptible,
45198 TASK_INTERRUPTIBLE) < 0) {
45199 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45200
45201 check_if_dead:
45202 if (unlikely(fscache_object_is_dead(object))) {
45203 - fscache_stat(stat_object_dead);
45204 + fscache_stat_unchecked(stat_object_dead);
45205 return -ENOBUFS;
45206 }
45207 return 0;
45208 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45209
45210 _enter("%p,%p,,,", cookie, page);
45211
45212 - fscache_stat(&fscache_n_retrievals);
45213 + fscache_stat_unchecked(&fscache_n_retrievals);
45214
45215 if (hlist_empty(&cookie->backing_objects))
45216 goto nobufs;
45217 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45218 goto nobufs_unlock;
45219 spin_unlock(&cookie->lock);
45220
45221 - fscache_stat(&fscache_n_retrieval_ops);
45222 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45223
45224 /* pin the netfs read context in case we need to do the actual netfs
45225 * read because we've encountered a cache read failure */
45226 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45227
45228 error:
45229 if (ret == -ENOMEM)
45230 - fscache_stat(&fscache_n_retrievals_nomem);
45231 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45232 else if (ret == -ERESTARTSYS)
45233 - fscache_stat(&fscache_n_retrievals_intr);
45234 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45235 else if (ret == -ENODATA)
45236 - fscache_stat(&fscache_n_retrievals_nodata);
45237 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45238 else if (ret < 0)
45239 - fscache_stat(&fscache_n_retrievals_nobufs);
45240 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45241 else
45242 - fscache_stat(&fscache_n_retrievals_ok);
45243 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45244
45245 fscache_put_retrieval(op);
45246 _leave(" = %d", ret);
45247 @@ -429,7 +429,7 @@ nobufs_unlock:
45248 spin_unlock(&cookie->lock);
45249 kfree(op);
45250 nobufs:
45251 - fscache_stat(&fscache_n_retrievals_nobufs);
45252 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45253 _leave(" = -ENOBUFS");
45254 return -ENOBUFS;
45255 }
45256 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45257
45258 _enter("%p,,%d,,,", cookie, *nr_pages);
45259
45260 - fscache_stat(&fscache_n_retrievals);
45261 + fscache_stat_unchecked(&fscache_n_retrievals);
45262
45263 if (hlist_empty(&cookie->backing_objects))
45264 goto nobufs;
45265 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45266 goto nobufs_unlock;
45267 spin_unlock(&cookie->lock);
45268
45269 - fscache_stat(&fscache_n_retrieval_ops);
45270 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45271
45272 /* pin the netfs read context in case we need to do the actual netfs
45273 * read because we've encountered a cache read failure */
45274 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45275
45276 error:
45277 if (ret == -ENOMEM)
45278 - fscache_stat(&fscache_n_retrievals_nomem);
45279 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45280 else if (ret == -ERESTARTSYS)
45281 - fscache_stat(&fscache_n_retrievals_intr);
45282 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45283 else if (ret == -ENODATA)
45284 - fscache_stat(&fscache_n_retrievals_nodata);
45285 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45286 else if (ret < 0)
45287 - fscache_stat(&fscache_n_retrievals_nobufs);
45288 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45289 else
45290 - fscache_stat(&fscache_n_retrievals_ok);
45291 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45292
45293 fscache_put_retrieval(op);
45294 _leave(" = %d", ret);
45295 @@ -545,7 +545,7 @@ nobufs_unlock:
45296 spin_unlock(&cookie->lock);
45297 kfree(op);
45298 nobufs:
45299 - fscache_stat(&fscache_n_retrievals_nobufs);
45300 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45301 _leave(" = -ENOBUFS");
45302 return -ENOBUFS;
45303 }
45304 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45305
45306 _enter("%p,%p,,,", cookie, page);
45307
45308 - fscache_stat(&fscache_n_allocs);
45309 + fscache_stat_unchecked(&fscache_n_allocs);
45310
45311 if (hlist_empty(&cookie->backing_objects))
45312 goto nobufs;
45313 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45314 goto nobufs_unlock;
45315 spin_unlock(&cookie->lock);
45316
45317 - fscache_stat(&fscache_n_alloc_ops);
45318 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45319
45320 ret = fscache_wait_for_retrieval_activation(
45321 object, op,
45322 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45323
45324 error:
45325 if (ret == -ERESTARTSYS)
45326 - fscache_stat(&fscache_n_allocs_intr);
45327 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45328 else if (ret < 0)
45329 - fscache_stat(&fscache_n_allocs_nobufs);
45330 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45331 else
45332 - fscache_stat(&fscache_n_allocs_ok);
45333 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45334
45335 fscache_put_retrieval(op);
45336 _leave(" = %d", ret);
45337 @@ -625,7 +625,7 @@ nobufs_unlock:
45338 spin_unlock(&cookie->lock);
45339 kfree(op);
45340 nobufs:
45341 - fscache_stat(&fscache_n_allocs_nobufs);
45342 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45343 _leave(" = -ENOBUFS");
45344 return -ENOBUFS;
45345 }
45346 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45347
45348 spin_lock(&cookie->stores_lock);
45349
45350 - fscache_stat(&fscache_n_store_calls);
45351 + fscache_stat_unchecked(&fscache_n_store_calls);
45352
45353 /* find a page to store */
45354 page = NULL;
45355 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45356 page = results[0];
45357 _debug("gang %d [%lx]", n, page->index);
45358 if (page->index > op->store_limit) {
45359 - fscache_stat(&fscache_n_store_pages_over_limit);
45360 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45361 goto superseded;
45362 }
45363
45364 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45365 spin_unlock(&cookie->stores_lock);
45366 spin_unlock(&object->lock);
45367
45368 - fscache_stat(&fscache_n_store_pages);
45369 + fscache_stat_unchecked(&fscache_n_store_pages);
45370 fscache_stat(&fscache_n_cop_write_page);
45371 ret = object->cache->ops->write_page(op, page);
45372 fscache_stat_d(&fscache_n_cop_write_page);
45373 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45374 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45375 ASSERT(PageFsCache(page));
45376
45377 - fscache_stat(&fscache_n_stores);
45378 + fscache_stat_unchecked(&fscache_n_stores);
45379
45380 op = kzalloc(sizeof(*op), GFP_NOIO);
45381 if (!op)
45382 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45383 spin_unlock(&cookie->stores_lock);
45384 spin_unlock(&object->lock);
45385
45386 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45387 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45388 op->store_limit = object->store_limit;
45389
45390 if (fscache_submit_op(object, &op->op) < 0)
45391 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45392
45393 spin_unlock(&cookie->lock);
45394 radix_tree_preload_end();
45395 - fscache_stat(&fscache_n_store_ops);
45396 - fscache_stat(&fscache_n_stores_ok);
45397 + fscache_stat_unchecked(&fscache_n_store_ops);
45398 + fscache_stat_unchecked(&fscache_n_stores_ok);
45399
45400 /* the work queue now carries its own ref on the object */
45401 fscache_put_operation(&op->op);
45402 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45403 return 0;
45404
45405 already_queued:
45406 - fscache_stat(&fscache_n_stores_again);
45407 + fscache_stat_unchecked(&fscache_n_stores_again);
45408 already_pending:
45409 spin_unlock(&cookie->stores_lock);
45410 spin_unlock(&object->lock);
45411 spin_unlock(&cookie->lock);
45412 radix_tree_preload_end();
45413 kfree(op);
45414 - fscache_stat(&fscache_n_stores_ok);
45415 + fscache_stat_unchecked(&fscache_n_stores_ok);
45416 _leave(" = 0");
45417 return 0;
45418
45419 @@ -851,14 +851,14 @@ nobufs:
45420 spin_unlock(&cookie->lock);
45421 radix_tree_preload_end();
45422 kfree(op);
45423 - fscache_stat(&fscache_n_stores_nobufs);
45424 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45425 _leave(" = -ENOBUFS");
45426 return -ENOBUFS;
45427
45428 nomem_free:
45429 kfree(op);
45430 nomem:
45431 - fscache_stat(&fscache_n_stores_oom);
45432 + fscache_stat_unchecked(&fscache_n_stores_oom);
45433 _leave(" = -ENOMEM");
45434 return -ENOMEM;
45435 }
45436 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45437 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45438 ASSERTCMP(page, !=, NULL);
45439
45440 - fscache_stat(&fscache_n_uncaches);
45441 + fscache_stat_unchecked(&fscache_n_uncaches);
45442
45443 /* cache withdrawal may beat us to it */
45444 if (!PageFsCache(page))
45445 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45446 unsigned long loop;
45447
45448 #ifdef CONFIG_FSCACHE_STATS
45449 - atomic_add(pagevec->nr, &fscache_n_marks);
45450 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45451 #endif
45452
45453 for (loop = 0; loop < pagevec->nr; loop++) {
45454 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45455 index 4765190..2a067f2 100644
45456 --- a/fs/fscache/stats.c
45457 +++ b/fs/fscache/stats.c
45458 @@ -18,95 +18,95 @@
45459 /*
45460 * operation counters
45461 */
45462 -atomic_t fscache_n_op_pend;
45463 -atomic_t fscache_n_op_run;
45464 -atomic_t fscache_n_op_enqueue;
45465 -atomic_t fscache_n_op_requeue;
45466 -atomic_t fscache_n_op_deferred_release;
45467 -atomic_t fscache_n_op_release;
45468 -atomic_t fscache_n_op_gc;
45469 -atomic_t fscache_n_op_cancelled;
45470 -atomic_t fscache_n_op_rejected;
45471 -
45472 -atomic_t fscache_n_attr_changed;
45473 -atomic_t fscache_n_attr_changed_ok;
45474 -atomic_t fscache_n_attr_changed_nobufs;
45475 -atomic_t fscache_n_attr_changed_nomem;
45476 -atomic_t fscache_n_attr_changed_calls;
45477 -
45478 -atomic_t fscache_n_allocs;
45479 -atomic_t fscache_n_allocs_ok;
45480 -atomic_t fscache_n_allocs_wait;
45481 -atomic_t fscache_n_allocs_nobufs;
45482 -atomic_t fscache_n_allocs_intr;
45483 -atomic_t fscache_n_allocs_object_dead;
45484 -atomic_t fscache_n_alloc_ops;
45485 -atomic_t fscache_n_alloc_op_waits;
45486 -
45487 -atomic_t fscache_n_retrievals;
45488 -atomic_t fscache_n_retrievals_ok;
45489 -atomic_t fscache_n_retrievals_wait;
45490 -atomic_t fscache_n_retrievals_nodata;
45491 -atomic_t fscache_n_retrievals_nobufs;
45492 -atomic_t fscache_n_retrievals_intr;
45493 -atomic_t fscache_n_retrievals_nomem;
45494 -atomic_t fscache_n_retrievals_object_dead;
45495 -atomic_t fscache_n_retrieval_ops;
45496 -atomic_t fscache_n_retrieval_op_waits;
45497 -
45498 -atomic_t fscache_n_stores;
45499 -atomic_t fscache_n_stores_ok;
45500 -atomic_t fscache_n_stores_again;
45501 -atomic_t fscache_n_stores_nobufs;
45502 -atomic_t fscache_n_stores_oom;
45503 -atomic_t fscache_n_store_ops;
45504 -atomic_t fscache_n_store_calls;
45505 -atomic_t fscache_n_store_pages;
45506 -atomic_t fscache_n_store_radix_deletes;
45507 -atomic_t fscache_n_store_pages_over_limit;
45508 -
45509 -atomic_t fscache_n_store_vmscan_not_storing;
45510 -atomic_t fscache_n_store_vmscan_gone;
45511 -atomic_t fscache_n_store_vmscan_busy;
45512 -atomic_t fscache_n_store_vmscan_cancelled;
45513 -
45514 -atomic_t fscache_n_marks;
45515 -atomic_t fscache_n_uncaches;
45516 -
45517 -atomic_t fscache_n_acquires;
45518 -atomic_t fscache_n_acquires_null;
45519 -atomic_t fscache_n_acquires_no_cache;
45520 -atomic_t fscache_n_acquires_ok;
45521 -atomic_t fscache_n_acquires_nobufs;
45522 -atomic_t fscache_n_acquires_oom;
45523 -
45524 -atomic_t fscache_n_updates;
45525 -atomic_t fscache_n_updates_null;
45526 -atomic_t fscache_n_updates_run;
45527 -
45528 -atomic_t fscache_n_relinquishes;
45529 -atomic_t fscache_n_relinquishes_null;
45530 -atomic_t fscache_n_relinquishes_waitcrt;
45531 -atomic_t fscache_n_relinquishes_retire;
45532 -
45533 -atomic_t fscache_n_cookie_index;
45534 -atomic_t fscache_n_cookie_data;
45535 -atomic_t fscache_n_cookie_special;
45536 -
45537 -atomic_t fscache_n_object_alloc;
45538 -atomic_t fscache_n_object_no_alloc;
45539 -atomic_t fscache_n_object_lookups;
45540 -atomic_t fscache_n_object_lookups_negative;
45541 -atomic_t fscache_n_object_lookups_positive;
45542 -atomic_t fscache_n_object_lookups_timed_out;
45543 -atomic_t fscache_n_object_created;
45544 -atomic_t fscache_n_object_avail;
45545 -atomic_t fscache_n_object_dead;
45546 -
45547 -atomic_t fscache_n_checkaux_none;
45548 -atomic_t fscache_n_checkaux_okay;
45549 -atomic_t fscache_n_checkaux_update;
45550 -atomic_t fscache_n_checkaux_obsolete;
45551 +atomic_unchecked_t fscache_n_op_pend;
45552 +atomic_unchecked_t fscache_n_op_run;
45553 +atomic_unchecked_t fscache_n_op_enqueue;
45554 +atomic_unchecked_t fscache_n_op_requeue;
45555 +atomic_unchecked_t fscache_n_op_deferred_release;
45556 +atomic_unchecked_t fscache_n_op_release;
45557 +atomic_unchecked_t fscache_n_op_gc;
45558 +atomic_unchecked_t fscache_n_op_cancelled;
45559 +atomic_unchecked_t fscache_n_op_rejected;
45560 +
45561 +atomic_unchecked_t fscache_n_attr_changed;
45562 +atomic_unchecked_t fscache_n_attr_changed_ok;
45563 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45564 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45565 +atomic_unchecked_t fscache_n_attr_changed_calls;
45566 +
45567 +atomic_unchecked_t fscache_n_allocs;
45568 +atomic_unchecked_t fscache_n_allocs_ok;
45569 +atomic_unchecked_t fscache_n_allocs_wait;
45570 +atomic_unchecked_t fscache_n_allocs_nobufs;
45571 +atomic_unchecked_t fscache_n_allocs_intr;
45572 +atomic_unchecked_t fscache_n_allocs_object_dead;
45573 +atomic_unchecked_t fscache_n_alloc_ops;
45574 +atomic_unchecked_t fscache_n_alloc_op_waits;
45575 +
45576 +atomic_unchecked_t fscache_n_retrievals;
45577 +atomic_unchecked_t fscache_n_retrievals_ok;
45578 +atomic_unchecked_t fscache_n_retrievals_wait;
45579 +atomic_unchecked_t fscache_n_retrievals_nodata;
45580 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45581 +atomic_unchecked_t fscache_n_retrievals_intr;
45582 +atomic_unchecked_t fscache_n_retrievals_nomem;
45583 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45584 +atomic_unchecked_t fscache_n_retrieval_ops;
45585 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45586 +
45587 +atomic_unchecked_t fscache_n_stores;
45588 +atomic_unchecked_t fscache_n_stores_ok;
45589 +atomic_unchecked_t fscache_n_stores_again;
45590 +atomic_unchecked_t fscache_n_stores_nobufs;
45591 +atomic_unchecked_t fscache_n_stores_oom;
45592 +atomic_unchecked_t fscache_n_store_ops;
45593 +atomic_unchecked_t fscache_n_store_calls;
45594 +atomic_unchecked_t fscache_n_store_pages;
45595 +atomic_unchecked_t fscache_n_store_radix_deletes;
45596 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45597 +
45598 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45599 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45600 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45601 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45602 +
45603 +atomic_unchecked_t fscache_n_marks;
45604 +atomic_unchecked_t fscache_n_uncaches;
45605 +
45606 +atomic_unchecked_t fscache_n_acquires;
45607 +atomic_unchecked_t fscache_n_acquires_null;
45608 +atomic_unchecked_t fscache_n_acquires_no_cache;
45609 +atomic_unchecked_t fscache_n_acquires_ok;
45610 +atomic_unchecked_t fscache_n_acquires_nobufs;
45611 +atomic_unchecked_t fscache_n_acquires_oom;
45612 +
45613 +atomic_unchecked_t fscache_n_updates;
45614 +atomic_unchecked_t fscache_n_updates_null;
45615 +atomic_unchecked_t fscache_n_updates_run;
45616 +
45617 +atomic_unchecked_t fscache_n_relinquishes;
45618 +atomic_unchecked_t fscache_n_relinquishes_null;
45619 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45620 +atomic_unchecked_t fscache_n_relinquishes_retire;
45621 +
45622 +atomic_unchecked_t fscache_n_cookie_index;
45623 +atomic_unchecked_t fscache_n_cookie_data;
45624 +atomic_unchecked_t fscache_n_cookie_special;
45625 +
45626 +atomic_unchecked_t fscache_n_object_alloc;
45627 +atomic_unchecked_t fscache_n_object_no_alloc;
45628 +atomic_unchecked_t fscache_n_object_lookups;
45629 +atomic_unchecked_t fscache_n_object_lookups_negative;
45630 +atomic_unchecked_t fscache_n_object_lookups_positive;
45631 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
45632 +atomic_unchecked_t fscache_n_object_created;
45633 +atomic_unchecked_t fscache_n_object_avail;
45634 +atomic_unchecked_t fscache_n_object_dead;
45635 +
45636 +atomic_unchecked_t fscache_n_checkaux_none;
45637 +atomic_unchecked_t fscache_n_checkaux_okay;
45638 +atomic_unchecked_t fscache_n_checkaux_update;
45639 +atomic_unchecked_t fscache_n_checkaux_obsolete;
45640
45641 atomic_t fscache_n_cop_alloc_object;
45642 atomic_t fscache_n_cop_lookup_object;
45643 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
45644 seq_puts(m, "FS-Cache statistics\n");
45645
45646 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
45647 - atomic_read(&fscache_n_cookie_index),
45648 - atomic_read(&fscache_n_cookie_data),
45649 - atomic_read(&fscache_n_cookie_special));
45650 + atomic_read_unchecked(&fscache_n_cookie_index),
45651 + atomic_read_unchecked(&fscache_n_cookie_data),
45652 + atomic_read_unchecked(&fscache_n_cookie_special));
45653
45654 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
45655 - atomic_read(&fscache_n_object_alloc),
45656 - atomic_read(&fscache_n_object_no_alloc),
45657 - atomic_read(&fscache_n_object_avail),
45658 - atomic_read(&fscache_n_object_dead));
45659 + atomic_read_unchecked(&fscache_n_object_alloc),
45660 + atomic_read_unchecked(&fscache_n_object_no_alloc),
45661 + atomic_read_unchecked(&fscache_n_object_avail),
45662 + atomic_read_unchecked(&fscache_n_object_dead));
45663 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
45664 - atomic_read(&fscache_n_checkaux_none),
45665 - atomic_read(&fscache_n_checkaux_okay),
45666 - atomic_read(&fscache_n_checkaux_update),
45667 - atomic_read(&fscache_n_checkaux_obsolete));
45668 + atomic_read_unchecked(&fscache_n_checkaux_none),
45669 + atomic_read_unchecked(&fscache_n_checkaux_okay),
45670 + atomic_read_unchecked(&fscache_n_checkaux_update),
45671 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
45672
45673 seq_printf(m, "Pages : mrk=%u unc=%u\n",
45674 - atomic_read(&fscache_n_marks),
45675 - atomic_read(&fscache_n_uncaches));
45676 + atomic_read_unchecked(&fscache_n_marks),
45677 + atomic_read_unchecked(&fscache_n_uncaches));
45678
45679 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
45680 " oom=%u\n",
45681 - atomic_read(&fscache_n_acquires),
45682 - atomic_read(&fscache_n_acquires_null),
45683 - atomic_read(&fscache_n_acquires_no_cache),
45684 - atomic_read(&fscache_n_acquires_ok),
45685 - atomic_read(&fscache_n_acquires_nobufs),
45686 - atomic_read(&fscache_n_acquires_oom));
45687 + atomic_read_unchecked(&fscache_n_acquires),
45688 + atomic_read_unchecked(&fscache_n_acquires_null),
45689 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
45690 + atomic_read_unchecked(&fscache_n_acquires_ok),
45691 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
45692 + atomic_read_unchecked(&fscache_n_acquires_oom));
45693
45694 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
45695 - atomic_read(&fscache_n_object_lookups),
45696 - atomic_read(&fscache_n_object_lookups_negative),
45697 - atomic_read(&fscache_n_object_lookups_positive),
45698 - atomic_read(&fscache_n_object_created),
45699 - atomic_read(&fscache_n_object_lookups_timed_out));
45700 + atomic_read_unchecked(&fscache_n_object_lookups),
45701 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
45702 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
45703 + atomic_read_unchecked(&fscache_n_object_created),
45704 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
45705
45706 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
45707 - atomic_read(&fscache_n_updates),
45708 - atomic_read(&fscache_n_updates_null),
45709 - atomic_read(&fscache_n_updates_run));
45710 + atomic_read_unchecked(&fscache_n_updates),
45711 + atomic_read_unchecked(&fscache_n_updates_null),
45712 + atomic_read_unchecked(&fscache_n_updates_run));
45713
45714 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
45715 - atomic_read(&fscache_n_relinquishes),
45716 - atomic_read(&fscache_n_relinquishes_null),
45717 - atomic_read(&fscache_n_relinquishes_waitcrt),
45718 - atomic_read(&fscache_n_relinquishes_retire));
45719 + atomic_read_unchecked(&fscache_n_relinquishes),
45720 + atomic_read_unchecked(&fscache_n_relinquishes_null),
45721 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
45722 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
45723
45724 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
45725 - atomic_read(&fscache_n_attr_changed),
45726 - atomic_read(&fscache_n_attr_changed_ok),
45727 - atomic_read(&fscache_n_attr_changed_nobufs),
45728 - atomic_read(&fscache_n_attr_changed_nomem),
45729 - atomic_read(&fscache_n_attr_changed_calls));
45730 + atomic_read_unchecked(&fscache_n_attr_changed),
45731 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
45732 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
45733 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
45734 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
45735
45736 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
45737 - atomic_read(&fscache_n_allocs),
45738 - atomic_read(&fscache_n_allocs_ok),
45739 - atomic_read(&fscache_n_allocs_wait),
45740 - atomic_read(&fscache_n_allocs_nobufs),
45741 - atomic_read(&fscache_n_allocs_intr));
45742 + atomic_read_unchecked(&fscache_n_allocs),
45743 + atomic_read_unchecked(&fscache_n_allocs_ok),
45744 + atomic_read_unchecked(&fscache_n_allocs_wait),
45745 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
45746 + atomic_read_unchecked(&fscache_n_allocs_intr));
45747 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
45748 - atomic_read(&fscache_n_alloc_ops),
45749 - atomic_read(&fscache_n_alloc_op_waits),
45750 - atomic_read(&fscache_n_allocs_object_dead));
45751 + atomic_read_unchecked(&fscache_n_alloc_ops),
45752 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
45753 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
45754
45755 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
45756 " int=%u oom=%u\n",
45757 - atomic_read(&fscache_n_retrievals),
45758 - atomic_read(&fscache_n_retrievals_ok),
45759 - atomic_read(&fscache_n_retrievals_wait),
45760 - atomic_read(&fscache_n_retrievals_nodata),
45761 - atomic_read(&fscache_n_retrievals_nobufs),
45762 - atomic_read(&fscache_n_retrievals_intr),
45763 - atomic_read(&fscache_n_retrievals_nomem));
45764 + atomic_read_unchecked(&fscache_n_retrievals),
45765 + atomic_read_unchecked(&fscache_n_retrievals_ok),
45766 + atomic_read_unchecked(&fscache_n_retrievals_wait),
45767 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
45768 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
45769 + atomic_read_unchecked(&fscache_n_retrievals_intr),
45770 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
45771 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
45772 - atomic_read(&fscache_n_retrieval_ops),
45773 - atomic_read(&fscache_n_retrieval_op_waits),
45774 - atomic_read(&fscache_n_retrievals_object_dead));
45775 + atomic_read_unchecked(&fscache_n_retrieval_ops),
45776 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
45777 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
45778
45779 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
45780 - atomic_read(&fscache_n_stores),
45781 - atomic_read(&fscache_n_stores_ok),
45782 - atomic_read(&fscache_n_stores_again),
45783 - atomic_read(&fscache_n_stores_nobufs),
45784 - atomic_read(&fscache_n_stores_oom));
45785 + atomic_read_unchecked(&fscache_n_stores),
45786 + atomic_read_unchecked(&fscache_n_stores_ok),
45787 + atomic_read_unchecked(&fscache_n_stores_again),
45788 + atomic_read_unchecked(&fscache_n_stores_nobufs),
45789 + atomic_read_unchecked(&fscache_n_stores_oom));
45790 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
45791 - atomic_read(&fscache_n_store_ops),
45792 - atomic_read(&fscache_n_store_calls),
45793 - atomic_read(&fscache_n_store_pages),
45794 - atomic_read(&fscache_n_store_radix_deletes),
45795 - atomic_read(&fscache_n_store_pages_over_limit));
45796 + atomic_read_unchecked(&fscache_n_store_ops),
45797 + atomic_read_unchecked(&fscache_n_store_calls),
45798 + atomic_read_unchecked(&fscache_n_store_pages),
45799 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
45800 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
45801
45802 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
45803 - atomic_read(&fscache_n_store_vmscan_not_storing),
45804 - atomic_read(&fscache_n_store_vmscan_gone),
45805 - atomic_read(&fscache_n_store_vmscan_busy),
45806 - atomic_read(&fscache_n_store_vmscan_cancelled));
45807 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
45808 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
45809 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
45810 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
45811
45812 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
45813 - atomic_read(&fscache_n_op_pend),
45814 - atomic_read(&fscache_n_op_run),
45815 - atomic_read(&fscache_n_op_enqueue),
45816 - atomic_read(&fscache_n_op_cancelled),
45817 - atomic_read(&fscache_n_op_rejected));
45818 + atomic_read_unchecked(&fscache_n_op_pend),
45819 + atomic_read_unchecked(&fscache_n_op_run),
45820 + atomic_read_unchecked(&fscache_n_op_enqueue),
45821 + atomic_read_unchecked(&fscache_n_op_cancelled),
45822 + atomic_read_unchecked(&fscache_n_op_rejected));
45823 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
45824 - atomic_read(&fscache_n_op_deferred_release),
45825 - atomic_read(&fscache_n_op_release),
45826 - atomic_read(&fscache_n_op_gc));
45827 + atomic_read_unchecked(&fscache_n_op_deferred_release),
45828 + atomic_read_unchecked(&fscache_n_op_release),
45829 + atomic_read_unchecked(&fscache_n_op_gc));
45830
45831 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
45832 atomic_read(&fscache_n_cop_alloc_object),
45833 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
45834 index b6cca47..ec782c3 100644
45835 --- a/fs/fuse/cuse.c
45836 +++ b/fs/fuse/cuse.c
45837 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
45838 INIT_LIST_HEAD(&cuse_conntbl[i]);
45839
45840 /* inherit and extend fuse_dev_operations */
45841 - cuse_channel_fops = fuse_dev_operations;
45842 - cuse_channel_fops.owner = THIS_MODULE;
45843 - cuse_channel_fops.open = cuse_channel_open;
45844 - cuse_channel_fops.release = cuse_channel_release;
45845 + pax_open_kernel();
45846 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
45847 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
45848 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
45849 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
45850 + pax_close_kernel();
45851
45852 cuse_class = class_create(THIS_MODULE, "cuse");
45853 if (IS_ERR(cuse_class))
45854 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
45855 index 5cb8614..6865b11 100644
45856 --- a/fs/fuse/dev.c
45857 +++ b/fs/fuse/dev.c
45858 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
45859 ret = 0;
45860 pipe_lock(pipe);
45861
45862 - if (!pipe->readers) {
45863 + if (!atomic_read(&pipe->readers)) {
45864 send_sig(SIGPIPE, current, 0);
45865 if (!ret)
45866 ret = -EPIPE;
45867 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
45868 index 9f63e49..d8a64c0 100644
45869 --- a/fs/fuse/dir.c
45870 +++ b/fs/fuse/dir.c
45871 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *dentry)
45872 return link;
45873 }
45874
45875 -static void free_link(char *link)
45876 +static void free_link(const char *link)
45877 {
45878 if (!IS_ERR(link))
45879 free_page((unsigned long) link);
45880 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
45881 index 900cf98..3896726 100644
45882 --- a/fs/gfs2/inode.c
45883 +++ b/fs/gfs2/inode.c
45884 @@ -1517,7 +1517,7 @@ out:
45885
45886 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
45887 {
45888 - char *s = nd_get_link(nd);
45889 + const char *s = nd_get_link(nd);
45890 if (!IS_ERR(s))
45891 kfree(s);
45892 }
45893 diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
45894 index 3ebc437..eb23952 100644
45895 --- a/fs/hfs/btree.c
45896 +++ b/fs/hfs/btree.c
45897 @@ -46,11 +46,27 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
45898 case HFS_EXT_CNID:
45899 hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
45900 mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
45901 +
45902 + if (HFS_I(tree->inode)->alloc_blocks >
45903 + HFS_I(tree->inode)->first_blocks) {
45904 + printk(KERN_ERR "hfs: invalid btree extent records\n");
45905 + unlock_new_inode(tree->inode);
45906 + goto free_inode;
45907 + }
45908 +
45909 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
45910 break;
45911 case HFS_CAT_CNID:
45912 hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
45913 mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
45914 +
45915 + if (!HFS_I(tree->inode)->first_blocks) {
45916 + printk(KERN_ERR "hfs: invalid btree extent records "
45917 + "(0 size).\n");
45918 + unlock_new_inode(tree->inode);
45919 + goto free_inode;
45920 + }
45921 +
45922 tree->inode->i_mapping->a_ops = &hfs_btree_aops;
45923 break;
45924 default:
45925 @@ -59,11 +75,6 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
45926 }
45927 unlock_new_inode(tree->inode);
45928
45929 - if (!HFS_I(tree->inode)->first_blocks) {
45930 - printk(KERN_ERR "hfs: invalid btree extent records (0 size).\n");
45931 - goto free_inode;
45932 - }
45933 -
45934 mapping = tree->inode->i_mapping;
45935 page = read_mapping_page(mapping, 0, NULL);
45936 if (IS_ERR(page))
45937 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
45938 index 4dfbfec..947c9c2 100644
45939 --- a/fs/hfsplus/catalog.c
45940 +++ b/fs/hfsplus/catalog.c
45941 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
45942 int err;
45943 u16 type;
45944
45945 + pax_track_stack();
45946 +
45947 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
45948 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
45949 if (err)
45950 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
45951 int entry_size;
45952 int err;
45953
45954 + pax_track_stack();
45955 +
45956 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
45957 str->name, cnid, inode->i_nlink);
45958 err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
45959 @@ -353,6 +357,8 @@ int hfsplus_rename_cat(u32 cnid,
45960 int entry_size, type;
45961 int err;
45962
45963 + pax_track_stack();
45964 +
45965 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
45966 cnid, src_dir->i_ino, src_name->name,
45967 dst_dir->i_ino, dst_name->name);
45968 diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
45969 index 25b2443..09a3341 100644
45970 --- a/fs/hfsplus/dir.c
45971 +++ b/fs/hfsplus/dir.c
45972 @@ -131,6 +131,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
45973 struct hfsplus_readdir_data *rd;
45974 u16 type;
45975
45976 + pax_track_stack();
45977 +
45978 if (filp->f_pos >= inode->i_size)
45979 return 0;
45980
45981 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
45982 index 4cc1e3a..ad0f70b 100644
45983 --- a/fs/hfsplus/inode.c
45984 +++ b/fs/hfsplus/inode.c
45985 @@ -501,6 +501,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
45986 int res = 0;
45987 u16 type;
45988
45989 + pax_track_stack();
45990 +
45991 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
45992
45993 HFSPLUS_I(inode)->linkid = 0;
45994 @@ -564,6 +566,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
45995 struct hfs_find_data fd;
45996 hfsplus_cat_entry entry;
45997
45998 + pax_track_stack();
45999 +
46000 if (HFSPLUS_IS_RSRC(inode))
46001 main_inode = HFSPLUS_I(inode)->rsrc_inode;
46002
46003 diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
46004 index fbaa669..c548cd0 100644
46005 --- a/fs/hfsplus/ioctl.c
46006 +++ b/fs/hfsplus/ioctl.c
46007 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
46008 struct hfsplus_cat_file *file;
46009 int res;
46010
46011 + pax_track_stack();
46012 +
46013 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46014 return -EOPNOTSUPP;
46015
46016 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
46017 struct hfsplus_cat_file *file;
46018 ssize_t res = 0;
46019
46020 + pax_track_stack();
46021 +
46022 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
46023 return -EOPNOTSUPP;
46024
46025 diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
46026 index d24a9b6..dd9b3dd 100644
46027 --- a/fs/hfsplus/super.c
46028 +++ b/fs/hfsplus/super.c
46029 @@ -347,6 +347,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
46030 u64 last_fs_block, last_fs_page;
46031 int err;
46032
46033 + pax_track_stack();
46034 +
46035 err = -EINVAL;
46036 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
46037 if (!sbi)
46038 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46039 index ec88953..cb5e98e 100644
46040 --- a/fs/hugetlbfs/inode.c
46041 +++ b/fs/hugetlbfs/inode.c
46042 @@ -915,7 +915,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46043 .kill_sb = kill_litter_super,
46044 };
46045
46046 -static struct vfsmount *hugetlbfs_vfsmount;
46047 +struct vfsmount *hugetlbfs_vfsmount;
46048
46049 static int can_do_hugetlb_shm(void)
46050 {
46051 diff --git a/fs/inode.c b/fs/inode.c
46052 index ec79246..054c36a 100644
46053 --- a/fs/inode.c
46054 +++ b/fs/inode.c
46055 @@ -787,8 +787,8 @@ unsigned int get_next_ino(void)
46056
46057 #ifdef CONFIG_SMP
46058 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46059 - static atomic_t shared_last_ino;
46060 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46061 + static atomic_unchecked_t shared_last_ino;
46062 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46063
46064 res = next - LAST_INO_BATCH;
46065 }
46066 diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
46067 index f94fc48..3bb8d30 100644
46068 --- a/fs/jbd/checkpoint.c
46069 +++ b/fs/jbd/checkpoint.c
46070 @@ -358,6 +358,8 @@ int log_do_checkpoint(journal_t *journal)
46071 tid_t this_tid;
46072 int result;
46073
46074 + pax_track_stack();
46075 +
46076 jbd_debug(1, "Start checkpoint\n");
46077
46078 /*
46079 diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
46080 index 16a5047..88ff6ca 100644
46081 --- a/fs/jffs2/compr_rtime.c
46082 +++ b/fs/jffs2/compr_rtime.c
46083 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
46084 int outpos = 0;
46085 int pos=0;
46086
46087 + pax_track_stack();
46088 +
46089 memset(positions,0,sizeof(positions));
46090
46091 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
46092 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
46093 int outpos = 0;
46094 int pos=0;
46095
46096 + pax_track_stack();
46097 +
46098 memset(positions,0,sizeof(positions));
46099
46100 while (outpos<destlen) {
46101 diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
46102 index 9e7cec8..4713089 100644
46103 --- a/fs/jffs2/compr_rubin.c
46104 +++ b/fs/jffs2/compr_rubin.c
46105 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
46106 int ret;
46107 uint32_t mysrclen, mydstlen;
46108
46109 + pax_track_stack();
46110 +
46111 mysrclen = *sourcelen;
46112 mydstlen = *dstlen - 8;
46113
46114 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46115 index e513f19..2ab1351 100644
46116 --- a/fs/jffs2/erase.c
46117 +++ b/fs/jffs2/erase.c
46118 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46119 struct jffs2_unknown_node marker = {
46120 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46121 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46122 - .totlen = cpu_to_je32(c->cleanmarker_size)
46123 + .totlen = cpu_to_je32(c->cleanmarker_size),
46124 + .hdr_crc = cpu_to_je32(0)
46125 };
46126
46127 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46128 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46129 index 4515bea..178f2d6 100644
46130 --- a/fs/jffs2/wbuf.c
46131 +++ b/fs/jffs2/wbuf.c
46132 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46133 {
46134 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46135 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46136 - .totlen = constant_cpu_to_je32(8)
46137 + .totlen = constant_cpu_to_je32(8),
46138 + .hdr_crc = constant_cpu_to_je32(0)
46139 };
46140
46141 /*
46142 diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
46143 index 3e93cdd..c8a80e1 100644
46144 --- a/fs/jffs2/xattr.c
46145 +++ b/fs/jffs2/xattr.c
46146 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
46147
46148 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
46149
46150 + pax_track_stack();
46151 +
46152 /* Phase.1 : Merge same xref */
46153 for (i=0; i < XREF_TMPHASH_SIZE; i++)
46154 xref_tmphash[i] = NULL;
46155 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46156 index 06c8a67..589dbbd 100644
46157 --- a/fs/jfs/super.c
46158 +++ b/fs/jfs/super.c
46159 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
46160
46161 jfs_inode_cachep =
46162 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46163 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46164 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46165 init_once);
46166 if (jfs_inode_cachep == NULL)
46167 return -ENOMEM;
46168 diff --git a/fs/libfs.c b/fs/libfs.c
46169 index c18e9a1..0b04e2c 100644
46170 --- a/fs/libfs.c
46171 +++ b/fs/libfs.c
46172 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46173
46174 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46175 struct dentry *next;
46176 + char d_name[sizeof(next->d_iname)];
46177 + const unsigned char *name;
46178 +
46179 next = list_entry(p, struct dentry, d_u.d_child);
46180 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46181 if (!simple_positive(next)) {
46182 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46183
46184 spin_unlock(&next->d_lock);
46185 spin_unlock(&dentry->d_lock);
46186 - if (filldir(dirent, next->d_name.name,
46187 + name = next->d_name.name;
46188 + if (name == next->d_iname) {
46189 + memcpy(d_name, name, next->d_name.len);
46190 + name = d_name;
46191 + }
46192 + if (filldir(dirent, name,
46193 next->d_name.len, filp->f_pos,
46194 next->d_inode->i_ino,
46195 dt_type(next->d_inode)) < 0)
46196 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46197 index 8392cb8..ae8ed40 100644
46198 --- a/fs/lockd/clntproc.c
46199 +++ b/fs/lockd/clntproc.c
46200 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46201 /*
46202 * Cookie counter for NLM requests
46203 */
46204 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46205 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46206
46207 void nlmclnt_next_cookie(struct nlm_cookie *c)
46208 {
46209 - u32 cookie = atomic_inc_return(&nlm_cookie);
46210 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46211
46212 memcpy(c->data, &cookie, 4);
46213 c->len=4;
46214 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
46215 struct nlm_rqst reqst, *req;
46216 int status;
46217
46218 + pax_track_stack();
46219 +
46220 req = &reqst;
46221 memset(req, 0, sizeof(*req));
46222 locks_init_lock(&req->a_args.lock.fl);
46223 diff --git a/fs/locks.c b/fs/locks.c
46224 index 703f545..150a552 100644
46225 --- a/fs/locks.c
46226 +++ b/fs/locks.c
46227 @@ -2022,16 +2022,16 @@ void locks_remove_flock(struct file *filp)
46228 return;
46229
46230 if (filp->f_op && filp->f_op->flock) {
46231 - struct file_lock fl = {
46232 + struct file_lock flock = {
46233 .fl_pid = current->tgid,
46234 .fl_file = filp,
46235 .fl_flags = FL_FLOCK,
46236 .fl_type = F_UNLCK,
46237 .fl_end = OFFSET_MAX,
46238 };
46239 - filp->f_op->flock(filp, F_SETLKW, &fl);
46240 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46241 - fl.fl_ops->fl_release_private(&fl);
46242 + filp->f_op->flock(filp, F_SETLKW, &flock);
46243 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46244 + flock.fl_ops->fl_release_private(&flock);
46245 }
46246
46247 lock_flocks();
46248 diff --git a/fs/logfs/super.c b/fs/logfs/super.c
46249 index ce03a18..ac8c14f 100644
46250 --- a/fs/logfs/super.c
46251 +++ b/fs/logfs/super.c
46252 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super_block *sb)
46253 struct logfs_disk_super _ds1, *ds1 = &_ds1;
46254 int err, valid0, valid1;
46255
46256 + pax_track_stack();
46257 +
46258 /* read first superblock */
46259 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
46260 if (err)
46261 diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
46262 index 3f32bcb..7c82c29 100644
46263 --- a/fs/minix/bitmap.c
46264 +++ b/fs/minix/bitmap.c
46265 @@ -20,10 +20,11 @@ static const int nibblemap[] = { 4,3,3,2,3,2,2,1,3,2,2,1,2,1,1,0 };
46266
46267 static DEFINE_SPINLOCK(bitmap_lock);
46268
46269 -static unsigned long count_free(struct buffer_head *map[], unsigned numblocks, __u32 numbits)
46270 +static unsigned long count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
46271 {
46272 unsigned i, j, sum = 0;
46273 struct buffer_head *bh;
46274 + unsigned numblocks = minix_blocks_needed(numbits, blocksize);
46275
46276 for (i=0; i<numblocks-1; i++) {
46277 if (!(bh=map[i]))
46278 @@ -105,10 +106,12 @@ int minix_new_block(struct inode * inode)
46279 return 0;
46280 }
46281
46282 -unsigned long minix_count_free_blocks(struct minix_sb_info *sbi)
46283 +unsigned long minix_count_free_blocks(struct super_block *sb)
46284 {
46285 - return (count_free(sbi->s_zmap, sbi->s_zmap_blocks,
46286 - sbi->s_nzones - sbi->s_firstdatazone + 1)
46287 + struct minix_sb_info *sbi = minix_sb(sb);
46288 + u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
46289 +
46290 + return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
46291 << sbi->s_log_zone_size);
46292 }
46293
46294 @@ -273,7 +276,10 @@ struct inode *minix_new_inode(const struct inode *dir, int mode, int *error)
46295 return inode;
46296 }
46297
46298 -unsigned long minix_count_free_inodes(struct minix_sb_info *sbi)
46299 +unsigned long minix_count_free_inodes(struct super_block *sb)
46300 {
46301 - return count_free(sbi->s_imap, sbi->s_imap_blocks, sbi->s_ninodes + 1);
46302 + struct minix_sb_info *sbi = minix_sb(sb);
46303 + u32 bits = sbi->s_ninodes + 1;
46304 +
46305 + return count_free(sbi->s_imap, sb->s_blocksize, bits);
46306 }
46307 diff --git a/fs/minix/inode.c b/fs/minix/inode.c
46308 index e7d23e2..1ed1351 100644
46309 --- a/fs/minix/inode.c
46310 +++ b/fs/minix/inode.c
46311 @@ -279,6 +279,27 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
46312 else if (sbi->s_mount_state & MINIX_ERROR_FS)
46313 printk("MINIX-fs: mounting file system with errors, "
46314 "running fsck is recommended\n");
46315 +
46316 + /* Apparently minix can create filesystems that allocate more blocks for
46317 + * the bitmaps than needed. We simply ignore that, but verify it didn't
46318 + * create one with not enough blocks and bail out if so.
46319 + */
46320 + block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
46321 + if (sbi->s_imap_blocks < block) {
46322 + printk("MINIX-fs: file system does not have enough "
46323 + "imap blocks allocated. Refusing to mount\n");
46324 + goto out_iput;
46325 + }
46326 +
46327 + block = minix_blocks_needed(
46328 + (sbi->s_nzones - (sbi->s_firstdatazone + 1)),
46329 + s->s_blocksize);
46330 + if (sbi->s_zmap_blocks < block) {
46331 + printk("MINIX-fs: file system does not have enough "
46332 + "zmap blocks allocated. Refusing to mount.\n");
46333 + goto out_iput;
46334 + }
46335 +
46336 return 0;
46337
46338 out_iput:
46339 @@ -339,10 +360,10 @@ static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
46340 buf->f_type = sb->s_magic;
46341 buf->f_bsize = sb->s_blocksize;
46342 buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
46343 - buf->f_bfree = minix_count_free_blocks(sbi);
46344 + buf->f_bfree = minix_count_free_blocks(sb);
46345 buf->f_bavail = buf->f_bfree;
46346 buf->f_files = sbi->s_ninodes;
46347 - buf->f_ffree = minix_count_free_inodes(sbi);
46348 + buf->f_ffree = minix_count_free_inodes(sb);
46349 buf->f_namelen = sbi->s_namelen;
46350 buf->f_fsid.val[0] = (u32)id;
46351 buf->f_fsid.val[1] = (u32)(id >> 32);
46352 diff --git a/fs/minix/minix.h b/fs/minix/minix.h
46353 index 341e212..6415fe0 100644
46354 --- a/fs/minix/minix.h
46355 +++ b/fs/minix/minix.h
46356 @@ -48,10 +48,10 @@ extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, stru
46357 extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
46358 extern struct inode * minix_new_inode(const struct inode *, int, int *);
46359 extern void minix_free_inode(struct inode * inode);
46360 -extern unsigned long minix_count_free_inodes(struct minix_sb_info *sbi);
46361 +extern unsigned long minix_count_free_inodes(struct super_block *sb);
46362 extern int minix_new_block(struct inode * inode);
46363 extern void minix_free_block(struct inode *inode, unsigned long block);
46364 -extern unsigned long minix_count_free_blocks(struct minix_sb_info *sbi);
46365 +extern unsigned long minix_count_free_blocks(struct super_block *sb);
46366 extern int minix_getattr(struct vfsmount *, struct dentry *, struct kstat *);
46367 extern int minix_prepare_chunk(struct page *page, loff_t pos, unsigned len);
46368
46369 @@ -88,6 +88,11 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
46370 return list_entry(inode, struct minix_inode_info, vfs_inode);
46371 }
46372
46373 +static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize)
46374 +{
46375 + return DIV_ROUND_UP(bits, blocksize * 8);
46376 +}
46377 +
46378 #if defined(CONFIG_MINIX_FS_NATIVE_ENDIAN) && \
46379 defined(CONFIG_MINIX_FS_BIG_ENDIAN_16BIT_INDEXED)
46380
46381 diff --git a/fs/namei.c b/fs/namei.c
46382 index 3d15072..c1ddf9c 100644
46383 --- a/fs/namei.c
46384 +++ b/fs/namei.c
46385 @@ -281,16 +281,32 @@ int generic_permission(struct inode *inode, int mask)
46386 if (ret != -EACCES)
46387 return ret;
46388
46389 +#ifdef CONFIG_GRKERNSEC
46390 + /* we'll block if we have to log due to a denied capability use */
46391 + if (mask & MAY_NOT_BLOCK)
46392 + return -ECHILD;
46393 +#endif
46394 +
46395 if (S_ISDIR(inode->i_mode)) {
46396 /* DACs are overridable for directories */
46397 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46398 - return 0;
46399 if (!(mask & MAY_WRITE))
46400 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46401 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46402 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46403 return 0;
46404 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46405 + return 0;
46406 return -EACCES;
46407 }
46408 /*
46409 + * Searching includes executable on directories, else just read.
46410 + */
46411 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46412 + if (mask == MAY_READ)
46413 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46414 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46415 + return 0;
46416 +
46417 + /*
46418 * Read/write DACs are always overridable.
46419 * Executable DACs are overridable when there is
46420 * at least one exec bit set.
46421 @@ -299,14 +315,6 @@ int generic_permission(struct inode *inode, int mask)
46422 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46423 return 0;
46424
46425 - /*
46426 - * Searching includes executable on directories, else just read.
46427 - */
46428 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46429 - if (mask == MAY_READ)
46430 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46431 - return 0;
46432 -
46433 return -EACCES;
46434 }
46435
46436 @@ -653,11 +661,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46437 return error;
46438 }
46439
46440 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46441 + dentry->d_inode, dentry, nd->path.mnt)) {
46442 + error = -EACCES;
46443 + *p = ERR_PTR(error); /* no ->put_link(), please */
46444 + path_put(&nd->path);
46445 + return error;
46446 + }
46447 +
46448 nd->last_type = LAST_BIND;
46449 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46450 error = PTR_ERR(*p);
46451 if (!IS_ERR(*p)) {
46452 - char *s = nd_get_link(nd);
46453 + const char *s = nd_get_link(nd);
46454 error = 0;
46455 if (s)
46456 error = __vfs_follow_link(nd, s);
46457 @@ -1622,6 +1638,21 @@ static int path_lookupat(int dfd, const char *name,
46458 if (!err)
46459 err = complete_walk(nd);
46460
46461 + if (!(nd->flags & LOOKUP_PARENT)) {
46462 +#ifdef CONFIG_GRKERNSEC
46463 + if (flags & LOOKUP_RCU) {
46464 + if (!err)
46465 + path_put(&nd->path);
46466 + err = -ECHILD;
46467 + } else
46468 +#endif
46469 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46470 + if (!err)
46471 + path_put(&nd->path);
46472 + err = -ENOENT;
46473 + }
46474 + }
46475 +
46476 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46477 if (!nd->inode->i_op->lookup) {
46478 path_put(&nd->path);
46479 @@ -1649,6 +1680,15 @@ static int do_path_lookup(int dfd, const char *name,
46480 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46481
46482 if (likely(!retval)) {
46483 + if (*name != '/' && nd->path.dentry && nd->inode) {
46484 +#ifdef CONFIG_GRKERNSEC
46485 + if (flags & LOOKUP_RCU)
46486 + return -ECHILD;
46487 +#endif
46488 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46489 + return -ENOENT;
46490 + }
46491 +
46492 if (unlikely(!audit_dummy_context())) {
46493 if (nd->path.dentry && nd->inode)
46494 audit_inode(name, nd->path.dentry);
46495 @@ -2049,7 +2089,27 @@ static int may_open(struct path *path, int acc_mode, int flag)
46496 /*
46497 * Ensure there are no outstanding leases on the file.
46498 */
46499 - return break_lease(inode, flag);
46500 + error = break_lease(inode, flag);
46501 +
46502 + if (error)
46503 + return error;
46504 +
46505 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
46506 + error = -EPERM;
46507 + goto exit;
46508 + }
46509 +
46510 + if (gr_handle_rawio(inode)) {
46511 + error = -EPERM;
46512 + goto exit;
46513 + }
46514 +
46515 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
46516 + error = -EACCES;
46517 + goto exit;
46518 + }
46519 +exit:
46520 + return error;
46521 }
46522
46523 static int handle_truncate(struct file *filp)
46524 @@ -2110,6 +2170,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46525 error = complete_walk(nd);
46526 if (error)
46527 return ERR_PTR(error);
46528 +#ifdef CONFIG_GRKERNSEC
46529 + if (nd->flags & LOOKUP_RCU) {
46530 + error = -ECHILD;
46531 + goto exit;
46532 + }
46533 +#endif
46534 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46535 + error = -ENOENT;
46536 + goto exit;
46537 + }
46538 audit_inode(pathname, nd->path.dentry);
46539 if (open_flag & O_CREAT) {
46540 error = -EISDIR;
46541 @@ -2120,6 +2190,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46542 error = complete_walk(nd);
46543 if (error)
46544 return ERR_PTR(error);
46545 +#ifdef CONFIG_GRKERNSEC
46546 + if (nd->flags & LOOKUP_RCU) {
46547 + error = -ECHILD;
46548 + goto exit;
46549 + }
46550 +#endif
46551 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46552 + error = -ENOENT;
46553 + goto exit;
46554 + }
46555 audit_inode(pathname, dir);
46556 goto ok;
46557 }
46558 @@ -2141,6 +2221,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46559 error = complete_walk(nd);
46560 if (error)
46561 return ERR_PTR(-ECHILD);
46562 +#ifdef CONFIG_GRKERNSEC
46563 + if (nd->flags & LOOKUP_RCU) {
46564 + error = -ECHILD;
46565 + goto exit;
46566 + }
46567 +#endif
46568 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46569 + error = -ENOENT;
46570 + goto exit;
46571 + }
46572
46573 error = -ENOTDIR;
46574 if (nd->flags & LOOKUP_DIRECTORY) {
46575 @@ -2181,6 +2271,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46576 /* Negative dentry, just create the file */
46577 if (!dentry->d_inode) {
46578 int mode = op->mode;
46579 +
46580 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46581 + error = -EACCES;
46582 + goto exit_mutex_unlock;
46583 + }
46584 +
46585 if (!IS_POSIXACL(dir->d_inode))
46586 mode &= ~current_umask();
46587 /*
46588 @@ -2204,6 +2300,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46589 error = vfs_create(dir->d_inode, dentry, mode, nd);
46590 if (error)
46591 goto exit_mutex_unlock;
46592 + else
46593 + gr_handle_create(path->dentry, path->mnt);
46594 mutex_unlock(&dir->d_inode->i_mutex);
46595 dput(nd->path.dentry);
46596 nd->path.dentry = dentry;
46597 @@ -2213,6 +2311,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46598 /*
46599 * It already exists.
46600 */
46601 +
46602 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46603 + error = -ENOENT;
46604 + goto exit_mutex_unlock;
46605 + }
46606 +
46607 + /* only check if O_CREAT is specified, all other checks need to go
46608 + into may_open */
46609 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46610 + error = -EACCES;
46611 + goto exit_mutex_unlock;
46612 + }
46613 +
46614 mutex_unlock(&dir->d_inode->i_mutex);
46615 audit_inode(pathname, path->dentry);
46616
46617 @@ -2425,6 +2536,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46618 *path = nd.path;
46619 return dentry;
46620 eexist:
46621 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46622 + dput(dentry);
46623 + dentry = ERR_PTR(-ENOENT);
46624 + goto fail;
46625 + }
46626 dput(dentry);
46627 dentry = ERR_PTR(-EEXIST);
46628 fail:
46629 @@ -2447,6 +2563,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46630 }
46631 EXPORT_SYMBOL(user_path_create);
46632
46633 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46634 +{
46635 + char *tmp = getname(pathname);
46636 + struct dentry *res;
46637 + if (IS_ERR(tmp))
46638 + return ERR_CAST(tmp);
46639 + res = kern_path_create(dfd, tmp, path, is_dir);
46640 + if (IS_ERR(res))
46641 + putname(tmp);
46642 + else
46643 + *to = tmp;
46644 + return res;
46645 +}
46646 +
46647 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
46648 {
46649 int error = may_create(dir, dentry);
46650 @@ -2514,6 +2644,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46651 error = mnt_want_write(path.mnt);
46652 if (error)
46653 goto out_dput;
46654 +
46655 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46656 + error = -EPERM;
46657 + goto out_drop_write;
46658 + }
46659 +
46660 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46661 + error = -EACCES;
46662 + goto out_drop_write;
46663 + }
46664 +
46665 error = security_path_mknod(&path, dentry, mode, dev);
46666 if (error)
46667 goto out_drop_write;
46668 @@ -2531,6 +2672,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
46669 }
46670 out_drop_write:
46671 mnt_drop_write(path.mnt);
46672 +
46673 + if (!error)
46674 + gr_handle_create(dentry, path.mnt);
46675 out_dput:
46676 dput(dentry);
46677 mutex_unlock(&path.dentry->d_inode->i_mutex);
46678 @@ -2580,12 +2724,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
46679 error = mnt_want_write(path.mnt);
46680 if (error)
46681 goto out_dput;
46682 +
46683 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46684 + error = -EACCES;
46685 + goto out_drop_write;
46686 + }
46687 +
46688 error = security_path_mkdir(&path, dentry, mode);
46689 if (error)
46690 goto out_drop_write;
46691 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46692 out_drop_write:
46693 mnt_drop_write(path.mnt);
46694 +
46695 + if (!error)
46696 + gr_handle_create(dentry, path.mnt);
46697 out_dput:
46698 dput(dentry);
46699 mutex_unlock(&path.dentry->d_inode->i_mutex);
46700 @@ -2665,6 +2818,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46701 char * name;
46702 struct dentry *dentry;
46703 struct nameidata nd;
46704 + ino_t saved_ino = 0;
46705 + dev_t saved_dev = 0;
46706
46707 error = user_path_parent(dfd, pathname, &nd, &name);
46708 if (error)
46709 @@ -2693,6 +2848,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46710 error = -ENOENT;
46711 goto exit3;
46712 }
46713 +
46714 + saved_ino = dentry->d_inode->i_ino;
46715 + saved_dev = gr_get_dev_from_dentry(dentry);
46716 +
46717 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46718 + error = -EACCES;
46719 + goto exit3;
46720 + }
46721 +
46722 error = mnt_want_write(nd.path.mnt);
46723 if (error)
46724 goto exit3;
46725 @@ -2700,6 +2864,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46726 if (error)
46727 goto exit4;
46728 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46729 + if (!error && (saved_dev || saved_ino))
46730 + gr_handle_delete(saved_ino, saved_dev);
46731 exit4:
46732 mnt_drop_write(nd.path.mnt);
46733 exit3:
46734 @@ -2762,6 +2928,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46735 struct dentry *dentry;
46736 struct nameidata nd;
46737 struct inode *inode = NULL;
46738 + ino_t saved_ino = 0;
46739 + dev_t saved_dev = 0;
46740
46741 error = user_path_parent(dfd, pathname, &nd, &name);
46742 if (error)
46743 @@ -2784,6 +2952,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46744 if (!inode)
46745 goto slashes;
46746 ihold(inode);
46747 +
46748 + if (inode->i_nlink <= 1) {
46749 + saved_ino = inode->i_ino;
46750 + saved_dev = gr_get_dev_from_dentry(dentry);
46751 + }
46752 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46753 + error = -EACCES;
46754 + goto exit2;
46755 + }
46756 +
46757 error = mnt_want_write(nd.path.mnt);
46758 if (error)
46759 goto exit2;
46760 @@ -2791,6 +2969,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46761 if (error)
46762 goto exit3;
46763 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46764 + if (!error && (saved_ino || saved_dev))
46765 + gr_handle_delete(saved_ino, saved_dev);
46766 exit3:
46767 mnt_drop_write(nd.path.mnt);
46768 exit2:
46769 @@ -2866,10 +3046,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46770 error = mnt_want_write(path.mnt);
46771 if (error)
46772 goto out_dput;
46773 +
46774 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46775 + error = -EACCES;
46776 + goto out_drop_write;
46777 + }
46778 +
46779 error = security_path_symlink(&path, dentry, from);
46780 if (error)
46781 goto out_drop_write;
46782 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46783 + if (!error)
46784 + gr_handle_create(dentry, path.mnt);
46785 out_drop_write:
46786 mnt_drop_write(path.mnt);
46787 out_dput:
46788 @@ -2941,6 +3129,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46789 {
46790 struct dentry *new_dentry;
46791 struct path old_path, new_path;
46792 + char *to = NULL;
46793 int how = 0;
46794 int error;
46795
46796 @@ -2964,7 +3153,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46797 if (error)
46798 return error;
46799
46800 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46801 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46802 error = PTR_ERR(new_dentry);
46803 if (IS_ERR(new_dentry))
46804 goto out;
46805 @@ -2975,13 +3164,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46806 error = mnt_want_write(new_path.mnt);
46807 if (error)
46808 goto out_dput;
46809 +
46810 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46811 + old_path.dentry->d_inode,
46812 + old_path.dentry->d_inode->i_mode, to)) {
46813 + error = -EACCES;
46814 + goto out_drop_write;
46815 + }
46816 +
46817 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46818 + old_path.dentry, old_path.mnt, to)) {
46819 + error = -EACCES;
46820 + goto out_drop_write;
46821 + }
46822 +
46823 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46824 if (error)
46825 goto out_drop_write;
46826 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46827 + if (!error)
46828 + gr_handle_create(new_dentry, new_path.mnt);
46829 out_drop_write:
46830 mnt_drop_write(new_path.mnt);
46831 out_dput:
46832 + putname(to);
46833 dput(new_dentry);
46834 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46835 path_put(&new_path);
46836 @@ -3153,6 +3359,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46837 char *to;
46838 int error;
46839
46840 + pax_track_stack();
46841 +
46842 error = user_path_parent(olddfd, oldname, &oldnd, &from);
46843 if (error)
46844 goto exit;
46845 @@ -3209,6 +3417,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46846 if (new_dentry == trap)
46847 goto exit5;
46848
46849 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46850 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46851 + to);
46852 + if (error)
46853 + goto exit5;
46854 +
46855 error = mnt_want_write(oldnd.path.mnt);
46856 if (error)
46857 goto exit5;
46858 @@ -3218,6 +3432,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46859 goto exit6;
46860 error = vfs_rename(old_dir->d_inode, old_dentry,
46861 new_dir->d_inode, new_dentry);
46862 + if (!error)
46863 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46864 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46865 exit6:
46866 mnt_drop_write(oldnd.path.mnt);
46867 exit5:
46868 @@ -3243,6 +3460,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46869
46870 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46871 {
46872 + char tmpbuf[64];
46873 + const char *newlink;
46874 int len;
46875
46876 len = PTR_ERR(link);
46877 @@ -3252,7 +3471,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46878 len = strlen(link);
46879 if (len > (unsigned) buflen)
46880 len = buflen;
46881 - if (copy_to_user(buffer, link, len))
46882 +
46883 + if (len < sizeof(tmpbuf)) {
46884 + memcpy(tmpbuf, link, len);
46885 + newlink = tmpbuf;
46886 + } else
46887 + newlink = link;
46888 +
46889 + if (copy_to_user(buffer, newlink, len))
46890 len = -EFAULT;
46891 out:
46892 return len;
46893 diff --git a/fs/namespace.c b/fs/namespace.c
46894 index e5e1c7d..019609e 100644
46895 --- a/fs/namespace.c
46896 +++ b/fs/namespace.c
46897 @@ -1329,6 +1329,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
46898 if (!(sb->s_flags & MS_RDONLY))
46899 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46900 up_write(&sb->s_umount);
46901 +
46902 + gr_log_remount(mnt->mnt_devname, retval);
46903 +
46904 return retval;
46905 }
46906
46907 @@ -1348,6 +1351,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
46908 br_write_unlock(vfsmount_lock);
46909 up_write(&namespace_sem);
46910 release_mounts(&umount_list);
46911 +
46912 + gr_log_unmount(mnt->mnt_devname, retval);
46913 +
46914 return retval;
46915 }
46916
46917 @@ -2339,6 +2345,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46918 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
46919 MS_STRICTATIME);
46920
46921 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
46922 + retval = -EPERM;
46923 + goto dput_out;
46924 + }
46925 +
46926 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
46927 + retval = -EPERM;
46928 + goto dput_out;
46929 + }
46930 +
46931 if (flags & MS_REMOUNT)
46932 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
46933 data_page);
46934 @@ -2353,6 +2369,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
46935 dev_name, data_page);
46936 dput_out:
46937 path_put(&path);
46938 +
46939 + gr_log_mount(dev_name, dir_name, retval);
46940 +
46941 return retval;
46942 }
46943
46944 @@ -2576,6 +2595,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
46945 if (error)
46946 goto out2;
46947
46948 + if (gr_handle_chroot_pivot()) {
46949 + error = -EPERM;
46950 + goto out2;
46951 + }
46952 +
46953 get_fs_root(current->fs, &root);
46954 error = lock_mount(&old);
46955 if (error)
46956 diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
46957 index 9c51f62..503b252 100644
46958 --- a/fs/ncpfs/dir.c
46959 +++ b/fs/ncpfs/dir.c
46960 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
46961 int res, val = 0, len;
46962 __u8 __name[NCP_MAXPATHLEN + 1];
46963
46964 + pax_track_stack();
46965 +
46966 if (dentry == dentry->d_sb->s_root)
46967 return 1;
46968
46969 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
46970 int error, res, len;
46971 __u8 __name[NCP_MAXPATHLEN + 1];
46972
46973 + pax_track_stack();
46974 +
46975 error = -EIO;
46976 if (!ncp_conn_valid(server))
46977 goto finished;
46978 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
46979 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
46980 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
46981
46982 + pax_track_stack();
46983 +
46984 ncp_age_dentry(server, dentry);
46985 len = sizeof(__name);
46986 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
46987 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
46988 int error, len;
46989 __u8 __name[NCP_MAXPATHLEN + 1];
46990
46991 + pax_track_stack();
46992 +
46993 DPRINTK("ncp_mkdir: making %s/%s\n",
46994 dentry->d_parent->d_name.name, dentry->d_name.name);
46995
46996 @@ -1140,6 +1148,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
46997 int old_len, new_len;
46998 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
46999
47000 + pax_track_stack();
47001 +
47002 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
47003 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
47004 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
47005 diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
47006 index 202f370..9d4565e 100644
47007 --- a/fs/ncpfs/inode.c
47008 +++ b/fs/ncpfs/inode.c
47009 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
47010 #endif
47011 struct ncp_entry_info finfo;
47012
47013 + pax_track_stack();
47014 +
47015 memset(&data, 0, sizeof(data));
47016 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
47017 if (!server)
47018 diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
47019 index 281ae95..dd895b9 100644
47020 --- a/fs/nfs/blocklayout/blocklayout.c
47021 +++ b/fs/nfs/blocklayout/blocklayout.c
47022 @@ -90,7 +90,7 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
47023 */
47024 struct parallel_io {
47025 struct kref refcnt;
47026 - struct rpc_call_ops call_ops;
47027 + rpc_call_ops_no_const call_ops;
47028 void (*pnfs_callback) (void *data);
47029 void *data;
47030 };
47031 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47032 index 679d2f5..ef1ffec 100644
47033 --- a/fs/nfs/inode.c
47034 +++ b/fs/nfs/inode.c
47035 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47036 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47037 nfsi->attrtimeo_timestamp = jiffies;
47038
47039 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47040 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47041 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47042 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47043 else
47044 @@ -1002,16 +1002,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47045 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47046 }
47047
47048 -static atomic_long_t nfs_attr_generation_counter;
47049 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47050
47051 static unsigned long nfs_read_attr_generation_counter(void)
47052 {
47053 - return atomic_long_read(&nfs_attr_generation_counter);
47054 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47055 }
47056
47057 unsigned long nfs_inc_attr_generation_counter(void)
47058 {
47059 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47060 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47061 }
47062
47063 void nfs_fattr_init(struct nfs_fattr *fattr)
47064 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
47065 index 6f8bcc7..8f823c5 100644
47066 --- a/fs/nfsd/nfs4state.c
47067 +++ b/fs/nfsd/nfs4state.c
47068 @@ -3999,6 +3999,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
47069 unsigned int strhashval;
47070 int err;
47071
47072 + pax_track_stack();
47073 +
47074 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
47075 (long long) lock->lk_offset,
47076 (long long) lock->lk_length);
47077 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
47078 index f810996..cec8977 100644
47079 --- a/fs/nfsd/nfs4xdr.c
47080 +++ b/fs/nfsd/nfs4xdr.c
47081 @@ -1875,6 +1875,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
47082 .dentry = dentry,
47083 };
47084
47085 + pax_track_stack();
47086 +
47087 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
47088 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
47089 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
47090 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47091 index acf88ae..4fd6245 100644
47092 --- a/fs/nfsd/vfs.c
47093 +++ b/fs/nfsd/vfs.c
47094 @@ -896,7 +896,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47095 } else {
47096 oldfs = get_fs();
47097 set_fs(KERNEL_DS);
47098 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47099 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47100 set_fs(oldfs);
47101 }
47102
47103 @@ -1000,7 +1000,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47104
47105 /* Write the data. */
47106 oldfs = get_fs(); set_fs(KERNEL_DS);
47107 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47108 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47109 set_fs(oldfs);
47110 if (host_err < 0)
47111 goto out_nfserr;
47112 @@ -1535,7 +1535,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47113 */
47114
47115 oldfs = get_fs(); set_fs(KERNEL_DS);
47116 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
47117 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47118 set_fs(oldfs);
47119
47120 if (host_err < 0)
47121 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47122 index 9fde1c0..14e8827 100644
47123 --- a/fs/notify/fanotify/fanotify_user.c
47124 +++ b/fs/notify/fanotify/fanotify_user.c
47125 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47126 goto out_close_fd;
47127
47128 ret = -EFAULT;
47129 - if (copy_to_user(buf, &fanotify_event_metadata,
47130 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47131 + copy_to_user(buf, &fanotify_event_metadata,
47132 fanotify_event_metadata.event_len))
47133 goto out_kill_access_response;
47134
47135 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47136 index ee18815..7aa5d01 100644
47137 --- a/fs/notify/notification.c
47138 +++ b/fs/notify/notification.c
47139 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47140 * get set to 0 so it will never get 'freed'
47141 */
47142 static struct fsnotify_event *q_overflow_event;
47143 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47144 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47145
47146 /**
47147 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47148 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47149 */
47150 u32 fsnotify_get_cookie(void)
47151 {
47152 - return atomic_inc_return(&fsnotify_sync_cookie);
47153 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47154 }
47155 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47156
47157 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47158 index 99e3610..02c1068 100644
47159 --- a/fs/ntfs/dir.c
47160 +++ b/fs/ntfs/dir.c
47161 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47162 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47163 ~(s64)(ndir->itype.index.block_size - 1)));
47164 /* Bounds checks. */
47165 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47166 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47167 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47168 "inode 0x%lx or driver bug.", vdir->i_ino);
47169 goto err_out;
47170 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47171 index c587e2d..3641eaa 100644
47172 --- a/fs/ntfs/file.c
47173 +++ b/fs/ntfs/file.c
47174 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47175 #endif /* NTFS_RW */
47176 };
47177
47178 -const struct file_operations ntfs_empty_file_ops = {};
47179 +const struct file_operations ntfs_empty_file_ops __read_only;
47180
47181 -const struct inode_operations ntfs_empty_inode_ops = {};
47182 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47183 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47184 index 210c352..a174f83 100644
47185 --- a/fs/ocfs2/localalloc.c
47186 +++ b/fs/ocfs2/localalloc.c
47187 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47188 goto bail;
47189 }
47190
47191 - atomic_inc(&osb->alloc_stats.moves);
47192 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47193
47194 bail:
47195 if (handle)
47196 diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
47197 index 53aa41e..d7df9f1 100644
47198 --- a/fs/ocfs2/namei.c
47199 +++ b/fs/ocfs2/namei.c
47200 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *old_dir,
47201 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
47202 struct ocfs2_dir_lookup_result target_insert = { NULL, };
47203
47204 + pax_track_stack();
47205 +
47206 /* At some point it might be nice to break this function up a
47207 * bit. */
47208
47209 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47210 index 4092858..51c70ff 100644
47211 --- a/fs/ocfs2/ocfs2.h
47212 +++ b/fs/ocfs2/ocfs2.h
47213 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47214
47215 struct ocfs2_alloc_stats
47216 {
47217 - atomic_t moves;
47218 - atomic_t local_data;
47219 - atomic_t bitmap_data;
47220 - atomic_t bg_allocs;
47221 - atomic_t bg_extends;
47222 + atomic_unchecked_t moves;
47223 + atomic_unchecked_t local_data;
47224 + atomic_unchecked_t bitmap_data;
47225 + atomic_unchecked_t bg_allocs;
47226 + atomic_unchecked_t bg_extends;
47227 };
47228
47229 enum ocfs2_local_alloc_state
47230 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47231 index ba5d97e..c77db25 100644
47232 --- a/fs/ocfs2/suballoc.c
47233 +++ b/fs/ocfs2/suballoc.c
47234 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47235 mlog_errno(status);
47236 goto bail;
47237 }
47238 - atomic_inc(&osb->alloc_stats.bg_extends);
47239 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47240
47241 /* You should never ask for this much metadata */
47242 BUG_ON(bits_wanted >
47243 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47244 mlog_errno(status);
47245 goto bail;
47246 }
47247 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47248 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47249
47250 *suballoc_loc = res.sr_bg_blkno;
47251 *suballoc_bit_start = res.sr_bit_offset;
47252 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47253 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47254 res->sr_bits);
47255
47256 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47257 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47258
47259 BUG_ON(res->sr_bits != 1);
47260
47261 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47262 mlog_errno(status);
47263 goto bail;
47264 }
47265 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47266 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47267
47268 BUG_ON(res.sr_bits != 1);
47269
47270 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47271 cluster_start,
47272 num_clusters);
47273 if (!status)
47274 - atomic_inc(&osb->alloc_stats.local_data);
47275 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47276 } else {
47277 if (min_clusters > (osb->bitmap_cpg - 1)) {
47278 /* The only paths asking for contiguousness
47279 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47280 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47281 res.sr_bg_blkno,
47282 res.sr_bit_offset);
47283 - atomic_inc(&osb->alloc_stats.bitmap_data);
47284 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47285 *num_clusters = res.sr_bits;
47286 }
47287 }
47288 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47289 index 56f6102..1433c29 100644
47290 --- a/fs/ocfs2/super.c
47291 +++ b/fs/ocfs2/super.c
47292 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47293 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47294 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47295 "Stats",
47296 - atomic_read(&osb->alloc_stats.bitmap_data),
47297 - atomic_read(&osb->alloc_stats.local_data),
47298 - atomic_read(&osb->alloc_stats.bg_allocs),
47299 - atomic_read(&osb->alloc_stats.moves),
47300 - atomic_read(&osb->alloc_stats.bg_extends));
47301 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47302 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47303 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47304 + atomic_read_unchecked(&osb->alloc_stats.moves),
47305 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47306
47307 out += snprintf(buf + out, len - out,
47308 "%10s => State: %u Descriptor: %llu Size: %u bits "
47309 @@ -2112,11 +2112,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47310 spin_lock_init(&osb->osb_xattr_lock);
47311 ocfs2_init_steal_slots(osb);
47312
47313 - atomic_set(&osb->alloc_stats.moves, 0);
47314 - atomic_set(&osb->alloc_stats.local_data, 0);
47315 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47316 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47317 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47318 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47319 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47320 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47321 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47322 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47323
47324 /* Copy the blockcheck stats from the superblock probe */
47325 osb->osb_ecc_stats = *stats;
47326 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47327 index 5d22872..523db20 100644
47328 --- a/fs/ocfs2/symlink.c
47329 +++ b/fs/ocfs2/symlink.c
47330 @@ -142,7 +142,7 @@ bail:
47331
47332 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47333 {
47334 - char *link = nd_get_link(nd);
47335 + const char *link = nd_get_link(nd);
47336 if (!IS_ERR(link))
47337 kfree(link);
47338 }
47339 diff --git a/fs/open.c b/fs/open.c
47340 index f711921..28d5958 100644
47341 --- a/fs/open.c
47342 +++ b/fs/open.c
47343 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47344 error = locks_verify_truncate(inode, NULL, length);
47345 if (!error)
47346 error = security_path_truncate(&path);
47347 +
47348 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47349 + error = -EACCES;
47350 +
47351 if (!error)
47352 error = do_truncate(path.dentry, length, 0, NULL);
47353
47354 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47355 if (__mnt_is_readonly(path.mnt))
47356 res = -EROFS;
47357
47358 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47359 + res = -EACCES;
47360 +
47361 out_path_release:
47362 path_put(&path);
47363 out:
47364 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47365 if (error)
47366 goto dput_and_out;
47367
47368 + gr_log_chdir(path.dentry, path.mnt);
47369 +
47370 set_fs_pwd(current->fs, &path);
47371
47372 dput_and_out:
47373 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47374 goto out_putf;
47375
47376 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47377 +
47378 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47379 + error = -EPERM;
47380 +
47381 + if (!error)
47382 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47383 +
47384 if (!error)
47385 set_fs_pwd(current->fs, &file->f_path);
47386 out_putf:
47387 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47388 if (error)
47389 goto dput_and_out;
47390
47391 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47392 + goto dput_and_out;
47393 +
47394 set_fs_root(current->fs, &path);
47395 +
47396 + gr_handle_chroot_chdir(&path);
47397 +
47398 error = 0;
47399 dput_and_out:
47400 path_put(&path);
47401 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
47402 if (error)
47403 return error;
47404 mutex_lock(&inode->i_mutex);
47405 +
47406 + if (!gr_acl_handle_fchmod(path->dentry, path->mnt, mode)) {
47407 + error = -EACCES;
47408 + goto out_unlock;
47409 + }
47410 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47411 + error = -EACCES;
47412 + goto out_unlock;
47413 + }
47414 +
47415 error = security_path_chmod(path->dentry, path->mnt, mode);
47416 if (error)
47417 goto out_unlock;
47418 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47419 int error;
47420 struct iattr newattrs;
47421
47422 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47423 + return -EACCES;
47424 +
47425 newattrs.ia_valid = ATTR_CTIME;
47426 if (user != (uid_t) -1) {
47427 newattrs.ia_valid |= ATTR_UID;
47428 diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
47429 index af9fdf0..75b15c3 100644
47430 --- a/fs/partitions/ldm.c
47431 +++ b/fs/partitions/ldm.c
47432 @@ -1322,7 +1322,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
47433 goto found;
47434 }
47435
47436 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
47437 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
47438 if (!f) {
47439 ldm_crit ("Out of memory.");
47440 return false;
47441 diff --git a/fs/pipe.c b/fs/pipe.c
47442 index 0e0be1d..f62a72d 100644
47443 --- a/fs/pipe.c
47444 +++ b/fs/pipe.c
47445 @@ -420,9 +420,9 @@ redo:
47446 }
47447 if (bufs) /* More to do? */
47448 continue;
47449 - if (!pipe->writers)
47450 + if (!atomic_read(&pipe->writers))
47451 break;
47452 - if (!pipe->waiting_writers) {
47453 + if (!atomic_read(&pipe->waiting_writers)) {
47454 /* syscall merging: Usually we must not sleep
47455 * if O_NONBLOCK is set, or if we got some data.
47456 * But if a writer sleeps in kernel space, then
47457 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47458 mutex_lock(&inode->i_mutex);
47459 pipe = inode->i_pipe;
47460
47461 - if (!pipe->readers) {
47462 + if (!atomic_read(&pipe->readers)) {
47463 send_sig(SIGPIPE, current, 0);
47464 ret = -EPIPE;
47465 goto out;
47466 @@ -530,7 +530,7 @@ redo1:
47467 for (;;) {
47468 int bufs;
47469
47470 - if (!pipe->readers) {
47471 + if (!atomic_read(&pipe->readers)) {
47472 send_sig(SIGPIPE, current, 0);
47473 if (!ret)
47474 ret = -EPIPE;
47475 @@ -616,9 +616,9 @@ redo2:
47476 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47477 do_wakeup = 0;
47478 }
47479 - pipe->waiting_writers++;
47480 + atomic_inc(&pipe->waiting_writers);
47481 pipe_wait(pipe);
47482 - pipe->waiting_writers--;
47483 + atomic_dec(&pipe->waiting_writers);
47484 }
47485 out:
47486 mutex_unlock(&inode->i_mutex);
47487 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47488 mask = 0;
47489 if (filp->f_mode & FMODE_READ) {
47490 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47491 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47492 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47493 mask |= POLLHUP;
47494 }
47495
47496 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47497 * Most Unices do not set POLLERR for FIFOs but on Linux they
47498 * behave exactly like pipes for poll().
47499 */
47500 - if (!pipe->readers)
47501 + if (!atomic_read(&pipe->readers))
47502 mask |= POLLERR;
47503 }
47504
47505 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47506
47507 mutex_lock(&inode->i_mutex);
47508 pipe = inode->i_pipe;
47509 - pipe->readers -= decr;
47510 - pipe->writers -= decw;
47511 + atomic_sub(decr, &pipe->readers);
47512 + atomic_sub(decw, &pipe->writers);
47513
47514 - if (!pipe->readers && !pipe->writers) {
47515 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47516 free_pipe_info(inode);
47517 } else {
47518 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47519 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47520
47521 if (inode->i_pipe) {
47522 ret = 0;
47523 - inode->i_pipe->readers++;
47524 + atomic_inc(&inode->i_pipe->readers);
47525 }
47526
47527 mutex_unlock(&inode->i_mutex);
47528 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47529
47530 if (inode->i_pipe) {
47531 ret = 0;
47532 - inode->i_pipe->writers++;
47533 + atomic_inc(&inode->i_pipe->writers);
47534 }
47535
47536 mutex_unlock(&inode->i_mutex);
47537 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47538 if (inode->i_pipe) {
47539 ret = 0;
47540 if (filp->f_mode & FMODE_READ)
47541 - inode->i_pipe->readers++;
47542 + atomic_inc(&inode->i_pipe->readers);
47543 if (filp->f_mode & FMODE_WRITE)
47544 - inode->i_pipe->writers++;
47545 + atomic_inc(&inode->i_pipe->writers);
47546 }
47547
47548 mutex_unlock(&inode->i_mutex);
47549 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
47550 inode->i_pipe = NULL;
47551 }
47552
47553 -static struct vfsmount *pipe_mnt __read_mostly;
47554 +struct vfsmount *pipe_mnt __read_mostly;
47555
47556 /*
47557 * pipefs_dname() is called from d_path().
47558 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
47559 goto fail_iput;
47560 inode->i_pipe = pipe;
47561
47562 - pipe->readers = pipe->writers = 1;
47563 + atomic_set(&pipe->readers, 1);
47564 + atomic_set(&pipe->writers, 1);
47565 inode->i_fop = &rdwr_pipefifo_fops;
47566
47567 /*
47568 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47569 index 15af622..0e9f4467 100644
47570 --- a/fs/proc/Kconfig
47571 +++ b/fs/proc/Kconfig
47572 @@ -30,12 +30,12 @@ config PROC_FS
47573
47574 config PROC_KCORE
47575 bool "/proc/kcore support" if !ARM
47576 - depends on PROC_FS && MMU
47577 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47578
47579 config PROC_VMCORE
47580 bool "/proc/vmcore support"
47581 - depends on PROC_FS && CRASH_DUMP
47582 - default y
47583 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47584 + default n
47585 help
47586 Exports the dump image of crashed kernel in ELF format.
47587
47588 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47589 limited in memory.
47590
47591 config PROC_PAGE_MONITOR
47592 - default y
47593 - depends on PROC_FS && MMU
47594 + default n
47595 + depends on PROC_FS && MMU && !GRKERNSEC
47596 bool "Enable /proc page monitoring" if EXPERT
47597 help
47598 Various /proc files exist to monitor process memory utilization:
47599 diff --git a/fs/proc/array.c b/fs/proc/array.c
47600 index 3a1dafd..c7fed72 100644
47601 --- a/fs/proc/array.c
47602 +++ b/fs/proc/array.c
47603 @@ -60,6 +60,7 @@
47604 #include <linux/tty.h>
47605 #include <linux/string.h>
47606 #include <linux/mman.h>
47607 +#include <linux/grsecurity.h>
47608 #include <linux/proc_fs.h>
47609 #include <linux/ioport.h>
47610 #include <linux/uaccess.h>
47611 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47612 seq_putc(m, '\n');
47613 }
47614
47615 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47616 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47617 +{
47618 + if (p->mm)
47619 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47620 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47621 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47622 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47623 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47624 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47625 + else
47626 + seq_printf(m, "PaX:\t-----\n");
47627 +}
47628 +#endif
47629 +
47630 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47631 struct pid *pid, struct task_struct *task)
47632 {
47633 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47634 task_cpus_allowed(m, task);
47635 cpuset_task_status_allowed(m, task);
47636 task_context_switch_counts(m, task);
47637 +
47638 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47639 + task_pax(m, task);
47640 +#endif
47641 +
47642 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47643 + task_grsec_rbac(m, task);
47644 +#endif
47645 +
47646 return 0;
47647 }
47648
47649 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47650 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47651 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47652 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47653 +#endif
47654 +
47655 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47656 struct pid *pid, struct task_struct *task, int whole)
47657 {
47658 @@ -378,6 +409,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47659 char tcomm[sizeof(task->comm)];
47660 unsigned long flags;
47661
47662 + pax_track_stack();
47663 +
47664 state = *get_task_state(task);
47665 vsize = eip = esp = 0;
47666 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
47667 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47668 gtime = task->gtime;
47669 }
47670
47671 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47672 + if (PAX_RAND_FLAGS(mm)) {
47673 + eip = 0;
47674 + esp = 0;
47675 + wchan = 0;
47676 + }
47677 +#endif
47678 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47679 + wchan = 0;
47680 + eip =0;
47681 + esp =0;
47682 +#endif
47683 +
47684 /* scale priority and nice values from timeslices to -20..20 */
47685 /* to make it look like a "normal" Unix priority/nice value */
47686 priority = task_prio(task);
47687 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47688 vsize,
47689 mm ? get_mm_rss(mm) : 0,
47690 rsslim,
47691 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47692 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
47693 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
47694 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
47695 +#else
47696 mm ? (permitted ? mm->start_code : 1) : 0,
47697 mm ? (permitted ? mm->end_code : 1) : 0,
47698 (permitted && mm) ? mm->start_stack : 0,
47699 +#endif
47700 esp,
47701 eip,
47702 /* The signal information here is obsolete.
47703 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47704
47705 return 0;
47706 }
47707 +
47708 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47709 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47710 +{
47711 + u32 curr_ip = 0;
47712 + unsigned long flags;
47713 +
47714 + if (lock_task_sighand(task, &flags)) {
47715 + curr_ip = task->signal->curr_ip;
47716 + unlock_task_sighand(task, &flags);
47717 + }
47718 +
47719 + return sprintf(buffer, "%pI4\n", &curr_ip);
47720 +}
47721 +#endif
47722 diff --git a/fs/proc/base.c b/fs/proc/base.c
47723 index 5eb0206..7e0dc06 100644
47724 --- a/fs/proc/base.c
47725 +++ b/fs/proc/base.c
47726 @@ -107,6 +107,22 @@ struct pid_entry {
47727 union proc_op op;
47728 };
47729
47730 +struct getdents_callback {
47731 + struct linux_dirent __user * current_dir;
47732 + struct linux_dirent __user * previous;
47733 + struct file * file;
47734 + int count;
47735 + int error;
47736 +};
47737 +
47738 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
47739 + loff_t offset, u64 ino, unsigned int d_type)
47740 +{
47741 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
47742 + buf->error = -EINVAL;
47743 + return 0;
47744 +}
47745 +
47746 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47747 .name = (NAME), \
47748 .len = sizeof(NAME) - 1, \
47749 @@ -209,6 +225,9 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
47750 if (task == current)
47751 return mm;
47752
47753 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
47754 + return ERR_PTR(-EPERM);
47755 +
47756 /*
47757 * If current is actively ptrace'ing, and would also be
47758 * permitted to freshly attach with ptrace now, permit it.
47759 @@ -282,6 +301,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47760 if (!mm->arg_end)
47761 goto out_mm; /* Shh! No looking before we're done */
47762
47763 + if (gr_acl_handle_procpidmem(task))
47764 + goto out_mm;
47765 +
47766 len = mm->arg_end - mm->arg_start;
47767
47768 if (len > PAGE_SIZE)
47769 @@ -309,12 +331,28 @@ out:
47770 return res;
47771 }
47772
47773 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47774 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47775 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47776 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47777 +#endif
47778 +
47779 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47780 {
47781 struct mm_struct *mm = mm_for_maps(task);
47782 int res = PTR_ERR(mm);
47783 if (mm && !IS_ERR(mm)) {
47784 unsigned int nwords = 0;
47785 +
47786 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47787 + /* allow if we're currently ptracing this task */
47788 + if (PAX_RAND_FLAGS(mm) &&
47789 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47790 + mmput(mm);
47791 + return 0;
47792 + }
47793 +#endif
47794 +
47795 do {
47796 nwords += 2;
47797 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47798 @@ -328,7 +366,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47799 }
47800
47801
47802 -#ifdef CONFIG_KALLSYMS
47803 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47804 /*
47805 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47806 * Returns the resolved symbol. If that fails, simply return the address.
47807 @@ -367,7 +405,7 @@ static void unlock_trace(struct task_struct *task)
47808 mutex_unlock(&task->signal->cred_guard_mutex);
47809 }
47810
47811 -#ifdef CONFIG_STACKTRACE
47812 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47813
47814 #define MAX_STACK_TRACE_DEPTH 64
47815
47816 @@ -558,7 +596,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47817 return count;
47818 }
47819
47820 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47821 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47822 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47823 {
47824 long nr;
47825 @@ -587,7 +625,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47826 /************************************************************************/
47827
47828 /* permission checks */
47829 -static int proc_fd_access_allowed(struct inode *inode)
47830 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47831 {
47832 struct task_struct *task;
47833 int allowed = 0;
47834 @@ -597,7 +635,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47835 */
47836 task = get_proc_task(inode);
47837 if (task) {
47838 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47839 + if (log)
47840 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
47841 + else
47842 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47843 put_task_struct(task);
47844 }
47845 return allowed;
47846 @@ -978,6 +1019,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47847 if (!task)
47848 goto out_no_task;
47849
47850 + if (gr_acl_handle_procpidmem(task))
47851 + goto out;
47852 +
47853 ret = -ENOMEM;
47854 page = (char *)__get_free_page(GFP_TEMPORARY);
47855 if (!page)
47856 @@ -1613,7 +1657,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47857 path_put(&nd->path);
47858
47859 /* Are we allowed to snoop on the tasks file descriptors? */
47860 - if (!proc_fd_access_allowed(inode))
47861 + if (!proc_fd_access_allowed(inode,0))
47862 goto out;
47863
47864 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
47865 @@ -1652,8 +1696,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47866 struct path path;
47867
47868 /* Are we allowed to snoop on the tasks file descriptors? */
47869 - if (!proc_fd_access_allowed(inode))
47870 - goto out;
47871 + /* logging this is needed for learning on chromium to work properly,
47872 + but we don't want to flood the logs from 'ps' which does a readlink
47873 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47874 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47875 + */
47876 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47877 + if (!proc_fd_access_allowed(inode,0))
47878 + goto out;
47879 + } else {
47880 + if (!proc_fd_access_allowed(inode,1))
47881 + goto out;
47882 + }
47883
47884 error = PROC_I(inode)->op.proc_get_link(inode, &path);
47885 if (error)
47886 @@ -1718,7 +1772,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47887 rcu_read_lock();
47888 cred = __task_cred(task);
47889 inode->i_uid = cred->euid;
47890 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47891 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47892 +#else
47893 inode->i_gid = cred->egid;
47894 +#endif
47895 rcu_read_unlock();
47896 }
47897 security_task_to_inode(task, inode);
47898 @@ -1736,6 +1794,9 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47899 struct inode *inode = dentry->d_inode;
47900 struct task_struct *task;
47901 const struct cred *cred;
47902 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47903 + const struct cred *tmpcred = current_cred();
47904 +#endif
47905
47906 generic_fillattr(inode, stat);
47907
47908 @@ -1743,13 +1804,41 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47909 stat->uid = 0;
47910 stat->gid = 0;
47911 task = pid_task(proc_pid(inode), PIDTYPE_PID);
47912 +
47913 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
47914 + rcu_read_unlock();
47915 + return -ENOENT;
47916 + }
47917 +
47918 if (task) {
47919 + cred = __task_cred(task);
47920 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47921 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47922 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47923 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47924 +#endif
47925 + ) {
47926 +#endif
47927 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47928 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47929 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47930 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47931 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47932 +#endif
47933 task_dumpable(task)) {
47934 - cred = __task_cred(task);
47935 stat->uid = cred->euid;
47936 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47937 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47938 +#else
47939 stat->gid = cred->egid;
47940 +#endif
47941 }
47942 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47943 + } else {
47944 + rcu_read_unlock();
47945 + return -ENOENT;
47946 + }
47947 +#endif
47948 }
47949 rcu_read_unlock();
47950 return 0;
47951 @@ -1786,11 +1875,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47952
47953 if (task) {
47954 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47955 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47956 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47957 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47958 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47959 +#endif
47960 task_dumpable(task)) {
47961 rcu_read_lock();
47962 cred = __task_cred(task);
47963 inode->i_uid = cred->euid;
47964 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47965 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47966 +#else
47967 inode->i_gid = cred->egid;
47968 +#endif
47969 rcu_read_unlock();
47970 } else {
47971 inode->i_uid = 0;
47972 @@ -1908,7 +2006,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
47973 int fd = proc_fd(inode);
47974
47975 if (task) {
47976 - files = get_files_struct(task);
47977 + if (!gr_acl_handle_procpidmem(task))
47978 + files = get_files_struct(task);
47979 put_task_struct(task);
47980 }
47981 if (files) {
47982 @@ -2176,11 +2275,21 @@ static const struct file_operations proc_fd_operations = {
47983 */
47984 static int proc_fd_permission(struct inode *inode, int mask)
47985 {
47986 + struct task_struct *task;
47987 int rv = generic_permission(inode, mask);
47988 - if (rv == 0)
47989 - return 0;
47990 +
47991 if (task_pid(current) == proc_pid(inode))
47992 rv = 0;
47993 +
47994 + task = get_proc_task(inode);
47995 + if (task == NULL)
47996 + return rv;
47997 +
47998 + if (gr_acl_handle_procpidmem(task))
47999 + rv = -EACCES;
48000 +
48001 + put_task_struct(task);
48002 +
48003 return rv;
48004 }
48005
48006 @@ -2290,6 +2399,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48007 if (!task)
48008 goto out_no_task;
48009
48010 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48011 + goto out;
48012 +
48013 /*
48014 * Yes, it does not scale. And it should not. Don't add
48015 * new entries into /proc/<tgid>/ without very good reasons.
48016 @@ -2334,6 +2446,9 @@ static int proc_pident_readdir(struct file *filp,
48017 if (!task)
48018 goto out_no_task;
48019
48020 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48021 + goto out;
48022 +
48023 ret = 0;
48024 i = filp->f_pos;
48025 switch (i) {
48026 @@ -2604,7 +2719,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48027 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48028 void *cookie)
48029 {
48030 - char *s = nd_get_link(nd);
48031 + const char *s = nd_get_link(nd);
48032 if (!IS_ERR(s))
48033 __putname(s);
48034 }
48035 @@ -2802,7 +2917,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48036 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48037 #endif
48038 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48039 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48040 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48041 INF("syscall", S_IRUGO, proc_pid_syscall),
48042 #endif
48043 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48044 @@ -2827,10 +2942,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48045 #ifdef CONFIG_SECURITY
48046 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48047 #endif
48048 -#ifdef CONFIG_KALLSYMS
48049 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48050 INF("wchan", S_IRUGO, proc_pid_wchan),
48051 #endif
48052 -#ifdef CONFIG_STACKTRACE
48053 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48054 ONE("stack", S_IRUGO, proc_pid_stack),
48055 #endif
48056 #ifdef CONFIG_SCHEDSTATS
48057 @@ -2864,6 +2979,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48058 #ifdef CONFIG_HARDWALL
48059 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48060 #endif
48061 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48062 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48063 +#endif
48064 };
48065
48066 static int proc_tgid_base_readdir(struct file * filp,
48067 @@ -2989,7 +3107,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48068 if (!inode)
48069 goto out;
48070
48071 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48072 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48073 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48074 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48075 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48076 +#else
48077 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48078 +#endif
48079 inode->i_op = &proc_tgid_base_inode_operations;
48080 inode->i_fop = &proc_tgid_base_operations;
48081 inode->i_flags|=S_IMMUTABLE;
48082 @@ -3031,7 +3156,14 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48083 if (!task)
48084 goto out;
48085
48086 + if (!has_group_leader_pid(task))
48087 + goto out_put_task;
48088 +
48089 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48090 + goto out_put_task;
48091 +
48092 result = proc_pid_instantiate(dir, dentry, task, NULL);
48093 +out_put_task:
48094 put_task_struct(task);
48095 out:
48096 return result;
48097 @@ -3096,6 +3228,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48098 {
48099 unsigned int nr;
48100 struct task_struct *reaper;
48101 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48102 + const struct cred *tmpcred = current_cred();
48103 + const struct cred *itercred;
48104 +#endif
48105 + filldir_t __filldir = filldir;
48106 struct tgid_iter iter;
48107 struct pid_namespace *ns;
48108
48109 @@ -3119,8 +3256,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
48110 for (iter = next_tgid(ns, iter);
48111 iter.task;
48112 iter.tgid += 1, iter = next_tgid(ns, iter)) {
48113 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48114 + rcu_read_lock();
48115 + itercred = __task_cred(iter.task);
48116 +#endif
48117 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
48118 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48119 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
48120 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48121 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48122 +#endif
48123 + )
48124 +#endif
48125 + )
48126 + __filldir = &gr_fake_filldir;
48127 + else
48128 + __filldir = filldir;
48129 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48130 + rcu_read_unlock();
48131 +#endif
48132 filp->f_pos = iter.tgid + TGID_OFFSET;
48133 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
48134 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
48135 put_task_struct(iter.task);
48136 goto out;
48137 }
48138 @@ -3148,7 +3304,7 @@ static const struct pid_entry tid_base_stuff[] = {
48139 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48140 #endif
48141 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48142 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48143 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48144 INF("syscall", S_IRUGO, proc_pid_syscall),
48145 #endif
48146 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48147 @@ -3172,10 +3328,10 @@ static const struct pid_entry tid_base_stuff[] = {
48148 #ifdef CONFIG_SECURITY
48149 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48150 #endif
48151 -#ifdef CONFIG_KALLSYMS
48152 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48153 INF("wchan", S_IRUGO, proc_pid_wchan),
48154 #endif
48155 -#ifdef CONFIG_STACKTRACE
48156 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48157 ONE("stack", S_IRUGO, proc_pid_stack),
48158 #endif
48159 #ifdef CONFIG_SCHEDSTATS
48160 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48161 index 82676e3..5f8518a 100644
48162 --- a/fs/proc/cmdline.c
48163 +++ b/fs/proc/cmdline.c
48164 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48165
48166 static int __init proc_cmdline_init(void)
48167 {
48168 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48169 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48170 +#else
48171 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48172 +#endif
48173 return 0;
48174 }
48175 module_init(proc_cmdline_init);
48176 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48177 index b143471..bb105e5 100644
48178 --- a/fs/proc/devices.c
48179 +++ b/fs/proc/devices.c
48180 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48181
48182 static int __init proc_devices_init(void)
48183 {
48184 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48185 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48186 +#else
48187 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48188 +#endif
48189 return 0;
48190 }
48191 module_init(proc_devices_init);
48192 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48193 index 7ed72d6..d5f061a 100644
48194 --- a/fs/proc/inode.c
48195 +++ b/fs/proc/inode.c
48196 @@ -18,12 +18,18 @@
48197 #include <linux/module.h>
48198 #include <linux/sysctl.h>
48199 #include <linux/slab.h>
48200 +#include <linux/grsecurity.h>
48201
48202 #include <asm/system.h>
48203 #include <asm/uaccess.h>
48204
48205 #include "internal.h"
48206
48207 +#ifdef CONFIG_PROC_SYSCTL
48208 +extern const struct inode_operations proc_sys_inode_operations;
48209 +extern const struct inode_operations proc_sys_dir_operations;
48210 +#endif
48211 +
48212 static void proc_evict_inode(struct inode *inode)
48213 {
48214 struct proc_dir_entry *de;
48215 @@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
48216 ns_ops = PROC_I(inode)->ns_ops;
48217 if (ns_ops && ns_ops->put)
48218 ns_ops->put(PROC_I(inode)->ns);
48219 +
48220 +#ifdef CONFIG_PROC_SYSCTL
48221 + if (inode->i_op == &proc_sys_inode_operations ||
48222 + inode->i_op == &proc_sys_dir_operations)
48223 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48224 +#endif
48225 +
48226 }
48227
48228 static struct kmem_cache * proc_inode_cachep;
48229 @@ -440,7 +453,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48230 if (de->mode) {
48231 inode->i_mode = de->mode;
48232 inode->i_uid = de->uid;
48233 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48234 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48235 +#else
48236 inode->i_gid = de->gid;
48237 +#endif
48238 }
48239 if (de->size)
48240 inode->i_size = de->size;
48241 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48242 index 7838e5c..ff92cbc 100644
48243 --- a/fs/proc/internal.h
48244 +++ b/fs/proc/internal.h
48245 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48246 struct pid *pid, struct task_struct *task);
48247 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48248 struct pid *pid, struct task_struct *task);
48249 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48250 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48251 +#endif
48252 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48253
48254 extern const struct file_operations proc_maps_operations;
48255 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48256 index d245cb2..7e645bd 100644
48257 --- a/fs/proc/kcore.c
48258 +++ b/fs/proc/kcore.c
48259 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
48260 off_t offset = 0;
48261 struct kcore_list *m;
48262
48263 + pax_track_stack();
48264 +
48265 /* setup ELF header */
48266 elf = (struct elfhdr *) bufp;
48267 bufp += sizeof(struct elfhdr);
48268 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48269 * the addresses in the elf_phdr on our list.
48270 */
48271 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48272 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48273 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48274 + if (tsz > buflen)
48275 tsz = buflen;
48276 -
48277 +
48278 while (buflen) {
48279 struct kcore_list *m;
48280
48281 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48282 kfree(elf_buf);
48283 } else {
48284 if (kern_addr_valid(start)) {
48285 - unsigned long n;
48286 -
48287 - n = copy_to_user(buffer, (char *)start, tsz);
48288 - /*
48289 - * We cannot distingush between fault on source
48290 - * and fault on destination. When this happens
48291 - * we clear too and hope it will trigger the
48292 - * EFAULT again.
48293 - */
48294 - if (n) {
48295 - if (clear_user(buffer + tsz - n,
48296 - n))
48297 + char *elf_buf;
48298 + mm_segment_t oldfs;
48299 +
48300 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48301 + if (!elf_buf)
48302 + return -ENOMEM;
48303 + oldfs = get_fs();
48304 + set_fs(KERNEL_DS);
48305 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48306 + set_fs(oldfs);
48307 + if (copy_to_user(buffer, elf_buf, tsz)) {
48308 + kfree(elf_buf);
48309 return -EFAULT;
48310 + }
48311 }
48312 + set_fs(oldfs);
48313 + kfree(elf_buf);
48314 } else {
48315 if (clear_user(buffer, tsz))
48316 return -EFAULT;
48317 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48318
48319 static int open_kcore(struct inode *inode, struct file *filp)
48320 {
48321 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48322 + return -EPERM;
48323 +#endif
48324 if (!capable(CAP_SYS_RAWIO))
48325 return -EPERM;
48326 if (kcore_need_update)
48327 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48328 index 5861741..32c53bc 100644
48329 --- a/fs/proc/meminfo.c
48330 +++ b/fs/proc/meminfo.c
48331 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48332 unsigned long pages[NR_LRU_LISTS];
48333 int lru;
48334
48335 + pax_track_stack();
48336 +
48337 /*
48338 * display in kilobytes.
48339 */
48340 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48341 vmi.used >> 10,
48342 vmi.largest_chunk >> 10
48343 #ifdef CONFIG_MEMORY_FAILURE
48344 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48345 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48346 #endif
48347 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48348 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48349 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48350 index b1822dd..df622cb 100644
48351 --- a/fs/proc/nommu.c
48352 +++ b/fs/proc/nommu.c
48353 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48354 if (len < 1)
48355 len = 1;
48356 seq_printf(m, "%*c", len, ' ');
48357 - seq_path(m, &file->f_path, "");
48358 + seq_path(m, &file->f_path, "\n\\");
48359 }
48360
48361 seq_putc(m, '\n');
48362 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48363 index f738024..876984a 100644
48364 --- a/fs/proc/proc_net.c
48365 +++ b/fs/proc/proc_net.c
48366 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48367 struct task_struct *task;
48368 struct nsproxy *ns;
48369 struct net *net = NULL;
48370 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48371 + const struct cred *cred = current_cred();
48372 +#endif
48373 +
48374 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48375 + if (cred->fsuid)
48376 + return net;
48377 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48378 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48379 + return net;
48380 +#endif
48381
48382 rcu_read_lock();
48383 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48384 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48385 index 1a77dbe..56ec911 100644
48386 --- a/fs/proc/proc_sysctl.c
48387 +++ b/fs/proc/proc_sysctl.c
48388 @@ -8,11 +8,13 @@
48389 #include <linux/namei.h>
48390 #include "internal.h"
48391
48392 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48393 +
48394 static const struct dentry_operations proc_sys_dentry_operations;
48395 static const struct file_operations proc_sys_file_operations;
48396 -static const struct inode_operations proc_sys_inode_operations;
48397 +const struct inode_operations proc_sys_inode_operations;
48398 static const struct file_operations proc_sys_dir_file_operations;
48399 -static const struct inode_operations proc_sys_dir_operations;
48400 +const struct inode_operations proc_sys_dir_operations;
48401
48402 static struct inode *proc_sys_make_inode(struct super_block *sb,
48403 struct ctl_table_header *head, struct ctl_table *table)
48404 @@ -121,8 +123,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48405
48406 err = NULL;
48407 d_set_d_op(dentry, &proc_sys_dentry_operations);
48408 +
48409 + gr_handle_proc_create(dentry, inode);
48410 +
48411 d_add(dentry, inode);
48412
48413 + if (gr_handle_sysctl(p, MAY_EXEC))
48414 + err = ERR_PTR(-ENOENT);
48415 +
48416 out:
48417 sysctl_head_finish(head);
48418 return err;
48419 @@ -202,6 +210,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48420 return -ENOMEM;
48421 } else {
48422 d_set_d_op(child, &proc_sys_dentry_operations);
48423 +
48424 + gr_handle_proc_create(child, inode);
48425 +
48426 d_add(child, inode);
48427 }
48428 } else {
48429 @@ -230,6 +241,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48430 if (*pos < file->f_pos)
48431 continue;
48432
48433 + if (gr_handle_sysctl(table, 0))
48434 + continue;
48435 +
48436 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
48437 if (res)
48438 return res;
48439 @@ -355,6 +369,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48440 if (IS_ERR(head))
48441 return PTR_ERR(head);
48442
48443 + if (table && gr_handle_sysctl(table, MAY_EXEC))
48444 + return -ENOENT;
48445 +
48446 generic_fillattr(inode, stat);
48447 if (table)
48448 stat->mode = (stat->mode & S_IFMT) | table->mode;
48449 @@ -370,17 +387,18 @@ static const struct file_operations proc_sys_file_operations = {
48450 };
48451
48452 static const struct file_operations proc_sys_dir_file_operations = {
48453 + .read = generic_read_dir,
48454 .readdir = proc_sys_readdir,
48455 .llseek = generic_file_llseek,
48456 };
48457
48458 -static const struct inode_operations proc_sys_inode_operations = {
48459 +const struct inode_operations proc_sys_inode_operations = {
48460 .permission = proc_sys_permission,
48461 .setattr = proc_sys_setattr,
48462 .getattr = proc_sys_getattr,
48463 };
48464
48465 -static const struct inode_operations proc_sys_dir_operations = {
48466 +const struct inode_operations proc_sys_dir_operations = {
48467 .lookup = proc_sys_lookup,
48468 .permission = proc_sys_permission,
48469 .setattr = proc_sys_setattr,
48470 diff --git a/fs/proc/root.c b/fs/proc/root.c
48471 index 9a8a2b7..3018df6 100644
48472 --- a/fs/proc/root.c
48473 +++ b/fs/proc/root.c
48474 @@ -123,7 +123,15 @@ void __init proc_root_init(void)
48475 #ifdef CONFIG_PROC_DEVICETREE
48476 proc_device_tree_init();
48477 #endif
48478 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48479 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48480 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48481 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48482 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48483 +#endif
48484 +#else
48485 proc_mkdir("bus", NULL);
48486 +#endif
48487 proc_sys_init();
48488 }
48489
48490 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48491 index c7d4ee6..41c5564 100644
48492 --- a/fs/proc/task_mmu.c
48493 +++ b/fs/proc/task_mmu.c
48494 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48495 "VmExe:\t%8lu kB\n"
48496 "VmLib:\t%8lu kB\n"
48497 "VmPTE:\t%8lu kB\n"
48498 - "VmSwap:\t%8lu kB\n",
48499 - hiwater_vm << (PAGE_SHIFT-10),
48500 + "VmSwap:\t%8lu kB\n"
48501 +
48502 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48503 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48504 +#endif
48505 +
48506 + ,hiwater_vm << (PAGE_SHIFT-10),
48507 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48508 mm->locked_vm << (PAGE_SHIFT-10),
48509 hiwater_rss << (PAGE_SHIFT-10),
48510 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48511 data << (PAGE_SHIFT-10),
48512 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48513 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48514 - swap << (PAGE_SHIFT-10));
48515 + swap << (PAGE_SHIFT-10)
48516 +
48517 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48518 + , mm->context.user_cs_base, mm->context.user_cs_limit
48519 +#endif
48520 +
48521 + );
48522 }
48523
48524 unsigned long task_vsize(struct mm_struct *mm)
48525 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
48526 return ret;
48527 }
48528
48529 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48530 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48531 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48532 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48533 +#endif
48534 +
48535 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48536 {
48537 struct mm_struct *mm = vma->vm_mm;
48538 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48539 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48540 }
48541
48542 - /* We don't show the stack guard page in /proc/maps */
48543 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48544 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48545 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48546 +#else
48547 start = vma->vm_start;
48548 - if (stack_guard_page_start(vma, start))
48549 - start += PAGE_SIZE;
48550 end = vma->vm_end;
48551 - if (stack_guard_page_end(vma, end))
48552 - end -= PAGE_SIZE;
48553 +#endif
48554
48555 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48556 start,
48557 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48558 flags & VM_WRITE ? 'w' : '-',
48559 flags & VM_EXEC ? 'x' : '-',
48560 flags & VM_MAYSHARE ? 's' : 'p',
48561 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48562 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48563 +#else
48564 pgoff,
48565 +#endif
48566 MAJOR(dev), MINOR(dev), ino, &len);
48567
48568 /*
48569 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48570 */
48571 if (file) {
48572 pad_len_spaces(m, len);
48573 - seq_path(m, &file->f_path, "\n");
48574 + seq_path(m, &file->f_path, "\n\\");
48575 } else {
48576 const char *name = arch_vma_name(vma);
48577 if (!name) {
48578 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48579 if (vma->vm_start <= mm->brk &&
48580 vma->vm_end >= mm->start_brk) {
48581 name = "[heap]";
48582 - } else if (vma->vm_start <= mm->start_stack &&
48583 - vma->vm_end >= mm->start_stack) {
48584 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48585 + (vma->vm_start <= mm->start_stack &&
48586 + vma->vm_end >= mm->start_stack)) {
48587 name = "[stack]";
48588 }
48589 } else {
48590 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m, void *v)
48591 };
48592
48593 memset(&mss, 0, sizeof mss);
48594 - mss.vma = vma;
48595 - /* mmap_sem is held in m_start */
48596 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48597 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48598 -
48599 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48600 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48601 +#endif
48602 + mss.vma = vma;
48603 + /* mmap_sem is held in m_start */
48604 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48605 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48606 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48607 + }
48608 +#endif
48609 show_map_vma(m, vma);
48610
48611 seq_printf(m,
48612 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m, void *v)
48613 "KernelPageSize: %8lu kB\n"
48614 "MMUPageSize: %8lu kB\n"
48615 "Locked: %8lu kB\n",
48616 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48617 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48618 +#else
48619 (vma->vm_end - vma->vm_start) >> 10,
48620 +#endif
48621 mss.resident >> 10,
48622 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48623 mss.shared_clean >> 10,
48624 @@ -1031,7 +1062,7 @@ static int show_numa_map(struct seq_file *m, void *v)
48625
48626 if (file) {
48627 seq_printf(m, " file=");
48628 - seq_path(m, &file->f_path, "\n\t= ");
48629 + seq_path(m, &file->f_path, "\n\t\\= ");
48630 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48631 seq_printf(m, " heap");
48632 } else if (vma->vm_start <= mm->start_stack &&
48633 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48634 index 980de54..2a4db5f 100644
48635 --- a/fs/proc/task_nommu.c
48636 +++ b/fs/proc/task_nommu.c
48637 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48638 else
48639 bytes += kobjsize(mm);
48640
48641 - if (current->fs && current->fs->users > 1)
48642 + if (current->fs && atomic_read(&current->fs->users) > 1)
48643 sbytes += kobjsize(current->fs);
48644 else
48645 bytes += kobjsize(current->fs);
48646 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48647
48648 if (file) {
48649 pad_len_spaces(m, len);
48650 - seq_path(m, &file->f_path, "");
48651 + seq_path(m, &file->f_path, "\n\\");
48652 } else if (mm) {
48653 if (vma->vm_start <= mm->start_stack &&
48654 vma->vm_end >= mm->start_stack) {
48655 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48656 index d67908b..d13f6a6 100644
48657 --- a/fs/quota/netlink.c
48658 +++ b/fs/quota/netlink.c
48659 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48660 void quota_send_warning(short type, unsigned int id, dev_t dev,
48661 const char warntype)
48662 {
48663 - static atomic_t seq;
48664 + static atomic_unchecked_t seq;
48665 struct sk_buff *skb;
48666 void *msg_head;
48667 int ret;
48668 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48669 "VFS: Not enough memory to send quota warning.\n");
48670 return;
48671 }
48672 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48673 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48674 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48675 if (!msg_head) {
48676 printk(KERN_ERR
48677 diff --git a/fs/readdir.c b/fs/readdir.c
48678 index 356f715..c918d38 100644
48679 --- a/fs/readdir.c
48680 +++ b/fs/readdir.c
48681 @@ -17,6 +17,7 @@
48682 #include <linux/security.h>
48683 #include <linux/syscalls.h>
48684 #include <linux/unistd.h>
48685 +#include <linux/namei.h>
48686
48687 #include <asm/uaccess.h>
48688
48689 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48690
48691 struct readdir_callback {
48692 struct old_linux_dirent __user * dirent;
48693 + struct file * file;
48694 int result;
48695 };
48696
48697 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48698 buf->result = -EOVERFLOW;
48699 return -EOVERFLOW;
48700 }
48701 +
48702 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48703 + return 0;
48704 +
48705 buf->result++;
48706 dirent = buf->dirent;
48707 if (!access_ok(VERIFY_WRITE, dirent,
48708 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48709
48710 buf.result = 0;
48711 buf.dirent = dirent;
48712 + buf.file = file;
48713
48714 error = vfs_readdir(file, fillonedir, &buf);
48715 if (buf.result)
48716 @@ -142,6 +149,7 @@ struct linux_dirent {
48717 struct getdents_callback {
48718 struct linux_dirent __user * current_dir;
48719 struct linux_dirent __user * previous;
48720 + struct file * file;
48721 int count;
48722 int error;
48723 };
48724 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48725 buf->error = -EOVERFLOW;
48726 return -EOVERFLOW;
48727 }
48728 +
48729 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48730 + return 0;
48731 +
48732 dirent = buf->previous;
48733 if (dirent) {
48734 if (__put_user(offset, &dirent->d_off))
48735 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48736 buf.previous = NULL;
48737 buf.count = count;
48738 buf.error = 0;
48739 + buf.file = file;
48740
48741 error = vfs_readdir(file, filldir, &buf);
48742 if (error >= 0)
48743 @@ -229,6 +242,7 @@ out:
48744 struct getdents_callback64 {
48745 struct linux_dirent64 __user * current_dir;
48746 struct linux_dirent64 __user * previous;
48747 + struct file *file;
48748 int count;
48749 int error;
48750 };
48751 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48752 buf->error = -EINVAL; /* only used if we fail.. */
48753 if (reclen > buf->count)
48754 return -EINVAL;
48755 +
48756 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48757 + return 0;
48758 +
48759 dirent = buf->previous;
48760 if (dirent) {
48761 if (__put_user(offset, &dirent->d_off))
48762 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48763
48764 buf.current_dir = dirent;
48765 buf.previous = NULL;
48766 + buf.file = file;
48767 buf.count = count;
48768 buf.error = 0;
48769
48770 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48771 error = buf.error;
48772 lastdirent = buf.previous;
48773 if (lastdirent) {
48774 - typeof(lastdirent->d_off) d_off = file->f_pos;
48775 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48776 if (__put_user(d_off, &lastdirent->d_off))
48777 error = -EFAULT;
48778 else
48779 diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
48780 index 133e935..349ef18 100644
48781 --- a/fs/reiserfs/dir.c
48782 +++ b/fs/reiserfs/dir.c
48783 @@ -75,6 +75,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
48784 struct reiserfs_dir_entry de;
48785 int ret = 0;
48786
48787 + pax_track_stack();
48788 +
48789 reiserfs_write_lock(inode->i_sb);
48790
48791 reiserfs_check_lock_depth(inode->i_sb, "readdir");
48792 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48793 index 60c0804..d814f98 100644
48794 --- a/fs/reiserfs/do_balan.c
48795 +++ b/fs/reiserfs/do_balan.c
48796 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48797 return;
48798 }
48799
48800 - atomic_inc(&(fs_generation(tb->tb_sb)));
48801 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48802 do_balance_starts(tb);
48803
48804 /* balance leaf returns 0 except if combining L R and S into
48805 diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
48806 index a159ba5..0396a76 100644
48807 --- a/fs/reiserfs/journal.c
48808 +++ b/fs/reiserfs/journal.c
48809 @@ -2289,6 +2289,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
48810 struct buffer_head *bh;
48811 int i, j;
48812
48813 + pax_track_stack();
48814 +
48815 bh = __getblk(dev, block, bufsize);
48816 if (buffer_uptodate(bh))
48817 return (bh);
48818 diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
48819 index ef39232..0fa91ba 100644
48820 --- a/fs/reiserfs/namei.c
48821 +++ b/fs/reiserfs/namei.c
48822 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
48823 unsigned long savelink = 1;
48824 struct timespec ctime;
48825
48826 + pax_track_stack();
48827 +
48828 /* three balancings: (1) old name removal, (2) new name insertion
48829 and (3) maybe "save" link insertion
48830 stat data updates: (1) old directory,
48831 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48832 index 7a99811..2c9286f 100644
48833 --- a/fs/reiserfs/procfs.c
48834 +++ b/fs/reiserfs/procfs.c
48835 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48836 "SMALL_TAILS " : "NO_TAILS ",
48837 replay_only(sb) ? "REPLAY_ONLY " : "",
48838 convert_reiserfs(sb) ? "CONV " : "",
48839 - atomic_read(&r->s_generation_counter),
48840 + atomic_read_unchecked(&r->s_generation_counter),
48841 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48842 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48843 SF(s_good_search_by_key_reada), SF(s_bmaps),
48844 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
48845 struct journal_params *jp = &rs->s_v1.s_journal;
48846 char b[BDEVNAME_SIZE];
48847
48848 + pax_track_stack();
48849 +
48850 seq_printf(m, /* on-disk fields */
48851 "jp_journal_1st_block: \t%i\n"
48852 "jp_journal_dev: \t%s[%x]\n"
48853 diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
48854 index 313d39d..3a5811b 100644
48855 --- a/fs/reiserfs/stree.c
48856 +++ b/fs/reiserfs/stree.c
48857 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
48858 int iter = 0;
48859 #endif
48860
48861 + pax_track_stack();
48862 +
48863 BUG_ON(!th->t_trans_id);
48864
48865 init_tb_struct(th, &s_del_balance, sb, path,
48866 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
48867 int retval;
48868 int quota_cut_bytes = 0;
48869
48870 + pax_track_stack();
48871 +
48872 BUG_ON(!th->t_trans_id);
48873
48874 le_key2cpu_key(&cpu_key, key);
48875 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
48876 int quota_cut_bytes;
48877 loff_t tail_pos = 0;
48878
48879 + pax_track_stack();
48880 +
48881 BUG_ON(!th->t_trans_id);
48882
48883 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
48884 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
48885 int retval;
48886 int fs_gen;
48887
48888 + pax_track_stack();
48889 +
48890 BUG_ON(!th->t_trans_id);
48891
48892 fs_gen = get_generation(inode->i_sb);
48893 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
48894 int fs_gen = 0;
48895 int quota_bytes = 0;
48896
48897 + pax_track_stack();
48898 +
48899 BUG_ON(!th->t_trans_id);
48900
48901 if (inode) { /* Do we count quotas for item? */
48902 diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
48903 index 14363b9..dd95a04 100644
48904 --- a/fs/reiserfs/super.c
48905 +++ b/fs/reiserfs/super.c
48906 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
48907 {.option_name = NULL}
48908 };
48909
48910 + pax_track_stack();
48911 +
48912 *blocks = 0;
48913 if (!options || !*options)
48914 /* use default configuration: create tails, journaling on, no
48915 diff --git a/fs/select.c b/fs/select.c
48916 index d33418f..f8e06bc 100644
48917 --- a/fs/select.c
48918 +++ b/fs/select.c
48919 @@ -20,6 +20,7 @@
48920 #include <linux/module.h>
48921 #include <linux/slab.h>
48922 #include <linux/poll.h>
48923 +#include <linux/security.h>
48924 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48925 #include <linux/file.h>
48926 #include <linux/fdtable.h>
48927 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
48928 int retval, i, timed_out = 0;
48929 unsigned long slack = 0;
48930
48931 + pax_track_stack();
48932 +
48933 rcu_read_lock();
48934 retval = max_select_fd(n, fds);
48935 rcu_read_unlock();
48936 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
48937 /* Allocate small arguments on the stack to save memory and be faster */
48938 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48939
48940 + pax_track_stack();
48941 +
48942 ret = -EINVAL;
48943 if (n < 0)
48944 goto out_nofds;
48945 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48946 struct poll_list *walk = head;
48947 unsigned long todo = nfds;
48948
48949 + pax_track_stack();
48950 +
48951 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48952 if (nfds > rlimit(RLIMIT_NOFILE))
48953 return -EINVAL;
48954
48955 diff --git a/fs/seq_file.c b/fs/seq_file.c
48956 index 05d6b0e..ee96362 100644
48957 --- a/fs/seq_file.c
48958 +++ b/fs/seq_file.c
48959 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
48960 return 0;
48961 }
48962 if (!m->buf) {
48963 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48964 + m->size = PAGE_SIZE;
48965 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48966 if (!m->buf)
48967 return -ENOMEM;
48968 }
48969 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
48970 Eoverflow:
48971 m->op->stop(m, p);
48972 kfree(m->buf);
48973 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48974 + m->size <<= 1;
48975 + m->buf = kmalloc(m->size, GFP_KERNEL);
48976 return !m->buf ? -ENOMEM : -EAGAIN;
48977 }
48978
48979 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48980 m->version = file->f_version;
48981 /* grab buffer if we didn't have one */
48982 if (!m->buf) {
48983 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48984 + m->size = PAGE_SIZE;
48985 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
48986 if (!m->buf)
48987 goto Enomem;
48988 }
48989 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48990 goto Fill;
48991 m->op->stop(m, p);
48992 kfree(m->buf);
48993 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48994 + m->size <<= 1;
48995 + m->buf = kmalloc(m->size, GFP_KERNEL);
48996 if (!m->buf)
48997 goto Enomem;
48998 m->count = 0;
48999 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
49000 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49001 void *data)
49002 {
49003 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49004 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49005 int res = -ENOMEM;
49006
49007 if (op) {
49008 diff --git a/fs/splice.c b/fs/splice.c
49009 index fa2defa..9a697a5 100644
49010 --- a/fs/splice.c
49011 +++ b/fs/splice.c
49012 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49013 pipe_lock(pipe);
49014
49015 for (;;) {
49016 - if (!pipe->readers) {
49017 + if (!atomic_read(&pipe->readers)) {
49018 send_sig(SIGPIPE, current, 0);
49019 if (!ret)
49020 ret = -EPIPE;
49021 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49022 do_wakeup = 0;
49023 }
49024
49025 - pipe->waiting_writers++;
49026 + atomic_inc(&pipe->waiting_writers);
49027 pipe_wait(pipe);
49028 - pipe->waiting_writers--;
49029 + atomic_dec(&pipe->waiting_writers);
49030 }
49031
49032 pipe_unlock(pipe);
49033 @@ -320,6 +320,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
49034 .spd_release = spd_release_page,
49035 };
49036
49037 + pax_track_stack();
49038 +
49039 if (splice_grow_spd(pipe, &spd))
49040 return -ENOMEM;
49041
49042 @@ -560,7 +562,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49043 old_fs = get_fs();
49044 set_fs(get_ds());
49045 /* The cast to a user pointer is valid due to the set_fs() */
49046 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49047 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49048 set_fs(old_fs);
49049
49050 return res;
49051 @@ -575,7 +577,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49052 old_fs = get_fs();
49053 set_fs(get_ds());
49054 /* The cast to a user pointer is valid due to the set_fs() */
49055 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49056 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49057 set_fs(old_fs);
49058
49059 return res;
49060 @@ -603,6 +605,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49061 .spd_release = spd_release_page,
49062 };
49063
49064 + pax_track_stack();
49065 +
49066 if (splice_grow_spd(pipe, &spd))
49067 return -ENOMEM;
49068
49069 @@ -626,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49070 goto err;
49071
49072 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49073 - vec[i].iov_base = (void __user *) page_address(page);
49074 + vec[i].iov_base = (void __force_user *) page_address(page);
49075 vec[i].iov_len = this_len;
49076 spd.pages[i] = page;
49077 spd.nr_pages++;
49078 @@ -846,10 +850,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49079 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49080 {
49081 while (!pipe->nrbufs) {
49082 - if (!pipe->writers)
49083 + if (!atomic_read(&pipe->writers))
49084 return 0;
49085
49086 - if (!pipe->waiting_writers && sd->num_spliced)
49087 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49088 return 0;
49089
49090 if (sd->flags & SPLICE_F_NONBLOCK)
49091 @@ -1182,7 +1186,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49092 * out of the pipe right after the splice_to_pipe(). So set
49093 * PIPE_READERS appropriately.
49094 */
49095 - pipe->readers = 1;
49096 + atomic_set(&pipe->readers, 1);
49097
49098 current->splice_pipe = pipe;
49099 }
49100 @@ -1619,6 +1623,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
49101 };
49102 long ret;
49103
49104 + pax_track_stack();
49105 +
49106 pipe = get_pipe_info(file);
49107 if (!pipe)
49108 return -EBADF;
49109 @@ -1734,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49110 ret = -ERESTARTSYS;
49111 break;
49112 }
49113 - if (!pipe->writers)
49114 + if (!atomic_read(&pipe->writers))
49115 break;
49116 - if (!pipe->waiting_writers) {
49117 + if (!atomic_read(&pipe->waiting_writers)) {
49118 if (flags & SPLICE_F_NONBLOCK) {
49119 ret = -EAGAIN;
49120 break;
49121 @@ -1768,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49122 pipe_lock(pipe);
49123
49124 while (pipe->nrbufs >= pipe->buffers) {
49125 - if (!pipe->readers) {
49126 + if (!atomic_read(&pipe->readers)) {
49127 send_sig(SIGPIPE, current, 0);
49128 ret = -EPIPE;
49129 break;
49130 @@ -1781,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49131 ret = -ERESTARTSYS;
49132 break;
49133 }
49134 - pipe->waiting_writers++;
49135 + atomic_inc(&pipe->waiting_writers);
49136 pipe_wait(pipe);
49137 - pipe->waiting_writers--;
49138 + atomic_dec(&pipe->waiting_writers);
49139 }
49140
49141 pipe_unlock(pipe);
49142 @@ -1819,14 +1825,14 @@ retry:
49143 pipe_double_lock(ipipe, opipe);
49144
49145 do {
49146 - if (!opipe->readers) {
49147 + if (!atomic_read(&opipe->readers)) {
49148 send_sig(SIGPIPE, current, 0);
49149 if (!ret)
49150 ret = -EPIPE;
49151 break;
49152 }
49153
49154 - if (!ipipe->nrbufs && !ipipe->writers)
49155 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49156 break;
49157
49158 /*
49159 @@ -1923,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49160 pipe_double_lock(ipipe, opipe);
49161
49162 do {
49163 - if (!opipe->readers) {
49164 + if (!atomic_read(&opipe->readers)) {
49165 send_sig(SIGPIPE, current, 0);
49166 if (!ret)
49167 ret = -EPIPE;
49168 @@ -1968,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49169 * return EAGAIN if we have the potential of some data in the
49170 * future, otherwise just return 0
49171 */
49172 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49173 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49174 ret = -EAGAIN;
49175
49176 pipe_unlock(ipipe);
49177 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49178 index 1ad8c93..6633545 100644
49179 --- a/fs/sysfs/file.c
49180 +++ b/fs/sysfs/file.c
49181 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49182
49183 struct sysfs_open_dirent {
49184 atomic_t refcnt;
49185 - atomic_t event;
49186 + atomic_unchecked_t event;
49187 wait_queue_head_t poll;
49188 struct list_head buffers; /* goes through sysfs_buffer.list */
49189 };
49190 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49191 if (!sysfs_get_active(attr_sd))
49192 return -ENODEV;
49193
49194 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49195 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49196 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49197
49198 sysfs_put_active(attr_sd);
49199 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49200 return -ENOMEM;
49201
49202 atomic_set(&new_od->refcnt, 0);
49203 - atomic_set(&new_od->event, 1);
49204 + atomic_set_unchecked(&new_od->event, 1);
49205 init_waitqueue_head(&new_od->poll);
49206 INIT_LIST_HEAD(&new_od->buffers);
49207 goto retry;
49208 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49209
49210 sysfs_put_active(attr_sd);
49211
49212 - if (buffer->event != atomic_read(&od->event))
49213 + if (buffer->event != atomic_read_unchecked(&od->event))
49214 goto trigger;
49215
49216 return DEFAULT_POLLMASK;
49217 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49218
49219 od = sd->s_attr.open;
49220 if (od) {
49221 - atomic_inc(&od->event);
49222 + atomic_inc_unchecked(&od->event);
49223 wake_up_interruptible(&od->poll);
49224 }
49225
49226 diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
49227 index e34f0d9..740ea7b 100644
49228 --- a/fs/sysfs/mount.c
49229 +++ b/fs/sysfs/mount.c
49230 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
49231 .s_name = "",
49232 .s_count = ATOMIC_INIT(1),
49233 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
49234 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49235 + .s_mode = S_IFDIR | S_IRWXU,
49236 +#else
49237 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49238 +#endif
49239 .s_ino = 1,
49240 };
49241
49242 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49243 index a7ac78f..02158e1 100644
49244 --- a/fs/sysfs/symlink.c
49245 +++ b/fs/sysfs/symlink.c
49246 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49247
49248 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49249 {
49250 - char *page = nd_get_link(nd);
49251 + const char *page = nd_get_link(nd);
49252 if (!IS_ERR(page))
49253 free_page((unsigned long)page);
49254 }
49255 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
49256 index 1d1358e..408bedb 100644
49257 --- a/fs/udf/inode.c
49258 +++ b/fs/udf/inode.c
49259 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
49260 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
49261 int lastblock = 0;
49262
49263 + pax_track_stack();
49264 +
49265 prev_epos.offset = udf_file_entry_alloc_offset(inode);
49266 prev_epos.block = iinfo->i_location;
49267 prev_epos.bh = NULL;
49268 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49269 index 9215700..bf1f68e 100644
49270 --- a/fs/udf/misc.c
49271 +++ b/fs/udf/misc.c
49272 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49273
49274 u8 udf_tag_checksum(const struct tag *t)
49275 {
49276 - u8 *data = (u8 *)t;
49277 + const u8 *data = (const u8 *)t;
49278 u8 checksum = 0;
49279 int i;
49280 for (i = 0; i < sizeof(struct tag); ++i)
49281 diff --git a/fs/utimes.c b/fs/utimes.c
49282 index ba653f3..06ea4b1 100644
49283 --- a/fs/utimes.c
49284 +++ b/fs/utimes.c
49285 @@ -1,6 +1,7 @@
49286 #include <linux/compiler.h>
49287 #include <linux/file.h>
49288 #include <linux/fs.h>
49289 +#include <linux/security.h>
49290 #include <linux/linkage.h>
49291 #include <linux/mount.h>
49292 #include <linux/namei.h>
49293 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49294 goto mnt_drop_write_and_out;
49295 }
49296 }
49297 +
49298 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49299 + error = -EACCES;
49300 + goto mnt_drop_write_and_out;
49301 + }
49302 +
49303 mutex_lock(&inode->i_mutex);
49304 error = notify_change(path->dentry, &newattrs);
49305 mutex_unlock(&inode->i_mutex);
49306 diff --git a/fs/xattr.c b/fs/xattr.c
49307 index f060663..def7007 100644
49308 --- a/fs/xattr.c
49309 +++ b/fs/xattr.c
49310 @@ -254,7 +254,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49311 * Extended attribute SET operations
49312 */
49313 static long
49314 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49315 +setxattr(struct path *path, const char __user *name, const void __user *value,
49316 size_t size, int flags)
49317 {
49318 int error;
49319 @@ -278,7 +278,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49320 return PTR_ERR(kvalue);
49321 }
49322
49323 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49324 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49325 + error = -EACCES;
49326 + goto out;
49327 + }
49328 +
49329 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49330 +out:
49331 kfree(kvalue);
49332 return error;
49333 }
49334 @@ -295,7 +301,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49335 return error;
49336 error = mnt_want_write(path.mnt);
49337 if (!error) {
49338 - error = setxattr(path.dentry, name, value, size, flags);
49339 + error = setxattr(&path, name, value, size, flags);
49340 mnt_drop_write(path.mnt);
49341 }
49342 path_put(&path);
49343 @@ -314,7 +320,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49344 return error;
49345 error = mnt_want_write(path.mnt);
49346 if (!error) {
49347 - error = setxattr(path.dentry, name, value, size, flags);
49348 + error = setxattr(&path, name, value, size, flags);
49349 mnt_drop_write(path.mnt);
49350 }
49351 path_put(&path);
49352 @@ -325,17 +331,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49353 const void __user *,value, size_t, size, int, flags)
49354 {
49355 struct file *f;
49356 - struct dentry *dentry;
49357 int error = -EBADF;
49358
49359 f = fget(fd);
49360 if (!f)
49361 return error;
49362 - dentry = f->f_path.dentry;
49363 - audit_inode(NULL, dentry);
49364 + audit_inode(NULL, f->f_path.dentry);
49365 error = mnt_want_write_file(f);
49366 if (!error) {
49367 - error = setxattr(dentry, name, value, size, flags);
49368 + error = setxattr(&f->f_path, name, value, size, flags);
49369 mnt_drop_write(f->f_path.mnt);
49370 }
49371 fput(f);
49372 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49373 index 8d5a506..7f62712 100644
49374 --- a/fs/xattr_acl.c
49375 +++ b/fs/xattr_acl.c
49376 @@ -17,8 +17,8 @@
49377 struct posix_acl *
49378 posix_acl_from_xattr(const void *value, size_t size)
49379 {
49380 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49381 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49382 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49383 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49384 int count;
49385 struct posix_acl *acl;
49386 struct posix_acl_entry *acl_e;
49387 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49388 index 452a291..91a95f3b 100644
49389 --- a/fs/xfs/xfs_bmap.c
49390 +++ b/fs/xfs/xfs_bmap.c
49391 @@ -250,7 +250,7 @@ xfs_bmap_validate_ret(
49392 int nmap,
49393 int ret_nmap);
49394 #else
49395 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49396 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49397 #endif /* DEBUG */
49398
49399 STATIC int
49400 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49401 index 79d05e8..e3e5861 100644
49402 --- a/fs/xfs/xfs_dir2_sf.c
49403 +++ b/fs/xfs/xfs_dir2_sf.c
49404 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49405 }
49406
49407 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49408 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49409 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49410 + char name[sfep->namelen];
49411 + memcpy(name, sfep->name, sfep->namelen);
49412 + if (filldir(dirent, name, sfep->namelen,
49413 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49414 + *offset = off & 0x7fffffff;
49415 + return 0;
49416 + }
49417 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49418 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49419 *offset = off & 0x7fffffff;
49420 return 0;
49421 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49422 index f7ce7de..e1a5db0 100644
49423 --- a/fs/xfs/xfs_ioctl.c
49424 +++ b/fs/xfs/xfs_ioctl.c
49425 @@ -128,7 +128,7 @@ xfs_find_handle(
49426 }
49427
49428 error = -EFAULT;
49429 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49430 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49431 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49432 goto out_put;
49433
49434 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49435 index 673704f..74315c5 100644
49436 --- a/fs/xfs/xfs_iops.c
49437 +++ b/fs/xfs/xfs_iops.c
49438 @@ -446,7 +446,7 @@ xfs_vn_put_link(
49439 struct nameidata *nd,
49440 void *p)
49441 {
49442 - char *s = nd_get_link(nd);
49443 + const char *s = nd_get_link(nd);
49444
49445 if (!IS_ERR(s))
49446 kfree(s);
49447 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
49448 index 51fc429..a728e71 100644
49449 --- a/fs/xfs/xfs_vnodeops.c
49450 +++ b/fs/xfs/xfs_vnodeops.c
49451 @@ -123,13 +123,17 @@ xfs_readlink(
49452
49453 xfs_ilock(ip, XFS_ILOCK_SHARED);
49454
49455 - ASSERT(S_ISLNK(ip->i_d.di_mode));
49456 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
49457 -
49458 pathlen = ip->i_d.di_size;
49459 if (!pathlen)
49460 goto out;
49461
49462 + if (pathlen > MAXPATHLEN) {
49463 + xfs_alert(mp, "%s: inode (%llu) symlink length (%d) too long",
49464 + __func__, (unsigned long long)ip->i_ino, pathlen);
49465 + ASSERT(0);
49466 + return XFS_ERROR(EFSCORRUPTED);
49467 + }
49468 +
49469 if (ip->i_df.if_flags & XFS_IFINLINE) {
49470 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
49471 link[pathlen] = '\0';
49472 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49473 new file mode 100644
49474 index 0000000..9629731
49475 --- /dev/null
49476 +++ b/grsecurity/Kconfig
49477 @@ -0,0 +1,1037 @@
49478 +#
49479 +# grsecurity configuration
49480 +#
49481 +
49482 +menu "Grsecurity"
49483 +
49484 +config GRKERNSEC
49485 + bool "Grsecurity"
49486 + select CRYPTO
49487 + select CRYPTO_SHA256
49488 + help
49489 + If you say Y here, you will be able to configure many features
49490 + that will enhance the security of your system. It is highly
49491 + recommended that you say Y here and read through the help
49492 + for each option so that you fully understand the features and
49493 + can evaluate their usefulness for your machine.
49494 +
49495 +choice
49496 + prompt "Security Level"
49497 + depends on GRKERNSEC
49498 + default GRKERNSEC_CUSTOM
49499 +
49500 +config GRKERNSEC_LOW
49501 + bool "Low"
49502 + select GRKERNSEC_LINK
49503 + select GRKERNSEC_FIFO
49504 + select GRKERNSEC_RANDNET
49505 + select GRKERNSEC_DMESG
49506 + select GRKERNSEC_CHROOT
49507 + select GRKERNSEC_CHROOT_CHDIR
49508 +
49509 + help
49510 + If you choose this option, several of the grsecurity options will
49511 + be enabled that will give you greater protection against a number
49512 + of attacks, while assuring that none of your software will have any
49513 + conflicts with the additional security measures. If you run a lot
49514 + of unusual software, or you are having problems with the higher
49515 + security levels, you should say Y here. With this option, the
49516 + following features are enabled:
49517 +
49518 + - Linking restrictions
49519 + - FIFO restrictions
49520 + - Restricted dmesg
49521 + - Enforced chdir("/") on chroot
49522 + - Runtime module disabling
49523 +
49524 +config GRKERNSEC_MEDIUM
49525 + bool "Medium"
49526 + select PAX
49527 + select PAX_EI_PAX
49528 + select PAX_PT_PAX_FLAGS
49529 + select PAX_HAVE_ACL_FLAGS
49530 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49531 + select GRKERNSEC_CHROOT
49532 + select GRKERNSEC_CHROOT_SYSCTL
49533 + select GRKERNSEC_LINK
49534 + select GRKERNSEC_FIFO
49535 + select GRKERNSEC_DMESG
49536 + select GRKERNSEC_RANDNET
49537 + select GRKERNSEC_FORKFAIL
49538 + select GRKERNSEC_TIME
49539 + select GRKERNSEC_SIGNAL
49540 + select GRKERNSEC_CHROOT
49541 + select GRKERNSEC_CHROOT_UNIX
49542 + select GRKERNSEC_CHROOT_MOUNT
49543 + select GRKERNSEC_CHROOT_PIVOT
49544 + select GRKERNSEC_CHROOT_DOUBLE
49545 + select GRKERNSEC_CHROOT_CHDIR
49546 + select GRKERNSEC_CHROOT_MKNOD
49547 + select GRKERNSEC_PROC
49548 + select GRKERNSEC_PROC_USERGROUP
49549 + select PAX_RANDUSTACK
49550 + select PAX_ASLR
49551 + select PAX_RANDMMAP
49552 + select PAX_REFCOUNT if (X86 || SPARC64)
49553 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49554 +
49555 + help
49556 + If you say Y here, several features in addition to those included
49557 + in the low additional security level will be enabled. These
49558 + features provide even more security to your system, though in rare
49559 + cases they may be incompatible with very old or poorly written
49560 + software. If you enable this option, make sure that your auth
49561 + service (identd) is running as gid 1001. With this option,
49562 + the following features (in addition to those provided in the
49563 + low additional security level) will be enabled:
49564 +
49565 + - Failed fork logging
49566 + - Time change logging
49567 + - Signal logging
49568 + - Deny mounts in chroot
49569 + - Deny double chrooting
49570 + - Deny sysctl writes in chroot
49571 + - Deny mknod in chroot
49572 + - Deny access to abstract AF_UNIX sockets out of chroot
49573 + - Deny pivot_root in chroot
49574 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49575 + - /proc restrictions with special GID set to 10 (usually wheel)
49576 + - Address Space Layout Randomization (ASLR)
49577 + - Prevent exploitation of most refcount overflows
49578 + - Bounds checking of copying between the kernel and userland
49579 +
49580 +config GRKERNSEC_HIGH
49581 + bool "High"
49582 + select GRKERNSEC_LINK
49583 + select GRKERNSEC_FIFO
49584 + select GRKERNSEC_DMESG
49585 + select GRKERNSEC_FORKFAIL
49586 + select GRKERNSEC_TIME
49587 + select GRKERNSEC_SIGNAL
49588 + select GRKERNSEC_CHROOT
49589 + select GRKERNSEC_CHROOT_SHMAT
49590 + select GRKERNSEC_CHROOT_UNIX
49591 + select GRKERNSEC_CHROOT_MOUNT
49592 + select GRKERNSEC_CHROOT_FCHDIR
49593 + select GRKERNSEC_CHROOT_PIVOT
49594 + select GRKERNSEC_CHROOT_DOUBLE
49595 + select GRKERNSEC_CHROOT_CHDIR
49596 + select GRKERNSEC_CHROOT_MKNOD
49597 + select GRKERNSEC_CHROOT_CAPS
49598 + select GRKERNSEC_CHROOT_SYSCTL
49599 + select GRKERNSEC_CHROOT_FINDTASK
49600 + select GRKERNSEC_SYSFS_RESTRICT
49601 + select GRKERNSEC_PROC
49602 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49603 + select GRKERNSEC_HIDESYM
49604 + select GRKERNSEC_BRUTE
49605 + select GRKERNSEC_PROC_USERGROUP
49606 + select GRKERNSEC_KMEM
49607 + select GRKERNSEC_RESLOG
49608 + select GRKERNSEC_RANDNET
49609 + select GRKERNSEC_PROC_ADD
49610 + select GRKERNSEC_CHROOT_CHMOD
49611 + select GRKERNSEC_CHROOT_NICE
49612 + select GRKERNSEC_AUDIT_MOUNT
49613 + select GRKERNSEC_MODHARDEN if (MODULES)
49614 + select GRKERNSEC_HARDEN_PTRACE
49615 + select GRKERNSEC_VM86 if (X86_32)
49616 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49617 + select PAX
49618 + select PAX_RANDUSTACK
49619 + select PAX_ASLR
49620 + select PAX_RANDMMAP
49621 + select PAX_NOEXEC
49622 + select PAX_MPROTECT
49623 + select PAX_EI_PAX
49624 + select PAX_PT_PAX_FLAGS
49625 + select PAX_HAVE_ACL_FLAGS
49626 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49627 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
49628 + select PAX_RANDKSTACK if (X86_TSC && X86)
49629 + select PAX_SEGMEXEC if (X86_32)
49630 + select PAX_PAGEEXEC
49631 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49632 + select PAX_EMUTRAMP if (PARISC)
49633 + select PAX_EMUSIGRT if (PARISC)
49634 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49635 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49636 + select PAX_REFCOUNT if (X86 || SPARC64)
49637 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49638 + help
49639 + If you say Y here, many of the features of grsecurity will be
49640 + enabled, which will protect you against many kinds of attacks
49641 + against your system. The heightened security comes at a cost
49642 + of an increased chance of incompatibilities with rare software
49643 + on your machine. Since this security level enables PaX, you should
49644 + view <http://pax.grsecurity.net> and read about the PaX
49645 + project. While you are there, download chpax and run it on
49646 + binaries that cause problems with PaX. Also remember that
49647 + since the /proc restrictions are enabled, you must run your
49648 + identd as gid 1001. This security level enables the following
49649 + features in addition to those listed in the low and medium
49650 + security levels:
49651 +
49652 + - Additional /proc restrictions
49653 + - Chmod restrictions in chroot
49654 + - No signals, ptrace, or viewing of processes outside of chroot
49655 + - Capability restrictions in chroot
49656 + - Deny fchdir out of chroot
49657 + - Priority restrictions in chroot
49658 + - Segmentation-based implementation of PaX
49659 + - Mprotect restrictions
49660 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49661 + - Kernel stack randomization
49662 + - Mount/unmount/remount logging
49663 + - Kernel symbol hiding
49664 + - Hardening of module auto-loading
49665 + - Ptrace restrictions
49666 + - Restricted vm86 mode
49667 + - Restricted sysfs/debugfs
49668 + - Active kernel exploit response
49669 +
49670 +config GRKERNSEC_CUSTOM
49671 + bool "Custom"
49672 + help
49673 + If you say Y here, you will be able to configure every grsecurity
49674 + option, which allows you to enable many more features that aren't
49675 + covered in the basic security levels. These additional features
49676 + include TPE, socket restrictions, and the sysctl system for
49677 + grsecurity. It is advised that you read through the help for
49678 + each option to determine its usefulness in your situation.
49679 +
49680 +endchoice
49681 +
49682 +menu "Address Space Protection"
49683 +depends on GRKERNSEC
49684 +
49685 +config GRKERNSEC_KMEM
49686 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49687 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49688 + help
49689 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49690 + be written to or read from to modify or leak the contents of the running
49691 + kernel. /dev/port will also not be allowed to be opened. If you have module
49692 + support disabled, enabling this will close up four ways that are
49693 + currently used to insert malicious code into the running kernel.
49694 + Even with all these features enabled, we still highly recommend that
49695 + you use the RBAC system, as it is still possible for an attacker to
49696 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49697 + If you are not using XFree86, you may be able to stop this additional
49698 + case by enabling the 'Disable privileged I/O' option. Though nothing
49699 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49700 + but only to video memory, which is the only writing we allow in this
49701 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49702 + not be allowed to mprotect it with PROT_WRITE later.
49703 + It is highly recommended that you say Y here if you meet all the
49704 + conditions above.
49705 +
49706 +config GRKERNSEC_VM86
49707 + bool "Restrict VM86 mode"
49708 + depends on X86_32
49709 +
49710 + help
49711 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49712 + make use of a special execution mode on 32bit x86 processors called
49713 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49714 + video cards and will still work with this option enabled. The purpose
49715 + of the option is to prevent exploitation of emulation errors in
49716 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49717 + Nearly all users should be able to enable this option.
49718 +
49719 +config GRKERNSEC_IO
49720 + bool "Disable privileged I/O"
49721 + depends on X86
49722 + select RTC_CLASS
49723 + select RTC_INTF_DEV
49724 + select RTC_DRV_CMOS
49725 +
49726 + help
49727 + If you say Y here, all ioperm and iopl calls will return an error.
49728 + Ioperm and iopl can be used to modify the running kernel.
49729 + Unfortunately, some programs need this access to operate properly,
49730 + the most notable of which are XFree86 and hwclock. hwclock can be
49731 + remedied by having RTC support in the kernel, so real-time
49732 + clock support is enabled if this option is enabled, to ensure
49733 + that hwclock operates correctly. XFree86 still will not
49734 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49735 + IF YOU USE XFree86. If you use XFree86 and you still want to
49736 + protect your kernel against modification, use the RBAC system.
49737 +
49738 +config GRKERNSEC_PROC_MEMMAP
49739 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
49740 + default y if (PAX_NOEXEC || PAX_ASLR)
49741 + depends on PAX_NOEXEC || PAX_ASLR
49742 + help
49743 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49744 + give no information about the addresses of its mappings if
49745 + PaX features that rely on random addresses are enabled on the task.
49746 + If you use PaX it is greatly recommended that you say Y here as it
49747 + closes up a hole that makes the full ASLR useless for suid
49748 + binaries.
49749 +
49750 +config GRKERNSEC_BRUTE
49751 + bool "Deter exploit bruteforcing"
49752 + help
49753 + If you say Y here, attempts to bruteforce exploits against forking
49754 + daemons such as apache or sshd, as well as against suid/sgid binaries
49755 + will be deterred. When a child of a forking daemon is killed by PaX
49756 + or crashes due to an illegal instruction or other suspicious signal,
49757 + the parent process will be delayed 30 seconds upon every subsequent
49758 + fork until the administrator is able to assess the situation and
49759 + restart the daemon.
49760 + In the suid/sgid case, the attempt is logged, the user has all their
49761 + processes terminated, and they are prevented from executing any further
49762 + processes for 15 minutes.
49763 + It is recommended that you also enable signal logging in the auditing
49764 + section so that logs are generated when a process triggers a suspicious
49765 + signal.
49766 + If the sysctl option is enabled, a sysctl option with name
49767 + "deter_bruteforce" is created.
49768 +
49769 +
49770 +config GRKERNSEC_MODHARDEN
49771 + bool "Harden module auto-loading"
49772 + depends on MODULES
49773 + help
49774 + If you say Y here, module auto-loading in response to use of some
49775 + feature implemented by an unloaded module will be restricted to
49776 + root users. Enabling this option helps defend against attacks
49777 + by unprivileged users who abuse the auto-loading behavior to
49778 + cause a vulnerable module to load that is then exploited.
49779 +
49780 + If this option prevents a legitimate use of auto-loading for a
49781 + non-root user, the administrator can execute modprobe manually
49782 + with the exact name of the module mentioned in the alert log.
49783 + Alternatively, the administrator can add the module to the list
49784 + of modules loaded at boot by modifying init scripts.
49785 +
49786 + Modification of init scripts will most likely be needed on
49787 + Ubuntu servers with encrypted home directory support enabled,
49788 + as the first non-root user logging in will cause the ecb(aes),
49789 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49790 +
49791 +config GRKERNSEC_HIDESYM
49792 + bool "Hide kernel symbols"
49793 + help
49794 + If you say Y here, getting information on loaded modules, and
49795 + displaying all kernel symbols through a syscall will be restricted
49796 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49797 + /proc/kallsyms will be restricted to the root user. The RBAC
49798 + system can hide that entry even from root.
49799 +
49800 + This option also prevents leaking of kernel addresses through
49801 + several /proc entries.
49802 +
49803 + Note that this option is only effective provided the following
49804 + conditions are met:
49805 + 1) The kernel using grsecurity is not precompiled by some distribution
49806 + 2) You have also enabled GRKERNSEC_DMESG
49807 + 3) You are using the RBAC system and hiding other files such as your
49808 + kernel image and System.map. Alternatively, enabling this option
49809 + causes the permissions on /boot, /lib/modules, and the kernel
49810 + source directory to change at compile time to prevent
49811 + reading by non-root users.
49812 + If the above conditions are met, this option will aid in providing a
49813 + useful protection against local kernel exploitation of overflows
49814 + and arbitrary read/write vulnerabilities.
49815 +
49816 +config GRKERNSEC_KERN_LOCKOUT
49817 + bool "Active kernel exploit response"
49818 + depends on X86 || ARM || PPC || SPARC
49819 + help
49820 + If you say Y here, when a PaX alert is triggered due to suspicious
49821 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49822 + or an OOPs occurs due to bad memory accesses, instead of just
49823 + terminating the offending process (and potentially allowing
49824 + a subsequent exploit from the same user), we will take one of two
49825 + actions:
49826 + If the user was root, we will panic the system
49827 + If the user was non-root, we will log the attempt, terminate
49828 + all processes owned by the user, then prevent them from creating
49829 + any new processes until the system is restarted
49830 + This deters repeated kernel exploitation/bruteforcing attempts
49831 + and is useful for later forensics.
49832 +
49833 +endmenu
49834 +menu "Role Based Access Control Options"
49835 +depends on GRKERNSEC
49836 +
49837 +config GRKERNSEC_RBAC_DEBUG
49838 + bool
49839 +
49840 +config GRKERNSEC_NO_RBAC
49841 + bool "Disable RBAC system"
49842 + help
49843 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49844 + preventing the RBAC system from being enabled. You should only say Y
49845 + here if you have no intention of using the RBAC system, so as to prevent
49846 + an attacker with root access from misusing the RBAC system to hide files
49847 + and processes when loadable module support and /dev/[k]mem have been
49848 + locked down.
49849 +
49850 +config GRKERNSEC_ACL_HIDEKERN
49851 + bool "Hide kernel processes"
49852 + help
49853 + If you say Y here, all kernel threads will be hidden to all
49854 + processes but those whose subject has the "view hidden processes"
49855 + flag.
49856 +
49857 +config GRKERNSEC_ACL_MAXTRIES
49858 + int "Maximum tries before password lockout"
49859 + default 3
49860 + help
49861 + This option enforces the maximum number of times a user can attempt
49862 + to authorize themselves with the grsecurity RBAC system before being
49863 + denied the ability to attempt authorization again for a specified time.
49864 + The lower the number, the harder it will be to brute-force a password.
49865 +
49866 +config GRKERNSEC_ACL_TIMEOUT
49867 + int "Time to wait after max password tries, in seconds"
49868 + default 30
49869 + help
49870 + This option specifies the time the user must wait after attempting to
49871 + authorize to the RBAC system with the maximum number of invalid
49872 + passwords. The higher the number, the harder it will be to brute-force
49873 + a password.
49874 +
49875 +endmenu
49876 +menu "Filesystem Protections"
49877 +depends on GRKERNSEC
49878 +
49879 +config GRKERNSEC_PROC
49880 + bool "Proc restrictions"
49881 + help
49882 + If you say Y here, the permissions of the /proc filesystem
49883 + will be altered to enhance system security and privacy. You MUST
49884 + choose either a user only restriction or a user and group restriction.
49885 + Depending upon the option you choose, you can either restrict users to
49886 + see only the processes they themselves run, or choose a group that can
49887 + view all processes and files normally restricted to root if you choose
49888 + the "restrict to user only" option. NOTE: If you're running identd as
49889 + a non-root user, you will have to run it as the group you specify here.
49890 +
49891 +config GRKERNSEC_PROC_USER
49892 + bool "Restrict /proc to user only"
49893 + depends on GRKERNSEC_PROC
49894 + help
49895 + If you say Y here, non-root users will only be able to view their own
49896 + processes, and restricts them from viewing network-related information,
49897 + and viewing kernel symbol and module information.
49898 +
49899 +config GRKERNSEC_PROC_USERGROUP
49900 + bool "Allow special group"
49901 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49902 + help
49903 + If you say Y here, you will be able to select a group that will be
49904 + able to view all processes and network-related information. If you've
49905 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49906 + remain hidden. This option is useful if you want to run identd as
49907 + a non-root user.
49908 +
49909 +config GRKERNSEC_PROC_GID
49910 + int "GID for special group"
49911 + depends on GRKERNSEC_PROC_USERGROUP
49912 + default 1001
49913 +
49914 +config GRKERNSEC_PROC_ADD
49915 + bool "Additional restrictions"
49916 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49917 + help
49918 + If you say Y here, additional restrictions will be placed on
49919 + /proc that keep normal users from viewing device information and
49920 + slabinfo information that could be useful for exploits.
49921 +
49922 +config GRKERNSEC_LINK
49923 + bool "Linking restrictions"
49924 + help
49925 + If you say Y here, /tmp race exploits will be prevented, since users
49926 + will no longer be able to follow symlinks owned by other users in
49927 + world-writable +t directories (e.g. /tmp), unless the owner of the
49928 + symlink is the owner of the directory. users will also not be
49929 + able to hardlink to files they do not own. If the sysctl option is
49930 + enabled, a sysctl option with name "linking_restrictions" is created.
49931 +
49932 +config GRKERNSEC_FIFO
49933 + bool "FIFO restrictions"
49934 + help
49935 + If you say Y here, users will not be able to write to FIFOs they don't
49936 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49937 + the FIFO is the same owner of the directory it's held in. If the sysctl
49938 + option is enabled, a sysctl option with name "fifo_restrictions" is
49939 + created.
49940 +
49941 +config GRKERNSEC_SYSFS_RESTRICT
49942 + bool "Sysfs/debugfs restriction"
49943 + depends on SYSFS
49944 + help
49945 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49946 + any filesystem normally mounted under it (e.g. debugfs) will only
49947 + be accessible by root. These filesystems generally provide access
49948 + to hardware and debug information that isn't appropriate for unprivileged
49949 + users of the system. Sysfs and debugfs have also become a large source
49950 + of new vulnerabilities, ranging from infoleaks to local compromise.
49951 + There has been very little oversight with an eye toward security involved
49952 + in adding new exporters of information to these filesystems, so their
49953 + use is discouraged.
49954 + This option is equivalent to a chmod 0700 of the mount paths.
49955 +
49956 +config GRKERNSEC_ROFS
49957 + bool "Runtime read-only mount protection"
49958 + help
49959 + If you say Y here, a sysctl option with name "romount_protect" will
49960 + be created. By setting this option to 1 at runtime, filesystems
49961 + will be protected in the following ways:
49962 + * No new writable mounts will be allowed
49963 + * Existing read-only mounts won't be able to be remounted read/write
49964 + * Write operations will be denied on all block devices
49965 + This option acts independently of grsec_lock: once it is set to 1,
49966 + it cannot be turned off. Therefore, please be mindful of the resulting
49967 + behavior if this option is enabled in an init script on a read-only
49968 + filesystem. This feature is mainly intended for secure embedded systems.
49969 +
49970 +config GRKERNSEC_CHROOT
49971 + bool "Chroot jail restrictions"
49972 + help
49973 + If you say Y here, you will be able to choose several options that will
49974 + make breaking out of a chrooted jail much more difficult. If you
49975 + encounter no software incompatibilities with the following options, it
49976 + is recommended that you enable each one.
49977 +
49978 +config GRKERNSEC_CHROOT_MOUNT
49979 + bool "Deny mounts"
49980 + depends on GRKERNSEC_CHROOT
49981 + help
49982 + If you say Y here, processes inside a chroot will not be able to
49983 + mount or remount filesystems. If the sysctl option is enabled, a
49984 + sysctl option with name "chroot_deny_mount" is created.
49985 +
49986 +config GRKERNSEC_CHROOT_DOUBLE
49987 + bool "Deny double-chroots"
49988 + depends on GRKERNSEC_CHROOT
49989 + help
49990 + If you say Y here, processes inside a chroot will not be able to chroot
49991 + again outside the chroot. This is a widely used method of breaking
49992 + out of a chroot jail and should not be allowed. If the sysctl
49993 + option is enabled, a sysctl option with name
49994 + "chroot_deny_chroot" is created.
49995 +
49996 +config GRKERNSEC_CHROOT_PIVOT
49997 + bool "Deny pivot_root in chroot"
49998 + depends on GRKERNSEC_CHROOT
49999 + help
50000 + If you say Y here, processes inside a chroot will not be able to use
50001 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50002 + works similar to chroot in that it changes the root filesystem. This
50003 + function could be misused in a chrooted process to attempt to break out
50004 + of the chroot, and therefore should not be allowed. If the sysctl
50005 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50006 + created.
50007 +
50008 +config GRKERNSEC_CHROOT_CHDIR
50009 + bool "Enforce chdir(\"/\") on all chroots"
50010 + depends on GRKERNSEC_CHROOT
50011 + help
50012 + If you say Y here, the current working directory of all newly-chrooted
50013 + applications will be set to the the root directory of the chroot.
50014 + The man page on chroot(2) states:
50015 + Note that this call does not change the current working
50016 + directory, so that `.' can be outside the tree rooted at
50017 + `/'. In particular, the super-user can escape from a
50018 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50019 +
50020 + It is recommended that you say Y here, since it's not known to break
50021 + any software. If the sysctl option is enabled, a sysctl option with
50022 + name "chroot_enforce_chdir" is created.
50023 +
50024 +config GRKERNSEC_CHROOT_CHMOD
50025 + bool "Deny (f)chmod +s"
50026 + depends on GRKERNSEC_CHROOT
50027 + help
50028 + If you say Y here, processes inside a chroot will not be able to chmod
50029 + or fchmod files to make them have suid or sgid bits. This protects
50030 + against another published method of breaking a chroot. If the sysctl
50031 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50032 + created.
50033 +
50034 +config GRKERNSEC_CHROOT_FCHDIR
50035 + bool "Deny fchdir out of chroot"
50036 + depends on GRKERNSEC_CHROOT
50037 + help
50038 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50039 + to a file descriptor of the chrooting process that points to a directory
50040 + outside the filesystem will be stopped. If the sysctl option
50041 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50042 +
50043 +config GRKERNSEC_CHROOT_MKNOD
50044 + bool "Deny mknod"
50045 + depends on GRKERNSEC_CHROOT
50046 + help
50047 + If you say Y here, processes inside a chroot will not be allowed to
50048 + mknod. The problem with using mknod inside a chroot is that it
50049 + would allow an attacker to create a device entry that is the same
50050 + as one on the physical root of your system, which could range from
50051 + anything from the console device to a device for your harddrive (which
50052 + they could then use to wipe the drive or steal data). It is recommended
50053 + that you say Y here, unless you run into software incompatibilities.
50054 + If the sysctl option is enabled, a sysctl option with name
50055 + "chroot_deny_mknod" is created.
50056 +
50057 +config GRKERNSEC_CHROOT_SHMAT
50058 + bool "Deny shmat() out of chroot"
50059 + depends on GRKERNSEC_CHROOT
50060 + help
50061 + If you say Y here, processes inside a chroot will not be able to attach
50062 + to shared memory segments that were created outside of the chroot jail.
50063 + It is recommended that you say Y here. If the sysctl option is enabled,
50064 + a sysctl option with name "chroot_deny_shmat" is created.
50065 +
50066 +config GRKERNSEC_CHROOT_UNIX
50067 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50068 + depends on GRKERNSEC_CHROOT
50069 + help
50070 + If you say Y here, processes inside a chroot will not be able to
50071 + connect to abstract (meaning not belonging to a filesystem) Unix
50072 + domain sockets that were bound outside of a chroot. It is recommended
50073 + that you say Y here. If the sysctl option is enabled, a sysctl option
50074 + with name "chroot_deny_unix" is created.
50075 +
50076 +config GRKERNSEC_CHROOT_FINDTASK
50077 + bool "Protect outside processes"
50078 + depends on GRKERNSEC_CHROOT
50079 + help
50080 + If you say Y here, processes inside a chroot will not be able to
50081 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50082 + getsid, or view any process outside of the chroot. If the sysctl
50083 + option is enabled, a sysctl option with name "chroot_findtask" is
50084 + created.
50085 +
50086 +config GRKERNSEC_CHROOT_NICE
50087 + bool "Restrict priority changes"
50088 + depends on GRKERNSEC_CHROOT
50089 + help
50090 + If you say Y here, processes inside a chroot will not be able to raise
50091 + the priority of processes in the chroot, or alter the priority of
50092 + processes outside the chroot. This provides more security than simply
50093 + removing CAP_SYS_NICE from the process' capability set. If the
50094 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50095 + is created.
50096 +
50097 +config GRKERNSEC_CHROOT_SYSCTL
50098 + bool "Deny sysctl writes"
50099 + depends on GRKERNSEC_CHROOT
50100 + help
50101 + If you say Y here, an attacker in a chroot will not be able to
50102 + write to sysctl entries, either by sysctl(2) or through a /proc
50103 + interface. It is strongly recommended that you say Y here. If the
50104 + sysctl option is enabled, a sysctl option with name
50105 + "chroot_deny_sysctl" is created.
50106 +
50107 +config GRKERNSEC_CHROOT_CAPS
50108 + bool "Capability restrictions"
50109 + depends on GRKERNSEC_CHROOT
50110 + help
50111 + If you say Y here, the capabilities on all processes within a
50112 + chroot jail will be lowered to stop module insertion, raw i/o,
50113 + system and net admin tasks, rebooting the system, modifying immutable
50114 + files, modifying IPC owned by another, and changing the system time.
50115 + This is left an option because it can break some apps. Disable this
50116 + if your chrooted apps are having problems performing those kinds of
50117 + tasks. If the sysctl option is enabled, a sysctl option with
50118 + name "chroot_caps" is created.
50119 +
50120 +endmenu
50121 +menu "Kernel Auditing"
50122 +depends on GRKERNSEC
50123 +
50124 +config GRKERNSEC_AUDIT_GROUP
50125 + bool "Single group for auditing"
50126 + help
50127 + If you say Y here, the exec, chdir, and (un)mount logging features
50128 + will only operate on a group you specify. This option is recommended
50129 + if you only want to watch certain users instead of having a large
50130 + amount of logs from the entire system. If the sysctl option is enabled,
50131 + a sysctl option with name "audit_group" is created.
50132 +
50133 +config GRKERNSEC_AUDIT_GID
50134 + int "GID for auditing"
50135 + depends on GRKERNSEC_AUDIT_GROUP
50136 + default 1007
50137 +
50138 +config GRKERNSEC_EXECLOG
50139 + bool "Exec logging"
50140 + help
50141 + If you say Y here, all execve() calls will be logged (since the
50142 + other exec*() calls are frontends to execve(), all execution
50143 + will be logged). Useful for shell-servers that like to keep track
50144 + of their users. If the sysctl option is enabled, a sysctl option with
50145 + name "exec_logging" is created.
50146 + WARNING: This option when enabled will produce a LOT of logs, especially
50147 + on an active system.
50148 +
50149 +config GRKERNSEC_RESLOG
50150 + bool "Resource logging"
50151 + help
50152 + If you say Y here, all attempts to overstep resource limits will
50153 + be logged with the resource name, the requested size, and the current
50154 + limit. It is highly recommended that you say Y here. If the sysctl
50155 + option is enabled, a sysctl option with name "resource_logging" is
50156 + created. If the RBAC system is enabled, the sysctl value is ignored.
50157 +
50158 +config GRKERNSEC_CHROOT_EXECLOG
50159 + bool "Log execs within chroot"
50160 + help
50161 + If you say Y here, all executions inside a chroot jail will be logged
50162 + to syslog. This can cause a large amount of logs if certain
50163 + applications (eg. djb's daemontools) are installed on the system, and
50164 + is therefore left as an option. If the sysctl option is enabled, a
50165 + sysctl option with name "chroot_execlog" is created.
50166 +
50167 +config GRKERNSEC_AUDIT_PTRACE
50168 + bool "Ptrace logging"
50169 + help
50170 + If you say Y here, all attempts to attach to a process via ptrace
50171 + will be logged. If the sysctl option is enabled, a sysctl option
50172 + with name "audit_ptrace" is created.
50173 +
50174 +config GRKERNSEC_AUDIT_CHDIR
50175 + bool "Chdir logging"
50176 + help
50177 + If you say Y here, all chdir() calls will be logged. If the sysctl
50178 + option is enabled, a sysctl option with name "audit_chdir" is created.
50179 +
50180 +config GRKERNSEC_AUDIT_MOUNT
50181 + bool "(Un)Mount logging"
50182 + help
50183 + If you say Y here, all mounts and unmounts will be logged. If the
50184 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50185 + created.
50186 +
50187 +config GRKERNSEC_SIGNAL
50188 + bool "Signal logging"
50189 + help
50190 + If you say Y here, certain important signals will be logged, such as
50191 + SIGSEGV, which will as a result inform you of when a error in a program
50192 + occurred, which in some cases could mean a possible exploit attempt.
50193 + If the sysctl option is enabled, a sysctl option with name
50194 + "signal_logging" is created.
50195 +
50196 +config GRKERNSEC_FORKFAIL
50197 + bool "Fork failure logging"
50198 + help
50199 + If you say Y here, all failed fork() attempts will be logged.
50200 + This could suggest a fork bomb, or someone attempting to overstep
50201 + their process limit. If the sysctl option is enabled, a sysctl option
50202 + with name "forkfail_logging" is created.
50203 +
50204 +config GRKERNSEC_TIME
50205 + bool "Time change logging"
50206 + help
50207 + If you say Y here, any changes of the system clock will be logged.
50208 + If the sysctl option is enabled, a sysctl option with name
50209 + "timechange_logging" is created.
50210 +
50211 +config GRKERNSEC_PROC_IPADDR
50212 + bool "/proc/<pid>/ipaddr support"
50213 + help
50214 + If you say Y here, a new entry will be added to each /proc/<pid>
50215 + directory that contains the IP address of the person using the task.
50216 + The IP is carried across local TCP and AF_UNIX stream sockets.
50217 + This information can be useful for IDS/IPSes to perform remote response
50218 + to a local attack. The entry is readable by only the owner of the
50219 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50220 + the RBAC system), and thus does not create privacy concerns.
50221 +
50222 +config GRKERNSEC_RWXMAP_LOG
50223 + bool 'Denied RWX mmap/mprotect logging'
50224 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50225 + help
50226 + If you say Y here, calls to mmap() and mprotect() with explicit
50227 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50228 + denied by the PAX_MPROTECT feature. If the sysctl option is
50229 + enabled, a sysctl option with name "rwxmap_logging" is created.
50230 +
50231 +config GRKERNSEC_AUDIT_TEXTREL
50232 + bool 'ELF text relocations logging (READ HELP)'
50233 + depends on PAX_MPROTECT
50234 + help
50235 + If you say Y here, text relocations will be logged with the filename
50236 + of the offending library or binary. The purpose of the feature is
50237 + to help Linux distribution developers get rid of libraries and
50238 + binaries that need text relocations which hinder the future progress
50239 + of PaX. Only Linux distribution developers should say Y here, and
50240 + never on a production machine, as this option creates an information
50241 + leak that could aid an attacker in defeating the randomization of
50242 + a single memory region. If the sysctl option is enabled, a sysctl
50243 + option with name "audit_textrel" is created.
50244 +
50245 +endmenu
50246 +
50247 +menu "Executable Protections"
50248 +depends on GRKERNSEC
50249 +
50250 +config GRKERNSEC_DMESG
50251 + bool "Dmesg(8) restriction"
50252 + help
50253 + If you say Y here, non-root users will not be able to use dmesg(8)
50254 + to view up to the last 4kb of messages in the kernel's log buffer.
50255 + The kernel's log buffer often contains kernel addresses and other
50256 + identifying information useful to an attacker in fingerprinting a
50257 + system for a targeted exploit.
50258 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50259 + created.
50260 +
50261 +config GRKERNSEC_HARDEN_PTRACE
50262 + bool "Deter ptrace-based process snooping"
50263 + help
50264 + If you say Y here, TTY sniffers and other malicious monitoring
50265 + programs implemented through ptrace will be defeated. If you
50266 + have been using the RBAC system, this option has already been
50267 + enabled for several years for all users, with the ability to make
50268 + fine-grained exceptions.
50269 +
50270 + This option only affects the ability of non-root users to ptrace
50271 + processes that are not a descendent of the ptracing process.
50272 + This means that strace ./binary and gdb ./binary will still work,
50273 + but attaching to arbitrary processes will not. If the sysctl
50274 + option is enabled, a sysctl option with name "harden_ptrace" is
50275 + created.
50276 +
50277 +config GRKERNSEC_TPE
50278 + bool "Trusted Path Execution (TPE)"
50279 + help
50280 + If you say Y here, you will be able to choose a gid to add to the
50281 + supplementary groups of users you want to mark as "untrusted."
50282 + These users will not be able to execute any files that are not in
50283 + root-owned directories writable only by root. If the sysctl option
50284 + is enabled, a sysctl option with name "tpe" is created.
50285 +
50286 +config GRKERNSEC_TPE_ALL
50287 + bool "Partially restrict all non-root users"
50288 + depends on GRKERNSEC_TPE
50289 + help
50290 + If you say Y here, all non-root users will be covered under
50291 + a weaker TPE restriction. This is separate from, and in addition to,
50292 + the main TPE options that you have selected elsewhere. Thus, if a
50293 + "trusted" GID is chosen, this restriction applies to even that GID.
50294 + Under this restriction, all non-root users will only be allowed to
50295 + execute files in directories they own that are not group or
50296 + world-writable, or in directories owned by root and writable only by
50297 + root. If the sysctl option is enabled, a sysctl option with name
50298 + "tpe_restrict_all" is created.
50299 +
50300 +config GRKERNSEC_TPE_INVERT
50301 + bool "Invert GID option"
50302 + depends on GRKERNSEC_TPE
50303 + help
50304 + If you say Y here, the group you specify in the TPE configuration will
50305 + decide what group TPE restrictions will be *disabled* for. This
50306 + option is useful if you want TPE restrictions to be applied to most
50307 + users on the system. If the sysctl option is enabled, a sysctl option
50308 + with name "tpe_invert" is created. Unlike other sysctl options, this
50309 + entry will default to on for backward-compatibility.
50310 +
50311 +config GRKERNSEC_TPE_GID
50312 + int "GID for untrusted users"
50313 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50314 + default 1005
50315 + help
50316 + Setting this GID determines what group TPE restrictions will be
50317 + *enabled* for. If the sysctl option is enabled, a sysctl option
50318 + with name "tpe_gid" is created.
50319 +
50320 +config GRKERNSEC_TPE_GID
50321 + int "GID for trusted users"
50322 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50323 + default 1005
50324 + help
50325 + Setting this GID determines what group TPE restrictions will be
50326 + *disabled* for. If the sysctl option is enabled, a sysctl option
50327 + with name "tpe_gid" is created.
50328 +
50329 +endmenu
50330 +menu "Network Protections"
50331 +depends on GRKERNSEC
50332 +
50333 +config GRKERNSEC_RANDNET
50334 + bool "Larger entropy pools"
50335 + help
50336 + If you say Y here, the entropy pools used for many features of Linux
50337 + and grsecurity will be doubled in size. Since several grsecurity
50338 + features use additional randomness, it is recommended that you say Y
50339 + here. Saying Y here has a similar effect as modifying
50340 + /proc/sys/kernel/random/poolsize.
50341 +
50342 +config GRKERNSEC_BLACKHOLE
50343 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50344 + depends on NET
50345 + help
50346 + If you say Y here, neither TCP resets nor ICMP
50347 + destination-unreachable packets will be sent in response to packets
50348 + sent to ports for which no associated listening process exists.
50349 + This feature supports both IPV4 and IPV6 and exempts the
50350 + loopback interface from blackholing. Enabling this feature
50351 + makes a host more resilient to DoS attacks and reduces network
50352 + visibility against scanners.
50353 +
50354 + The blackhole feature as-implemented is equivalent to the FreeBSD
50355 + blackhole feature, as it prevents RST responses to all packets, not
50356 + just SYNs. Under most application behavior this causes no
50357 + problems, but applications (like haproxy) may not close certain
50358 + connections in a way that cleanly terminates them on the remote
50359 + end, leaving the remote host in LAST_ACK state. Because of this
50360 + side-effect and to prevent intentional LAST_ACK DoSes, this
50361 + feature also adds automatic mitigation against such attacks.
50362 + The mitigation drastically reduces the amount of time a socket
50363 + can spend in LAST_ACK state. If you're using haproxy and not
50364 + all servers it connects to have this option enabled, consider
50365 + disabling this feature on the haproxy host.
50366 +
50367 + If the sysctl option is enabled, two sysctl options with names
50368 + "ip_blackhole" and "lastack_retries" will be created.
50369 + While "ip_blackhole" takes the standard zero/non-zero on/off
50370 + toggle, "lastack_retries" uses the same kinds of values as
50371 + "tcp_retries1" and "tcp_retries2". The default value of 4
50372 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50373 + state.
50374 +
50375 +config GRKERNSEC_SOCKET
50376 + bool "Socket restrictions"
50377 + depends on NET
50378 + help
50379 + If you say Y here, you will be able to choose from several options.
50380 + If you assign a GID on your system and add it to the supplementary
50381 + groups of users you want to restrict socket access to, this patch
50382 + will perform up to three things, based on the option(s) you choose.
50383 +
50384 +config GRKERNSEC_SOCKET_ALL
50385 + bool "Deny any sockets to group"
50386 + depends on GRKERNSEC_SOCKET
50387 + help
50388 + If you say Y here, you will be able to choose a GID of whose users will
50389 + be unable to connect to other hosts from your machine or run server
50390 + applications from your machine. If the sysctl option is enabled, a
50391 + sysctl option with name "socket_all" is created.
50392 +
50393 +config GRKERNSEC_SOCKET_ALL_GID
50394 + int "GID to deny all sockets for"
50395 + depends on GRKERNSEC_SOCKET_ALL
50396 + default 1004
50397 + help
50398 + Here you can choose the GID to disable socket access for. Remember to
50399 + add the users you want socket access disabled for to the GID
50400 + specified here. If the sysctl option is enabled, a sysctl option
50401 + with name "socket_all_gid" is created.
50402 +
50403 +config GRKERNSEC_SOCKET_CLIENT
50404 + bool "Deny client sockets to group"
50405 + depends on GRKERNSEC_SOCKET
50406 + help
50407 + If you say Y here, you will be able to choose a GID of whose users will
50408 + be unable to connect to other hosts from your machine, but will be
50409 + able to run servers. If this option is enabled, all users in the group
50410 + you specify will have to use passive mode when initiating ftp transfers
50411 + from the shell on your machine. If the sysctl option is enabled, a
50412 + sysctl option with name "socket_client" is created.
50413 +
50414 +config GRKERNSEC_SOCKET_CLIENT_GID
50415 + int "GID to deny client sockets for"
50416 + depends on GRKERNSEC_SOCKET_CLIENT
50417 + default 1003
50418 + help
50419 + Here you can choose the GID to disable client socket access for.
50420 + Remember to add the users you want client socket access disabled for to
50421 + the GID specified here. If the sysctl option is enabled, a sysctl
50422 + option with name "socket_client_gid" is created.
50423 +
50424 +config GRKERNSEC_SOCKET_SERVER
50425 + bool "Deny server sockets to group"
50426 + depends on GRKERNSEC_SOCKET
50427 + help
50428 + If you say Y here, you will be able to choose a GID of whose users will
50429 + be unable to run server applications from your machine. If the sysctl
50430 + option is enabled, a sysctl option with name "socket_server" is created.
50431 +
50432 +config GRKERNSEC_SOCKET_SERVER_GID
50433 + int "GID to deny server sockets for"
50434 + depends on GRKERNSEC_SOCKET_SERVER
50435 + default 1002
50436 + help
50437 + Here you can choose the GID to disable server socket access for.
50438 + Remember to add the users you want server socket access disabled for to
50439 + the GID specified here. If the sysctl option is enabled, a sysctl
50440 + option with name "socket_server_gid" is created.
50441 +
50442 +endmenu
50443 +menu "Sysctl support"
50444 +depends on GRKERNSEC && SYSCTL
50445 +
50446 +config GRKERNSEC_SYSCTL
50447 + bool "Sysctl support"
50448 + help
50449 + If you say Y here, you will be able to change the options that
50450 + grsecurity runs with at bootup, without having to recompile your
50451 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50452 + to enable (1) or disable (0) various features. All the sysctl entries
50453 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50454 + All features enabled in the kernel configuration are disabled at boot
50455 + if you do not say Y to the "Turn on features by default" option.
50456 + All options should be set at startup, and the grsec_lock entry should
50457 + be set to a non-zero value after all the options are set.
50458 + *THIS IS EXTREMELY IMPORTANT*
50459 +
50460 +config GRKERNSEC_SYSCTL_DISTRO
50461 + bool "Extra sysctl support for distro makers (READ HELP)"
50462 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50463 + help
50464 + If you say Y here, additional sysctl options will be created
50465 + for features that affect processes running as root. Therefore,
50466 + it is critical when using this option that the grsec_lock entry be
50467 + enabled after boot. Only distros with prebuilt kernel packages
50468 + with this option enabled that can ensure grsec_lock is enabled
50469 + after boot should use this option.
50470 + *Failure to set grsec_lock after boot makes all grsec features
50471 + this option covers useless*
50472 +
50473 + Currently this option creates the following sysctl entries:
50474 + "Disable Privileged I/O": "disable_priv_io"
50475 +
50476 +config GRKERNSEC_SYSCTL_ON
50477 + bool "Turn on features by default"
50478 + depends on GRKERNSEC_SYSCTL
50479 + help
50480 + If you say Y here, instead of having all features enabled in the
50481 + kernel configuration disabled at boot time, the features will be
50482 + enabled at boot time. It is recommended you say Y here unless
50483 + there is some reason you would want all sysctl-tunable features to
50484 + be disabled by default. As mentioned elsewhere, it is important
50485 + to enable the grsec_lock entry once you have finished modifying
50486 + the sysctl entries.
50487 +
50488 +endmenu
50489 +menu "Logging Options"
50490 +depends on GRKERNSEC
50491 +
50492 +config GRKERNSEC_FLOODTIME
50493 + int "Seconds in between log messages (minimum)"
50494 + default 10
50495 + help
50496 + This option allows you to enforce the number of seconds between
50497 + grsecurity log messages. The default should be suitable for most
50498 + people, however, if you choose to change it, choose a value small enough
50499 + to allow informative logs to be produced, but large enough to
50500 + prevent flooding.
50501 +
50502 +config GRKERNSEC_FLOODBURST
50503 + int "Number of messages in a burst (maximum)"
50504 + default 6
50505 + help
50506 + This option allows you to choose the maximum number of messages allowed
50507 + within the flood time interval you chose in a separate option. The
50508 + default should be suitable for most people, however if you find that
50509 + many of your logs are being interpreted as flooding, you may want to
50510 + raise this value.
50511 +
50512 +endmenu
50513 +
50514 +endmenu
50515 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50516 new file mode 100644
50517 index 0000000..be9ae3a
50518 --- /dev/null
50519 +++ b/grsecurity/Makefile
50520 @@ -0,0 +1,36 @@
50521 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50522 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50523 +# into an RBAC system
50524 +#
50525 +# All code in this directory and various hooks inserted throughout the kernel
50526 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50527 +# under the GPL v2 or higher
50528 +
50529 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50530 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50531 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50532 +
50533 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50534 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50535 + gracl_learn.o grsec_log.o
50536 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50537 +
50538 +ifdef CONFIG_NET
50539 +obj-y += grsec_sock.o
50540 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50541 +endif
50542 +
50543 +ifndef CONFIG_GRKERNSEC
50544 +obj-y += grsec_disabled.o
50545 +endif
50546 +
50547 +ifdef CONFIG_GRKERNSEC_HIDESYM
50548 +extra-y := grsec_hidesym.o
50549 +$(obj)/grsec_hidesym.o:
50550 + @-chmod -f 500 /boot
50551 + @-chmod -f 500 /lib/modules
50552 + @-chmod -f 500 /lib64/modules
50553 + @-chmod -f 500 /lib32/modules
50554 + @-chmod -f 700 .
50555 + @echo ' grsec: protected kernel image paths'
50556 +endif
50557 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50558 new file mode 100644
50559 index 0000000..09258e0
50560 --- /dev/null
50561 +++ b/grsecurity/gracl.c
50562 @@ -0,0 +1,4156 @@
50563 +#include <linux/kernel.h>
50564 +#include <linux/module.h>
50565 +#include <linux/sched.h>
50566 +#include <linux/mm.h>
50567 +#include <linux/file.h>
50568 +#include <linux/fs.h>
50569 +#include <linux/namei.h>
50570 +#include <linux/mount.h>
50571 +#include <linux/tty.h>
50572 +#include <linux/proc_fs.h>
50573 +#include <linux/lglock.h>
50574 +#include <linux/slab.h>
50575 +#include <linux/vmalloc.h>
50576 +#include <linux/types.h>
50577 +#include <linux/sysctl.h>
50578 +#include <linux/netdevice.h>
50579 +#include <linux/ptrace.h>
50580 +#include <linux/gracl.h>
50581 +#include <linux/gralloc.h>
50582 +#include <linux/grsecurity.h>
50583 +#include <linux/grinternal.h>
50584 +#include <linux/pid_namespace.h>
50585 +#include <linux/fdtable.h>
50586 +#include <linux/percpu.h>
50587 +
50588 +#include <asm/uaccess.h>
50589 +#include <asm/errno.h>
50590 +#include <asm/mman.h>
50591 +
50592 +static struct acl_role_db acl_role_set;
50593 +static struct name_db name_set;
50594 +static struct inodev_db inodev_set;
50595 +
50596 +/* for keeping track of userspace pointers used for subjects, so we
50597 + can share references in the kernel as well
50598 +*/
50599 +
50600 +static struct path real_root;
50601 +
50602 +static struct acl_subj_map_db subj_map_set;
50603 +
50604 +static struct acl_role_label *default_role;
50605 +
50606 +static struct acl_role_label *role_list;
50607 +
50608 +static u16 acl_sp_role_value;
50609 +
50610 +extern char *gr_shared_page[4];
50611 +static DEFINE_MUTEX(gr_dev_mutex);
50612 +DEFINE_RWLOCK(gr_inode_lock);
50613 +
50614 +struct gr_arg *gr_usermode;
50615 +
50616 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50617 +
50618 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50619 +extern void gr_clear_learn_entries(void);
50620 +
50621 +#ifdef CONFIG_GRKERNSEC_RESLOG
50622 +extern void gr_log_resource(const struct task_struct *task,
50623 + const int res, const unsigned long wanted, const int gt);
50624 +#endif
50625 +
50626 +unsigned char *gr_system_salt;
50627 +unsigned char *gr_system_sum;
50628 +
50629 +static struct sprole_pw **acl_special_roles = NULL;
50630 +static __u16 num_sprole_pws = 0;
50631 +
50632 +static struct acl_role_label *kernel_role = NULL;
50633 +
50634 +static unsigned int gr_auth_attempts = 0;
50635 +static unsigned long gr_auth_expires = 0UL;
50636 +
50637 +#ifdef CONFIG_NET
50638 +extern struct vfsmount *sock_mnt;
50639 +#endif
50640 +
50641 +extern struct vfsmount *pipe_mnt;
50642 +extern struct vfsmount *shm_mnt;
50643 +#ifdef CONFIG_HUGETLBFS
50644 +extern struct vfsmount *hugetlbfs_vfsmount;
50645 +#endif
50646 +
50647 +static struct acl_object_label *fakefs_obj_rw;
50648 +static struct acl_object_label *fakefs_obj_rwx;
50649 +
50650 +extern int gr_init_uidset(void);
50651 +extern void gr_free_uidset(void);
50652 +extern void gr_remove_uid(uid_t uid);
50653 +extern int gr_find_uid(uid_t uid);
50654 +
50655 +DECLARE_BRLOCK(vfsmount_lock);
50656 +
50657 +__inline__ int
50658 +gr_acl_is_enabled(void)
50659 +{
50660 + return (gr_status & GR_READY);
50661 +}
50662 +
50663 +#ifdef CONFIG_BTRFS_FS
50664 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50665 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50666 +#endif
50667 +
50668 +static inline dev_t __get_dev(const struct dentry *dentry)
50669 +{
50670 +#ifdef CONFIG_BTRFS_FS
50671 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50672 + return get_btrfs_dev_from_inode(dentry->d_inode);
50673 + else
50674 +#endif
50675 + return dentry->d_inode->i_sb->s_dev;
50676 +}
50677 +
50678 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50679 +{
50680 + return __get_dev(dentry);
50681 +}
50682 +
50683 +static char gr_task_roletype_to_char(struct task_struct *task)
50684 +{
50685 + switch (task->role->roletype &
50686 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50687 + GR_ROLE_SPECIAL)) {
50688 + case GR_ROLE_DEFAULT:
50689 + return 'D';
50690 + case GR_ROLE_USER:
50691 + return 'U';
50692 + case GR_ROLE_GROUP:
50693 + return 'G';
50694 + case GR_ROLE_SPECIAL:
50695 + return 'S';
50696 + }
50697 +
50698 + return 'X';
50699 +}
50700 +
50701 +char gr_roletype_to_char(void)
50702 +{
50703 + return gr_task_roletype_to_char(current);
50704 +}
50705 +
50706 +__inline__ int
50707 +gr_acl_tpe_check(void)
50708 +{
50709 + if (unlikely(!(gr_status & GR_READY)))
50710 + return 0;
50711 + if (current->role->roletype & GR_ROLE_TPE)
50712 + return 1;
50713 + else
50714 + return 0;
50715 +}
50716 +
50717 +int
50718 +gr_handle_rawio(const struct inode *inode)
50719 +{
50720 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50721 + if (inode && S_ISBLK(inode->i_mode) &&
50722 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50723 + !capable(CAP_SYS_RAWIO))
50724 + return 1;
50725 +#endif
50726 + return 0;
50727 +}
50728 +
50729 +static int
50730 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50731 +{
50732 + if (likely(lena != lenb))
50733 + return 0;
50734 +
50735 + return !memcmp(a, b, lena);
50736 +}
50737 +
50738 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50739 +{
50740 + *buflen -= namelen;
50741 + if (*buflen < 0)
50742 + return -ENAMETOOLONG;
50743 + *buffer -= namelen;
50744 + memcpy(*buffer, str, namelen);
50745 + return 0;
50746 +}
50747 +
50748 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50749 +{
50750 + return prepend(buffer, buflen, name->name, name->len);
50751 +}
50752 +
50753 +static int prepend_path(const struct path *path, struct path *root,
50754 + char **buffer, int *buflen)
50755 +{
50756 + struct dentry *dentry = path->dentry;
50757 + struct vfsmount *vfsmnt = path->mnt;
50758 + bool slash = false;
50759 + int error = 0;
50760 +
50761 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50762 + struct dentry * parent;
50763 +
50764 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50765 + /* Global root? */
50766 + if (vfsmnt->mnt_parent == vfsmnt) {
50767 + goto out;
50768 + }
50769 + dentry = vfsmnt->mnt_mountpoint;
50770 + vfsmnt = vfsmnt->mnt_parent;
50771 + continue;
50772 + }
50773 + parent = dentry->d_parent;
50774 + prefetch(parent);
50775 + spin_lock(&dentry->d_lock);
50776 + error = prepend_name(buffer, buflen, &dentry->d_name);
50777 + spin_unlock(&dentry->d_lock);
50778 + if (!error)
50779 + error = prepend(buffer, buflen, "/", 1);
50780 + if (error)
50781 + break;
50782 +
50783 + slash = true;
50784 + dentry = parent;
50785 + }
50786 +
50787 +out:
50788 + if (!error && !slash)
50789 + error = prepend(buffer, buflen, "/", 1);
50790 +
50791 + return error;
50792 +}
50793 +
50794 +/* this must be called with vfsmount_lock and rename_lock held */
50795 +
50796 +static char *__our_d_path(const struct path *path, struct path *root,
50797 + char *buf, int buflen)
50798 +{
50799 + char *res = buf + buflen;
50800 + int error;
50801 +
50802 + prepend(&res, &buflen, "\0", 1);
50803 + error = prepend_path(path, root, &res, &buflen);
50804 + if (error)
50805 + return ERR_PTR(error);
50806 +
50807 + return res;
50808 +}
50809 +
50810 +static char *
50811 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50812 +{
50813 + char *retval;
50814 +
50815 + retval = __our_d_path(path, root, buf, buflen);
50816 + if (unlikely(IS_ERR(retval)))
50817 + retval = strcpy(buf, "<path too long>");
50818 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50819 + retval[1] = '\0';
50820 +
50821 + return retval;
50822 +}
50823 +
50824 +static char *
50825 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50826 + char *buf, int buflen)
50827 +{
50828 + struct path path;
50829 + char *res;
50830 +
50831 + path.dentry = (struct dentry *)dentry;
50832 + path.mnt = (struct vfsmount *)vfsmnt;
50833 +
50834 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50835 + by the RBAC system */
50836 + res = gen_full_path(&path, &real_root, buf, buflen);
50837 +
50838 + return res;
50839 +}
50840 +
50841 +static char *
50842 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50843 + char *buf, int buflen)
50844 +{
50845 + char *res;
50846 + struct path path;
50847 + struct path root;
50848 + struct task_struct *reaper = &init_task;
50849 +
50850 + path.dentry = (struct dentry *)dentry;
50851 + path.mnt = (struct vfsmount *)vfsmnt;
50852 +
50853 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50854 + get_fs_root(reaper->fs, &root);
50855 +
50856 + write_seqlock(&rename_lock);
50857 + br_read_lock(vfsmount_lock);
50858 + res = gen_full_path(&path, &root, buf, buflen);
50859 + br_read_unlock(vfsmount_lock);
50860 + write_sequnlock(&rename_lock);
50861 +
50862 + path_put(&root);
50863 + return res;
50864 +}
50865 +
50866 +static char *
50867 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50868 +{
50869 + char *ret;
50870 + write_seqlock(&rename_lock);
50871 + br_read_lock(vfsmount_lock);
50872 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50873 + PAGE_SIZE);
50874 + br_read_unlock(vfsmount_lock);
50875 + write_sequnlock(&rename_lock);
50876 + return ret;
50877 +}
50878 +
50879 +static char *
50880 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50881 +{
50882 + char *ret;
50883 + char *buf;
50884 + int buflen;
50885 +
50886 + write_seqlock(&rename_lock);
50887 + br_read_lock(vfsmount_lock);
50888 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50889 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50890 + buflen = (int)(ret - buf);
50891 + if (buflen >= 5)
50892 + prepend(&ret, &buflen, "/proc", 5);
50893 + else
50894 + ret = strcpy(buf, "<path too long>");
50895 + br_read_unlock(vfsmount_lock);
50896 + write_sequnlock(&rename_lock);
50897 + return ret;
50898 +}
50899 +
50900 +char *
50901 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50902 +{
50903 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50904 + PAGE_SIZE);
50905 +}
50906 +
50907 +char *
50908 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50909 +{
50910 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50911 + PAGE_SIZE);
50912 +}
50913 +
50914 +char *
50915 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50916 +{
50917 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50918 + PAGE_SIZE);
50919 +}
50920 +
50921 +char *
50922 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50923 +{
50924 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50925 + PAGE_SIZE);
50926 +}
50927 +
50928 +char *
50929 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50930 +{
50931 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50932 + PAGE_SIZE);
50933 +}
50934 +
50935 +__inline__ __u32
50936 +to_gr_audit(const __u32 reqmode)
50937 +{
50938 + /* masks off auditable permission flags, then shifts them to create
50939 + auditing flags, and adds the special case of append auditing if
50940 + we're requesting write */
50941 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50942 +}
50943 +
50944 +struct acl_subject_label *
50945 +lookup_subject_map(const struct acl_subject_label *userp)
50946 +{
50947 + unsigned int index = shash(userp, subj_map_set.s_size);
50948 + struct subject_map *match;
50949 +
50950 + match = subj_map_set.s_hash[index];
50951 +
50952 + while (match && match->user != userp)
50953 + match = match->next;
50954 +
50955 + if (match != NULL)
50956 + return match->kernel;
50957 + else
50958 + return NULL;
50959 +}
50960 +
50961 +static void
50962 +insert_subj_map_entry(struct subject_map *subjmap)
50963 +{
50964 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50965 + struct subject_map **curr;
50966 +
50967 + subjmap->prev = NULL;
50968 +
50969 + curr = &subj_map_set.s_hash[index];
50970 + if (*curr != NULL)
50971 + (*curr)->prev = subjmap;
50972 +
50973 + subjmap->next = *curr;
50974 + *curr = subjmap;
50975 +
50976 + return;
50977 +}
50978 +
50979 +static struct acl_role_label *
50980 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50981 + const gid_t gid)
50982 +{
50983 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50984 + struct acl_role_label *match;
50985 + struct role_allowed_ip *ipp;
50986 + unsigned int x;
50987 + u32 curr_ip = task->signal->curr_ip;
50988 +
50989 + task->signal->saved_ip = curr_ip;
50990 +
50991 + match = acl_role_set.r_hash[index];
50992 +
50993 + while (match) {
50994 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50995 + for (x = 0; x < match->domain_child_num; x++) {
50996 + if (match->domain_children[x] == uid)
50997 + goto found;
50998 + }
50999 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51000 + break;
51001 + match = match->next;
51002 + }
51003 +found:
51004 + if (match == NULL) {
51005 + try_group:
51006 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51007 + match = acl_role_set.r_hash[index];
51008 +
51009 + while (match) {
51010 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51011 + for (x = 0; x < match->domain_child_num; x++) {
51012 + if (match->domain_children[x] == gid)
51013 + goto found2;
51014 + }
51015 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51016 + break;
51017 + match = match->next;
51018 + }
51019 +found2:
51020 + if (match == NULL)
51021 + match = default_role;
51022 + if (match->allowed_ips == NULL)
51023 + return match;
51024 + else {
51025 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51026 + if (likely
51027 + ((ntohl(curr_ip) & ipp->netmask) ==
51028 + (ntohl(ipp->addr) & ipp->netmask)))
51029 + return match;
51030 + }
51031 + match = default_role;
51032 + }
51033 + } else if (match->allowed_ips == NULL) {
51034 + return match;
51035 + } else {
51036 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51037 + if (likely
51038 + ((ntohl(curr_ip) & ipp->netmask) ==
51039 + (ntohl(ipp->addr) & ipp->netmask)))
51040 + return match;
51041 + }
51042 + goto try_group;
51043 + }
51044 +
51045 + return match;
51046 +}
51047 +
51048 +struct acl_subject_label *
51049 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51050 + const struct acl_role_label *role)
51051 +{
51052 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51053 + struct acl_subject_label *match;
51054 +
51055 + match = role->subj_hash[index];
51056 +
51057 + while (match && (match->inode != ino || match->device != dev ||
51058 + (match->mode & GR_DELETED))) {
51059 + match = match->next;
51060 + }
51061 +
51062 + if (match && !(match->mode & GR_DELETED))
51063 + return match;
51064 + else
51065 + return NULL;
51066 +}
51067 +
51068 +struct acl_subject_label *
51069 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51070 + const struct acl_role_label *role)
51071 +{
51072 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51073 + struct acl_subject_label *match;
51074 +
51075 + match = role->subj_hash[index];
51076 +
51077 + while (match && (match->inode != ino || match->device != dev ||
51078 + !(match->mode & GR_DELETED))) {
51079 + match = match->next;
51080 + }
51081 +
51082 + if (match && (match->mode & GR_DELETED))
51083 + return match;
51084 + else
51085 + return NULL;
51086 +}
51087 +
51088 +static struct acl_object_label *
51089 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51090 + const struct acl_subject_label *subj)
51091 +{
51092 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51093 + struct acl_object_label *match;
51094 +
51095 + match = subj->obj_hash[index];
51096 +
51097 + while (match && (match->inode != ino || match->device != dev ||
51098 + (match->mode & GR_DELETED))) {
51099 + match = match->next;
51100 + }
51101 +
51102 + if (match && !(match->mode & GR_DELETED))
51103 + return match;
51104 + else
51105 + return NULL;
51106 +}
51107 +
51108 +static struct acl_object_label *
51109 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51110 + const struct acl_subject_label *subj)
51111 +{
51112 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51113 + struct acl_object_label *match;
51114 +
51115 + match = subj->obj_hash[index];
51116 +
51117 + while (match && (match->inode != ino || match->device != dev ||
51118 + !(match->mode & GR_DELETED))) {
51119 + match = match->next;
51120 + }
51121 +
51122 + if (match && (match->mode & GR_DELETED))
51123 + return match;
51124 +
51125 + match = subj->obj_hash[index];
51126 +
51127 + while (match && (match->inode != ino || match->device != dev ||
51128 + (match->mode & GR_DELETED))) {
51129 + match = match->next;
51130 + }
51131 +
51132 + if (match && !(match->mode & GR_DELETED))
51133 + return match;
51134 + else
51135 + return NULL;
51136 +}
51137 +
51138 +static struct name_entry *
51139 +lookup_name_entry(const char *name)
51140 +{
51141 + unsigned int len = strlen(name);
51142 + unsigned int key = full_name_hash(name, len);
51143 + unsigned int index = key % name_set.n_size;
51144 + struct name_entry *match;
51145 +
51146 + match = name_set.n_hash[index];
51147 +
51148 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51149 + match = match->next;
51150 +
51151 + return match;
51152 +}
51153 +
51154 +static struct name_entry *
51155 +lookup_name_entry_create(const char *name)
51156 +{
51157 + unsigned int len = strlen(name);
51158 + unsigned int key = full_name_hash(name, len);
51159 + unsigned int index = key % name_set.n_size;
51160 + struct name_entry *match;
51161 +
51162 + match = name_set.n_hash[index];
51163 +
51164 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51165 + !match->deleted))
51166 + match = match->next;
51167 +
51168 + if (match && match->deleted)
51169 + return match;
51170 +
51171 + match = name_set.n_hash[index];
51172 +
51173 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51174 + match->deleted))
51175 + match = match->next;
51176 +
51177 + if (match && !match->deleted)
51178 + return match;
51179 + else
51180 + return NULL;
51181 +}
51182 +
51183 +static struct inodev_entry *
51184 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51185 +{
51186 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51187 + struct inodev_entry *match;
51188 +
51189 + match = inodev_set.i_hash[index];
51190 +
51191 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51192 + match = match->next;
51193 +
51194 + return match;
51195 +}
51196 +
51197 +static void
51198 +insert_inodev_entry(struct inodev_entry *entry)
51199 +{
51200 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51201 + inodev_set.i_size);
51202 + struct inodev_entry **curr;
51203 +
51204 + entry->prev = NULL;
51205 +
51206 + curr = &inodev_set.i_hash[index];
51207 + if (*curr != NULL)
51208 + (*curr)->prev = entry;
51209 +
51210 + entry->next = *curr;
51211 + *curr = entry;
51212 +
51213 + return;
51214 +}
51215 +
51216 +static void
51217 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51218 +{
51219 + unsigned int index =
51220 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51221 + struct acl_role_label **curr;
51222 + struct acl_role_label *tmp;
51223 +
51224 + curr = &acl_role_set.r_hash[index];
51225 +
51226 + /* if role was already inserted due to domains and already has
51227 + a role in the same bucket as it attached, then we need to
51228 + combine these two buckets
51229 + */
51230 + if (role->next) {
51231 + tmp = role->next;
51232 + while (tmp->next)
51233 + tmp = tmp->next;
51234 + tmp->next = *curr;
51235 + } else
51236 + role->next = *curr;
51237 + *curr = role;
51238 +
51239 + return;
51240 +}
51241 +
51242 +static void
51243 +insert_acl_role_label(struct acl_role_label *role)
51244 +{
51245 + int i;
51246 +
51247 + if (role_list == NULL) {
51248 + role_list = role;
51249 + role->prev = NULL;
51250 + } else {
51251 + role->prev = role_list;
51252 + role_list = role;
51253 + }
51254 +
51255 + /* used for hash chains */
51256 + role->next = NULL;
51257 +
51258 + if (role->roletype & GR_ROLE_DOMAIN) {
51259 + for (i = 0; i < role->domain_child_num; i++)
51260 + __insert_acl_role_label(role, role->domain_children[i]);
51261 + } else
51262 + __insert_acl_role_label(role, role->uidgid);
51263 +}
51264 +
51265 +static int
51266 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51267 +{
51268 + struct name_entry **curr, *nentry;
51269 + struct inodev_entry *ientry;
51270 + unsigned int len = strlen(name);
51271 + unsigned int key = full_name_hash(name, len);
51272 + unsigned int index = key % name_set.n_size;
51273 +
51274 + curr = &name_set.n_hash[index];
51275 +
51276 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51277 + curr = &((*curr)->next);
51278 +
51279 + if (*curr != NULL)
51280 + return 1;
51281 +
51282 + nentry = acl_alloc(sizeof (struct name_entry));
51283 + if (nentry == NULL)
51284 + return 0;
51285 + ientry = acl_alloc(sizeof (struct inodev_entry));
51286 + if (ientry == NULL)
51287 + return 0;
51288 + ientry->nentry = nentry;
51289 +
51290 + nentry->key = key;
51291 + nentry->name = name;
51292 + nentry->inode = inode;
51293 + nentry->device = device;
51294 + nentry->len = len;
51295 + nentry->deleted = deleted;
51296 +
51297 + nentry->prev = NULL;
51298 + curr = &name_set.n_hash[index];
51299 + if (*curr != NULL)
51300 + (*curr)->prev = nentry;
51301 + nentry->next = *curr;
51302 + *curr = nentry;
51303 +
51304 + /* insert us into the table searchable by inode/dev */
51305 + insert_inodev_entry(ientry);
51306 +
51307 + return 1;
51308 +}
51309 +
51310 +static void
51311 +insert_acl_obj_label(struct acl_object_label *obj,
51312 + struct acl_subject_label *subj)
51313 +{
51314 + unsigned int index =
51315 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51316 + struct acl_object_label **curr;
51317 +
51318 +
51319 + obj->prev = NULL;
51320 +
51321 + curr = &subj->obj_hash[index];
51322 + if (*curr != NULL)
51323 + (*curr)->prev = obj;
51324 +
51325 + obj->next = *curr;
51326 + *curr = obj;
51327 +
51328 + return;
51329 +}
51330 +
51331 +static void
51332 +insert_acl_subj_label(struct acl_subject_label *obj,
51333 + struct acl_role_label *role)
51334 +{
51335 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51336 + struct acl_subject_label **curr;
51337 +
51338 + obj->prev = NULL;
51339 +
51340 + curr = &role->subj_hash[index];
51341 + if (*curr != NULL)
51342 + (*curr)->prev = obj;
51343 +
51344 + obj->next = *curr;
51345 + *curr = obj;
51346 +
51347 + return;
51348 +}
51349 +
51350 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51351 +
51352 +static void *
51353 +create_table(__u32 * len, int elementsize)
51354 +{
51355 + unsigned int table_sizes[] = {
51356 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51357 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51358 + 4194301, 8388593, 16777213, 33554393, 67108859
51359 + };
51360 + void *newtable = NULL;
51361 + unsigned int pwr = 0;
51362 +
51363 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51364 + table_sizes[pwr] <= *len)
51365 + pwr++;
51366 +
51367 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51368 + return newtable;
51369 +
51370 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51371 + newtable =
51372 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51373 + else
51374 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51375 +
51376 + *len = table_sizes[pwr];
51377 +
51378 + return newtable;
51379 +}
51380 +
51381 +static int
51382 +init_variables(const struct gr_arg *arg)
51383 +{
51384 + struct task_struct *reaper = &init_task;
51385 + unsigned int stacksize;
51386 +
51387 + subj_map_set.s_size = arg->role_db.num_subjects;
51388 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51389 + name_set.n_size = arg->role_db.num_objects;
51390 + inodev_set.i_size = arg->role_db.num_objects;
51391 +
51392 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51393 + !name_set.n_size || !inodev_set.i_size)
51394 + return 1;
51395 +
51396 + if (!gr_init_uidset())
51397 + return 1;
51398 +
51399 + /* set up the stack that holds allocation info */
51400 +
51401 + stacksize = arg->role_db.num_pointers + 5;
51402 +
51403 + if (!acl_alloc_stack_init(stacksize))
51404 + return 1;
51405 +
51406 + /* grab reference for the real root dentry and vfsmount */
51407 + get_fs_root(reaper->fs, &real_root);
51408 +
51409 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51410 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51411 +#endif
51412 +
51413 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51414 + if (fakefs_obj_rw == NULL)
51415 + return 1;
51416 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51417 +
51418 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51419 + if (fakefs_obj_rwx == NULL)
51420 + return 1;
51421 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51422 +
51423 + subj_map_set.s_hash =
51424 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51425 + acl_role_set.r_hash =
51426 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51427 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51428 + inodev_set.i_hash =
51429 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51430 +
51431 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51432 + !name_set.n_hash || !inodev_set.i_hash)
51433 + return 1;
51434 +
51435 + memset(subj_map_set.s_hash, 0,
51436 + sizeof(struct subject_map *) * subj_map_set.s_size);
51437 + memset(acl_role_set.r_hash, 0,
51438 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51439 + memset(name_set.n_hash, 0,
51440 + sizeof (struct name_entry *) * name_set.n_size);
51441 + memset(inodev_set.i_hash, 0,
51442 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51443 +
51444 + return 0;
51445 +}
51446 +
51447 +/* free information not needed after startup
51448 + currently contains user->kernel pointer mappings for subjects
51449 +*/
51450 +
51451 +static void
51452 +free_init_variables(void)
51453 +{
51454 + __u32 i;
51455 +
51456 + if (subj_map_set.s_hash) {
51457 + for (i = 0; i < subj_map_set.s_size; i++) {
51458 + if (subj_map_set.s_hash[i]) {
51459 + kfree(subj_map_set.s_hash[i]);
51460 + subj_map_set.s_hash[i] = NULL;
51461 + }
51462 + }
51463 +
51464 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51465 + PAGE_SIZE)
51466 + kfree(subj_map_set.s_hash);
51467 + else
51468 + vfree(subj_map_set.s_hash);
51469 + }
51470 +
51471 + return;
51472 +}
51473 +
51474 +static void
51475 +free_variables(void)
51476 +{
51477 + struct acl_subject_label *s;
51478 + struct acl_role_label *r;
51479 + struct task_struct *task, *task2;
51480 + unsigned int x;
51481 +
51482 + gr_clear_learn_entries();
51483 +
51484 + read_lock(&tasklist_lock);
51485 + do_each_thread(task2, task) {
51486 + task->acl_sp_role = 0;
51487 + task->acl_role_id = 0;
51488 + task->acl = NULL;
51489 + task->role = NULL;
51490 + } while_each_thread(task2, task);
51491 + read_unlock(&tasklist_lock);
51492 +
51493 + /* release the reference to the real root dentry and vfsmount */
51494 + path_put(&real_root);
51495 +
51496 + /* free all object hash tables */
51497 +
51498 + FOR_EACH_ROLE_START(r)
51499 + if (r->subj_hash == NULL)
51500 + goto next_role;
51501 + FOR_EACH_SUBJECT_START(r, s, x)
51502 + if (s->obj_hash == NULL)
51503 + break;
51504 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51505 + kfree(s->obj_hash);
51506 + else
51507 + vfree(s->obj_hash);
51508 + FOR_EACH_SUBJECT_END(s, x)
51509 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51510 + if (s->obj_hash == NULL)
51511 + break;
51512 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51513 + kfree(s->obj_hash);
51514 + else
51515 + vfree(s->obj_hash);
51516 + FOR_EACH_NESTED_SUBJECT_END(s)
51517 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51518 + kfree(r->subj_hash);
51519 + else
51520 + vfree(r->subj_hash);
51521 + r->subj_hash = NULL;
51522 +next_role:
51523 + FOR_EACH_ROLE_END(r)
51524 +
51525 + acl_free_all();
51526 +
51527 + if (acl_role_set.r_hash) {
51528 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51529 + PAGE_SIZE)
51530 + kfree(acl_role_set.r_hash);
51531 + else
51532 + vfree(acl_role_set.r_hash);
51533 + }
51534 + if (name_set.n_hash) {
51535 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51536 + PAGE_SIZE)
51537 + kfree(name_set.n_hash);
51538 + else
51539 + vfree(name_set.n_hash);
51540 + }
51541 +
51542 + if (inodev_set.i_hash) {
51543 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51544 + PAGE_SIZE)
51545 + kfree(inodev_set.i_hash);
51546 + else
51547 + vfree(inodev_set.i_hash);
51548 + }
51549 +
51550 + gr_free_uidset();
51551 +
51552 + memset(&name_set, 0, sizeof (struct name_db));
51553 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51554 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51555 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51556 +
51557 + default_role = NULL;
51558 + role_list = NULL;
51559 +
51560 + return;
51561 +}
51562 +
51563 +static __u32
51564 +count_user_objs(struct acl_object_label *userp)
51565 +{
51566 + struct acl_object_label o_tmp;
51567 + __u32 num = 0;
51568 +
51569 + while (userp) {
51570 + if (copy_from_user(&o_tmp, userp,
51571 + sizeof (struct acl_object_label)))
51572 + break;
51573 +
51574 + userp = o_tmp.prev;
51575 + num++;
51576 + }
51577 +
51578 + return num;
51579 +}
51580 +
51581 +static struct acl_subject_label *
51582 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51583 +
51584 +static int
51585 +copy_user_glob(struct acl_object_label *obj)
51586 +{
51587 + struct acl_object_label *g_tmp, **guser;
51588 + unsigned int len;
51589 + char *tmp;
51590 +
51591 + if (obj->globbed == NULL)
51592 + return 0;
51593 +
51594 + guser = &obj->globbed;
51595 + while (*guser) {
51596 + g_tmp = (struct acl_object_label *)
51597 + acl_alloc(sizeof (struct acl_object_label));
51598 + if (g_tmp == NULL)
51599 + return -ENOMEM;
51600 +
51601 + if (copy_from_user(g_tmp, *guser,
51602 + sizeof (struct acl_object_label)))
51603 + return -EFAULT;
51604 +
51605 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51606 +
51607 + if (!len || len >= PATH_MAX)
51608 + return -EINVAL;
51609 +
51610 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51611 + return -ENOMEM;
51612 +
51613 + if (copy_from_user(tmp, g_tmp->filename, len))
51614 + return -EFAULT;
51615 + tmp[len-1] = '\0';
51616 + g_tmp->filename = tmp;
51617 +
51618 + *guser = g_tmp;
51619 + guser = &(g_tmp->next);
51620 + }
51621 +
51622 + return 0;
51623 +}
51624 +
51625 +static int
51626 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51627 + struct acl_role_label *role)
51628 +{
51629 + struct acl_object_label *o_tmp;
51630 + unsigned int len;
51631 + int ret;
51632 + char *tmp;
51633 +
51634 + while (userp) {
51635 + if ((o_tmp = (struct acl_object_label *)
51636 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51637 + return -ENOMEM;
51638 +
51639 + if (copy_from_user(o_tmp, userp,
51640 + sizeof (struct acl_object_label)))
51641 + return -EFAULT;
51642 +
51643 + userp = o_tmp->prev;
51644 +
51645 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51646 +
51647 + if (!len || len >= PATH_MAX)
51648 + return -EINVAL;
51649 +
51650 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51651 + return -ENOMEM;
51652 +
51653 + if (copy_from_user(tmp, o_tmp->filename, len))
51654 + return -EFAULT;
51655 + tmp[len-1] = '\0';
51656 + o_tmp->filename = tmp;
51657 +
51658 + insert_acl_obj_label(o_tmp, subj);
51659 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51660 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51661 + return -ENOMEM;
51662 +
51663 + ret = copy_user_glob(o_tmp);
51664 + if (ret)
51665 + return ret;
51666 +
51667 + if (o_tmp->nested) {
51668 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51669 + if (IS_ERR(o_tmp->nested))
51670 + return PTR_ERR(o_tmp->nested);
51671 +
51672 + /* insert into nested subject list */
51673 + o_tmp->nested->next = role->hash->first;
51674 + role->hash->first = o_tmp->nested;
51675 + }
51676 + }
51677 +
51678 + return 0;
51679 +}
51680 +
51681 +static __u32
51682 +count_user_subjs(struct acl_subject_label *userp)
51683 +{
51684 + struct acl_subject_label s_tmp;
51685 + __u32 num = 0;
51686 +
51687 + while (userp) {
51688 + if (copy_from_user(&s_tmp, userp,
51689 + sizeof (struct acl_subject_label)))
51690 + break;
51691 +
51692 + userp = s_tmp.prev;
51693 + /* do not count nested subjects against this count, since
51694 + they are not included in the hash table, but are
51695 + attached to objects. We have already counted
51696 + the subjects in userspace for the allocation
51697 + stack
51698 + */
51699 + if (!(s_tmp.mode & GR_NESTED))
51700 + num++;
51701 + }
51702 +
51703 + return num;
51704 +}
51705 +
51706 +static int
51707 +copy_user_allowedips(struct acl_role_label *rolep)
51708 +{
51709 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51710 +
51711 + ruserip = rolep->allowed_ips;
51712 +
51713 + while (ruserip) {
51714 + rlast = rtmp;
51715 +
51716 + if ((rtmp = (struct role_allowed_ip *)
51717 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51718 + return -ENOMEM;
51719 +
51720 + if (copy_from_user(rtmp, ruserip,
51721 + sizeof (struct role_allowed_ip)))
51722 + return -EFAULT;
51723 +
51724 + ruserip = rtmp->prev;
51725 +
51726 + if (!rlast) {
51727 + rtmp->prev = NULL;
51728 + rolep->allowed_ips = rtmp;
51729 + } else {
51730 + rlast->next = rtmp;
51731 + rtmp->prev = rlast;
51732 + }
51733 +
51734 + if (!ruserip)
51735 + rtmp->next = NULL;
51736 + }
51737 +
51738 + return 0;
51739 +}
51740 +
51741 +static int
51742 +copy_user_transitions(struct acl_role_label *rolep)
51743 +{
51744 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51745 +
51746 + unsigned int len;
51747 + char *tmp;
51748 +
51749 + rusertp = rolep->transitions;
51750 +
51751 + while (rusertp) {
51752 + rlast = rtmp;
51753 +
51754 + if ((rtmp = (struct role_transition *)
51755 + acl_alloc(sizeof (struct role_transition))) == NULL)
51756 + return -ENOMEM;
51757 +
51758 + if (copy_from_user(rtmp, rusertp,
51759 + sizeof (struct role_transition)))
51760 + return -EFAULT;
51761 +
51762 + rusertp = rtmp->prev;
51763 +
51764 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51765 +
51766 + if (!len || len >= GR_SPROLE_LEN)
51767 + return -EINVAL;
51768 +
51769 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51770 + return -ENOMEM;
51771 +
51772 + if (copy_from_user(tmp, rtmp->rolename, len))
51773 + return -EFAULT;
51774 + tmp[len-1] = '\0';
51775 + rtmp->rolename = tmp;
51776 +
51777 + if (!rlast) {
51778 + rtmp->prev = NULL;
51779 + rolep->transitions = rtmp;
51780 + } else {
51781 + rlast->next = rtmp;
51782 + rtmp->prev = rlast;
51783 + }
51784 +
51785 + if (!rusertp)
51786 + rtmp->next = NULL;
51787 + }
51788 +
51789 + return 0;
51790 +}
51791 +
51792 +static struct acl_subject_label *
51793 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51794 +{
51795 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51796 + unsigned int len;
51797 + char *tmp;
51798 + __u32 num_objs;
51799 + struct acl_ip_label **i_tmp, *i_utmp2;
51800 + struct gr_hash_struct ghash;
51801 + struct subject_map *subjmap;
51802 + unsigned int i_num;
51803 + int err;
51804 +
51805 + s_tmp = lookup_subject_map(userp);
51806 +
51807 + /* we've already copied this subject into the kernel, just return
51808 + the reference to it, and don't copy it over again
51809 + */
51810 + if (s_tmp)
51811 + return(s_tmp);
51812 +
51813 + if ((s_tmp = (struct acl_subject_label *)
51814 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51815 + return ERR_PTR(-ENOMEM);
51816 +
51817 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51818 + if (subjmap == NULL)
51819 + return ERR_PTR(-ENOMEM);
51820 +
51821 + subjmap->user = userp;
51822 + subjmap->kernel = s_tmp;
51823 + insert_subj_map_entry(subjmap);
51824 +
51825 + if (copy_from_user(s_tmp, userp,
51826 + sizeof (struct acl_subject_label)))
51827 + return ERR_PTR(-EFAULT);
51828 +
51829 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51830 +
51831 + if (!len || len >= PATH_MAX)
51832 + return ERR_PTR(-EINVAL);
51833 +
51834 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51835 + return ERR_PTR(-ENOMEM);
51836 +
51837 + if (copy_from_user(tmp, s_tmp->filename, len))
51838 + return ERR_PTR(-EFAULT);
51839 + tmp[len-1] = '\0';
51840 + s_tmp->filename = tmp;
51841 +
51842 + if (!strcmp(s_tmp->filename, "/"))
51843 + role->root_label = s_tmp;
51844 +
51845 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51846 + return ERR_PTR(-EFAULT);
51847 +
51848 + /* copy user and group transition tables */
51849 +
51850 + if (s_tmp->user_trans_num) {
51851 + uid_t *uidlist;
51852 +
51853 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51854 + if (uidlist == NULL)
51855 + return ERR_PTR(-ENOMEM);
51856 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51857 + return ERR_PTR(-EFAULT);
51858 +
51859 + s_tmp->user_transitions = uidlist;
51860 + }
51861 +
51862 + if (s_tmp->group_trans_num) {
51863 + gid_t *gidlist;
51864 +
51865 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51866 + if (gidlist == NULL)
51867 + return ERR_PTR(-ENOMEM);
51868 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51869 + return ERR_PTR(-EFAULT);
51870 +
51871 + s_tmp->group_transitions = gidlist;
51872 + }
51873 +
51874 + /* set up object hash table */
51875 + num_objs = count_user_objs(ghash.first);
51876 +
51877 + s_tmp->obj_hash_size = num_objs;
51878 + s_tmp->obj_hash =
51879 + (struct acl_object_label **)
51880 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51881 +
51882 + if (!s_tmp->obj_hash)
51883 + return ERR_PTR(-ENOMEM);
51884 +
51885 + memset(s_tmp->obj_hash, 0,
51886 + s_tmp->obj_hash_size *
51887 + sizeof (struct acl_object_label *));
51888 +
51889 + /* add in objects */
51890 + err = copy_user_objs(ghash.first, s_tmp, role);
51891 +
51892 + if (err)
51893 + return ERR_PTR(err);
51894 +
51895 + /* set pointer for parent subject */
51896 + if (s_tmp->parent_subject) {
51897 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51898 +
51899 + if (IS_ERR(s_tmp2))
51900 + return s_tmp2;
51901 +
51902 + s_tmp->parent_subject = s_tmp2;
51903 + }
51904 +
51905 + /* add in ip acls */
51906 +
51907 + if (!s_tmp->ip_num) {
51908 + s_tmp->ips = NULL;
51909 + goto insert;
51910 + }
51911 +
51912 + i_tmp =
51913 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51914 + sizeof (struct acl_ip_label *));
51915 +
51916 + if (!i_tmp)
51917 + return ERR_PTR(-ENOMEM);
51918 +
51919 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51920 + *(i_tmp + i_num) =
51921 + (struct acl_ip_label *)
51922 + acl_alloc(sizeof (struct acl_ip_label));
51923 + if (!*(i_tmp + i_num))
51924 + return ERR_PTR(-ENOMEM);
51925 +
51926 + if (copy_from_user
51927 + (&i_utmp2, s_tmp->ips + i_num,
51928 + sizeof (struct acl_ip_label *)))
51929 + return ERR_PTR(-EFAULT);
51930 +
51931 + if (copy_from_user
51932 + (*(i_tmp + i_num), i_utmp2,
51933 + sizeof (struct acl_ip_label)))
51934 + return ERR_PTR(-EFAULT);
51935 +
51936 + if ((*(i_tmp + i_num))->iface == NULL)
51937 + continue;
51938 +
51939 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51940 + if (!len || len >= IFNAMSIZ)
51941 + return ERR_PTR(-EINVAL);
51942 + tmp = acl_alloc(len);
51943 + if (tmp == NULL)
51944 + return ERR_PTR(-ENOMEM);
51945 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51946 + return ERR_PTR(-EFAULT);
51947 + (*(i_tmp + i_num))->iface = tmp;
51948 + }
51949 +
51950 + s_tmp->ips = i_tmp;
51951 +
51952 +insert:
51953 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51954 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51955 + return ERR_PTR(-ENOMEM);
51956 +
51957 + return s_tmp;
51958 +}
51959 +
51960 +static int
51961 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51962 +{
51963 + struct acl_subject_label s_pre;
51964 + struct acl_subject_label * ret;
51965 + int err;
51966 +
51967 + while (userp) {
51968 + if (copy_from_user(&s_pre, userp,
51969 + sizeof (struct acl_subject_label)))
51970 + return -EFAULT;
51971 +
51972 + /* do not add nested subjects here, add
51973 + while parsing objects
51974 + */
51975 +
51976 + if (s_pre.mode & GR_NESTED) {
51977 + userp = s_pre.prev;
51978 + continue;
51979 + }
51980 +
51981 + ret = do_copy_user_subj(userp, role);
51982 +
51983 + err = PTR_ERR(ret);
51984 + if (IS_ERR(ret))
51985 + return err;
51986 +
51987 + insert_acl_subj_label(ret, role);
51988 +
51989 + userp = s_pre.prev;
51990 + }
51991 +
51992 + return 0;
51993 +}
51994 +
51995 +static int
51996 +copy_user_acl(struct gr_arg *arg)
51997 +{
51998 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51999 + struct sprole_pw *sptmp;
52000 + struct gr_hash_struct *ghash;
52001 + uid_t *domainlist;
52002 + unsigned int r_num;
52003 + unsigned int len;
52004 + char *tmp;
52005 + int err = 0;
52006 + __u16 i;
52007 + __u32 num_subjs;
52008 +
52009 + /* we need a default and kernel role */
52010 + if (arg->role_db.num_roles < 2)
52011 + return -EINVAL;
52012 +
52013 + /* copy special role authentication info from userspace */
52014 +
52015 + num_sprole_pws = arg->num_sprole_pws;
52016 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52017 +
52018 + if (!acl_special_roles) {
52019 + err = -ENOMEM;
52020 + goto cleanup;
52021 + }
52022 +
52023 + for (i = 0; i < num_sprole_pws; i++) {
52024 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52025 + if (!sptmp) {
52026 + err = -ENOMEM;
52027 + goto cleanup;
52028 + }
52029 + if (copy_from_user(sptmp, arg->sprole_pws + i,
52030 + sizeof (struct sprole_pw))) {
52031 + err = -EFAULT;
52032 + goto cleanup;
52033 + }
52034 +
52035 + len =
52036 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52037 +
52038 + if (!len || len >= GR_SPROLE_LEN) {
52039 + err = -EINVAL;
52040 + goto cleanup;
52041 + }
52042 +
52043 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
52044 + err = -ENOMEM;
52045 + goto cleanup;
52046 + }
52047 +
52048 + if (copy_from_user(tmp, sptmp->rolename, len)) {
52049 + err = -EFAULT;
52050 + goto cleanup;
52051 + }
52052 + tmp[len-1] = '\0';
52053 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52054 + printk(KERN_ALERT "Copying special role %s\n", tmp);
52055 +#endif
52056 + sptmp->rolename = tmp;
52057 + acl_special_roles[i] = sptmp;
52058 + }
52059 +
52060 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52061 +
52062 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52063 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
52064 +
52065 + if (!r_tmp) {
52066 + err = -ENOMEM;
52067 + goto cleanup;
52068 + }
52069 +
52070 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
52071 + sizeof (struct acl_role_label *))) {
52072 + err = -EFAULT;
52073 + goto cleanup;
52074 + }
52075 +
52076 + if (copy_from_user(r_tmp, r_utmp2,
52077 + sizeof (struct acl_role_label))) {
52078 + err = -EFAULT;
52079 + goto cleanup;
52080 + }
52081 +
52082 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52083 +
52084 + if (!len || len >= PATH_MAX) {
52085 + err = -EINVAL;
52086 + goto cleanup;
52087 + }
52088 +
52089 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
52090 + err = -ENOMEM;
52091 + goto cleanup;
52092 + }
52093 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
52094 + err = -EFAULT;
52095 + goto cleanup;
52096 + }
52097 + tmp[len-1] = '\0';
52098 + r_tmp->rolename = tmp;
52099 +
52100 + if (!strcmp(r_tmp->rolename, "default")
52101 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52102 + default_role = r_tmp;
52103 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52104 + kernel_role = r_tmp;
52105 + }
52106 +
52107 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
52108 + err = -ENOMEM;
52109 + goto cleanup;
52110 + }
52111 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
52112 + err = -EFAULT;
52113 + goto cleanup;
52114 + }
52115 +
52116 + r_tmp->hash = ghash;
52117 +
52118 + num_subjs = count_user_subjs(r_tmp->hash->first);
52119 +
52120 + r_tmp->subj_hash_size = num_subjs;
52121 + r_tmp->subj_hash =
52122 + (struct acl_subject_label **)
52123 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52124 +
52125 + if (!r_tmp->subj_hash) {
52126 + err = -ENOMEM;
52127 + goto cleanup;
52128 + }
52129 +
52130 + err = copy_user_allowedips(r_tmp);
52131 + if (err)
52132 + goto cleanup;
52133 +
52134 + /* copy domain info */
52135 + if (r_tmp->domain_children != NULL) {
52136 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52137 + if (domainlist == NULL) {
52138 + err = -ENOMEM;
52139 + goto cleanup;
52140 + }
52141 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
52142 + err = -EFAULT;
52143 + goto cleanup;
52144 + }
52145 + r_tmp->domain_children = domainlist;
52146 + }
52147 +
52148 + err = copy_user_transitions(r_tmp);
52149 + if (err)
52150 + goto cleanup;
52151 +
52152 + memset(r_tmp->subj_hash, 0,
52153 + r_tmp->subj_hash_size *
52154 + sizeof (struct acl_subject_label *));
52155 +
52156 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52157 +
52158 + if (err)
52159 + goto cleanup;
52160 +
52161 + /* set nested subject list to null */
52162 + r_tmp->hash->first = NULL;
52163 +
52164 + insert_acl_role_label(r_tmp);
52165 + }
52166 +
52167 + goto return_err;
52168 + cleanup:
52169 + free_variables();
52170 + return_err:
52171 + return err;
52172 +
52173 +}
52174 +
52175 +static int
52176 +gracl_init(struct gr_arg *args)
52177 +{
52178 + int error = 0;
52179 +
52180 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52181 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52182 +
52183 + if (init_variables(args)) {
52184 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52185 + error = -ENOMEM;
52186 + free_variables();
52187 + goto out;
52188 + }
52189 +
52190 + error = copy_user_acl(args);
52191 + free_init_variables();
52192 + if (error) {
52193 + free_variables();
52194 + goto out;
52195 + }
52196 +
52197 + if ((error = gr_set_acls(0))) {
52198 + free_variables();
52199 + goto out;
52200 + }
52201 +
52202 + pax_open_kernel();
52203 + gr_status |= GR_READY;
52204 + pax_close_kernel();
52205 +
52206 + out:
52207 + return error;
52208 +}
52209 +
52210 +/* derived from glibc fnmatch() 0: match, 1: no match*/
52211 +
52212 +static int
52213 +glob_match(const char *p, const char *n)
52214 +{
52215 + char c;
52216 +
52217 + while ((c = *p++) != '\0') {
52218 + switch (c) {
52219 + case '?':
52220 + if (*n == '\0')
52221 + return 1;
52222 + else if (*n == '/')
52223 + return 1;
52224 + break;
52225 + case '\\':
52226 + if (*n != c)
52227 + return 1;
52228 + break;
52229 + case '*':
52230 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52231 + if (*n == '/')
52232 + return 1;
52233 + else if (c == '?') {
52234 + if (*n == '\0')
52235 + return 1;
52236 + else
52237 + ++n;
52238 + }
52239 + }
52240 + if (c == '\0') {
52241 + return 0;
52242 + } else {
52243 + const char *endp;
52244 +
52245 + if ((endp = strchr(n, '/')) == NULL)
52246 + endp = n + strlen(n);
52247 +
52248 + if (c == '[') {
52249 + for (--p; n < endp; ++n)
52250 + if (!glob_match(p, n))
52251 + return 0;
52252 + } else if (c == '/') {
52253 + while (*n != '\0' && *n != '/')
52254 + ++n;
52255 + if (*n == '/' && !glob_match(p, n + 1))
52256 + return 0;
52257 + } else {
52258 + for (--p; n < endp; ++n)
52259 + if (*n == c && !glob_match(p, n))
52260 + return 0;
52261 + }
52262 +
52263 + return 1;
52264 + }
52265 + case '[':
52266 + {
52267 + int not;
52268 + char cold;
52269 +
52270 + if (*n == '\0' || *n == '/')
52271 + return 1;
52272 +
52273 + not = (*p == '!' || *p == '^');
52274 + if (not)
52275 + ++p;
52276 +
52277 + c = *p++;
52278 + for (;;) {
52279 + unsigned char fn = (unsigned char)*n;
52280 +
52281 + if (c == '\0')
52282 + return 1;
52283 + else {
52284 + if (c == fn)
52285 + goto matched;
52286 + cold = c;
52287 + c = *p++;
52288 +
52289 + if (c == '-' && *p != ']') {
52290 + unsigned char cend = *p++;
52291 +
52292 + if (cend == '\0')
52293 + return 1;
52294 +
52295 + if (cold <= fn && fn <= cend)
52296 + goto matched;
52297 +
52298 + c = *p++;
52299 + }
52300 + }
52301 +
52302 + if (c == ']')
52303 + break;
52304 + }
52305 + if (!not)
52306 + return 1;
52307 + break;
52308 + matched:
52309 + while (c != ']') {
52310 + if (c == '\0')
52311 + return 1;
52312 +
52313 + c = *p++;
52314 + }
52315 + if (not)
52316 + return 1;
52317 + }
52318 + break;
52319 + default:
52320 + if (c != *n)
52321 + return 1;
52322 + }
52323 +
52324 + ++n;
52325 + }
52326 +
52327 + if (*n == '\0')
52328 + return 0;
52329 +
52330 + if (*n == '/')
52331 + return 0;
52332 +
52333 + return 1;
52334 +}
52335 +
52336 +static struct acl_object_label *
52337 +chk_glob_label(struct acl_object_label *globbed,
52338 + struct dentry *dentry, struct vfsmount *mnt, char **path)
52339 +{
52340 + struct acl_object_label *tmp;
52341 +
52342 + if (*path == NULL)
52343 + *path = gr_to_filename_nolock(dentry, mnt);
52344 +
52345 + tmp = globbed;
52346 +
52347 + while (tmp) {
52348 + if (!glob_match(tmp->filename, *path))
52349 + return tmp;
52350 + tmp = tmp->next;
52351 + }
52352 +
52353 + return NULL;
52354 +}
52355 +
52356 +static struct acl_object_label *
52357 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52358 + const ino_t curr_ino, const dev_t curr_dev,
52359 + const struct acl_subject_label *subj, char **path, const int checkglob)
52360 +{
52361 + struct acl_subject_label *tmpsubj;
52362 + struct acl_object_label *retval;
52363 + struct acl_object_label *retval2;
52364 +
52365 + tmpsubj = (struct acl_subject_label *) subj;
52366 + read_lock(&gr_inode_lock);
52367 + do {
52368 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52369 + if (retval) {
52370 + if (checkglob && retval->globbed) {
52371 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
52372 + (struct vfsmount *)orig_mnt, path);
52373 + if (retval2)
52374 + retval = retval2;
52375 + }
52376 + break;
52377 + }
52378 + } while ((tmpsubj = tmpsubj->parent_subject));
52379 + read_unlock(&gr_inode_lock);
52380 +
52381 + return retval;
52382 +}
52383 +
52384 +static __inline__ struct acl_object_label *
52385 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52386 + struct dentry *curr_dentry,
52387 + const struct acl_subject_label *subj, char **path, const int checkglob)
52388 +{
52389 + int newglob = checkglob;
52390 + ino_t inode;
52391 + dev_t device;
52392 +
52393 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52394 + as we don't want a / * rule to match instead of the / object
52395 + don't do this for create lookups that call this function though, since they're looking up
52396 + on the parent and thus need globbing checks on all paths
52397 + */
52398 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52399 + newglob = GR_NO_GLOB;
52400 +
52401 + spin_lock(&curr_dentry->d_lock);
52402 + inode = curr_dentry->d_inode->i_ino;
52403 + device = __get_dev(curr_dentry);
52404 + spin_unlock(&curr_dentry->d_lock);
52405 +
52406 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52407 +}
52408 +
52409 +static struct acl_object_label *
52410 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52411 + const struct acl_subject_label *subj, char *path, const int checkglob)
52412 +{
52413 + struct dentry *dentry = (struct dentry *) l_dentry;
52414 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52415 + struct acl_object_label *retval;
52416 + struct dentry *parent;
52417 +
52418 + write_seqlock(&rename_lock);
52419 + br_read_lock(vfsmount_lock);
52420 +
52421 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52422 +#ifdef CONFIG_NET
52423 + mnt == sock_mnt ||
52424 +#endif
52425 +#ifdef CONFIG_HUGETLBFS
52426 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52427 +#endif
52428 + /* ignore Eric Biederman */
52429 + IS_PRIVATE(l_dentry->d_inode))) {
52430 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52431 + goto out;
52432 + }
52433 +
52434 + for (;;) {
52435 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52436 + break;
52437 +
52438 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52439 + if (mnt->mnt_parent == mnt)
52440 + break;
52441 +
52442 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52443 + if (retval != NULL)
52444 + goto out;
52445 +
52446 + dentry = mnt->mnt_mountpoint;
52447 + mnt = mnt->mnt_parent;
52448 + continue;
52449 + }
52450 +
52451 + parent = dentry->d_parent;
52452 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52453 + if (retval != NULL)
52454 + goto out;
52455 +
52456 + dentry = parent;
52457 + }
52458 +
52459 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52460 +
52461 + /* real_root is pinned so we don't have to hold a reference */
52462 + if (retval == NULL)
52463 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52464 +out:
52465 + br_read_unlock(vfsmount_lock);
52466 + write_sequnlock(&rename_lock);
52467 +
52468 + BUG_ON(retval == NULL);
52469 +
52470 + return retval;
52471 +}
52472 +
52473 +static __inline__ struct acl_object_label *
52474 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52475 + const struct acl_subject_label *subj)
52476 +{
52477 + char *path = NULL;
52478 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52479 +}
52480 +
52481 +static __inline__ struct acl_object_label *
52482 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52483 + const struct acl_subject_label *subj)
52484 +{
52485 + char *path = NULL;
52486 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52487 +}
52488 +
52489 +static __inline__ struct acl_object_label *
52490 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52491 + const struct acl_subject_label *subj, char *path)
52492 +{
52493 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52494 +}
52495 +
52496 +static struct acl_subject_label *
52497 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52498 + const struct acl_role_label *role)
52499 +{
52500 + struct dentry *dentry = (struct dentry *) l_dentry;
52501 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52502 + struct acl_subject_label *retval;
52503 + struct dentry *parent;
52504 +
52505 + write_seqlock(&rename_lock);
52506 + br_read_lock(vfsmount_lock);
52507 +
52508 + for (;;) {
52509 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52510 + break;
52511 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52512 + if (mnt->mnt_parent == mnt)
52513 + break;
52514 +
52515 + spin_lock(&dentry->d_lock);
52516 + read_lock(&gr_inode_lock);
52517 + retval =
52518 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52519 + __get_dev(dentry), role);
52520 + read_unlock(&gr_inode_lock);
52521 + spin_unlock(&dentry->d_lock);
52522 + if (retval != NULL)
52523 + goto out;
52524 +
52525 + dentry = mnt->mnt_mountpoint;
52526 + mnt = mnt->mnt_parent;
52527 + continue;
52528 + }
52529 +
52530 + spin_lock(&dentry->d_lock);
52531 + read_lock(&gr_inode_lock);
52532 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52533 + __get_dev(dentry), role);
52534 + read_unlock(&gr_inode_lock);
52535 + parent = dentry->d_parent;
52536 + spin_unlock(&dentry->d_lock);
52537 +
52538 + if (retval != NULL)
52539 + goto out;
52540 +
52541 + dentry = parent;
52542 + }
52543 +
52544 + spin_lock(&dentry->d_lock);
52545 + read_lock(&gr_inode_lock);
52546 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52547 + __get_dev(dentry), role);
52548 + read_unlock(&gr_inode_lock);
52549 + spin_unlock(&dentry->d_lock);
52550 +
52551 + if (unlikely(retval == NULL)) {
52552 + /* real_root is pinned, we don't need to hold a reference */
52553 + read_lock(&gr_inode_lock);
52554 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52555 + __get_dev(real_root.dentry), role);
52556 + read_unlock(&gr_inode_lock);
52557 + }
52558 +out:
52559 + br_read_unlock(vfsmount_lock);
52560 + write_sequnlock(&rename_lock);
52561 +
52562 + BUG_ON(retval == NULL);
52563 +
52564 + return retval;
52565 +}
52566 +
52567 +static void
52568 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52569 +{
52570 + struct task_struct *task = current;
52571 + const struct cred *cred = current_cred();
52572 +
52573 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52574 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52575 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52576 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52577 +
52578 + return;
52579 +}
52580 +
52581 +static void
52582 +gr_log_learn_sysctl(const char *path, const __u32 mode)
52583 +{
52584 + struct task_struct *task = current;
52585 + const struct cred *cred = current_cred();
52586 +
52587 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52588 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52589 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52590 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52591 +
52592 + return;
52593 +}
52594 +
52595 +static void
52596 +gr_log_learn_id_change(const char type, const unsigned int real,
52597 + const unsigned int effective, const unsigned int fs)
52598 +{
52599 + struct task_struct *task = current;
52600 + const struct cred *cred = current_cred();
52601 +
52602 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52603 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52604 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52605 + type, real, effective, fs, &task->signal->saved_ip);
52606 +
52607 + return;
52608 +}
52609 +
52610 +__u32
52611 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52612 + const struct vfsmount * mnt)
52613 +{
52614 + __u32 retval = mode;
52615 + struct acl_subject_label *curracl;
52616 + struct acl_object_label *currobj;
52617 +
52618 + if (unlikely(!(gr_status & GR_READY)))
52619 + return (mode & ~GR_AUDITS);
52620 +
52621 + curracl = current->acl;
52622 +
52623 + currobj = chk_obj_label(dentry, mnt, curracl);
52624 + retval = currobj->mode & mode;
52625 +
52626 + /* if we're opening a specified transfer file for writing
52627 + (e.g. /dev/initctl), then transfer our role to init
52628 + */
52629 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52630 + current->role->roletype & GR_ROLE_PERSIST)) {
52631 + struct task_struct *task = init_pid_ns.child_reaper;
52632 +
52633 + if (task->role != current->role) {
52634 + task->acl_sp_role = 0;
52635 + task->acl_role_id = current->acl_role_id;
52636 + task->role = current->role;
52637 + rcu_read_lock();
52638 + read_lock(&grsec_exec_file_lock);
52639 + gr_apply_subject_to_task(task);
52640 + read_unlock(&grsec_exec_file_lock);
52641 + rcu_read_unlock();
52642 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52643 + }
52644 + }
52645 +
52646 + if (unlikely
52647 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52648 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52649 + __u32 new_mode = mode;
52650 +
52651 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52652 +
52653 + retval = new_mode;
52654 +
52655 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52656 + new_mode |= GR_INHERIT;
52657 +
52658 + if (!(mode & GR_NOLEARN))
52659 + gr_log_learn(dentry, mnt, new_mode);
52660 + }
52661 +
52662 + return retval;
52663 +}
52664 +
52665 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52666 + const struct dentry *parent,
52667 + const struct vfsmount *mnt)
52668 +{
52669 + struct name_entry *match;
52670 + struct acl_object_label *matchpo;
52671 + struct acl_subject_label *curracl;
52672 + char *path;
52673 +
52674 + if (unlikely(!(gr_status & GR_READY)))
52675 + return NULL;
52676 +
52677 + preempt_disable();
52678 + path = gr_to_filename_rbac(new_dentry, mnt);
52679 + match = lookup_name_entry_create(path);
52680 +
52681 + curracl = current->acl;
52682 +
52683 + if (match) {
52684 + read_lock(&gr_inode_lock);
52685 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52686 + read_unlock(&gr_inode_lock);
52687 +
52688 + if (matchpo) {
52689 + preempt_enable();
52690 + return matchpo;
52691 + }
52692 + }
52693 +
52694 + // lookup parent
52695 +
52696 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52697 +
52698 + preempt_enable();
52699 + return matchpo;
52700 +}
52701 +
52702 +__u32
52703 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52704 + const struct vfsmount * mnt, const __u32 mode)
52705 +{
52706 + struct acl_object_label *matchpo;
52707 + __u32 retval;
52708 +
52709 + if (unlikely(!(gr_status & GR_READY)))
52710 + return (mode & ~GR_AUDITS);
52711 +
52712 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52713 +
52714 + retval = matchpo->mode & mode;
52715 +
52716 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52717 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52718 + __u32 new_mode = mode;
52719 +
52720 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52721 +
52722 + gr_log_learn(new_dentry, mnt, new_mode);
52723 + return new_mode;
52724 + }
52725 +
52726 + return retval;
52727 +}
52728 +
52729 +__u32
52730 +gr_check_link(const struct dentry * new_dentry,
52731 + const struct dentry * parent_dentry,
52732 + const struct vfsmount * parent_mnt,
52733 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52734 +{
52735 + struct acl_object_label *obj;
52736 + __u32 oldmode, newmode;
52737 + __u32 needmode;
52738 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52739 + GR_DELETE | GR_INHERIT;
52740 +
52741 + if (unlikely(!(gr_status & GR_READY)))
52742 + return (GR_CREATE | GR_LINK);
52743 +
52744 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52745 + oldmode = obj->mode;
52746 +
52747 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52748 + newmode = obj->mode;
52749 +
52750 + needmode = newmode & checkmodes;
52751 +
52752 + // old name for hardlink must have at least the permissions of the new name
52753 + if ((oldmode & needmode) != needmode)
52754 + goto bad;
52755 +
52756 + // if old name had restrictions/auditing, make sure the new name does as well
52757 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52758 +
52759 + // don't allow hardlinking of suid/sgid files without permission
52760 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52761 + needmode |= GR_SETID;
52762 +
52763 + if ((newmode & needmode) != needmode)
52764 + goto bad;
52765 +
52766 + // enforce minimum permissions
52767 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52768 + return newmode;
52769 +bad:
52770 + needmode = oldmode;
52771 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52772 + needmode |= GR_SETID;
52773 +
52774 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52775 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52776 + return (GR_CREATE | GR_LINK);
52777 + } else if (newmode & GR_SUPPRESS)
52778 + return GR_SUPPRESS;
52779 + else
52780 + return 0;
52781 +}
52782 +
52783 +int
52784 +gr_check_hidden_task(const struct task_struct *task)
52785 +{
52786 + if (unlikely(!(gr_status & GR_READY)))
52787 + return 0;
52788 +
52789 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52790 + return 1;
52791 +
52792 + return 0;
52793 +}
52794 +
52795 +int
52796 +gr_check_protected_task(const struct task_struct *task)
52797 +{
52798 + if (unlikely(!(gr_status & GR_READY) || !task))
52799 + return 0;
52800 +
52801 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52802 + task->acl != current->acl)
52803 + return 1;
52804 +
52805 + return 0;
52806 +}
52807 +
52808 +int
52809 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52810 +{
52811 + struct task_struct *p;
52812 + int ret = 0;
52813 +
52814 + if (unlikely(!(gr_status & GR_READY) || !pid))
52815 + return ret;
52816 +
52817 + read_lock(&tasklist_lock);
52818 + do_each_pid_task(pid, type, p) {
52819 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52820 + p->acl != current->acl) {
52821 + ret = 1;
52822 + goto out;
52823 + }
52824 + } while_each_pid_task(pid, type, p);
52825 +out:
52826 + read_unlock(&tasklist_lock);
52827 +
52828 + return ret;
52829 +}
52830 +
52831 +void
52832 +gr_copy_label(struct task_struct *tsk)
52833 +{
52834 + tsk->signal->used_accept = 0;
52835 + tsk->acl_sp_role = 0;
52836 + tsk->acl_role_id = current->acl_role_id;
52837 + tsk->acl = current->acl;
52838 + tsk->role = current->role;
52839 + tsk->signal->curr_ip = current->signal->curr_ip;
52840 + tsk->signal->saved_ip = current->signal->saved_ip;
52841 + if (current->exec_file)
52842 + get_file(current->exec_file);
52843 + tsk->exec_file = current->exec_file;
52844 + tsk->is_writable = current->is_writable;
52845 + if (unlikely(current->signal->used_accept)) {
52846 + current->signal->curr_ip = 0;
52847 + current->signal->saved_ip = 0;
52848 + }
52849 +
52850 + return;
52851 +}
52852 +
52853 +static void
52854 +gr_set_proc_res(struct task_struct *task)
52855 +{
52856 + struct acl_subject_label *proc;
52857 + unsigned short i;
52858 +
52859 + proc = task->acl;
52860 +
52861 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52862 + return;
52863 +
52864 + for (i = 0; i < RLIM_NLIMITS; i++) {
52865 + if (!(proc->resmask & (1 << i)))
52866 + continue;
52867 +
52868 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52869 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52870 + }
52871 +
52872 + return;
52873 +}
52874 +
52875 +extern int __gr_process_user_ban(struct user_struct *user);
52876 +
52877 +int
52878 +gr_check_user_change(int real, int effective, int fs)
52879 +{
52880 + unsigned int i;
52881 + __u16 num;
52882 + uid_t *uidlist;
52883 + int curuid;
52884 + int realok = 0;
52885 + int effectiveok = 0;
52886 + int fsok = 0;
52887 +
52888 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52889 + struct user_struct *user;
52890 +
52891 + if (real == -1)
52892 + goto skipit;
52893 +
52894 + user = find_user(real);
52895 + if (user == NULL)
52896 + goto skipit;
52897 +
52898 + if (__gr_process_user_ban(user)) {
52899 + /* for find_user */
52900 + free_uid(user);
52901 + return 1;
52902 + }
52903 +
52904 + /* for find_user */
52905 + free_uid(user);
52906 +
52907 +skipit:
52908 +#endif
52909 +
52910 + if (unlikely(!(gr_status & GR_READY)))
52911 + return 0;
52912 +
52913 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52914 + gr_log_learn_id_change('u', real, effective, fs);
52915 +
52916 + num = current->acl->user_trans_num;
52917 + uidlist = current->acl->user_transitions;
52918 +
52919 + if (uidlist == NULL)
52920 + return 0;
52921 +
52922 + if (real == -1)
52923 + realok = 1;
52924 + if (effective == -1)
52925 + effectiveok = 1;
52926 + if (fs == -1)
52927 + fsok = 1;
52928 +
52929 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52930 + for (i = 0; i < num; i++) {
52931 + curuid = (int)uidlist[i];
52932 + if (real == curuid)
52933 + realok = 1;
52934 + if (effective == curuid)
52935 + effectiveok = 1;
52936 + if (fs == curuid)
52937 + fsok = 1;
52938 + }
52939 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52940 + for (i = 0; i < num; i++) {
52941 + curuid = (int)uidlist[i];
52942 + if (real == curuid)
52943 + break;
52944 + if (effective == curuid)
52945 + break;
52946 + if (fs == curuid)
52947 + break;
52948 + }
52949 + /* not in deny list */
52950 + if (i == num) {
52951 + realok = 1;
52952 + effectiveok = 1;
52953 + fsok = 1;
52954 + }
52955 + }
52956 +
52957 + if (realok && effectiveok && fsok)
52958 + return 0;
52959 + else {
52960 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52961 + return 1;
52962 + }
52963 +}
52964 +
52965 +int
52966 +gr_check_group_change(int real, int effective, int fs)
52967 +{
52968 + unsigned int i;
52969 + __u16 num;
52970 + gid_t *gidlist;
52971 + int curgid;
52972 + int realok = 0;
52973 + int effectiveok = 0;
52974 + int fsok = 0;
52975 +
52976 + if (unlikely(!(gr_status & GR_READY)))
52977 + return 0;
52978 +
52979 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52980 + gr_log_learn_id_change('g', real, effective, fs);
52981 +
52982 + num = current->acl->group_trans_num;
52983 + gidlist = current->acl->group_transitions;
52984 +
52985 + if (gidlist == NULL)
52986 + return 0;
52987 +
52988 + if (real == -1)
52989 + realok = 1;
52990 + if (effective == -1)
52991 + effectiveok = 1;
52992 + if (fs == -1)
52993 + fsok = 1;
52994 +
52995 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52996 + for (i = 0; i < num; i++) {
52997 + curgid = (int)gidlist[i];
52998 + if (real == curgid)
52999 + realok = 1;
53000 + if (effective == curgid)
53001 + effectiveok = 1;
53002 + if (fs == curgid)
53003 + fsok = 1;
53004 + }
53005 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
53006 + for (i = 0; i < num; i++) {
53007 + curgid = (int)gidlist[i];
53008 + if (real == curgid)
53009 + break;
53010 + if (effective == curgid)
53011 + break;
53012 + if (fs == curgid)
53013 + break;
53014 + }
53015 + /* not in deny list */
53016 + if (i == num) {
53017 + realok = 1;
53018 + effectiveok = 1;
53019 + fsok = 1;
53020 + }
53021 + }
53022 +
53023 + if (realok && effectiveok && fsok)
53024 + return 0;
53025 + else {
53026 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53027 + return 1;
53028 + }
53029 +}
53030 +
53031 +void
53032 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53033 +{
53034 + struct acl_role_label *role = task->role;
53035 + struct acl_subject_label *subj = NULL;
53036 + struct acl_object_label *obj;
53037 + struct file *filp;
53038 +
53039 + if (unlikely(!(gr_status & GR_READY)))
53040 + return;
53041 +
53042 + filp = task->exec_file;
53043 +
53044 + /* kernel process, we'll give them the kernel role */
53045 + if (unlikely(!filp)) {
53046 + task->role = kernel_role;
53047 + task->acl = kernel_role->root_label;
53048 + return;
53049 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53050 + role = lookup_acl_role_label(task, uid, gid);
53051 +
53052 + /* perform subject lookup in possibly new role
53053 + we can use this result below in the case where role == task->role
53054 + */
53055 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53056 +
53057 + /* if we changed uid/gid, but result in the same role
53058 + and are using inheritance, don't lose the inherited subject
53059 + if current subject is other than what normal lookup
53060 + would result in, we arrived via inheritance, don't
53061 + lose subject
53062 + */
53063 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53064 + (subj == task->acl)))
53065 + task->acl = subj;
53066 +
53067 + task->role = role;
53068 +
53069 + task->is_writable = 0;
53070 +
53071 + /* ignore additional mmap checks for processes that are writable
53072 + by the default ACL */
53073 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53074 + if (unlikely(obj->mode & GR_WRITE))
53075 + task->is_writable = 1;
53076 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53077 + if (unlikely(obj->mode & GR_WRITE))
53078 + task->is_writable = 1;
53079 +
53080 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53081 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53082 +#endif
53083 +
53084 + gr_set_proc_res(task);
53085 +
53086 + return;
53087 +}
53088 +
53089 +int
53090 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53091 + const int unsafe_share)
53092 +{
53093 + struct task_struct *task = current;
53094 + struct acl_subject_label *newacl;
53095 + struct acl_object_label *obj;
53096 + __u32 retmode;
53097 +
53098 + if (unlikely(!(gr_status & GR_READY)))
53099 + return 0;
53100 +
53101 + newacl = chk_subj_label(dentry, mnt, task->role);
53102 +
53103 + task_lock(task);
53104 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
53105 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53106 + !(task->role->roletype & GR_ROLE_GOD) &&
53107 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53108 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
53109 + task_unlock(task);
53110 + if (unsafe_share)
53111 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53112 + else
53113 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53114 + return -EACCES;
53115 + }
53116 + task_unlock(task);
53117 +
53118 + obj = chk_obj_label(dentry, mnt, task->acl);
53119 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53120 +
53121 + if (!(task->acl->mode & GR_INHERITLEARN) &&
53122 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53123 + if (obj->nested)
53124 + task->acl = obj->nested;
53125 + else
53126 + task->acl = newacl;
53127 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53128 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53129 +
53130 + task->is_writable = 0;
53131 +
53132 + /* ignore additional mmap checks for processes that are writable
53133 + by the default ACL */
53134 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53135 + if (unlikely(obj->mode & GR_WRITE))
53136 + task->is_writable = 1;
53137 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53138 + if (unlikely(obj->mode & GR_WRITE))
53139 + task->is_writable = 1;
53140 +
53141 + gr_set_proc_res(task);
53142 +
53143 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53144 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53145 +#endif
53146 + return 0;
53147 +}
53148 +
53149 +/* always called with valid inodev ptr */
53150 +static void
53151 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53152 +{
53153 + struct acl_object_label *matchpo;
53154 + struct acl_subject_label *matchps;
53155 + struct acl_subject_label *subj;
53156 + struct acl_role_label *role;
53157 + unsigned int x;
53158 +
53159 + FOR_EACH_ROLE_START(role)
53160 + FOR_EACH_SUBJECT_START(role, subj, x)
53161 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53162 + matchpo->mode |= GR_DELETED;
53163 + FOR_EACH_SUBJECT_END(subj,x)
53164 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53165 + if (subj->inode == ino && subj->device == dev)
53166 + subj->mode |= GR_DELETED;
53167 + FOR_EACH_NESTED_SUBJECT_END(subj)
53168 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53169 + matchps->mode |= GR_DELETED;
53170 + FOR_EACH_ROLE_END(role)
53171 +
53172 + inodev->nentry->deleted = 1;
53173 +
53174 + return;
53175 +}
53176 +
53177 +void
53178 +gr_handle_delete(const ino_t ino, const dev_t dev)
53179 +{
53180 + struct inodev_entry *inodev;
53181 +
53182 + if (unlikely(!(gr_status & GR_READY)))
53183 + return;
53184 +
53185 + write_lock(&gr_inode_lock);
53186 + inodev = lookup_inodev_entry(ino, dev);
53187 + if (inodev != NULL)
53188 + do_handle_delete(inodev, ino, dev);
53189 + write_unlock(&gr_inode_lock);
53190 +
53191 + return;
53192 +}
53193 +
53194 +static void
53195 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53196 + const ino_t newinode, const dev_t newdevice,
53197 + struct acl_subject_label *subj)
53198 +{
53199 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53200 + struct acl_object_label *match;
53201 +
53202 + match = subj->obj_hash[index];
53203 +
53204 + while (match && (match->inode != oldinode ||
53205 + match->device != olddevice ||
53206 + !(match->mode & GR_DELETED)))
53207 + match = match->next;
53208 +
53209 + if (match && (match->inode == oldinode)
53210 + && (match->device == olddevice)
53211 + && (match->mode & GR_DELETED)) {
53212 + if (match->prev == NULL) {
53213 + subj->obj_hash[index] = match->next;
53214 + if (match->next != NULL)
53215 + match->next->prev = NULL;
53216 + } else {
53217 + match->prev->next = match->next;
53218 + if (match->next != NULL)
53219 + match->next->prev = match->prev;
53220 + }
53221 + match->prev = NULL;
53222 + match->next = NULL;
53223 + match->inode = newinode;
53224 + match->device = newdevice;
53225 + match->mode &= ~GR_DELETED;
53226 +
53227 + insert_acl_obj_label(match, subj);
53228 + }
53229 +
53230 + return;
53231 +}
53232 +
53233 +static void
53234 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53235 + const ino_t newinode, const dev_t newdevice,
53236 + struct acl_role_label *role)
53237 +{
53238 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53239 + struct acl_subject_label *match;
53240 +
53241 + match = role->subj_hash[index];
53242 +
53243 + while (match && (match->inode != oldinode ||
53244 + match->device != olddevice ||
53245 + !(match->mode & GR_DELETED)))
53246 + match = match->next;
53247 +
53248 + if (match && (match->inode == oldinode)
53249 + && (match->device == olddevice)
53250 + && (match->mode & GR_DELETED)) {
53251 + if (match->prev == NULL) {
53252 + role->subj_hash[index] = match->next;
53253 + if (match->next != NULL)
53254 + match->next->prev = NULL;
53255 + } else {
53256 + match->prev->next = match->next;
53257 + if (match->next != NULL)
53258 + match->next->prev = match->prev;
53259 + }
53260 + match->prev = NULL;
53261 + match->next = NULL;
53262 + match->inode = newinode;
53263 + match->device = newdevice;
53264 + match->mode &= ~GR_DELETED;
53265 +
53266 + insert_acl_subj_label(match, role);
53267 + }
53268 +
53269 + return;
53270 +}
53271 +
53272 +static void
53273 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53274 + const ino_t newinode, const dev_t newdevice)
53275 +{
53276 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53277 + struct inodev_entry *match;
53278 +
53279 + match = inodev_set.i_hash[index];
53280 +
53281 + while (match && (match->nentry->inode != oldinode ||
53282 + match->nentry->device != olddevice || !match->nentry->deleted))
53283 + match = match->next;
53284 +
53285 + if (match && (match->nentry->inode == oldinode)
53286 + && (match->nentry->device == olddevice) &&
53287 + match->nentry->deleted) {
53288 + if (match->prev == NULL) {
53289 + inodev_set.i_hash[index] = match->next;
53290 + if (match->next != NULL)
53291 + match->next->prev = NULL;
53292 + } else {
53293 + match->prev->next = match->next;
53294 + if (match->next != NULL)
53295 + match->next->prev = match->prev;
53296 + }
53297 + match->prev = NULL;
53298 + match->next = NULL;
53299 + match->nentry->inode = newinode;
53300 + match->nentry->device = newdevice;
53301 + match->nentry->deleted = 0;
53302 +
53303 + insert_inodev_entry(match);
53304 + }
53305 +
53306 + return;
53307 +}
53308 +
53309 +static void
53310 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53311 +{
53312 + struct acl_subject_label *subj;
53313 + struct acl_role_label *role;
53314 + unsigned int x;
53315 +
53316 + FOR_EACH_ROLE_START(role)
53317 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53318 +
53319 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53320 + if ((subj->inode == ino) && (subj->device == dev)) {
53321 + subj->inode = ino;
53322 + subj->device = dev;
53323 + }
53324 + FOR_EACH_NESTED_SUBJECT_END(subj)
53325 + FOR_EACH_SUBJECT_START(role, subj, x)
53326 + update_acl_obj_label(matchn->inode, matchn->device,
53327 + ino, dev, subj);
53328 + FOR_EACH_SUBJECT_END(subj,x)
53329 + FOR_EACH_ROLE_END(role)
53330 +
53331 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53332 +
53333 + return;
53334 +}
53335 +
53336 +static void
53337 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53338 + const struct vfsmount *mnt)
53339 +{
53340 + ino_t ino = dentry->d_inode->i_ino;
53341 + dev_t dev = __get_dev(dentry);
53342 +
53343 + __do_handle_create(matchn, ino, dev);
53344 +
53345 + return;
53346 +}
53347 +
53348 +void
53349 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53350 +{
53351 + struct name_entry *matchn;
53352 +
53353 + if (unlikely(!(gr_status & GR_READY)))
53354 + return;
53355 +
53356 + preempt_disable();
53357 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53358 +
53359 + if (unlikely((unsigned long)matchn)) {
53360 + write_lock(&gr_inode_lock);
53361 + do_handle_create(matchn, dentry, mnt);
53362 + write_unlock(&gr_inode_lock);
53363 + }
53364 + preempt_enable();
53365 +
53366 + return;
53367 +}
53368 +
53369 +void
53370 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53371 +{
53372 + struct name_entry *matchn;
53373 +
53374 + if (unlikely(!(gr_status & GR_READY)))
53375 + return;
53376 +
53377 + preempt_disable();
53378 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53379 +
53380 + if (unlikely((unsigned long)matchn)) {
53381 + write_lock(&gr_inode_lock);
53382 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53383 + write_unlock(&gr_inode_lock);
53384 + }
53385 + preempt_enable();
53386 +
53387 + return;
53388 +}
53389 +
53390 +void
53391 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53392 + struct dentry *old_dentry,
53393 + struct dentry *new_dentry,
53394 + struct vfsmount *mnt, const __u8 replace)
53395 +{
53396 + struct name_entry *matchn;
53397 + struct inodev_entry *inodev;
53398 + struct inode *inode = new_dentry->d_inode;
53399 + ino_t old_ino = old_dentry->d_inode->i_ino;
53400 + dev_t old_dev = __get_dev(old_dentry);
53401 +
53402 + /* vfs_rename swaps the name and parent link for old_dentry and
53403 + new_dentry
53404 + at this point, old_dentry has the new name, parent link, and inode
53405 + for the renamed file
53406 + if a file is being replaced by a rename, new_dentry has the inode
53407 + and name for the replaced file
53408 + */
53409 +
53410 + if (unlikely(!(gr_status & GR_READY)))
53411 + return;
53412 +
53413 + preempt_disable();
53414 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53415 +
53416 + /* we wouldn't have to check d_inode if it weren't for
53417 + NFS silly-renaming
53418 + */
53419 +
53420 + write_lock(&gr_inode_lock);
53421 + if (unlikely(replace && inode)) {
53422 + ino_t new_ino = inode->i_ino;
53423 + dev_t new_dev = __get_dev(new_dentry);
53424 +
53425 + inodev = lookup_inodev_entry(new_ino, new_dev);
53426 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53427 + do_handle_delete(inodev, new_ino, new_dev);
53428 + }
53429 +
53430 + inodev = lookup_inodev_entry(old_ino, old_dev);
53431 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53432 + do_handle_delete(inodev, old_ino, old_dev);
53433 +
53434 + if (unlikely((unsigned long)matchn))
53435 + do_handle_create(matchn, old_dentry, mnt);
53436 +
53437 + write_unlock(&gr_inode_lock);
53438 + preempt_enable();
53439 +
53440 + return;
53441 +}
53442 +
53443 +static int
53444 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53445 + unsigned char **sum)
53446 +{
53447 + struct acl_role_label *r;
53448 + struct role_allowed_ip *ipp;
53449 + struct role_transition *trans;
53450 + unsigned int i;
53451 + int found = 0;
53452 + u32 curr_ip = current->signal->curr_ip;
53453 +
53454 + current->signal->saved_ip = curr_ip;
53455 +
53456 + /* check transition table */
53457 +
53458 + for (trans = current->role->transitions; trans; trans = trans->next) {
53459 + if (!strcmp(rolename, trans->rolename)) {
53460 + found = 1;
53461 + break;
53462 + }
53463 + }
53464 +
53465 + if (!found)
53466 + return 0;
53467 +
53468 + /* handle special roles that do not require authentication
53469 + and check ip */
53470 +
53471 + FOR_EACH_ROLE_START(r)
53472 + if (!strcmp(rolename, r->rolename) &&
53473 + (r->roletype & GR_ROLE_SPECIAL)) {
53474 + found = 0;
53475 + if (r->allowed_ips != NULL) {
53476 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53477 + if ((ntohl(curr_ip) & ipp->netmask) ==
53478 + (ntohl(ipp->addr) & ipp->netmask))
53479 + found = 1;
53480 + }
53481 + } else
53482 + found = 2;
53483 + if (!found)
53484 + return 0;
53485 +
53486 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53487 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53488 + *salt = NULL;
53489 + *sum = NULL;
53490 + return 1;
53491 + }
53492 + }
53493 + FOR_EACH_ROLE_END(r)
53494 +
53495 + for (i = 0; i < num_sprole_pws; i++) {
53496 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53497 + *salt = acl_special_roles[i]->salt;
53498 + *sum = acl_special_roles[i]->sum;
53499 + return 1;
53500 + }
53501 + }
53502 +
53503 + return 0;
53504 +}
53505 +
53506 +static void
53507 +assign_special_role(char *rolename)
53508 +{
53509 + struct acl_object_label *obj;
53510 + struct acl_role_label *r;
53511 + struct acl_role_label *assigned = NULL;
53512 + struct task_struct *tsk;
53513 + struct file *filp;
53514 +
53515 + FOR_EACH_ROLE_START(r)
53516 + if (!strcmp(rolename, r->rolename) &&
53517 + (r->roletype & GR_ROLE_SPECIAL)) {
53518 + assigned = r;
53519 + break;
53520 + }
53521 + FOR_EACH_ROLE_END(r)
53522 +
53523 + if (!assigned)
53524 + return;
53525 +
53526 + read_lock(&tasklist_lock);
53527 + read_lock(&grsec_exec_file_lock);
53528 +
53529 + tsk = current->real_parent;
53530 + if (tsk == NULL)
53531 + goto out_unlock;
53532 +
53533 + filp = tsk->exec_file;
53534 + if (filp == NULL)
53535 + goto out_unlock;
53536 +
53537 + tsk->is_writable = 0;
53538 +
53539 + tsk->acl_sp_role = 1;
53540 + tsk->acl_role_id = ++acl_sp_role_value;
53541 + tsk->role = assigned;
53542 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53543 +
53544 + /* ignore additional mmap checks for processes that are writable
53545 + by the default ACL */
53546 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53547 + if (unlikely(obj->mode & GR_WRITE))
53548 + tsk->is_writable = 1;
53549 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53550 + if (unlikely(obj->mode & GR_WRITE))
53551 + tsk->is_writable = 1;
53552 +
53553 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53554 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53555 +#endif
53556 +
53557 +out_unlock:
53558 + read_unlock(&grsec_exec_file_lock);
53559 + read_unlock(&tasklist_lock);
53560 + return;
53561 +}
53562 +
53563 +int gr_check_secure_terminal(struct task_struct *task)
53564 +{
53565 + struct task_struct *p, *p2, *p3;
53566 + struct files_struct *files;
53567 + struct fdtable *fdt;
53568 + struct file *our_file = NULL, *file;
53569 + int i;
53570 +
53571 + if (task->signal->tty == NULL)
53572 + return 1;
53573 +
53574 + files = get_files_struct(task);
53575 + if (files != NULL) {
53576 + rcu_read_lock();
53577 + fdt = files_fdtable(files);
53578 + for (i=0; i < fdt->max_fds; i++) {
53579 + file = fcheck_files(files, i);
53580 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53581 + get_file(file);
53582 + our_file = file;
53583 + }
53584 + }
53585 + rcu_read_unlock();
53586 + put_files_struct(files);
53587 + }
53588 +
53589 + if (our_file == NULL)
53590 + return 1;
53591 +
53592 + read_lock(&tasklist_lock);
53593 + do_each_thread(p2, p) {
53594 + files = get_files_struct(p);
53595 + if (files == NULL ||
53596 + (p->signal && p->signal->tty == task->signal->tty)) {
53597 + if (files != NULL)
53598 + put_files_struct(files);
53599 + continue;
53600 + }
53601 + rcu_read_lock();
53602 + fdt = files_fdtable(files);
53603 + for (i=0; i < fdt->max_fds; i++) {
53604 + file = fcheck_files(files, i);
53605 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53606 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53607 + p3 = task;
53608 + while (p3->pid > 0) {
53609 + if (p3 == p)
53610 + break;
53611 + p3 = p3->real_parent;
53612 + }
53613 + if (p3 == p)
53614 + break;
53615 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53616 + gr_handle_alertkill(p);
53617 + rcu_read_unlock();
53618 + put_files_struct(files);
53619 + read_unlock(&tasklist_lock);
53620 + fput(our_file);
53621 + return 0;
53622 + }
53623 + }
53624 + rcu_read_unlock();
53625 + put_files_struct(files);
53626 + } while_each_thread(p2, p);
53627 + read_unlock(&tasklist_lock);
53628 +
53629 + fput(our_file);
53630 + return 1;
53631 +}
53632 +
53633 +ssize_t
53634 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53635 +{
53636 + struct gr_arg_wrapper uwrap;
53637 + unsigned char *sprole_salt = NULL;
53638 + unsigned char *sprole_sum = NULL;
53639 + int error = sizeof (struct gr_arg_wrapper);
53640 + int error2 = 0;
53641 +
53642 + mutex_lock(&gr_dev_mutex);
53643 +
53644 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53645 + error = -EPERM;
53646 + goto out;
53647 + }
53648 +
53649 + if (count != sizeof (struct gr_arg_wrapper)) {
53650 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53651 + error = -EINVAL;
53652 + goto out;
53653 + }
53654 +
53655 +
53656 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53657 + gr_auth_expires = 0;
53658 + gr_auth_attempts = 0;
53659 + }
53660 +
53661 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53662 + error = -EFAULT;
53663 + goto out;
53664 + }
53665 +
53666 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53667 + error = -EINVAL;
53668 + goto out;
53669 + }
53670 +
53671 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53672 + error = -EFAULT;
53673 + goto out;
53674 + }
53675 +
53676 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53677 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53678 + time_after(gr_auth_expires, get_seconds())) {
53679 + error = -EBUSY;
53680 + goto out;
53681 + }
53682 +
53683 + /* if non-root trying to do anything other than use a special role,
53684 + do not attempt authentication, do not count towards authentication
53685 + locking
53686 + */
53687 +
53688 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53689 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53690 + current_uid()) {
53691 + error = -EPERM;
53692 + goto out;
53693 + }
53694 +
53695 + /* ensure pw and special role name are null terminated */
53696 +
53697 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53698 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53699 +
53700 + /* Okay.
53701 + * We have our enough of the argument structure..(we have yet
53702 + * to copy_from_user the tables themselves) . Copy the tables
53703 + * only if we need them, i.e. for loading operations. */
53704 +
53705 + switch (gr_usermode->mode) {
53706 + case GR_STATUS:
53707 + if (gr_status & GR_READY) {
53708 + error = 1;
53709 + if (!gr_check_secure_terminal(current))
53710 + error = 3;
53711 + } else
53712 + error = 2;
53713 + goto out;
53714 + case GR_SHUTDOWN:
53715 + if ((gr_status & GR_READY)
53716 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53717 + pax_open_kernel();
53718 + gr_status &= ~GR_READY;
53719 + pax_close_kernel();
53720 +
53721 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53722 + free_variables();
53723 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53724 + memset(gr_system_salt, 0, GR_SALT_LEN);
53725 + memset(gr_system_sum, 0, GR_SHA_LEN);
53726 + } else if (gr_status & GR_READY) {
53727 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53728 + error = -EPERM;
53729 + } else {
53730 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53731 + error = -EAGAIN;
53732 + }
53733 + break;
53734 + case GR_ENABLE:
53735 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53736 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53737 + else {
53738 + if (gr_status & GR_READY)
53739 + error = -EAGAIN;
53740 + else
53741 + error = error2;
53742 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53743 + }
53744 + break;
53745 + case GR_RELOAD:
53746 + if (!(gr_status & GR_READY)) {
53747 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53748 + error = -EAGAIN;
53749 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53750 + preempt_disable();
53751 +
53752 + pax_open_kernel();
53753 + gr_status &= ~GR_READY;
53754 + pax_close_kernel();
53755 +
53756 + free_variables();
53757 + if (!(error2 = gracl_init(gr_usermode))) {
53758 + preempt_enable();
53759 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53760 + } else {
53761 + preempt_enable();
53762 + error = error2;
53763 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53764 + }
53765 + } else {
53766 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53767 + error = -EPERM;
53768 + }
53769 + break;
53770 + case GR_SEGVMOD:
53771 + if (unlikely(!(gr_status & GR_READY))) {
53772 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53773 + error = -EAGAIN;
53774 + break;
53775 + }
53776 +
53777 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53778 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53779 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53780 + struct acl_subject_label *segvacl;
53781 + segvacl =
53782 + lookup_acl_subj_label(gr_usermode->segv_inode,
53783 + gr_usermode->segv_device,
53784 + current->role);
53785 + if (segvacl) {
53786 + segvacl->crashes = 0;
53787 + segvacl->expires = 0;
53788 + }
53789 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53790 + gr_remove_uid(gr_usermode->segv_uid);
53791 + }
53792 + } else {
53793 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53794 + error = -EPERM;
53795 + }
53796 + break;
53797 + case GR_SPROLE:
53798 + case GR_SPROLEPAM:
53799 + if (unlikely(!(gr_status & GR_READY))) {
53800 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53801 + error = -EAGAIN;
53802 + break;
53803 + }
53804 +
53805 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53806 + current->role->expires = 0;
53807 + current->role->auth_attempts = 0;
53808 + }
53809 +
53810 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53811 + time_after(current->role->expires, get_seconds())) {
53812 + error = -EBUSY;
53813 + goto out;
53814 + }
53815 +
53816 + if (lookup_special_role_auth
53817 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53818 + && ((!sprole_salt && !sprole_sum)
53819 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53820 + char *p = "";
53821 + assign_special_role(gr_usermode->sp_role);
53822 + read_lock(&tasklist_lock);
53823 + if (current->real_parent)
53824 + p = current->real_parent->role->rolename;
53825 + read_unlock(&tasklist_lock);
53826 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53827 + p, acl_sp_role_value);
53828 + } else {
53829 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53830 + error = -EPERM;
53831 + if(!(current->role->auth_attempts++))
53832 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53833 +
53834 + goto out;
53835 + }
53836 + break;
53837 + case GR_UNSPROLE:
53838 + if (unlikely(!(gr_status & GR_READY))) {
53839 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53840 + error = -EAGAIN;
53841 + break;
53842 + }
53843 +
53844 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53845 + char *p = "";
53846 + int i = 0;
53847 +
53848 + read_lock(&tasklist_lock);
53849 + if (current->real_parent) {
53850 + p = current->real_parent->role->rolename;
53851 + i = current->real_parent->acl_role_id;
53852 + }
53853 + read_unlock(&tasklist_lock);
53854 +
53855 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53856 + gr_set_acls(1);
53857 + } else {
53858 + error = -EPERM;
53859 + goto out;
53860 + }
53861 + break;
53862 + default:
53863 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53864 + error = -EINVAL;
53865 + break;
53866 + }
53867 +
53868 + if (error != -EPERM)
53869 + goto out;
53870 +
53871 + if(!(gr_auth_attempts++))
53872 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53873 +
53874 + out:
53875 + mutex_unlock(&gr_dev_mutex);
53876 + return error;
53877 +}
53878 +
53879 +/* must be called with
53880 + rcu_read_lock();
53881 + read_lock(&tasklist_lock);
53882 + read_lock(&grsec_exec_file_lock);
53883 +*/
53884 +int gr_apply_subject_to_task(struct task_struct *task)
53885 +{
53886 + struct acl_object_label *obj;
53887 + char *tmpname;
53888 + struct acl_subject_label *tmpsubj;
53889 + struct file *filp;
53890 + struct name_entry *nmatch;
53891 +
53892 + filp = task->exec_file;
53893 + if (filp == NULL)
53894 + return 0;
53895 +
53896 + /* the following is to apply the correct subject
53897 + on binaries running when the RBAC system
53898 + is enabled, when the binaries have been
53899 + replaced or deleted since their execution
53900 + -----
53901 + when the RBAC system starts, the inode/dev
53902 + from exec_file will be one the RBAC system
53903 + is unaware of. It only knows the inode/dev
53904 + of the present file on disk, or the absence
53905 + of it.
53906 + */
53907 + preempt_disable();
53908 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53909 +
53910 + nmatch = lookup_name_entry(tmpname);
53911 + preempt_enable();
53912 + tmpsubj = NULL;
53913 + if (nmatch) {
53914 + if (nmatch->deleted)
53915 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53916 + else
53917 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53918 + if (tmpsubj != NULL)
53919 + task->acl = tmpsubj;
53920 + }
53921 + if (tmpsubj == NULL)
53922 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53923 + task->role);
53924 + if (task->acl) {
53925 + task->is_writable = 0;
53926 + /* ignore additional mmap checks for processes that are writable
53927 + by the default ACL */
53928 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53929 + if (unlikely(obj->mode & GR_WRITE))
53930 + task->is_writable = 1;
53931 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53932 + if (unlikely(obj->mode & GR_WRITE))
53933 + task->is_writable = 1;
53934 +
53935 + gr_set_proc_res(task);
53936 +
53937 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53938 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53939 +#endif
53940 + } else {
53941 + return 1;
53942 + }
53943 +
53944 + return 0;
53945 +}
53946 +
53947 +int
53948 +gr_set_acls(const int type)
53949 +{
53950 + struct task_struct *task, *task2;
53951 + struct acl_role_label *role = current->role;
53952 + __u16 acl_role_id = current->acl_role_id;
53953 + const struct cred *cred;
53954 + int ret;
53955 +
53956 + rcu_read_lock();
53957 + read_lock(&tasklist_lock);
53958 + read_lock(&grsec_exec_file_lock);
53959 + do_each_thread(task2, task) {
53960 + /* check to see if we're called from the exit handler,
53961 + if so, only replace ACLs that have inherited the admin
53962 + ACL */
53963 +
53964 + if (type && (task->role != role ||
53965 + task->acl_role_id != acl_role_id))
53966 + continue;
53967 +
53968 + task->acl_role_id = 0;
53969 + task->acl_sp_role = 0;
53970 +
53971 + if (task->exec_file) {
53972 + cred = __task_cred(task);
53973 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53974 + ret = gr_apply_subject_to_task(task);
53975 + if (ret) {
53976 + read_unlock(&grsec_exec_file_lock);
53977 + read_unlock(&tasklist_lock);
53978 + rcu_read_unlock();
53979 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53980 + return ret;
53981 + }
53982 + } else {
53983 + // it's a kernel process
53984 + task->role = kernel_role;
53985 + task->acl = kernel_role->root_label;
53986 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53987 + task->acl->mode &= ~GR_PROCFIND;
53988 +#endif
53989 + }
53990 + } while_each_thread(task2, task);
53991 + read_unlock(&grsec_exec_file_lock);
53992 + read_unlock(&tasklist_lock);
53993 + rcu_read_unlock();
53994 +
53995 + return 0;
53996 +}
53997 +
53998 +void
53999 +gr_learn_resource(const struct task_struct *task,
54000 + const int res, const unsigned long wanted, const int gt)
54001 +{
54002 + struct acl_subject_label *acl;
54003 + const struct cred *cred;
54004 +
54005 + if (unlikely((gr_status & GR_READY) &&
54006 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54007 + goto skip_reslog;
54008 +
54009 +#ifdef CONFIG_GRKERNSEC_RESLOG
54010 + gr_log_resource(task, res, wanted, gt);
54011 +#endif
54012 + skip_reslog:
54013 +
54014 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54015 + return;
54016 +
54017 + acl = task->acl;
54018 +
54019 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54020 + !(acl->resmask & (1 << (unsigned short) res))))
54021 + return;
54022 +
54023 + if (wanted >= acl->res[res].rlim_cur) {
54024 + unsigned long res_add;
54025 +
54026 + res_add = wanted;
54027 + switch (res) {
54028 + case RLIMIT_CPU:
54029 + res_add += GR_RLIM_CPU_BUMP;
54030 + break;
54031 + case RLIMIT_FSIZE:
54032 + res_add += GR_RLIM_FSIZE_BUMP;
54033 + break;
54034 + case RLIMIT_DATA:
54035 + res_add += GR_RLIM_DATA_BUMP;
54036 + break;
54037 + case RLIMIT_STACK:
54038 + res_add += GR_RLIM_STACK_BUMP;
54039 + break;
54040 + case RLIMIT_CORE:
54041 + res_add += GR_RLIM_CORE_BUMP;
54042 + break;
54043 + case RLIMIT_RSS:
54044 + res_add += GR_RLIM_RSS_BUMP;
54045 + break;
54046 + case RLIMIT_NPROC:
54047 + res_add += GR_RLIM_NPROC_BUMP;
54048 + break;
54049 + case RLIMIT_NOFILE:
54050 + res_add += GR_RLIM_NOFILE_BUMP;
54051 + break;
54052 + case RLIMIT_MEMLOCK:
54053 + res_add += GR_RLIM_MEMLOCK_BUMP;
54054 + break;
54055 + case RLIMIT_AS:
54056 + res_add += GR_RLIM_AS_BUMP;
54057 + break;
54058 + case RLIMIT_LOCKS:
54059 + res_add += GR_RLIM_LOCKS_BUMP;
54060 + break;
54061 + case RLIMIT_SIGPENDING:
54062 + res_add += GR_RLIM_SIGPENDING_BUMP;
54063 + break;
54064 + case RLIMIT_MSGQUEUE:
54065 + res_add += GR_RLIM_MSGQUEUE_BUMP;
54066 + break;
54067 + case RLIMIT_NICE:
54068 + res_add += GR_RLIM_NICE_BUMP;
54069 + break;
54070 + case RLIMIT_RTPRIO:
54071 + res_add += GR_RLIM_RTPRIO_BUMP;
54072 + break;
54073 + case RLIMIT_RTTIME:
54074 + res_add += GR_RLIM_RTTIME_BUMP;
54075 + break;
54076 + }
54077 +
54078 + acl->res[res].rlim_cur = res_add;
54079 +
54080 + if (wanted > acl->res[res].rlim_max)
54081 + acl->res[res].rlim_max = res_add;
54082 +
54083 + /* only log the subject filename, since resource logging is supported for
54084 + single-subject learning only */
54085 + rcu_read_lock();
54086 + cred = __task_cred(task);
54087 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54088 + task->role->roletype, cred->uid, cred->gid, acl->filename,
54089 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54090 + "", (unsigned long) res, &task->signal->saved_ip);
54091 + rcu_read_unlock();
54092 + }
54093 +
54094 + return;
54095 +}
54096 +
54097 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54098 +void
54099 +pax_set_initial_flags(struct linux_binprm *bprm)
54100 +{
54101 + struct task_struct *task = current;
54102 + struct acl_subject_label *proc;
54103 + unsigned long flags;
54104 +
54105 + if (unlikely(!(gr_status & GR_READY)))
54106 + return;
54107 +
54108 + flags = pax_get_flags(task);
54109 +
54110 + proc = task->acl;
54111 +
54112 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54113 + flags &= ~MF_PAX_PAGEEXEC;
54114 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54115 + flags &= ~MF_PAX_SEGMEXEC;
54116 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54117 + flags &= ~MF_PAX_RANDMMAP;
54118 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54119 + flags &= ~MF_PAX_EMUTRAMP;
54120 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54121 + flags &= ~MF_PAX_MPROTECT;
54122 +
54123 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54124 + flags |= MF_PAX_PAGEEXEC;
54125 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54126 + flags |= MF_PAX_SEGMEXEC;
54127 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54128 + flags |= MF_PAX_RANDMMAP;
54129 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54130 + flags |= MF_PAX_EMUTRAMP;
54131 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54132 + flags |= MF_PAX_MPROTECT;
54133 +
54134 + pax_set_flags(task, flags);
54135 +
54136 + return;
54137 +}
54138 +#endif
54139 +
54140 +#ifdef CONFIG_SYSCTL
54141 +/* Eric Biederman likes breaking userland ABI and every inode-based security
54142 + system to save 35kb of memory */
54143 +
54144 +/* we modify the passed in filename, but adjust it back before returning */
54145 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54146 +{
54147 + struct name_entry *nmatch;
54148 + char *p, *lastp = NULL;
54149 + struct acl_object_label *obj = NULL, *tmp;
54150 + struct acl_subject_label *tmpsubj;
54151 + char c = '\0';
54152 +
54153 + read_lock(&gr_inode_lock);
54154 +
54155 + p = name + len - 1;
54156 + do {
54157 + nmatch = lookup_name_entry(name);
54158 + if (lastp != NULL)
54159 + *lastp = c;
54160 +
54161 + if (nmatch == NULL)
54162 + goto next_component;
54163 + tmpsubj = current->acl;
54164 + do {
54165 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54166 + if (obj != NULL) {
54167 + tmp = obj->globbed;
54168 + while (tmp) {
54169 + if (!glob_match(tmp->filename, name)) {
54170 + obj = tmp;
54171 + goto found_obj;
54172 + }
54173 + tmp = tmp->next;
54174 + }
54175 + goto found_obj;
54176 + }
54177 + } while ((tmpsubj = tmpsubj->parent_subject));
54178 +next_component:
54179 + /* end case */
54180 + if (p == name)
54181 + break;
54182 +
54183 + while (*p != '/')
54184 + p--;
54185 + if (p == name)
54186 + lastp = p + 1;
54187 + else {
54188 + lastp = p;
54189 + p--;
54190 + }
54191 + c = *lastp;
54192 + *lastp = '\0';
54193 + } while (1);
54194 +found_obj:
54195 + read_unlock(&gr_inode_lock);
54196 + /* obj returned will always be non-null */
54197 + return obj;
54198 +}
54199 +
54200 +/* returns 0 when allowing, non-zero on error
54201 + op of 0 is used for readdir, so we don't log the names of hidden files
54202 +*/
54203 +__u32
54204 +gr_handle_sysctl(const struct ctl_table *table, const int op)
54205 +{
54206 + struct ctl_table *tmp;
54207 + const char *proc_sys = "/proc/sys";
54208 + char *path;
54209 + struct acl_object_label *obj;
54210 + unsigned short len = 0, pos = 0, depth = 0, i;
54211 + __u32 err = 0;
54212 + __u32 mode = 0;
54213 +
54214 + if (unlikely(!(gr_status & GR_READY)))
54215 + return 0;
54216 +
54217 + /* for now, ignore operations on non-sysctl entries if it's not a
54218 + readdir*/
54219 + if (table->child != NULL && op != 0)
54220 + return 0;
54221 +
54222 + mode |= GR_FIND;
54223 + /* it's only a read if it's an entry, read on dirs is for readdir */
54224 + if (op & MAY_READ)
54225 + mode |= GR_READ;
54226 + if (op & MAY_WRITE)
54227 + mode |= GR_WRITE;
54228 +
54229 + preempt_disable();
54230 +
54231 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54232 +
54233 + /* it's only a read/write if it's an actual entry, not a dir
54234 + (which are opened for readdir)
54235 + */
54236 +
54237 + /* convert the requested sysctl entry into a pathname */
54238 +
54239 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54240 + len += strlen(tmp->procname);
54241 + len++;
54242 + depth++;
54243 + }
54244 +
54245 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54246 + /* deny */
54247 + goto out;
54248 + }
54249 +
54250 + memset(path, 0, PAGE_SIZE);
54251 +
54252 + memcpy(path, proc_sys, strlen(proc_sys));
54253 +
54254 + pos += strlen(proc_sys);
54255 +
54256 + for (; depth > 0; depth--) {
54257 + path[pos] = '/';
54258 + pos++;
54259 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54260 + if (depth == i) {
54261 + memcpy(path + pos, tmp->procname,
54262 + strlen(tmp->procname));
54263 + pos += strlen(tmp->procname);
54264 + }
54265 + i++;
54266 + }
54267 + }
54268 +
54269 + obj = gr_lookup_by_name(path, pos);
54270 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54271 +
54272 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54273 + ((err & mode) != mode))) {
54274 + __u32 new_mode = mode;
54275 +
54276 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54277 +
54278 + err = 0;
54279 + gr_log_learn_sysctl(path, new_mode);
54280 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54281 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54282 + err = -ENOENT;
54283 + } else if (!(err & GR_FIND)) {
54284 + err = -ENOENT;
54285 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54286 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54287 + path, (mode & GR_READ) ? " reading" : "",
54288 + (mode & GR_WRITE) ? " writing" : "");
54289 + err = -EACCES;
54290 + } else if ((err & mode) != mode) {
54291 + err = -EACCES;
54292 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54293 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54294 + path, (mode & GR_READ) ? " reading" : "",
54295 + (mode & GR_WRITE) ? " writing" : "");
54296 + err = 0;
54297 + } else
54298 + err = 0;
54299 +
54300 + out:
54301 + preempt_enable();
54302 +
54303 + return err;
54304 +}
54305 +#endif
54306 +
54307 +int
54308 +gr_handle_proc_ptrace(struct task_struct *task)
54309 +{
54310 + struct file *filp;
54311 + struct task_struct *tmp = task;
54312 + struct task_struct *curtemp = current;
54313 + __u32 retmode;
54314 +
54315 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54316 + if (unlikely(!(gr_status & GR_READY)))
54317 + return 0;
54318 +#endif
54319 +
54320 + read_lock(&tasklist_lock);
54321 + read_lock(&grsec_exec_file_lock);
54322 + filp = task->exec_file;
54323 +
54324 + while (tmp->pid > 0) {
54325 + if (tmp == curtemp)
54326 + break;
54327 + tmp = tmp->real_parent;
54328 + }
54329 +
54330 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54331 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54332 + read_unlock(&grsec_exec_file_lock);
54333 + read_unlock(&tasklist_lock);
54334 + return 1;
54335 + }
54336 +
54337 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54338 + if (!(gr_status & GR_READY)) {
54339 + read_unlock(&grsec_exec_file_lock);
54340 + read_unlock(&tasklist_lock);
54341 + return 0;
54342 + }
54343 +#endif
54344 +
54345 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54346 + read_unlock(&grsec_exec_file_lock);
54347 + read_unlock(&tasklist_lock);
54348 +
54349 + if (retmode & GR_NOPTRACE)
54350 + return 1;
54351 +
54352 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54353 + && (current->acl != task->acl || (current->acl != current->role->root_label
54354 + && current->pid != task->pid)))
54355 + return 1;
54356 +
54357 + return 0;
54358 +}
54359 +
54360 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54361 +{
54362 + if (unlikely(!(gr_status & GR_READY)))
54363 + return;
54364 +
54365 + if (!(current->role->roletype & GR_ROLE_GOD))
54366 + return;
54367 +
54368 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54369 + p->role->rolename, gr_task_roletype_to_char(p),
54370 + p->acl->filename);
54371 +}
54372 +
54373 +int
54374 +gr_handle_ptrace(struct task_struct *task, const long request)
54375 +{
54376 + struct task_struct *tmp = task;
54377 + struct task_struct *curtemp = current;
54378 + __u32 retmode;
54379 +
54380 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54381 + if (unlikely(!(gr_status & GR_READY)))
54382 + return 0;
54383 +#endif
54384 +
54385 + read_lock(&tasklist_lock);
54386 + while (tmp->pid > 0) {
54387 + if (tmp == curtemp)
54388 + break;
54389 + tmp = tmp->real_parent;
54390 + }
54391 +
54392 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54393 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54394 + read_unlock(&tasklist_lock);
54395 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54396 + return 1;
54397 + }
54398 + read_unlock(&tasklist_lock);
54399 +
54400 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54401 + if (!(gr_status & GR_READY))
54402 + return 0;
54403 +#endif
54404 +
54405 + read_lock(&grsec_exec_file_lock);
54406 + if (unlikely(!task->exec_file)) {
54407 + read_unlock(&grsec_exec_file_lock);
54408 + return 0;
54409 + }
54410 +
54411 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54412 + read_unlock(&grsec_exec_file_lock);
54413 +
54414 + if (retmode & GR_NOPTRACE) {
54415 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54416 + return 1;
54417 + }
54418 +
54419 + if (retmode & GR_PTRACERD) {
54420 + switch (request) {
54421 + case PTRACE_SEIZE:
54422 + case PTRACE_POKETEXT:
54423 + case PTRACE_POKEDATA:
54424 + case PTRACE_POKEUSR:
54425 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54426 + case PTRACE_SETREGS:
54427 + case PTRACE_SETFPREGS:
54428 +#endif
54429 +#ifdef CONFIG_X86
54430 + case PTRACE_SETFPXREGS:
54431 +#endif
54432 +#ifdef CONFIG_ALTIVEC
54433 + case PTRACE_SETVRREGS:
54434 +#endif
54435 + return 1;
54436 + default:
54437 + return 0;
54438 + }
54439 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54440 + !(current->role->roletype & GR_ROLE_GOD) &&
54441 + (current->acl != task->acl)) {
54442 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54443 + return 1;
54444 + }
54445 +
54446 + return 0;
54447 +}
54448 +
54449 +static int is_writable_mmap(const struct file *filp)
54450 +{
54451 + struct task_struct *task = current;
54452 + struct acl_object_label *obj, *obj2;
54453 +
54454 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54455 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54456 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54457 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54458 + task->role->root_label);
54459 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54460 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54461 + return 1;
54462 + }
54463 + }
54464 + return 0;
54465 +}
54466 +
54467 +int
54468 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54469 +{
54470 + __u32 mode;
54471 +
54472 + if (unlikely(!file || !(prot & PROT_EXEC)))
54473 + return 1;
54474 +
54475 + if (is_writable_mmap(file))
54476 + return 0;
54477 +
54478 + mode =
54479 + gr_search_file(file->f_path.dentry,
54480 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54481 + file->f_path.mnt);
54482 +
54483 + if (!gr_tpe_allow(file))
54484 + return 0;
54485 +
54486 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54487 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54488 + return 0;
54489 + } else if (unlikely(!(mode & GR_EXEC))) {
54490 + return 0;
54491 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54492 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54493 + return 1;
54494 + }
54495 +
54496 + return 1;
54497 +}
54498 +
54499 +int
54500 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54501 +{
54502 + __u32 mode;
54503 +
54504 + if (unlikely(!file || !(prot & PROT_EXEC)))
54505 + return 1;
54506 +
54507 + if (is_writable_mmap(file))
54508 + return 0;
54509 +
54510 + mode =
54511 + gr_search_file(file->f_path.dentry,
54512 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54513 + file->f_path.mnt);
54514 +
54515 + if (!gr_tpe_allow(file))
54516 + return 0;
54517 +
54518 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54519 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54520 + return 0;
54521 + } else if (unlikely(!(mode & GR_EXEC))) {
54522 + return 0;
54523 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54524 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54525 + return 1;
54526 + }
54527 +
54528 + return 1;
54529 +}
54530 +
54531 +void
54532 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54533 +{
54534 + unsigned long runtime;
54535 + unsigned long cputime;
54536 + unsigned int wday, cday;
54537 + __u8 whr, chr;
54538 + __u8 wmin, cmin;
54539 + __u8 wsec, csec;
54540 + struct timespec timeval;
54541 +
54542 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54543 + !(task->acl->mode & GR_PROCACCT)))
54544 + return;
54545 +
54546 + do_posix_clock_monotonic_gettime(&timeval);
54547 + runtime = timeval.tv_sec - task->start_time.tv_sec;
54548 + wday = runtime / (3600 * 24);
54549 + runtime -= wday * (3600 * 24);
54550 + whr = runtime / 3600;
54551 + runtime -= whr * 3600;
54552 + wmin = runtime / 60;
54553 + runtime -= wmin * 60;
54554 + wsec = runtime;
54555 +
54556 + cputime = (task->utime + task->stime) / HZ;
54557 + cday = cputime / (3600 * 24);
54558 + cputime -= cday * (3600 * 24);
54559 + chr = cputime / 3600;
54560 + cputime -= chr * 3600;
54561 + cmin = cputime / 60;
54562 + cputime -= cmin * 60;
54563 + csec = cputime;
54564 +
54565 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54566 +
54567 + return;
54568 +}
54569 +
54570 +void gr_set_kernel_label(struct task_struct *task)
54571 +{
54572 + if (gr_status & GR_READY) {
54573 + task->role = kernel_role;
54574 + task->acl = kernel_role->root_label;
54575 + }
54576 + return;
54577 +}
54578 +
54579 +#ifdef CONFIG_TASKSTATS
54580 +int gr_is_taskstats_denied(int pid)
54581 +{
54582 + struct task_struct *task;
54583 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54584 + const struct cred *cred;
54585 +#endif
54586 + int ret = 0;
54587 +
54588 + /* restrict taskstats viewing to un-chrooted root users
54589 + who have the 'view' subject flag if the RBAC system is enabled
54590 + */
54591 +
54592 + rcu_read_lock();
54593 + read_lock(&tasklist_lock);
54594 + task = find_task_by_vpid(pid);
54595 + if (task) {
54596 +#ifdef CONFIG_GRKERNSEC_CHROOT
54597 + if (proc_is_chrooted(task))
54598 + ret = -EACCES;
54599 +#endif
54600 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54601 + cred = __task_cred(task);
54602 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54603 + if (cred->uid != 0)
54604 + ret = -EACCES;
54605 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54606 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54607 + ret = -EACCES;
54608 +#endif
54609 +#endif
54610 + if (gr_status & GR_READY) {
54611 + if (!(task->acl->mode & GR_VIEW))
54612 + ret = -EACCES;
54613 + }
54614 + } else
54615 + ret = -ENOENT;
54616 +
54617 + read_unlock(&tasklist_lock);
54618 + rcu_read_unlock();
54619 +
54620 + return ret;
54621 +}
54622 +#endif
54623 +
54624 +/* AUXV entries are filled via a descendant of search_binary_handler
54625 + after we've already applied the subject for the target
54626 +*/
54627 +int gr_acl_enable_at_secure(void)
54628 +{
54629 + if (unlikely(!(gr_status & GR_READY)))
54630 + return 0;
54631 +
54632 + if (current->acl->mode & GR_ATSECURE)
54633 + return 1;
54634 +
54635 + return 0;
54636 +}
54637 +
54638 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54639 +{
54640 + struct task_struct *task = current;
54641 + struct dentry *dentry = file->f_path.dentry;
54642 + struct vfsmount *mnt = file->f_path.mnt;
54643 + struct acl_object_label *obj, *tmp;
54644 + struct acl_subject_label *subj;
54645 + unsigned int bufsize;
54646 + int is_not_root;
54647 + char *path;
54648 + dev_t dev = __get_dev(dentry);
54649 +
54650 + if (unlikely(!(gr_status & GR_READY)))
54651 + return 1;
54652 +
54653 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54654 + return 1;
54655 +
54656 + /* ignore Eric Biederman */
54657 + if (IS_PRIVATE(dentry->d_inode))
54658 + return 1;
54659 +
54660 + subj = task->acl;
54661 + do {
54662 + obj = lookup_acl_obj_label(ino, dev, subj);
54663 + if (obj != NULL)
54664 + return (obj->mode & GR_FIND) ? 1 : 0;
54665 + } while ((subj = subj->parent_subject));
54666 +
54667 + /* this is purely an optimization since we're looking for an object
54668 + for the directory we're doing a readdir on
54669 + if it's possible for any globbed object to match the entry we're
54670 + filling into the directory, then the object we find here will be
54671 + an anchor point with attached globbed objects
54672 + */
54673 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54674 + if (obj->globbed == NULL)
54675 + return (obj->mode & GR_FIND) ? 1 : 0;
54676 +
54677 + is_not_root = ((obj->filename[0] == '/') &&
54678 + (obj->filename[1] == '\0')) ? 0 : 1;
54679 + bufsize = PAGE_SIZE - namelen - is_not_root;
54680 +
54681 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54682 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54683 + return 1;
54684 +
54685 + preempt_disable();
54686 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54687 + bufsize);
54688 +
54689 + bufsize = strlen(path);
54690 +
54691 + /* if base is "/", don't append an additional slash */
54692 + if (is_not_root)
54693 + *(path + bufsize) = '/';
54694 + memcpy(path + bufsize + is_not_root, name, namelen);
54695 + *(path + bufsize + namelen + is_not_root) = '\0';
54696 +
54697 + tmp = obj->globbed;
54698 + while (tmp) {
54699 + if (!glob_match(tmp->filename, path)) {
54700 + preempt_enable();
54701 + return (tmp->mode & GR_FIND) ? 1 : 0;
54702 + }
54703 + tmp = tmp->next;
54704 + }
54705 + preempt_enable();
54706 + return (obj->mode & GR_FIND) ? 1 : 0;
54707 +}
54708 +
54709 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54710 +EXPORT_SYMBOL(gr_acl_is_enabled);
54711 +#endif
54712 +EXPORT_SYMBOL(gr_learn_resource);
54713 +EXPORT_SYMBOL(gr_set_kernel_label);
54714 +#ifdef CONFIG_SECURITY
54715 +EXPORT_SYMBOL(gr_check_user_change);
54716 +EXPORT_SYMBOL(gr_check_group_change);
54717 +#endif
54718 +
54719 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54720 new file mode 100644
54721 index 0000000..34fefda
54722 --- /dev/null
54723 +++ b/grsecurity/gracl_alloc.c
54724 @@ -0,0 +1,105 @@
54725 +#include <linux/kernel.h>
54726 +#include <linux/mm.h>
54727 +#include <linux/slab.h>
54728 +#include <linux/vmalloc.h>
54729 +#include <linux/gracl.h>
54730 +#include <linux/grsecurity.h>
54731 +
54732 +static unsigned long alloc_stack_next = 1;
54733 +static unsigned long alloc_stack_size = 1;
54734 +static void **alloc_stack;
54735 +
54736 +static __inline__ int
54737 +alloc_pop(void)
54738 +{
54739 + if (alloc_stack_next == 1)
54740 + return 0;
54741 +
54742 + kfree(alloc_stack[alloc_stack_next - 2]);
54743 +
54744 + alloc_stack_next--;
54745 +
54746 + return 1;
54747 +}
54748 +
54749 +static __inline__ int
54750 +alloc_push(void *buf)
54751 +{
54752 + if (alloc_stack_next >= alloc_stack_size)
54753 + return 1;
54754 +
54755 + alloc_stack[alloc_stack_next - 1] = buf;
54756 +
54757 + alloc_stack_next++;
54758 +
54759 + return 0;
54760 +}
54761 +
54762 +void *
54763 +acl_alloc(unsigned long len)
54764 +{
54765 + void *ret = NULL;
54766 +
54767 + if (!len || len > PAGE_SIZE)
54768 + goto out;
54769 +
54770 + ret = kmalloc(len, GFP_KERNEL);
54771 +
54772 + if (ret) {
54773 + if (alloc_push(ret)) {
54774 + kfree(ret);
54775 + ret = NULL;
54776 + }
54777 + }
54778 +
54779 +out:
54780 + return ret;
54781 +}
54782 +
54783 +void *
54784 +acl_alloc_num(unsigned long num, unsigned long len)
54785 +{
54786 + if (!len || (num > (PAGE_SIZE / len)))
54787 + return NULL;
54788 +
54789 + return acl_alloc(num * len);
54790 +}
54791 +
54792 +void
54793 +acl_free_all(void)
54794 +{
54795 + if (gr_acl_is_enabled() || !alloc_stack)
54796 + return;
54797 +
54798 + while (alloc_pop()) ;
54799 +
54800 + if (alloc_stack) {
54801 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54802 + kfree(alloc_stack);
54803 + else
54804 + vfree(alloc_stack);
54805 + }
54806 +
54807 + alloc_stack = NULL;
54808 + alloc_stack_size = 1;
54809 + alloc_stack_next = 1;
54810 +
54811 + return;
54812 +}
54813 +
54814 +int
54815 +acl_alloc_stack_init(unsigned long size)
54816 +{
54817 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54818 + alloc_stack =
54819 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54820 + else
54821 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54822 +
54823 + alloc_stack_size = size;
54824 +
54825 + if (!alloc_stack)
54826 + return 0;
54827 + else
54828 + return 1;
54829 +}
54830 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54831 new file mode 100644
54832 index 0000000..955ddfb
54833 --- /dev/null
54834 +++ b/grsecurity/gracl_cap.c
54835 @@ -0,0 +1,101 @@
54836 +#include <linux/kernel.h>
54837 +#include <linux/module.h>
54838 +#include <linux/sched.h>
54839 +#include <linux/gracl.h>
54840 +#include <linux/grsecurity.h>
54841 +#include <linux/grinternal.h>
54842 +
54843 +extern const char *captab_log[];
54844 +extern int captab_log_entries;
54845 +
54846 +int
54847 +gr_acl_is_capable(const int cap)
54848 +{
54849 + struct task_struct *task = current;
54850 + const struct cred *cred = current_cred();
54851 + struct acl_subject_label *curracl;
54852 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54853 + kernel_cap_t cap_audit = __cap_empty_set;
54854 +
54855 + if (!gr_acl_is_enabled())
54856 + return 1;
54857 +
54858 + curracl = task->acl;
54859 +
54860 + cap_drop = curracl->cap_lower;
54861 + cap_mask = curracl->cap_mask;
54862 + cap_audit = curracl->cap_invert_audit;
54863 +
54864 + while ((curracl = curracl->parent_subject)) {
54865 + /* if the cap isn't specified in the current computed mask but is specified in the
54866 + current level subject, and is lowered in the current level subject, then add
54867 + it to the set of dropped capabilities
54868 + otherwise, add the current level subject's mask to the current computed mask
54869 + */
54870 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54871 + cap_raise(cap_mask, cap);
54872 + if (cap_raised(curracl->cap_lower, cap))
54873 + cap_raise(cap_drop, cap);
54874 + if (cap_raised(curracl->cap_invert_audit, cap))
54875 + cap_raise(cap_audit, cap);
54876 + }
54877 + }
54878 +
54879 + if (!cap_raised(cap_drop, cap)) {
54880 + if (cap_raised(cap_audit, cap))
54881 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54882 + return 1;
54883 + }
54884 +
54885 + curracl = task->acl;
54886 +
54887 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54888 + && cap_raised(cred->cap_effective, cap)) {
54889 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54890 + task->role->roletype, cred->uid,
54891 + cred->gid, task->exec_file ?
54892 + gr_to_filename(task->exec_file->f_path.dentry,
54893 + task->exec_file->f_path.mnt) : curracl->filename,
54894 + curracl->filename, 0UL,
54895 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54896 + return 1;
54897 + }
54898 +
54899 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54900 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54901 + return 0;
54902 +}
54903 +
54904 +int
54905 +gr_acl_is_capable_nolog(const int cap)
54906 +{
54907 + struct acl_subject_label *curracl;
54908 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54909 +
54910 + if (!gr_acl_is_enabled())
54911 + return 1;
54912 +
54913 + curracl = current->acl;
54914 +
54915 + cap_drop = curracl->cap_lower;
54916 + cap_mask = curracl->cap_mask;
54917 +
54918 + while ((curracl = curracl->parent_subject)) {
54919 + /* if the cap isn't specified in the current computed mask but is specified in the
54920 + current level subject, and is lowered in the current level subject, then add
54921 + it to the set of dropped capabilities
54922 + otherwise, add the current level subject's mask to the current computed mask
54923 + */
54924 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54925 + cap_raise(cap_mask, cap);
54926 + if (cap_raised(curracl->cap_lower, cap))
54927 + cap_raise(cap_drop, cap);
54928 + }
54929 + }
54930 +
54931 + if (!cap_raised(cap_drop, cap))
54932 + return 1;
54933 +
54934 + return 0;
54935 +}
54936 +
54937 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54938 new file mode 100644
54939 index 0000000..4eda5c3
54940 --- /dev/null
54941 +++ b/grsecurity/gracl_fs.c
54942 @@ -0,0 +1,433 @@
54943 +#include <linux/kernel.h>
54944 +#include <linux/sched.h>
54945 +#include <linux/types.h>
54946 +#include <linux/fs.h>
54947 +#include <linux/file.h>
54948 +#include <linux/stat.h>
54949 +#include <linux/grsecurity.h>
54950 +#include <linux/grinternal.h>
54951 +#include <linux/gracl.h>
54952 +
54953 +__u32
54954 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54955 + const struct vfsmount * mnt)
54956 +{
54957 + __u32 mode;
54958 +
54959 + if (unlikely(!dentry->d_inode))
54960 + return GR_FIND;
54961 +
54962 + mode =
54963 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54964 +
54965 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54966 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54967 + return mode;
54968 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54969 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54970 + return 0;
54971 + } else if (unlikely(!(mode & GR_FIND)))
54972 + return 0;
54973 +
54974 + return GR_FIND;
54975 +}
54976 +
54977 +__u32
54978 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54979 + int acc_mode)
54980 +{
54981 + __u32 reqmode = GR_FIND;
54982 + __u32 mode;
54983 +
54984 + if (unlikely(!dentry->d_inode))
54985 + return reqmode;
54986 +
54987 + if (acc_mode & MAY_APPEND)
54988 + reqmode |= GR_APPEND;
54989 + else if (acc_mode & MAY_WRITE)
54990 + reqmode |= GR_WRITE;
54991 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54992 + reqmode |= GR_READ;
54993 +
54994 + mode =
54995 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54996 + mnt);
54997 +
54998 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54999 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55000 + reqmode & GR_READ ? " reading" : "",
55001 + reqmode & GR_WRITE ? " writing" : reqmode &
55002 + GR_APPEND ? " appending" : "");
55003 + return reqmode;
55004 + } else
55005 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55006 + {
55007 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55008 + reqmode & GR_READ ? " reading" : "",
55009 + reqmode & GR_WRITE ? " writing" : reqmode &
55010 + GR_APPEND ? " appending" : "");
55011 + return 0;
55012 + } else if (unlikely((mode & reqmode) != reqmode))
55013 + return 0;
55014 +
55015 + return reqmode;
55016 +}
55017 +
55018 +__u32
55019 +gr_acl_handle_creat(const struct dentry * dentry,
55020 + const struct dentry * p_dentry,
55021 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55022 + const int imode)
55023 +{
55024 + __u32 reqmode = GR_WRITE | GR_CREATE;
55025 + __u32 mode;
55026 +
55027 + if (acc_mode & MAY_APPEND)
55028 + reqmode |= GR_APPEND;
55029 + // if a directory was required or the directory already exists, then
55030 + // don't count this open as a read
55031 + if ((acc_mode & MAY_READ) &&
55032 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55033 + reqmode |= GR_READ;
55034 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55035 + reqmode |= GR_SETID;
55036 +
55037 + mode =
55038 + gr_check_create(dentry, p_dentry, p_mnt,
55039 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55040 +
55041 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55042 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55043 + reqmode & GR_READ ? " reading" : "",
55044 + reqmode & GR_WRITE ? " writing" : reqmode &
55045 + GR_APPEND ? " appending" : "");
55046 + return reqmode;
55047 + } else
55048 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55049 + {
55050 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55051 + reqmode & GR_READ ? " reading" : "",
55052 + reqmode & GR_WRITE ? " writing" : reqmode &
55053 + GR_APPEND ? " appending" : "");
55054 + return 0;
55055 + } else if (unlikely((mode & reqmode) != reqmode))
55056 + return 0;
55057 +
55058 + return reqmode;
55059 +}
55060 +
55061 +__u32
55062 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55063 + const int fmode)
55064 +{
55065 + __u32 mode, reqmode = GR_FIND;
55066 +
55067 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55068 + reqmode |= GR_EXEC;
55069 + if (fmode & S_IWOTH)
55070 + reqmode |= GR_WRITE;
55071 + if (fmode & S_IROTH)
55072 + reqmode |= GR_READ;
55073 +
55074 + mode =
55075 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55076 + mnt);
55077 +
55078 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55079 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55080 + reqmode & GR_READ ? " reading" : "",
55081 + reqmode & GR_WRITE ? " writing" : "",
55082 + reqmode & GR_EXEC ? " executing" : "");
55083 + return reqmode;
55084 + } else
55085 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55086 + {
55087 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55088 + reqmode & GR_READ ? " reading" : "",
55089 + reqmode & GR_WRITE ? " writing" : "",
55090 + reqmode & GR_EXEC ? " executing" : "");
55091 + return 0;
55092 + } else if (unlikely((mode & reqmode) != reqmode))
55093 + return 0;
55094 +
55095 + return reqmode;
55096 +}
55097 +
55098 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55099 +{
55100 + __u32 mode;
55101 +
55102 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55103 +
55104 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55105 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55106 + return mode;
55107 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55108 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55109 + return 0;
55110 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55111 + return 0;
55112 +
55113 + return (reqmode);
55114 +}
55115 +
55116 +__u32
55117 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55118 +{
55119 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55120 +}
55121 +
55122 +__u32
55123 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55124 +{
55125 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55126 +}
55127 +
55128 +__u32
55129 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55130 +{
55131 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55132 +}
55133 +
55134 +__u32
55135 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55136 +{
55137 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55138 +}
55139 +
55140 +__u32
55141 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
55142 + mode_t mode)
55143 +{
55144 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55145 + return 1;
55146 +
55147 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55148 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55149 + GR_FCHMOD_ACL_MSG);
55150 + } else {
55151 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
55152 + }
55153 +}
55154 +
55155 +__u32
55156 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55157 + mode_t mode)
55158 +{
55159 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
55160 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55161 + GR_CHMOD_ACL_MSG);
55162 + } else {
55163 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55164 + }
55165 +}
55166 +
55167 +__u32
55168 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55169 +{
55170 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55171 +}
55172 +
55173 +__u32
55174 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55175 +{
55176 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55177 +}
55178 +
55179 +__u32
55180 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55181 +{
55182 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55183 +}
55184 +
55185 +__u32
55186 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55187 +{
55188 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55189 + GR_UNIXCONNECT_ACL_MSG);
55190 +}
55191 +
55192 +/* hardlinks require at minimum create and link permission,
55193 + any additional privilege required is based on the
55194 + privilege of the file being linked to
55195 +*/
55196 +__u32
55197 +gr_acl_handle_link(const struct dentry * new_dentry,
55198 + const struct dentry * parent_dentry,
55199 + const struct vfsmount * parent_mnt,
55200 + const struct dentry * old_dentry,
55201 + const struct vfsmount * old_mnt, const char *to)
55202 +{
55203 + __u32 mode;
55204 + __u32 needmode = GR_CREATE | GR_LINK;
55205 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55206 +
55207 + mode =
55208 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55209 + old_mnt);
55210 +
55211 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55212 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55213 + return mode;
55214 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55215 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55216 + return 0;
55217 + } else if (unlikely((mode & needmode) != needmode))
55218 + return 0;
55219 +
55220 + return 1;
55221 +}
55222 +
55223 +__u32
55224 +gr_acl_handle_symlink(const struct dentry * new_dentry,
55225 + const struct dentry * parent_dentry,
55226 + const struct vfsmount * parent_mnt, const char *from)
55227 +{
55228 + __u32 needmode = GR_WRITE | GR_CREATE;
55229 + __u32 mode;
55230 +
55231 + mode =
55232 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
55233 + GR_CREATE | GR_AUDIT_CREATE |
55234 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55235 +
55236 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55237 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55238 + return mode;
55239 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55240 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55241 + return 0;
55242 + } else if (unlikely((mode & needmode) != needmode))
55243 + return 0;
55244 +
55245 + return (GR_WRITE | GR_CREATE);
55246 +}
55247 +
55248 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55249 +{
55250 + __u32 mode;
55251 +
55252 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55253 +
55254 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55255 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55256 + return mode;
55257 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55258 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55259 + return 0;
55260 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55261 + return 0;
55262 +
55263 + return (reqmode);
55264 +}
55265 +
55266 +__u32
55267 +gr_acl_handle_mknod(const struct dentry * new_dentry,
55268 + const struct dentry * parent_dentry,
55269 + const struct vfsmount * parent_mnt,
55270 + const int mode)
55271 +{
55272 + __u32 reqmode = GR_WRITE | GR_CREATE;
55273 + if (unlikely(mode & (S_ISUID | S_ISGID)))
55274 + reqmode |= GR_SETID;
55275 +
55276 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55277 + reqmode, GR_MKNOD_ACL_MSG);
55278 +}
55279 +
55280 +__u32
55281 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55282 + const struct dentry *parent_dentry,
55283 + const struct vfsmount *parent_mnt)
55284 +{
55285 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55286 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55287 +}
55288 +
55289 +#define RENAME_CHECK_SUCCESS(old, new) \
55290 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55291 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55292 +
55293 +int
55294 +gr_acl_handle_rename(struct dentry *new_dentry,
55295 + struct dentry *parent_dentry,
55296 + const struct vfsmount *parent_mnt,
55297 + struct dentry *old_dentry,
55298 + struct inode *old_parent_inode,
55299 + struct vfsmount *old_mnt, const char *newname)
55300 +{
55301 + __u32 comp1, comp2;
55302 + int error = 0;
55303 +
55304 + if (unlikely(!gr_acl_is_enabled()))
55305 + return 0;
55306 +
55307 + if (!new_dentry->d_inode) {
55308 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55309 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55310 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55311 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55312 + GR_DELETE | GR_AUDIT_DELETE |
55313 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55314 + GR_SUPPRESS, old_mnt);
55315 + } else {
55316 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55317 + GR_CREATE | GR_DELETE |
55318 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55319 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55320 + GR_SUPPRESS, parent_mnt);
55321 + comp2 =
55322 + gr_search_file(old_dentry,
55323 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55324 + GR_DELETE | GR_AUDIT_DELETE |
55325 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55326 + }
55327 +
55328 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55329 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55330 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55331 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55332 + && !(comp2 & GR_SUPPRESS)) {
55333 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55334 + error = -EACCES;
55335 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55336 + error = -EACCES;
55337 +
55338 + return error;
55339 +}
55340 +
55341 +void
55342 +gr_acl_handle_exit(void)
55343 +{
55344 + u16 id;
55345 + char *rolename;
55346 + struct file *exec_file;
55347 +
55348 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55349 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55350 + id = current->acl_role_id;
55351 + rolename = current->role->rolename;
55352 + gr_set_acls(1);
55353 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55354 + }
55355 +
55356 + write_lock(&grsec_exec_file_lock);
55357 + exec_file = current->exec_file;
55358 + current->exec_file = NULL;
55359 + write_unlock(&grsec_exec_file_lock);
55360 +
55361 + if (exec_file)
55362 + fput(exec_file);
55363 +}
55364 +
55365 +int
55366 +gr_acl_handle_procpidmem(const struct task_struct *task)
55367 +{
55368 + if (unlikely(!gr_acl_is_enabled()))
55369 + return 0;
55370 +
55371 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55372 + return -EACCES;
55373 +
55374 + return 0;
55375 +}
55376 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55377 new file mode 100644
55378 index 0000000..17050ca
55379 --- /dev/null
55380 +++ b/grsecurity/gracl_ip.c
55381 @@ -0,0 +1,381 @@
55382 +#include <linux/kernel.h>
55383 +#include <asm/uaccess.h>
55384 +#include <asm/errno.h>
55385 +#include <net/sock.h>
55386 +#include <linux/file.h>
55387 +#include <linux/fs.h>
55388 +#include <linux/net.h>
55389 +#include <linux/in.h>
55390 +#include <linux/skbuff.h>
55391 +#include <linux/ip.h>
55392 +#include <linux/udp.h>
55393 +#include <linux/types.h>
55394 +#include <linux/sched.h>
55395 +#include <linux/netdevice.h>
55396 +#include <linux/inetdevice.h>
55397 +#include <linux/gracl.h>
55398 +#include <linux/grsecurity.h>
55399 +#include <linux/grinternal.h>
55400 +
55401 +#define GR_BIND 0x01
55402 +#define GR_CONNECT 0x02
55403 +#define GR_INVERT 0x04
55404 +#define GR_BINDOVERRIDE 0x08
55405 +#define GR_CONNECTOVERRIDE 0x10
55406 +#define GR_SOCK_FAMILY 0x20
55407 +
55408 +static const char * gr_protocols[IPPROTO_MAX] = {
55409 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55410 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55411 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55412 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55413 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55414 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55415 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55416 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55417 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55418 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55419 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55420 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55421 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55422 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55423 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55424 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55425 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55426 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55427 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55428 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55429 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55430 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55431 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55432 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55433 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55434 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55435 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55436 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55437 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55438 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55439 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55440 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55441 + };
55442 +
55443 +static const char * gr_socktypes[SOCK_MAX] = {
55444 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55445 + "unknown:7", "unknown:8", "unknown:9", "packet"
55446 + };
55447 +
55448 +static const char * gr_sockfamilies[AF_MAX+1] = {
55449 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55450 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55451 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55452 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55453 + };
55454 +
55455 +const char *
55456 +gr_proto_to_name(unsigned char proto)
55457 +{
55458 + return gr_protocols[proto];
55459 +}
55460 +
55461 +const char *
55462 +gr_socktype_to_name(unsigned char type)
55463 +{
55464 + return gr_socktypes[type];
55465 +}
55466 +
55467 +const char *
55468 +gr_sockfamily_to_name(unsigned char family)
55469 +{
55470 + return gr_sockfamilies[family];
55471 +}
55472 +
55473 +int
55474 +gr_search_socket(const int domain, const int type, const int protocol)
55475 +{
55476 + struct acl_subject_label *curr;
55477 + const struct cred *cred = current_cred();
55478 +
55479 + if (unlikely(!gr_acl_is_enabled()))
55480 + goto exit;
55481 +
55482 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
55483 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55484 + goto exit; // let the kernel handle it
55485 +
55486 + curr = current->acl;
55487 +
55488 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55489 + /* the family is allowed, if this is PF_INET allow it only if
55490 + the extra sock type/protocol checks pass */
55491 + if (domain == PF_INET)
55492 + goto inet_check;
55493 + goto exit;
55494 + } else {
55495 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55496 + __u32 fakeip = 0;
55497 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55498 + current->role->roletype, cred->uid,
55499 + cred->gid, current->exec_file ?
55500 + gr_to_filename(current->exec_file->f_path.dentry,
55501 + current->exec_file->f_path.mnt) :
55502 + curr->filename, curr->filename,
55503 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55504 + &current->signal->saved_ip);
55505 + goto exit;
55506 + }
55507 + goto exit_fail;
55508 + }
55509 +
55510 +inet_check:
55511 + /* the rest of this checking is for IPv4 only */
55512 + if (!curr->ips)
55513 + goto exit;
55514 +
55515 + if ((curr->ip_type & (1 << type)) &&
55516 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55517 + goto exit;
55518 +
55519 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55520 + /* we don't place acls on raw sockets , and sometimes
55521 + dgram/ip sockets are opened for ioctl and not
55522 + bind/connect, so we'll fake a bind learn log */
55523 + if (type == SOCK_RAW || type == SOCK_PACKET) {
55524 + __u32 fakeip = 0;
55525 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55526 + current->role->roletype, cred->uid,
55527 + cred->gid, current->exec_file ?
55528 + gr_to_filename(current->exec_file->f_path.dentry,
55529 + current->exec_file->f_path.mnt) :
55530 + curr->filename, curr->filename,
55531 + &fakeip, 0, type,
55532 + protocol, GR_CONNECT, &current->signal->saved_ip);
55533 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55534 + __u32 fakeip = 0;
55535 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55536 + current->role->roletype, cred->uid,
55537 + cred->gid, current->exec_file ?
55538 + gr_to_filename(current->exec_file->f_path.dentry,
55539 + current->exec_file->f_path.mnt) :
55540 + curr->filename, curr->filename,
55541 + &fakeip, 0, type,
55542 + protocol, GR_BIND, &current->signal->saved_ip);
55543 + }
55544 + /* we'll log when they use connect or bind */
55545 + goto exit;
55546 + }
55547 +
55548 +exit_fail:
55549 + if (domain == PF_INET)
55550 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55551 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
55552 + else
55553 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55554 + gr_socktype_to_name(type), protocol);
55555 +
55556 + return 0;
55557 +exit:
55558 + return 1;
55559 +}
55560 +
55561 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55562 +{
55563 + if ((ip->mode & mode) &&
55564 + (ip_port >= ip->low) &&
55565 + (ip_port <= ip->high) &&
55566 + ((ntohl(ip_addr) & our_netmask) ==
55567 + (ntohl(our_addr) & our_netmask))
55568 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55569 + && (ip->type & (1 << type))) {
55570 + if (ip->mode & GR_INVERT)
55571 + return 2; // specifically denied
55572 + else
55573 + return 1; // allowed
55574 + }
55575 +
55576 + return 0; // not specifically allowed, may continue parsing
55577 +}
55578 +
55579 +static int
55580 +gr_search_connectbind(const int full_mode, struct sock *sk,
55581 + struct sockaddr_in *addr, const int type)
55582 +{
55583 + char iface[IFNAMSIZ] = {0};
55584 + struct acl_subject_label *curr;
55585 + struct acl_ip_label *ip;
55586 + struct inet_sock *isk;
55587 + struct net_device *dev;
55588 + struct in_device *idev;
55589 + unsigned long i;
55590 + int ret;
55591 + int mode = full_mode & (GR_BIND | GR_CONNECT);
55592 + __u32 ip_addr = 0;
55593 + __u32 our_addr;
55594 + __u32 our_netmask;
55595 + char *p;
55596 + __u16 ip_port = 0;
55597 + const struct cred *cred = current_cred();
55598 +
55599 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55600 + return 0;
55601 +
55602 + curr = current->acl;
55603 + isk = inet_sk(sk);
55604 +
55605 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55606 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55607 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55608 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55609 + struct sockaddr_in saddr;
55610 + int err;
55611 +
55612 + saddr.sin_family = AF_INET;
55613 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55614 + saddr.sin_port = isk->inet_sport;
55615 +
55616 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55617 + if (err)
55618 + return err;
55619 +
55620 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55621 + if (err)
55622 + return err;
55623 + }
55624 +
55625 + if (!curr->ips)
55626 + return 0;
55627 +
55628 + ip_addr = addr->sin_addr.s_addr;
55629 + ip_port = ntohs(addr->sin_port);
55630 +
55631 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55632 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55633 + current->role->roletype, cred->uid,
55634 + cred->gid, current->exec_file ?
55635 + gr_to_filename(current->exec_file->f_path.dentry,
55636 + current->exec_file->f_path.mnt) :
55637 + curr->filename, curr->filename,
55638 + &ip_addr, ip_port, type,
55639 + sk->sk_protocol, mode, &current->signal->saved_ip);
55640 + return 0;
55641 + }
55642 +
55643 + for (i = 0; i < curr->ip_num; i++) {
55644 + ip = *(curr->ips + i);
55645 + if (ip->iface != NULL) {
55646 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55647 + p = strchr(iface, ':');
55648 + if (p != NULL)
55649 + *p = '\0';
55650 + dev = dev_get_by_name(sock_net(sk), iface);
55651 + if (dev == NULL)
55652 + continue;
55653 + idev = in_dev_get(dev);
55654 + if (idev == NULL) {
55655 + dev_put(dev);
55656 + continue;
55657 + }
55658 + rcu_read_lock();
55659 + for_ifa(idev) {
55660 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55661 + our_addr = ifa->ifa_address;
55662 + our_netmask = 0xffffffff;
55663 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55664 + if (ret == 1) {
55665 + rcu_read_unlock();
55666 + in_dev_put(idev);
55667 + dev_put(dev);
55668 + return 0;
55669 + } else if (ret == 2) {
55670 + rcu_read_unlock();
55671 + in_dev_put(idev);
55672 + dev_put(dev);
55673 + goto denied;
55674 + }
55675 + }
55676 + } endfor_ifa(idev);
55677 + rcu_read_unlock();
55678 + in_dev_put(idev);
55679 + dev_put(dev);
55680 + } else {
55681 + our_addr = ip->addr;
55682 + our_netmask = ip->netmask;
55683 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55684 + if (ret == 1)
55685 + return 0;
55686 + else if (ret == 2)
55687 + goto denied;
55688 + }
55689 + }
55690 +
55691 +denied:
55692 + if (mode == GR_BIND)
55693 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55694 + else if (mode == GR_CONNECT)
55695 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55696 +
55697 + return -EACCES;
55698 +}
55699 +
55700 +int
55701 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55702 +{
55703 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55704 +}
55705 +
55706 +int
55707 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55708 +{
55709 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55710 +}
55711 +
55712 +int gr_search_listen(struct socket *sock)
55713 +{
55714 + struct sock *sk = sock->sk;
55715 + struct sockaddr_in addr;
55716 +
55717 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55718 + addr.sin_port = inet_sk(sk)->inet_sport;
55719 +
55720 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55721 +}
55722 +
55723 +int gr_search_accept(struct socket *sock)
55724 +{
55725 + struct sock *sk = sock->sk;
55726 + struct sockaddr_in addr;
55727 +
55728 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55729 + addr.sin_port = inet_sk(sk)->inet_sport;
55730 +
55731 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55732 +}
55733 +
55734 +int
55735 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55736 +{
55737 + if (addr)
55738 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55739 + else {
55740 + struct sockaddr_in sin;
55741 + const struct inet_sock *inet = inet_sk(sk);
55742 +
55743 + sin.sin_addr.s_addr = inet->inet_daddr;
55744 + sin.sin_port = inet->inet_dport;
55745 +
55746 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55747 + }
55748 +}
55749 +
55750 +int
55751 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55752 +{
55753 + struct sockaddr_in sin;
55754 +
55755 + if (unlikely(skb->len < sizeof (struct udphdr)))
55756 + return 0; // skip this packet
55757 +
55758 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55759 + sin.sin_port = udp_hdr(skb)->source;
55760 +
55761 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55762 +}
55763 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55764 new file mode 100644
55765 index 0000000..25f54ef
55766 --- /dev/null
55767 +++ b/grsecurity/gracl_learn.c
55768 @@ -0,0 +1,207 @@
55769 +#include <linux/kernel.h>
55770 +#include <linux/mm.h>
55771 +#include <linux/sched.h>
55772 +#include <linux/poll.h>
55773 +#include <linux/string.h>
55774 +#include <linux/file.h>
55775 +#include <linux/types.h>
55776 +#include <linux/vmalloc.h>
55777 +#include <linux/grinternal.h>
55778 +
55779 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55780 + size_t count, loff_t *ppos);
55781 +extern int gr_acl_is_enabled(void);
55782 +
55783 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55784 +static int gr_learn_attached;
55785 +
55786 +/* use a 512k buffer */
55787 +#define LEARN_BUFFER_SIZE (512 * 1024)
55788 +
55789 +static DEFINE_SPINLOCK(gr_learn_lock);
55790 +static DEFINE_MUTEX(gr_learn_user_mutex);
55791 +
55792 +/* we need to maintain two buffers, so that the kernel context of grlearn
55793 + uses a semaphore around the userspace copying, and the other kernel contexts
55794 + use a spinlock when copying into the buffer, since they cannot sleep
55795 +*/
55796 +static char *learn_buffer;
55797 +static char *learn_buffer_user;
55798 +static int learn_buffer_len;
55799 +static int learn_buffer_user_len;
55800 +
55801 +static ssize_t
55802 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55803 +{
55804 + DECLARE_WAITQUEUE(wait, current);
55805 + ssize_t retval = 0;
55806 +
55807 + add_wait_queue(&learn_wait, &wait);
55808 + set_current_state(TASK_INTERRUPTIBLE);
55809 + do {
55810 + mutex_lock(&gr_learn_user_mutex);
55811 + spin_lock(&gr_learn_lock);
55812 + if (learn_buffer_len)
55813 + break;
55814 + spin_unlock(&gr_learn_lock);
55815 + mutex_unlock(&gr_learn_user_mutex);
55816 + if (file->f_flags & O_NONBLOCK) {
55817 + retval = -EAGAIN;
55818 + goto out;
55819 + }
55820 + if (signal_pending(current)) {
55821 + retval = -ERESTARTSYS;
55822 + goto out;
55823 + }
55824 +
55825 + schedule();
55826 + } while (1);
55827 +
55828 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55829 + learn_buffer_user_len = learn_buffer_len;
55830 + retval = learn_buffer_len;
55831 + learn_buffer_len = 0;
55832 +
55833 + spin_unlock(&gr_learn_lock);
55834 +
55835 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55836 + retval = -EFAULT;
55837 +
55838 + mutex_unlock(&gr_learn_user_mutex);
55839 +out:
55840 + set_current_state(TASK_RUNNING);
55841 + remove_wait_queue(&learn_wait, &wait);
55842 + return retval;
55843 +}
55844 +
55845 +static unsigned int
55846 +poll_learn(struct file * file, poll_table * wait)
55847 +{
55848 + poll_wait(file, &learn_wait, wait);
55849 +
55850 + if (learn_buffer_len)
55851 + return (POLLIN | POLLRDNORM);
55852 +
55853 + return 0;
55854 +}
55855 +
55856 +void
55857 +gr_clear_learn_entries(void)
55858 +{
55859 + char *tmp;
55860 +
55861 + mutex_lock(&gr_learn_user_mutex);
55862 + spin_lock(&gr_learn_lock);
55863 + tmp = learn_buffer;
55864 + learn_buffer = NULL;
55865 + spin_unlock(&gr_learn_lock);
55866 + if (tmp)
55867 + vfree(tmp);
55868 + if (learn_buffer_user != NULL) {
55869 + vfree(learn_buffer_user);
55870 + learn_buffer_user = NULL;
55871 + }
55872 + learn_buffer_len = 0;
55873 + mutex_unlock(&gr_learn_user_mutex);
55874 +
55875 + return;
55876 +}
55877 +
55878 +void
55879 +gr_add_learn_entry(const char *fmt, ...)
55880 +{
55881 + va_list args;
55882 + unsigned int len;
55883 +
55884 + if (!gr_learn_attached)
55885 + return;
55886 +
55887 + spin_lock(&gr_learn_lock);
55888 +
55889 + /* leave a gap at the end so we know when it's "full" but don't have to
55890 + compute the exact length of the string we're trying to append
55891 + */
55892 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55893 + spin_unlock(&gr_learn_lock);
55894 + wake_up_interruptible(&learn_wait);
55895 + return;
55896 + }
55897 + if (learn_buffer == NULL) {
55898 + spin_unlock(&gr_learn_lock);
55899 + return;
55900 + }
55901 +
55902 + va_start(args, fmt);
55903 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55904 + va_end(args);
55905 +
55906 + learn_buffer_len += len + 1;
55907 +
55908 + spin_unlock(&gr_learn_lock);
55909 + wake_up_interruptible(&learn_wait);
55910 +
55911 + return;
55912 +}
55913 +
55914 +static int
55915 +open_learn(struct inode *inode, struct file *file)
55916 +{
55917 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55918 + return -EBUSY;
55919 + if (file->f_mode & FMODE_READ) {
55920 + int retval = 0;
55921 + mutex_lock(&gr_learn_user_mutex);
55922 + if (learn_buffer == NULL)
55923 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55924 + if (learn_buffer_user == NULL)
55925 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55926 + if (learn_buffer == NULL) {
55927 + retval = -ENOMEM;
55928 + goto out_error;
55929 + }
55930 + if (learn_buffer_user == NULL) {
55931 + retval = -ENOMEM;
55932 + goto out_error;
55933 + }
55934 + learn_buffer_len = 0;
55935 + learn_buffer_user_len = 0;
55936 + gr_learn_attached = 1;
55937 +out_error:
55938 + mutex_unlock(&gr_learn_user_mutex);
55939 + return retval;
55940 + }
55941 + return 0;
55942 +}
55943 +
55944 +static int
55945 +close_learn(struct inode *inode, struct file *file)
55946 +{
55947 + if (file->f_mode & FMODE_READ) {
55948 + char *tmp = NULL;
55949 + mutex_lock(&gr_learn_user_mutex);
55950 + spin_lock(&gr_learn_lock);
55951 + tmp = learn_buffer;
55952 + learn_buffer = NULL;
55953 + spin_unlock(&gr_learn_lock);
55954 + if (tmp)
55955 + vfree(tmp);
55956 + if (learn_buffer_user != NULL) {
55957 + vfree(learn_buffer_user);
55958 + learn_buffer_user = NULL;
55959 + }
55960 + learn_buffer_len = 0;
55961 + learn_buffer_user_len = 0;
55962 + gr_learn_attached = 0;
55963 + mutex_unlock(&gr_learn_user_mutex);
55964 + }
55965 +
55966 + return 0;
55967 +}
55968 +
55969 +const struct file_operations grsec_fops = {
55970 + .read = read_learn,
55971 + .write = write_grsec_handler,
55972 + .open = open_learn,
55973 + .release = close_learn,
55974 + .poll = poll_learn,
55975 +};
55976 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55977 new file mode 100644
55978 index 0000000..39645c9
55979 --- /dev/null
55980 +++ b/grsecurity/gracl_res.c
55981 @@ -0,0 +1,68 @@
55982 +#include <linux/kernel.h>
55983 +#include <linux/sched.h>
55984 +#include <linux/gracl.h>
55985 +#include <linux/grinternal.h>
55986 +
55987 +static const char *restab_log[] = {
55988 + [RLIMIT_CPU] = "RLIMIT_CPU",
55989 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55990 + [RLIMIT_DATA] = "RLIMIT_DATA",
55991 + [RLIMIT_STACK] = "RLIMIT_STACK",
55992 + [RLIMIT_CORE] = "RLIMIT_CORE",
55993 + [RLIMIT_RSS] = "RLIMIT_RSS",
55994 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55995 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55996 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55997 + [RLIMIT_AS] = "RLIMIT_AS",
55998 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55999 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56000 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56001 + [RLIMIT_NICE] = "RLIMIT_NICE",
56002 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56003 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56004 + [GR_CRASH_RES] = "RLIMIT_CRASH"
56005 +};
56006 +
56007 +void
56008 +gr_log_resource(const struct task_struct *task,
56009 + const int res, const unsigned long wanted, const int gt)
56010 +{
56011 + const struct cred *cred;
56012 + unsigned long rlim;
56013 +
56014 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
56015 + return;
56016 +
56017 + // not yet supported resource
56018 + if (unlikely(!restab_log[res]))
56019 + return;
56020 +
56021 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56022 + rlim = task_rlimit_max(task, res);
56023 + else
56024 + rlim = task_rlimit(task, res);
56025 +
56026 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56027 + return;
56028 +
56029 + rcu_read_lock();
56030 + cred = __task_cred(task);
56031 +
56032 + if (res == RLIMIT_NPROC &&
56033 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56034 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56035 + goto out_rcu_unlock;
56036 + else if (res == RLIMIT_MEMLOCK &&
56037 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56038 + goto out_rcu_unlock;
56039 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56040 + goto out_rcu_unlock;
56041 + rcu_read_unlock();
56042 +
56043 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56044 +
56045 + return;
56046 +out_rcu_unlock:
56047 + rcu_read_unlock();
56048 + return;
56049 +}
56050 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56051 new file mode 100644
56052 index 0000000..5556be3
56053 --- /dev/null
56054 +++ b/grsecurity/gracl_segv.c
56055 @@ -0,0 +1,299 @@
56056 +#include <linux/kernel.h>
56057 +#include <linux/mm.h>
56058 +#include <asm/uaccess.h>
56059 +#include <asm/errno.h>
56060 +#include <asm/mman.h>
56061 +#include <net/sock.h>
56062 +#include <linux/file.h>
56063 +#include <linux/fs.h>
56064 +#include <linux/net.h>
56065 +#include <linux/in.h>
56066 +#include <linux/slab.h>
56067 +#include <linux/types.h>
56068 +#include <linux/sched.h>
56069 +#include <linux/timer.h>
56070 +#include <linux/gracl.h>
56071 +#include <linux/grsecurity.h>
56072 +#include <linux/grinternal.h>
56073 +
56074 +static struct crash_uid *uid_set;
56075 +static unsigned short uid_used;
56076 +static DEFINE_SPINLOCK(gr_uid_lock);
56077 +extern rwlock_t gr_inode_lock;
56078 +extern struct acl_subject_label *
56079 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56080 + struct acl_role_label *role);
56081 +
56082 +#ifdef CONFIG_BTRFS_FS
56083 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56084 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56085 +#endif
56086 +
56087 +static inline dev_t __get_dev(const struct dentry *dentry)
56088 +{
56089 +#ifdef CONFIG_BTRFS_FS
56090 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56091 + return get_btrfs_dev_from_inode(dentry->d_inode);
56092 + else
56093 +#endif
56094 + return dentry->d_inode->i_sb->s_dev;
56095 +}
56096 +
56097 +int
56098 +gr_init_uidset(void)
56099 +{
56100 + uid_set =
56101 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56102 + uid_used = 0;
56103 +
56104 + return uid_set ? 1 : 0;
56105 +}
56106 +
56107 +void
56108 +gr_free_uidset(void)
56109 +{
56110 + if (uid_set)
56111 + kfree(uid_set);
56112 +
56113 + return;
56114 +}
56115 +
56116 +int
56117 +gr_find_uid(const uid_t uid)
56118 +{
56119 + struct crash_uid *tmp = uid_set;
56120 + uid_t buid;
56121 + int low = 0, high = uid_used - 1, mid;
56122 +
56123 + while (high >= low) {
56124 + mid = (low + high) >> 1;
56125 + buid = tmp[mid].uid;
56126 + if (buid == uid)
56127 + return mid;
56128 + if (buid > uid)
56129 + high = mid - 1;
56130 + if (buid < uid)
56131 + low = mid + 1;
56132 + }
56133 +
56134 + return -1;
56135 +}
56136 +
56137 +static __inline__ void
56138 +gr_insertsort(void)
56139 +{
56140 + unsigned short i, j;
56141 + struct crash_uid index;
56142 +
56143 + for (i = 1; i < uid_used; i++) {
56144 + index = uid_set[i];
56145 + j = i;
56146 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56147 + uid_set[j] = uid_set[j - 1];
56148 + j--;
56149 + }
56150 + uid_set[j] = index;
56151 + }
56152 +
56153 + return;
56154 +}
56155 +
56156 +static __inline__ void
56157 +gr_insert_uid(const uid_t uid, const unsigned long expires)
56158 +{
56159 + int loc;
56160 +
56161 + if (uid_used == GR_UIDTABLE_MAX)
56162 + return;
56163 +
56164 + loc = gr_find_uid(uid);
56165 +
56166 + if (loc >= 0) {
56167 + uid_set[loc].expires = expires;
56168 + return;
56169 + }
56170 +
56171 + uid_set[uid_used].uid = uid;
56172 + uid_set[uid_used].expires = expires;
56173 + uid_used++;
56174 +
56175 + gr_insertsort();
56176 +
56177 + return;
56178 +}
56179 +
56180 +void
56181 +gr_remove_uid(const unsigned short loc)
56182 +{
56183 + unsigned short i;
56184 +
56185 + for (i = loc + 1; i < uid_used; i++)
56186 + uid_set[i - 1] = uid_set[i];
56187 +
56188 + uid_used--;
56189 +
56190 + return;
56191 +}
56192 +
56193 +int
56194 +gr_check_crash_uid(const uid_t uid)
56195 +{
56196 + int loc;
56197 + int ret = 0;
56198 +
56199 + if (unlikely(!gr_acl_is_enabled()))
56200 + return 0;
56201 +
56202 + spin_lock(&gr_uid_lock);
56203 + loc = gr_find_uid(uid);
56204 +
56205 + if (loc < 0)
56206 + goto out_unlock;
56207 +
56208 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
56209 + gr_remove_uid(loc);
56210 + else
56211 + ret = 1;
56212 +
56213 +out_unlock:
56214 + spin_unlock(&gr_uid_lock);
56215 + return ret;
56216 +}
56217 +
56218 +static __inline__ int
56219 +proc_is_setxid(const struct cred *cred)
56220 +{
56221 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
56222 + cred->uid != cred->fsuid)
56223 + return 1;
56224 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56225 + cred->gid != cred->fsgid)
56226 + return 1;
56227 +
56228 + return 0;
56229 +}
56230 +
56231 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
56232 +
56233 +void
56234 +gr_handle_crash(struct task_struct *task, const int sig)
56235 +{
56236 + struct acl_subject_label *curr;
56237 + struct task_struct *tsk, *tsk2;
56238 + const struct cred *cred;
56239 + const struct cred *cred2;
56240 +
56241 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56242 + return;
56243 +
56244 + if (unlikely(!gr_acl_is_enabled()))
56245 + return;
56246 +
56247 + curr = task->acl;
56248 +
56249 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
56250 + return;
56251 +
56252 + if (time_before_eq(curr->expires, get_seconds())) {
56253 + curr->expires = 0;
56254 + curr->crashes = 0;
56255 + }
56256 +
56257 + curr->crashes++;
56258 +
56259 + if (!curr->expires)
56260 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56261 +
56262 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56263 + time_after(curr->expires, get_seconds())) {
56264 + rcu_read_lock();
56265 + cred = __task_cred(task);
56266 + if (cred->uid && proc_is_setxid(cred)) {
56267 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56268 + spin_lock(&gr_uid_lock);
56269 + gr_insert_uid(cred->uid, curr->expires);
56270 + spin_unlock(&gr_uid_lock);
56271 + curr->expires = 0;
56272 + curr->crashes = 0;
56273 + read_lock(&tasklist_lock);
56274 + do_each_thread(tsk2, tsk) {
56275 + cred2 = __task_cred(tsk);
56276 + if (tsk != task && cred2->uid == cred->uid)
56277 + gr_fake_force_sig(SIGKILL, tsk);
56278 + } while_each_thread(tsk2, tsk);
56279 + read_unlock(&tasklist_lock);
56280 + } else {
56281 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56282 + read_lock(&tasklist_lock);
56283 + read_lock(&grsec_exec_file_lock);
56284 + do_each_thread(tsk2, tsk) {
56285 + if (likely(tsk != task)) {
56286 + // if this thread has the same subject as the one that triggered
56287 + // RES_CRASH and it's the same binary, kill it
56288 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56289 + gr_fake_force_sig(SIGKILL, tsk);
56290 + }
56291 + } while_each_thread(tsk2, tsk);
56292 + read_unlock(&grsec_exec_file_lock);
56293 + read_unlock(&tasklist_lock);
56294 + }
56295 + rcu_read_unlock();
56296 + }
56297 +
56298 + return;
56299 +}
56300 +
56301 +int
56302 +gr_check_crash_exec(const struct file *filp)
56303 +{
56304 + struct acl_subject_label *curr;
56305 +
56306 + if (unlikely(!gr_acl_is_enabled()))
56307 + return 0;
56308 +
56309 + read_lock(&gr_inode_lock);
56310 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56311 + __get_dev(filp->f_path.dentry),
56312 + current->role);
56313 + read_unlock(&gr_inode_lock);
56314 +
56315 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56316 + (!curr->crashes && !curr->expires))
56317 + return 0;
56318 +
56319 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56320 + time_after(curr->expires, get_seconds()))
56321 + return 1;
56322 + else if (time_before_eq(curr->expires, get_seconds())) {
56323 + curr->crashes = 0;
56324 + curr->expires = 0;
56325 + }
56326 +
56327 + return 0;
56328 +}
56329 +
56330 +void
56331 +gr_handle_alertkill(struct task_struct *task)
56332 +{
56333 + struct acl_subject_label *curracl;
56334 + __u32 curr_ip;
56335 + struct task_struct *p, *p2;
56336 +
56337 + if (unlikely(!gr_acl_is_enabled()))
56338 + return;
56339 +
56340 + curracl = task->acl;
56341 + curr_ip = task->signal->curr_ip;
56342 +
56343 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56344 + read_lock(&tasklist_lock);
56345 + do_each_thread(p2, p) {
56346 + if (p->signal->curr_ip == curr_ip)
56347 + gr_fake_force_sig(SIGKILL, p);
56348 + } while_each_thread(p2, p);
56349 + read_unlock(&tasklist_lock);
56350 + } else if (curracl->mode & GR_KILLPROC)
56351 + gr_fake_force_sig(SIGKILL, task);
56352 +
56353 + return;
56354 +}
56355 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56356 new file mode 100644
56357 index 0000000..9d83a69
56358 --- /dev/null
56359 +++ b/grsecurity/gracl_shm.c
56360 @@ -0,0 +1,40 @@
56361 +#include <linux/kernel.h>
56362 +#include <linux/mm.h>
56363 +#include <linux/sched.h>
56364 +#include <linux/file.h>
56365 +#include <linux/ipc.h>
56366 +#include <linux/gracl.h>
56367 +#include <linux/grsecurity.h>
56368 +#include <linux/grinternal.h>
56369 +
56370 +int
56371 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56372 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56373 +{
56374 + struct task_struct *task;
56375 +
56376 + if (!gr_acl_is_enabled())
56377 + return 1;
56378 +
56379 + rcu_read_lock();
56380 + read_lock(&tasklist_lock);
56381 +
56382 + task = find_task_by_vpid(shm_cprid);
56383 +
56384 + if (unlikely(!task))
56385 + task = find_task_by_vpid(shm_lapid);
56386 +
56387 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56388 + (task->pid == shm_lapid)) &&
56389 + (task->acl->mode & GR_PROTSHM) &&
56390 + (task->acl != current->acl))) {
56391 + read_unlock(&tasklist_lock);
56392 + rcu_read_unlock();
56393 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56394 + return 0;
56395 + }
56396 + read_unlock(&tasklist_lock);
56397 + rcu_read_unlock();
56398 +
56399 + return 1;
56400 +}
56401 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56402 new file mode 100644
56403 index 0000000..bc0be01
56404 --- /dev/null
56405 +++ b/grsecurity/grsec_chdir.c
56406 @@ -0,0 +1,19 @@
56407 +#include <linux/kernel.h>
56408 +#include <linux/sched.h>
56409 +#include <linux/fs.h>
56410 +#include <linux/file.h>
56411 +#include <linux/grsecurity.h>
56412 +#include <linux/grinternal.h>
56413 +
56414 +void
56415 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56416 +{
56417 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56418 + if ((grsec_enable_chdir && grsec_enable_group &&
56419 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56420 + !grsec_enable_group)) {
56421 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56422 + }
56423 +#endif
56424 + return;
56425 +}
56426 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56427 new file mode 100644
56428 index 0000000..a2dc675
56429 --- /dev/null
56430 +++ b/grsecurity/grsec_chroot.c
56431 @@ -0,0 +1,351 @@
56432 +#include <linux/kernel.h>
56433 +#include <linux/module.h>
56434 +#include <linux/sched.h>
56435 +#include <linux/file.h>
56436 +#include <linux/fs.h>
56437 +#include <linux/mount.h>
56438 +#include <linux/types.h>
56439 +#include <linux/pid_namespace.h>
56440 +#include <linux/grsecurity.h>
56441 +#include <linux/grinternal.h>
56442 +
56443 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56444 +{
56445 +#ifdef CONFIG_GRKERNSEC
56446 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56447 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
56448 + task->gr_is_chrooted = 1;
56449 + else
56450 + task->gr_is_chrooted = 0;
56451 +
56452 + task->gr_chroot_dentry = path->dentry;
56453 +#endif
56454 + return;
56455 +}
56456 +
56457 +void gr_clear_chroot_entries(struct task_struct *task)
56458 +{
56459 +#ifdef CONFIG_GRKERNSEC
56460 + task->gr_is_chrooted = 0;
56461 + task->gr_chroot_dentry = NULL;
56462 +#endif
56463 + return;
56464 +}
56465 +
56466 +int
56467 +gr_handle_chroot_unix(const pid_t pid)
56468 +{
56469 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56470 + struct task_struct *p;
56471 +
56472 + if (unlikely(!grsec_enable_chroot_unix))
56473 + return 1;
56474 +
56475 + if (likely(!proc_is_chrooted(current)))
56476 + return 1;
56477 +
56478 + rcu_read_lock();
56479 + read_lock(&tasklist_lock);
56480 + p = find_task_by_vpid_unrestricted(pid);
56481 + if (unlikely(p && !have_same_root(current, p))) {
56482 + read_unlock(&tasklist_lock);
56483 + rcu_read_unlock();
56484 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56485 + return 0;
56486 + }
56487 + read_unlock(&tasklist_lock);
56488 + rcu_read_unlock();
56489 +#endif
56490 + return 1;
56491 +}
56492 +
56493 +int
56494 +gr_handle_chroot_nice(void)
56495 +{
56496 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56497 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56498 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56499 + return -EPERM;
56500 + }
56501 +#endif
56502 + return 0;
56503 +}
56504 +
56505 +int
56506 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56507 +{
56508 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56509 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56510 + && proc_is_chrooted(current)) {
56511 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56512 + return -EACCES;
56513 + }
56514 +#endif
56515 + return 0;
56516 +}
56517 +
56518 +int
56519 +gr_handle_chroot_rawio(const struct inode *inode)
56520 +{
56521 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56522 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56523 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56524 + return 1;
56525 +#endif
56526 + return 0;
56527 +}
56528 +
56529 +int
56530 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56531 +{
56532 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56533 + struct task_struct *p;
56534 + int ret = 0;
56535 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56536 + return ret;
56537 +
56538 + read_lock(&tasklist_lock);
56539 + do_each_pid_task(pid, type, p) {
56540 + if (!have_same_root(current, p)) {
56541 + ret = 1;
56542 + goto out;
56543 + }
56544 + } while_each_pid_task(pid, type, p);
56545 +out:
56546 + read_unlock(&tasklist_lock);
56547 + return ret;
56548 +#endif
56549 + return 0;
56550 +}
56551 +
56552 +int
56553 +gr_pid_is_chrooted(struct task_struct *p)
56554 +{
56555 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56556 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56557 + return 0;
56558 +
56559 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56560 + !have_same_root(current, p)) {
56561 + return 1;
56562 + }
56563 +#endif
56564 + return 0;
56565 +}
56566 +
56567 +EXPORT_SYMBOL(gr_pid_is_chrooted);
56568 +
56569 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56570 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56571 +{
56572 + struct path path, currentroot;
56573 + int ret = 0;
56574 +
56575 + path.dentry = (struct dentry *)u_dentry;
56576 + path.mnt = (struct vfsmount *)u_mnt;
56577 + get_fs_root(current->fs, &currentroot);
56578 + if (path_is_under(&path, &currentroot))
56579 + ret = 1;
56580 + path_put(&currentroot);
56581 +
56582 + return ret;
56583 +}
56584 +#endif
56585 +
56586 +int
56587 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56588 +{
56589 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56590 + if (!grsec_enable_chroot_fchdir)
56591 + return 1;
56592 +
56593 + if (!proc_is_chrooted(current))
56594 + return 1;
56595 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56596 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56597 + return 0;
56598 + }
56599 +#endif
56600 + return 1;
56601 +}
56602 +
56603 +int
56604 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56605 + const time_t shm_createtime)
56606 +{
56607 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56608 + struct task_struct *p;
56609 + time_t starttime;
56610 +
56611 + if (unlikely(!grsec_enable_chroot_shmat))
56612 + return 1;
56613 +
56614 + if (likely(!proc_is_chrooted(current)))
56615 + return 1;
56616 +
56617 + rcu_read_lock();
56618 + read_lock(&tasklist_lock);
56619 +
56620 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56621 + starttime = p->start_time.tv_sec;
56622 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56623 + if (have_same_root(current, p)) {
56624 + goto allow;
56625 + } else {
56626 + read_unlock(&tasklist_lock);
56627 + rcu_read_unlock();
56628 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56629 + return 0;
56630 + }
56631 + }
56632 + /* creator exited, pid reuse, fall through to next check */
56633 + }
56634 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56635 + if (unlikely(!have_same_root(current, p))) {
56636 + read_unlock(&tasklist_lock);
56637 + rcu_read_unlock();
56638 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56639 + return 0;
56640 + }
56641 + }
56642 +
56643 +allow:
56644 + read_unlock(&tasklist_lock);
56645 + rcu_read_unlock();
56646 +#endif
56647 + return 1;
56648 +}
56649 +
56650 +void
56651 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56652 +{
56653 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56654 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56655 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56656 +#endif
56657 + return;
56658 +}
56659 +
56660 +int
56661 +gr_handle_chroot_mknod(const struct dentry *dentry,
56662 + const struct vfsmount *mnt, const int mode)
56663 +{
56664 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56665 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56666 + proc_is_chrooted(current)) {
56667 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56668 + return -EPERM;
56669 + }
56670 +#endif
56671 + return 0;
56672 +}
56673 +
56674 +int
56675 +gr_handle_chroot_mount(const struct dentry *dentry,
56676 + const struct vfsmount *mnt, const char *dev_name)
56677 +{
56678 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56679 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56680 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56681 + return -EPERM;
56682 + }
56683 +#endif
56684 + return 0;
56685 +}
56686 +
56687 +int
56688 +gr_handle_chroot_pivot(void)
56689 +{
56690 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56691 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56692 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56693 + return -EPERM;
56694 + }
56695 +#endif
56696 + return 0;
56697 +}
56698 +
56699 +int
56700 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56701 +{
56702 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56703 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56704 + !gr_is_outside_chroot(dentry, mnt)) {
56705 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56706 + return -EPERM;
56707 + }
56708 +#endif
56709 + return 0;
56710 +}
56711 +
56712 +extern const char *captab_log[];
56713 +extern int captab_log_entries;
56714 +
56715 +int
56716 +gr_chroot_is_capable(const int cap)
56717 +{
56718 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56719 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56720 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56721 + if (cap_raised(chroot_caps, cap)) {
56722 + const struct cred *creds = current_cred();
56723 + if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
56724 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
56725 + }
56726 + return 0;
56727 + }
56728 + }
56729 +#endif
56730 + return 1;
56731 +}
56732 +
56733 +int
56734 +gr_chroot_is_capable_nolog(const int cap)
56735 +{
56736 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56737 + if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
56738 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56739 + if (cap_raised(chroot_caps, cap)) {
56740 + return 0;
56741 + }
56742 + }
56743 +#endif
56744 + return 1;
56745 +}
56746 +
56747 +int
56748 +gr_handle_chroot_sysctl(const int op)
56749 +{
56750 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56751 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56752 + proc_is_chrooted(current))
56753 + return -EACCES;
56754 +#endif
56755 + return 0;
56756 +}
56757 +
56758 +void
56759 +gr_handle_chroot_chdir(struct path *path)
56760 +{
56761 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56762 + if (grsec_enable_chroot_chdir)
56763 + set_fs_pwd(current->fs, path);
56764 +#endif
56765 + return;
56766 +}
56767 +
56768 +int
56769 +gr_handle_chroot_chmod(const struct dentry *dentry,
56770 + const struct vfsmount *mnt, const int mode)
56771 +{
56772 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56773 + /* allow chmod +s on directories, but not files */
56774 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56775 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56776 + proc_is_chrooted(current)) {
56777 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56778 + return -EPERM;
56779 + }
56780 +#endif
56781 + return 0;
56782 +}
56783 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56784 new file mode 100644
56785 index 0000000..d81a586
56786 --- /dev/null
56787 +++ b/grsecurity/grsec_disabled.c
56788 @@ -0,0 +1,439 @@
56789 +#include <linux/kernel.h>
56790 +#include <linux/module.h>
56791 +#include <linux/sched.h>
56792 +#include <linux/file.h>
56793 +#include <linux/fs.h>
56794 +#include <linux/kdev_t.h>
56795 +#include <linux/net.h>
56796 +#include <linux/in.h>
56797 +#include <linux/ip.h>
56798 +#include <linux/skbuff.h>
56799 +#include <linux/sysctl.h>
56800 +
56801 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56802 +void
56803 +pax_set_initial_flags(struct linux_binprm *bprm)
56804 +{
56805 + return;
56806 +}
56807 +#endif
56808 +
56809 +#ifdef CONFIG_SYSCTL
56810 +__u32
56811 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56812 +{
56813 + return 0;
56814 +}
56815 +#endif
56816 +
56817 +#ifdef CONFIG_TASKSTATS
56818 +int gr_is_taskstats_denied(int pid)
56819 +{
56820 + return 0;
56821 +}
56822 +#endif
56823 +
56824 +int
56825 +gr_acl_is_enabled(void)
56826 +{
56827 + return 0;
56828 +}
56829 +
56830 +void
56831 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56832 +{
56833 + return;
56834 +}
56835 +
56836 +int
56837 +gr_handle_rawio(const struct inode *inode)
56838 +{
56839 + return 0;
56840 +}
56841 +
56842 +void
56843 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56844 +{
56845 + return;
56846 +}
56847 +
56848 +int
56849 +gr_handle_ptrace(struct task_struct *task, const long request)
56850 +{
56851 + return 0;
56852 +}
56853 +
56854 +int
56855 +gr_handle_proc_ptrace(struct task_struct *task)
56856 +{
56857 + return 0;
56858 +}
56859 +
56860 +void
56861 +gr_learn_resource(const struct task_struct *task,
56862 + const int res, const unsigned long wanted, const int gt)
56863 +{
56864 + return;
56865 +}
56866 +
56867 +int
56868 +gr_set_acls(const int type)
56869 +{
56870 + return 0;
56871 +}
56872 +
56873 +int
56874 +gr_check_hidden_task(const struct task_struct *tsk)
56875 +{
56876 + return 0;
56877 +}
56878 +
56879 +int
56880 +gr_check_protected_task(const struct task_struct *task)
56881 +{
56882 + return 0;
56883 +}
56884 +
56885 +int
56886 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56887 +{
56888 + return 0;
56889 +}
56890 +
56891 +void
56892 +gr_copy_label(struct task_struct *tsk)
56893 +{
56894 + return;
56895 +}
56896 +
56897 +void
56898 +gr_set_pax_flags(struct task_struct *task)
56899 +{
56900 + return;
56901 +}
56902 +
56903 +int
56904 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56905 + const int unsafe_share)
56906 +{
56907 + return 0;
56908 +}
56909 +
56910 +void
56911 +gr_handle_delete(const ino_t ino, const dev_t dev)
56912 +{
56913 + return;
56914 +}
56915 +
56916 +void
56917 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56918 +{
56919 + return;
56920 +}
56921 +
56922 +void
56923 +gr_handle_crash(struct task_struct *task, const int sig)
56924 +{
56925 + return;
56926 +}
56927 +
56928 +int
56929 +gr_check_crash_exec(const struct file *filp)
56930 +{
56931 + return 0;
56932 +}
56933 +
56934 +int
56935 +gr_check_crash_uid(const uid_t uid)
56936 +{
56937 + return 0;
56938 +}
56939 +
56940 +void
56941 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56942 + struct dentry *old_dentry,
56943 + struct dentry *new_dentry,
56944 + struct vfsmount *mnt, const __u8 replace)
56945 +{
56946 + return;
56947 +}
56948 +
56949 +int
56950 +gr_search_socket(const int family, const int type, const int protocol)
56951 +{
56952 + return 1;
56953 +}
56954 +
56955 +int
56956 +gr_search_connectbind(const int mode, const struct socket *sock,
56957 + const struct sockaddr_in *addr)
56958 +{
56959 + return 0;
56960 +}
56961 +
56962 +void
56963 +gr_handle_alertkill(struct task_struct *task)
56964 +{
56965 + return;
56966 +}
56967 +
56968 +__u32
56969 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56970 +{
56971 + return 1;
56972 +}
56973 +
56974 +__u32
56975 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56976 + const struct vfsmount * mnt)
56977 +{
56978 + return 1;
56979 +}
56980 +
56981 +__u32
56982 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56983 + int acc_mode)
56984 +{
56985 + return 1;
56986 +}
56987 +
56988 +__u32
56989 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56990 +{
56991 + return 1;
56992 +}
56993 +
56994 +__u32
56995 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56996 +{
56997 + return 1;
56998 +}
56999 +
57000 +int
57001 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57002 + unsigned int *vm_flags)
57003 +{
57004 + return 1;
57005 +}
57006 +
57007 +__u32
57008 +gr_acl_handle_truncate(const struct dentry * dentry,
57009 + const struct vfsmount * mnt)
57010 +{
57011 + return 1;
57012 +}
57013 +
57014 +__u32
57015 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57016 +{
57017 + return 1;
57018 +}
57019 +
57020 +__u32
57021 +gr_acl_handle_access(const struct dentry * dentry,
57022 + const struct vfsmount * mnt, const int fmode)
57023 +{
57024 + return 1;
57025 +}
57026 +
57027 +__u32
57028 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
57029 + mode_t mode)
57030 +{
57031 + return 1;
57032 +}
57033 +
57034 +__u32
57035 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57036 + mode_t mode)
57037 +{
57038 + return 1;
57039 +}
57040 +
57041 +__u32
57042 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57043 +{
57044 + return 1;
57045 +}
57046 +
57047 +__u32
57048 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57049 +{
57050 + return 1;
57051 +}
57052 +
57053 +void
57054 +grsecurity_init(void)
57055 +{
57056 + return;
57057 +}
57058 +
57059 +__u32
57060 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57061 + const struct dentry * parent_dentry,
57062 + const struct vfsmount * parent_mnt,
57063 + const int mode)
57064 +{
57065 + return 1;
57066 +}
57067 +
57068 +__u32
57069 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57070 + const struct dentry * parent_dentry,
57071 + const struct vfsmount * parent_mnt)
57072 +{
57073 + return 1;
57074 +}
57075 +
57076 +__u32
57077 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57078 + const struct dentry * parent_dentry,
57079 + const struct vfsmount * parent_mnt, const char *from)
57080 +{
57081 + return 1;
57082 +}
57083 +
57084 +__u32
57085 +gr_acl_handle_link(const struct dentry * new_dentry,
57086 + const struct dentry * parent_dentry,
57087 + const struct vfsmount * parent_mnt,
57088 + const struct dentry * old_dentry,
57089 + const struct vfsmount * old_mnt, const char *to)
57090 +{
57091 + return 1;
57092 +}
57093 +
57094 +int
57095 +gr_acl_handle_rename(const struct dentry *new_dentry,
57096 + const struct dentry *parent_dentry,
57097 + const struct vfsmount *parent_mnt,
57098 + const struct dentry *old_dentry,
57099 + const struct inode *old_parent_inode,
57100 + const struct vfsmount *old_mnt, const char *newname)
57101 +{
57102 + return 0;
57103 +}
57104 +
57105 +int
57106 +gr_acl_handle_filldir(const struct file *file, const char *name,
57107 + const int namelen, const ino_t ino)
57108 +{
57109 + return 1;
57110 +}
57111 +
57112 +int
57113 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57114 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57115 +{
57116 + return 1;
57117 +}
57118 +
57119 +int
57120 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57121 +{
57122 + return 0;
57123 +}
57124 +
57125 +int
57126 +gr_search_accept(const struct socket *sock)
57127 +{
57128 + return 0;
57129 +}
57130 +
57131 +int
57132 +gr_search_listen(const struct socket *sock)
57133 +{
57134 + return 0;
57135 +}
57136 +
57137 +int
57138 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57139 +{
57140 + return 0;
57141 +}
57142 +
57143 +__u32
57144 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57145 +{
57146 + return 1;
57147 +}
57148 +
57149 +__u32
57150 +gr_acl_handle_creat(const struct dentry * dentry,
57151 + const struct dentry * p_dentry,
57152 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57153 + const int imode)
57154 +{
57155 + return 1;
57156 +}
57157 +
57158 +void
57159 +gr_acl_handle_exit(void)
57160 +{
57161 + return;
57162 +}
57163 +
57164 +int
57165 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57166 +{
57167 + return 1;
57168 +}
57169 +
57170 +void
57171 +gr_set_role_label(const uid_t uid, const gid_t gid)
57172 +{
57173 + return;
57174 +}
57175 +
57176 +int
57177 +gr_acl_handle_procpidmem(const struct task_struct *task)
57178 +{
57179 + return 0;
57180 +}
57181 +
57182 +int
57183 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57184 +{
57185 + return 0;
57186 +}
57187 +
57188 +int
57189 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57190 +{
57191 + return 0;
57192 +}
57193 +
57194 +void
57195 +gr_set_kernel_label(struct task_struct *task)
57196 +{
57197 + return;
57198 +}
57199 +
57200 +int
57201 +gr_check_user_change(int real, int effective, int fs)
57202 +{
57203 + return 0;
57204 +}
57205 +
57206 +int
57207 +gr_check_group_change(int real, int effective, int fs)
57208 +{
57209 + return 0;
57210 +}
57211 +
57212 +int gr_acl_enable_at_secure(void)
57213 +{
57214 + return 0;
57215 +}
57216 +
57217 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57218 +{
57219 + return dentry->d_inode->i_sb->s_dev;
57220 +}
57221 +
57222 +EXPORT_SYMBOL(gr_learn_resource);
57223 +EXPORT_SYMBOL(gr_set_kernel_label);
57224 +#ifdef CONFIG_SECURITY
57225 +EXPORT_SYMBOL(gr_check_user_change);
57226 +EXPORT_SYMBOL(gr_check_group_change);
57227 +#endif
57228 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57229 new file mode 100644
57230 index 0000000..2b05ada
57231 --- /dev/null
57232 +++ b/grsecurity/grsec_exec.c
57233 @@ -0,0 +1,146 @@
57234 +#include <linux/kernel.h>
57235 +#include <linux/sched.h>
57236 +#include <linux/file.h>
57237 +#include <linux/binfmts.h>
57238 +#include <linux/fs.h>
57239 +#include <linux/types.h>
57240 +#include <linux/grdefs.h>
57241 +#include <linux/grsecurity.h>
57242 +#include <linux/grinternal.h>
57243 +#include <linux/capability.h>
57244 +#include <linux/module.h>
57245 +
57246 +#include <asm/uaccess.h>
57247 +
57248 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57249 +static char gr_exec_arg_buf[132];
57250 +static DEFINE_MUTEX(gr_exec_arg_mutex);
57251 +#endif
57252 +
57253 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57254 +
57255 +void
57256 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57257 +{
57258 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57259 + char *grarg = gr_exec_arg_buf;
57260 + unsigned int i, x, execlen = 0;
57261 + char c;
57262 +
57263 + if (!((grsec_enable_execlog && grsec_enable_group &&
57264 + in_group_p(grsec_audit_gid))
57265 + || (grsec_enable_execlog && !grsec_enable_group)))
57266 + return;
57267 +
57268 + mutex_lock(&gr_exec_arg_mutex);
57269 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57270 +
57271 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57272 + const char __user *p;
57273 + unsigned int len;
57274 +
57275 + p = get_user_arg_ptr(argv, i);
57276 + if (IS_ERR(p))
57277 + goto log;
57278 +
57279 + len = strnlen_user(p, 128 - execlen);
57280 + if (len > 128 - execlen)
57281 + len = 128 - execlen;
57282 + else if (len > 0)
57283 + len--;
57284 + if (copy_from_user(grarg + execlen, p, len))
57285 + goto log;
57286 +
57287 + /* rewrite unprintable characters */
57288 + for (x = 0; x < len; x++) {
57289 + c = *(grarg + execlen + x);
57290 + if (c < 32 || c > 126)
57291 + *(grarg + execlen + x) = ' ';
57292 + }
57293 +
57294 + execlen += len;
57295 + *(grarg + execlen) = ' ';
57296 + *(grarg + execlen + 1) = '\0';
57297 + execlen++;
57298 + }
57299 +
57300 + log:
57301 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57302 + bprm->file->f_path.mnt, grarg);
57303 + mutex_unlock(&gr_exec_arg_mutex);
57304 +#endif
57305 + return;
57306 +}
57307 +
57308 +#ifdef CONFIG_GRKERNSEC
57309 +extern int gr_acl_is_capable(const int cap);
57310 +extern int gr_acl_is_capable_nolog(const int cap);
57311 +extern int gr_chroot_is_capable(const int cap);
57312 +extern int gr_chroot_is_capable_nolog(const int cap);
57313 +#endif
57314 +
57315 +const char *captab_log[] = {
57316 + "CAP_CHOWN",
57317 + "CAP_DAC_OVERRIDE",
57318 + "CAP_DAC_READ_SEARCH",
57319 + "CAP_FOWNER",
57320 + "CAP_FSETID",
57321 + "CAP_KILL",
57322 + "CAP_SETGID",
57323 + "CAP_SETUID",
57324 + "CAP_SETPCAP",
57325 + "CAP_LINUX_IMMUTABLE",
57326 + "CAP_NET_BIND_SERVICE",
57327 + "CAP_NET_BROADCAST",
57328 + "CAP_NET_ADMIN",
57329 + "CAP_NET_RAW",
57330 + "CAP_IPC_LOCK",
57331 + "CAP_IPC_OWNER",
57332 + "CAP_SYS_MODULE",
57333 + "CAP_SYS_RAWIO",
57334 + "CAP_SYS_CHROOT",
57335 + "CAP_SYS_PTRACE",
57336 + "CAP_SYS_PACCT",
57337 + "CAP_SYS_ADMIN",
57338 + "CAP_SYS_BOOT",
57339 + "CAP_SYS_NICE",
57340 + "CAP_SYS_RESOURCE",
57341 + "CAP_SYS_TIME",
57342 + "CAP_SYS_TTY_CONFIG",
57343 + "CAP_MKNOD",
57344 + "CAP_LEASE",
57345 + "CAP_AUDIT_WRITE",
57346 + "CAP_AUDIT_CONTROL",
57347 + "CAP_SETFCAP",
57348 + "CAP_MAC_OVERRIDE",
57349 + "CAP_MAC_ADMIN",
57350 + "CAP_SYSLOG",
57351 + "CAP_WAKE_ALARM"
57352 +};
57353 +
57354 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57355 +
57356 +int gr_is_capable(const int cap)
57357 +{
57358 +#ifdef CONFIG_GRKERNSEC
57359 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57360 + return 1;
57361 + return 0;
57362 +#else
57363 + return 1;
57364 +#endif
57365 +}
57366 +
57367 +int gr_is_capable_nolog(const int cap)
57368 +{
57369 +#ifdef CONFIG_GRKERNSEC
57370 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57371 + return 1;
57372 + return 0;
57373 +#else
57374 + return 1;
57375 +#endif
57376 +}
57377 +
57378 +EXPORT_SYMBOL(gr_is_capable);
57379 +EXPORT_SYMBOL(gr_is_capable_nolog);
57380 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57381 new file mode 100644
57382 index 0000000..d3ee748
57383 --- /dev/null
57384 +++ b/grsecurity/grsec_fifo.c
57385 @@ -0,0 +1,24 @@
57386 +#include <linux/kernel.h>
57387 +#include <linux/sched.h>
57388 +#include <linux/fs.h>
57389 +#include <linux/file.h>
57390 +#include <linux/grinternal.h>
57391 +
57392 +int
57393 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57394 + const struct dentry *dir, const int flag, const int acc_mode)
57395 +{
57396 +#ifdef CONFIG_GRKERNSEC_FIFO
57397 + const struct cred *cred = current_cred();
57398 +
57399 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57400 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57401 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57402 + (cred->fsuid != dentry->d_inode->i_uid)) {
57403 + if (!inode_permission(dentry->d_inode, acc_mode))
57404 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57405 + return -EACCES;
57406 + }
57407 +#endif
57408 + return 0;
57409 +}
57410 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57411 new file mode 100644
57412 index 0000000..8ca18bf
57413 --- /dev/null
57414 +++ b/grsecurity/grsec_fork.c
57415 @@ -0,0 +1,23 @@
57416 +#include <linux/kernel.h>
57417 +#include <linux/sched.h>
57418 +#include <linux/grsecurity.h>
57419 +#include <linux/grinternal.h>
57420 +#include <linux/errno.h>
57421 +
57422 +void
57423 +gr_log_forkfail(const int retval)
57424 +{
57425 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57426 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57427 + switch (retval) {
57428 + case -EAGAIN:
57429 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57430 + break;
57431 + case -ENOMEM:
57432 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57433 + break;
57434 + }
57435 + }
57436 +#endif
57437 + return;
57438 +}
57439 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57440 new file mode 100644
57441 index 0000000..356ef00
57442 --- /dev/null
57443 +++ b/grsecurity/grsec_init.c
57444 @@ -0,0 +1,269 @@
57445 +#include <linux/kernel.h>
57446 +#include <linux/sched.h>
57447 +#include <linux/mm.h>
57448 +#include <linux/gracl.h>
57449 +#include <linux/slab.h>
57450 +#include <linux/vmalloc.h>
57451 +#include <linux/percpu.h>
57452 +#include <linux/module.h>
57453 +
57454 +int grsec_enable_brute;
57455 +int grsec_enable_link;
57456 +int grsec_enable_dmesg;
57457 +int grsec_enable_harden_ptrace;
57458 +int grsec_enable_fifo;
57459 +int grsec_enable_execlog;
57460 +int grsec_enable_signal;
57461 +int grsec_enable_forkfail;
57462 +int grsec_enable_audit_ptrace;
57463 +int grsec_enable_time;
57464 +int grsec_enable_audit_textrel;
57465 +int grsec_enable_group;
57466 +int grsec_audit_gid;
57467 +int grsec_enable_chdir;
57468 +int grsec_enable_mount;
57469 +int grsec_enable_rofs;
57470 +int grsec_enable_chroot_findtask;
57471 +int grsec_enable_chroot_mount;
57472 +int grsec_enable_chroot_shmat;
57473 +int grsec_enable_chroot_fchdir;
57474 +int grsec_enable_chroot_double;
57475 +int grsec_enable_chroot_pivot;
57476 +int grsec_enable_chroot_chdir;
57477 +int grsec_enable_chroot_chmod;
57478 +int grsec_enable_chroot_mknod;
57479 +int grsec_enable_chroot_nice;
57480 +int grsec_enable_chroot_execlog;
57481 +int grsec_enable_chroot_caps;
57482 +int grsec_enable_chroot_sysctl;
57483 +int grsec_enable_chroot_unix;
57484 +int grsec_enable_tpe;
57485 +int grsec_tpe_gid;
57486 +int grsec_enable_blackhole;
57487 +#ifdef CONFIG_IPV6_MODULE
57488 +EXPORT_SYMBOL(grsec_enable_blackhole);
57489 +#endif
57490 +int grsec_lastack_retries;
57491 +int grsec_enable_tpe_all;
57492 +int grsec_enable_tpe_invert;
57493 +int grsec_enable_socket_all;
57494 +int grsec_socket_all_gid;
57495 +int grsec_enable_socket_client;
57496 +int grsec_socket_client_gid;
57497 +int grsec_enable_socket_server;
57498 +int grsec_socket_server_gid;
57499 +int grsec_resource_logging;
57500 +int grsec_disable_privio;
57501 +int grsec_enable_log_rwxmaps;
57502 +int grsec_lock;
57503 +
57504 +DEFINE_SPINLOCK(grsec_alert_lock);
57505 +unsigned long grsec_alert_wtime = 0;
57506 +unsigned long grsec_alert_fyet = 0;
57507 +
57508 +DEFINE_SPINLOCK(grsec_audit_lock);
57509 +
57510 +DEFINE_RWLOCK(grsec_exec_file_lock);
57511 +
57512 +char *gr_shared_page[4];
57513 +
57514 +char *gr_alert_log_fmt;
57515 +char *gr_audit_log_fmt;
57516 +char *gr_alert_log_buf;
57517 +char *gr_audit_log_buf;
57518 +
57519 +extern struct gr_arg *gr_usermode;
57520 +extern unsigned char *gr_system_salt;
57521 +extern unsigned char *gr_system_sum;
57522 +
57523 +void __init
57524 +grsecurity_init(void)
57525 +{
57526 + int j;
57527 + /* create the per-cpu shared pages */
57528 +
57529 +#ifdef CONFIG_X86
57530 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57531 +#endif
57532 +
57533 + for (j = 0; j < 4; j++) {
57534 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57535 + if (gr_shared_page[j] == NULL) {
57536 + panic("Unable to allocate grsecurity shared page");
57537 + return;
57538 + }
57539 + }
57540 +
57541 + /* allocate log buffers */
57542 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57543 + if (!gr_alert_log_fmt) {
57544 + panic("Unable to allocate grsecurity alert log format buffer");
57545 + return;
57546 + }
57547 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57548 + if (!gr_audit_log_fmt) {
57549 + panic("Unable to allocate grsecurity audit log format buffer");
57550 + return;
57551 + }
57552 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57553 + if (!gr_alert_log_buf) {
57554 + panic("Unable to allocate grsecurity alert log buffer");
57555 + return;
57556 + }
57557 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57558 + if (!gr_audit_log_buf) {
57559 + panic("Unable to allocate grsecurity audit log buffer");
57560 + return;
57561 + }
57562 +
57563 + /* allocate memory for authentication structure */
57564 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57565 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57566 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57567 +
57568 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57569 + panic("Unable to allocate grsecurity authentication structure");
57570 + return;
57571 + }
57572 +
57573 +
57574 +#ifdef CONFIG_GRKERNSEC_IO
57575 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57576 + grsec_disable_privio = 1;
57577 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57578 + grsec_disable_privio = 1;
57579 +#else
57580 + grsec_disable_privio = 0;
57581 +#endif
57582 +#endif
57583 +
57584 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57585 + /* for backward compatibility, tpe_invert always defaults to on if
57586 + enabled in the kernel
57587 + */
57588 + grsec_enable_tpe_invert = 1;
57589 +#endif
57590 +
57591 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57592 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57593 + grsec_lock = 1;
57594 +#endif
57595 +
57596 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57597 + grsec_enable_audit_textrel = 1;
57598 +#endif
57599 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57600 + grsec_enable_log_rwxmaps = 1;
57601 +#endif
57602 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57603 + grsec_enable_group = 1;
57604 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57605 +#endif
57606 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57607 + grsec_enable_chdir = 1;
57608 +#endif
57609 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57610 + grsec_enable_harden_ptrace = 1;
57611 +#endif
57612 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57613 + grsec_enable_mount = 1;
57614 +#endif
57615 +#ifdef CONFIG_GRKERNSEC_LINK
57616 + grsec_enable_link = 1;
57617 +#endif
57618 +#ifdef CONFIG_GRKERNSEC_BRUTE
57619 + grsec_enable_brute = 1;
57620 +#endif
57621 +#ifdef CONFIG_GRKERNSEC_DMESG
57622 + grsec_enable_dmesg = 1;
57623 +#endif
57624 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57625 + grsec_enable_blackhole = 1;
57626 + grsec_lastack_retries = 4;
57627 +#endif
57628 +#ifdef CONFIG_GRKERNSEC_FIFO
57629 + grsec_enable_fifo = 1;
57630 +#endif
57631 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57632 + grsec_enable_execlog = 1;
57633 +#endif
57634 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57635 + grsec_enable_signal = 1;
57636 +#endif
57637 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57638 + grsec_enable_forkfail = 1;
57639 +#endif
57640 +#ifdef CONFIG_GRKERNSEC_TIME
57641 + grsec_enable_time = 1;
57642 +#endif
57643 +#ifdef CONFIG_GRKERNSEC_RESLOG
57644 + grsec_resource_logging = 1;
57645 +#endif
57646 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57647 + grsec_enable_chroot_findtask = 1;
57648 +#endif
57649 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57650 + grsec_enable_chroot_unix = 1;
57651 +#endif
57652 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57653 + grsec_enable_chroot_mount = 1;
57654 +#endif
57655 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57656 + grsec_enable_chroot_fchdir = 1;
57657 +#endif
57658 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57659 + grsec_enable_chroot_shmat = 1;
57660 +#endif
57661 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57662 + grsec_enable_audit_ptrace = 1;
57663 +#endif
57664 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57665 + grsec_enable_chroot_double = 1;
57666 +#endif
57667 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57668 + grsec_enable_chroot_pivot = 1;
57669 +#endif
57670 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57671 + grsec_enable_chroot_chdir = 1;
57672 +#endif
57673 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57674 + grsec_enable_chroot_chmod = 1;
57675 +#endif
57676 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57677 + grsec_enable_chroot_mknod = 1;
57678 +#endif
57679 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57680 + grsec_enable_chroot_nice = 1;
57681 +#endif
57682 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57683 + grsec_enable_chroot_execlog = 1;
57684 +#endif
57685 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57686 + grsec_enable_chroot_caps = 1;
57687 +#endif
57688 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57689 + grsec_enable_chroot_sysctl = 1;
57690 +#endif
57691 +#ifdef CONFIG_GRKERNSEC_TPE
57692 + grsec_enable_tpe = 1;
57693 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57694 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57695 + grsec_enable_tpe_all = 1;
57696 +#endif
57697 +#endif
57698 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57699 + grsec_enable_socket_all = 1;
57700 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57701 +#endif
57702 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57703 + grsec_enable_socket_client = 1;
57704 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57705 +#endif
57706 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57707 + grsec_enable_socket_server = 1;
57708 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57709 +#endif
57710 +#endif
57711 +
57712 + return;
57713 +}
57714 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57715 new file mode 100644
57716 index 0000000..3efe141
57717 --- /dev/null
57718 +++ b/grsecurity/grsec_link.c
57719 @@ -0,0 +1,43 @@
57720 +#include <linux/kernel.h>
57721 +#include <linux/sched.h>
57722 +#include <linux/fs.h>
57723 +#include <linux/file.h>
57724 +#include <linux/grinternal.h>
57725 +
57726 +int
57727 +gr_handle_follow_link(const struct inode *parent,
57728 + const struct inode *inode,
57729 + const struct dentry *dentry, const struct vfsmount *mnt)
57730 +{
57731 +#ifdef CONFIG_GRKERNSEC_LINK
57732 + const struct cred *cred = current_cred();
57733 +
57734 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57735 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57736 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57737 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57738 + return -EACCES;
57739 + }
57740 +#endif
57741 + return 0;
57742 +}
57743 +
57744 +int
57745 +gr_handle_hardlink(const struct dentry *dentry,
57746 + const struct vfsmount *mnt,
57747 + struct inode *inode, const int mode, const char *to)
57748 +{
57749 +#ifdef CONFIG_GRKERNSEC_LINK
57750 + const struct cred *cred = current_cred();
57751 +
57752 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57753 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57754 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57755 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57756 + !capable(CAP_FOWNER) && cred->uid) {
57757 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57758 + return -EPERM;
57759 + }
57760 +#endif
57761 + return 0;
57762 +}
57763 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57764 new file mode 100644
57765 index 0000000..a45d2e9
57766 --- /dev/null
57767 +++ b/grsecurity/grsec_log.c
57768 @@ -0,0 +1,322 @@
57769 +#include <linux/kernel.h>
57770 +#include <linux/sched.h>
57771 +#include <linux/file.h>
57772 +#include <linux/tty.h>
57773 +#include <linux/fs.h>
57774 +#include <linux/grinternal.h>
57775 +
57776 +#ifdef CONFIG_TREE_PREEMPT_RCU
57777 +#define DISABLE_PREEMPT() preempt_disable()
57778 +#define ENABLE_PREEMPT() preempt_enable()
57779 +#else
57780 +#define DISABLE_PREEMPT()
57781 +#define ENABLE_PREEMPT()
57782 +#endif
57783 +
57784 +#define BEGIN_LOCKS(x) \
57785 + DISABLE_PREEMPT(); \
57786 + rcu_read_lock(); \
57787 + read_lock(&tasklist_lock); \
57788 + read_lock(&grsec_exec_file_lock); \
57789 + if (x != GR_DO_AUDIT) \
57790 + spin_lock(&grsec_alert_lock); \
57791 + else \
57792 + spin_lock(&grsec_audit_lock)
57793 +
57794 +#define END_LOCKS(x) \
57795 + if (x != GR_DO_AUDIT) \
57796 + spin_unlock(&grsec_alert_lock); \
57797 + else \
57798 + spin_unlock(&grsec_audit_lock); \
57799 + read_unlock(&grsec_exec_file_lock); \
57800 + read_unlock(&tasklist_lock); \
57801 + rcu_read_unlock(); \
57802 + ENABLE_PREEMPT(); \
57803 + if (x == GR_DONT_AUDIT) \
57804 + gr_handle_alertkill(current)
57805 +
57806 +enum {
57807 + FLOODING,
57808 + NO_FLOODING
57809 +};
57810 +
57811 +extern char *gr_alert_log_fmt;
57812 +extern char *gr_audit_log_fmt;
57813 +extern char *gr_alert_log_buf;
57814 +extern char *gr_audit_log_buf;
57815 +
57816 +static int gr_log_start(int audit)
57817 +{
57818 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57819 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57820 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57821 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57822 + unsigned long curr_secs = get_seconds();
57823 +
57824 + if (audit == GR_DO_AUDIT)
57825 + goto set_fmt;
57826 +
57827 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57828 + grsec_alert_wtime = curr_secs;
57829 + grsec_alert_fyet = 0;
57830 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57831 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57832 + grsec_alert_fyet++;
57833 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57834 + grsec_alert_wtime = curr_secs;
57835 + grsec_alert_fyet++;
57836 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57837 + return FLOODING;
57838 + }
57839 + else return FLOODING;
57840 +
57841 +set_fmt:
57842 +#endif
57843 + memset(buf, 0, PAGE_SIZE);
57844 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57845 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57846 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57847 + } else if (current->signal->curr_ip) {
57848 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57849 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57850 + } else if (gr_acl_is_enabled()) {
57851 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57852 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57853 + } else {
57854 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57855 + strcpy(buf, fmt);
57856 + }
57857 +
57858 + return NO_FLOODING;
57859 +}
57860 +
57861 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57862 + __attribute__ ((format (printf, 2, 0)));
57863 +
57864 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57865 +{
57866 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57867 + unsigned int len = strlen(buf);
57868 +
57869 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57870 +
57871 + return;
57872 +}
57873 +
57874 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57875 + __attribute__ ((format (printf, 2, 3)));
57876 +
57877 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57878 +{
57879 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57880 + unsigned int len = strlen(buf);
57881 + va_list ap;
57882 +
57883 + va_start(ap, msg);
57884 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57885 + va_end(ap);
57886 +
57887 + return;
57888 +}
57889 +
57890 +static void gr_log_end(int audit, int append_default)
57891 +{
57892 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57893 +
57894 + if (append_default) {
57895 + unsigned int len = strlen(buf);
57896 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57897 + }
57898 +
57899 + printk("%s\n", buf);
57900 +
57901 + return;
57902 +}
57903 +
57904 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57905 +{
57906 + int logtype;
57907 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57908 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57909 + void *voidptr = NULL;
57910 + int num1 = 0, num2 = 0;
57911 + unsigned long ulong1 = 0, ulong2 = 0;
57912 + struct dentry *dentry = NULL;
57913 + struct vfsmount *mnt = NULL;
57914 + struct file *file = NULL;
57915 + struct task_struct *task = NULL;
57916 + const struct cred *cred, *pcred;
57917 + va_list ap;
57918 +
57919 + BEGIN_LOCKS(audit);
57920 + logtype = gr_log_start(audit);
57921 + if (logtype == FLOODING) {
57922 + END_LOCKS(audit);
57923 + return;
57924 + }
57925 + va_start(ap, argtypes);
57926 + switch (argtypes) {
57927 + case GR_TTYSNIFF:
57928 + task = va_arg(ap, struct task_struct *);
57929 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57930 + break;
57931 + case GR_SYSCTL_HIDDEN:
57932 + str1 = va_arg(ap, char *);
57933 + gr_log_middle_varargs(audit, msg, result, str1);
57934 + break;
57935 + case GR_RBAC:
57936 + dentry = va_arg(ap, struct dentry *);
57937 + mnt = va_arg(ap, struct vfsmount *);
57938 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57939 + break;
57940 + case GR_RBAC_STR:
57941 + dentry = va_arg(ap, struct dentry *);
57942 + mnt = va_arg(ap, struct vfsmount *);
57943 + str1 = va_arg(ap, char *);
57944 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57945 + break;
57946 + case GR_STR_RBAC:
57947 + str1 = va_arg(ap, char *);
57948 + dentry = va_arg(ap, struct dentry *);
57949 + mnt = va_arg(ap, struct vfsmount *);
57950 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57951 + break;
57952 + case GR_RBAC_MODE2:
57953 + dentry = va_arg(ap, struct dentry *);
57954 + mnt = va_arg(ap, struct vfsmount *);
57955 + str1 = va_arg(ap, char *);
57956 + str2 = va_arg(ap, char *);
57957 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57958 + break;
57959 + case GR_RBAC_MODE3:
57960 + dentry = va_arg(ap, struct dentry *);
57961 + mnt = va_arg(ap, struct vfsmount *);
57962 + str1 = va_arg(ap, char *);
57963 + str2 = va_arg(ap, char *);
57964 + str3 = va_arg(ap, char *);
57965 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57966 + break;
57967 + case GR_FILENAME:
57968 + dentry = va_arg(ap, struct dentry *);
57969 + mnt = va_arg(ap, struct vfsmount *);
57970 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57971 + break;
57972 + case GR_STR_FILENAME:
57973 + str1 = va_arg(ap, char *);
57974 + dentry = va_arg(ap, struct dentry *);
57975 + mnt = va_arg(ap, struct vfsmount *);
57976 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57977 + break;
57978 + case GR_FILENAME_STR:
57979 + dentry = va_arg(ap, struct dentry *);
57980 + mnt = va_arg(ap, struct vfsmount *);
57981 + str1 = va_arg(ap, char *);
57982 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57983 + break;
57984 + case GR_FILENAME_TWO_INT:
57985 + dentry = va_arg(ap, struct dentry *);
57986 + mnt = va_arg(ap, struct vfsmount *);
57987 + num1 = va_arg(ap, int);
57988 + num2 = va_arg(ap, int);
57989 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57990 + break;
57991 + case GR_FILENAME_TWO_INT_STR:
57992 + dentry = va_arg(ap, struct dentry *);
57993 + mnt = va_arg(ap, struct vfsmount *);
57994 + num1 = va_arg(ap, int);
57995 + num2 = va_arg(ap, int);
57996 + str1 = va_arg(ap, char *);
57997 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57998 + break;
57999 + case GR_TEXTREL:
58000 + file = va_arg(ap, struct file *);
58001 + ulong1 = va_arg(ap, unsigned long);
58002 + ulong2 = va_arg(ap, unsigned long);
58003 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58004 + break;
58005 + case GR_PTRACE:
58006 + task = va_arg(ap, struct task_struct *);
58007 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58008 + break;
58009 + case GR_RESOURCE:
58010 + task = va_arg(ap, struct task_struct *);
58011 + cred = __task_cred(task);
58012 + pcred = __task_cred(task->real_parent);
58013 + ulong1 = va_arg(ap, unsigned long);
58014 + str1 = va_arg(ap, char *);
58015 + ulong2 = va_arg(ap, unsigned long);
58016 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58017 + break;
58018 + case GR_CAP:
58019 + task = va_arg(ap, struct task_struct *);
58020 + cred = __task_cred(task);
58021 + pcred = __task_cred(task->real_parent);
58022 + str1 = va_arg(ap, char *);
58023 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58024 + break;
58025 + case GR_SIG:
58026 + str1 = va_arg(ap, char *);
58027 + voidptr = va_arg(ap, void *);
58028 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58029 + break;
58030 + case GR_SIG2:
58031 + task = va_arg(ap, struct task_struct *);
58032 + cred = __task_cred(task);
58033 + pcred = __task_cred(task->real_parent);
58034 + num1 = va_arg(ap, int);
58035 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58036 + break;
58037 + case GR_CRASH1:
58038 + task = va_arg(ap, struct task_struct *);
58039 + cred = __task_cred(task);
58040 + pcred = __task_cred(task->real_parent);
58041 + ulong1 = va_arg(ap, unsigned long);
58042 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58043 + break;
58044 + case GR_CRASH2:
58045 + task = va_arg(ap, struct task_struct *);
58046 + cred = __task_cred(task);
58047 + pcred = __task_cred(task->real_parent);
58048 + ulong1 = va_arg(ap, unsigned long);
58049 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58050 + break;
58051 + case GR_RWXMAP:
58052 + file = va_arg(ap, struct file *);
58053 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58054 + break;
58055 + case GR_PSACCT:
58056 + {
58057 + unsigned int wday, cday;
58058 + __u8 whr, chr;
58059 + __u8 wmin, cmin;
58060 + __u8 wsec, csec;
58061 + char cur_tty[64] = { 0 };
58062 + char parent_tty[64] = { 0 };
58063 +
58064 + task = va_arg(ap, struct task_struct *);
58065 + wday = va_arg(ap, unsigned int);
58066 + cday = va_arg(ap, unsigned int);
58067 + whr = va_arg(ap, int);
58068 + chr = va_arg(ap, int);
58069 + wmin = va_arg(ap, int);
58070 + cmin = va_arg(ap, int);
58071 + wsec = va_arg(ap, int);
58072 + csec = va_arg(ap, int);
58073 + ulong1 = va_arg(ap, unsigned long);
58074 + cred = __task_cred(task);
58075 + pcred = __task_cred(task->real_parent);
58076 +
58077 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58078 + }
58079 + break;
58080 + default:
58081 + gr_log_middle(audit, msg, ap);
58082 + }
58083 + va_end(ap);
58084 + // these don't need DEFAULTSECARGS printed on the end
58085 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58086 + gr_log_end(audit, 0);
58087 + else
58088 + gr_log_end(audit, 1);
58089 + END_LOCKS(audit);
58090 +}
58091 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58092 new file mode 100644
58093 index 0000000..6c0416b
58094 --- /dev/null
58095 +++ b/grsecurity/grsec_mem.c
58096 @@ -0,0 +1,33 @@
58097 +#include <linux/kernel.h>
58098 +#include <linux/sched.h>
58099 +#include <linux/mm.h>
58100 +#include <linux/mman.h>
58101 +#include <linux/grinternal.h>
58102 +
58103 +void
58104 +gr_handle_ioperm(void)
58105 +{
58106 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58107 + return;
58108 +}
58109 +
58110 +void
58111 +gr_handle_iopl(void)
58112 +{
58113 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58114 + return;
58115 +}
58116 +
58117 +void
58118 +gr_handle_mem_readwrite(u64 from, u64 to)
58119 +{
58120 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58121 + return;
58122 +}
58123 +
58124 +void
58125 +gr_handle_vm86(void)
58126 +{
58127 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58128 + return;
58129 +}
58130 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58131 new file mode 100644
58132 index 0000000..2131422
58133 --- /dev/null
58134 +++ b/grsecurity/grsec_mount.c
58135 @@ -0,0 +1,62 @@
58136 +#include <linux/kernel.h>
58137 +#include <linux/sched.h>
58138 +#include <linux/mount.h>
58139 +#include <linux/grsecurity.h>
58140 +#include <linux/grinternal.h>
58141 +
58142 +void
58143 +gr_log_remount(const char *devname, const int retval)
58144 +{
58145 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58146 + if (grsec_enable_mount && (retval >= 0))
58147 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58148 +#endif
58149 + return;
58150 +}
58151 +
58152 +void
58153 +gr_log_unmount(const char *devname, const int retval)
58154 +{
58155 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58156 + if (grsec_enable_mount && (retval >= 0))
58157 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58158 +#endif
58159 + return;
58160 +}
58161 +
58162 +void
58163 +gr_log_mount(const char *from, const char *to, const int retval)
58164 +{
58165 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58166 + if (grsec_enable_mount && (retval >= 0))
58167 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58168 +#endif
58169 + return;
58170 +}
58171 +
58172 +int
58173 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58174 +{
58175 +#ifdef CONFIG_GRKERNSEC_ROFS
58176 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58177 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58178 + return -EPERM;
58179 + } else
58180 + return 0;
58181 +#endif
58182 + return 0;
58183 +}
58184 +
58185 +int
58186 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58187 +{
58188 +#ifdef CONFIG_GRKERNSEC_ROFS
58189 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58190 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58191 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58192 + return -EPERM;
58193 + } else
58194 + return 0;
58195 +#endif
58196 + return 0;
58197 +}
58198 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58199 new file mode 100644
58200 index 0000000..a3b12a0
58201 --- /dev/null
58202 +++ b/grsecurity/grsec_pax.c
58203 @@ -0,0 +1,36 @@
58204 +#include <linux/kernel.h>
58205 +#include <linux/sched.h>
58206 +#include <linux/mm.h>
58207 +#include <linux/file.h>
58208 +#include <linux/grinternal.h>
58209 +#include <linux/grsecurity.h>
58210 +
58211 +void
58212 +gr_log_textrel(struct vm_area_struct * vma)
58213 +{
58214 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58215 + if (grsec_enable_audit_textrel)
58216 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58217 +#endif
58218 + return;
58219 +}
58220 +
58221 +void
58222 +gr_log_rwxmmap(struct file *file)
58223 +{
58224 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58225 + if (grsec_enable_log_rwxmaps)
58226 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58227 +#endif
58228 + return;
58229 +}
58230 +
58231 +void
58232 +gr_log_rwxmprotect(struct file *file)
58233 +{
58234 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58235 + if (grsec_enable_log_rwxmaps)
58236 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58237 +#endif
58238 + return;
58239 +}
58240 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58241 new file mode 100644
58242 index 0000000..472c1d6
58243 --- /dev/null
58244 +++ b/grsecurity/grsec_ptrace.c
58245 @@ -0,0 +1,14 @@
58246 +#include <linux/kernel.h>
58247 +#include <linux/sched.h>
58248 +#include <linux/grinternal.h>
58249 +#include <linux/grsecurity.h>
58250 +
58251 +void
58252 +gr_audit_ptrace(struct task_struct *task)
58253 +{
58254 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58255 + if (grsec_enable_audit_ptrace)
58256 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58257 +#endif
58258 + return;
58259 +}
58260 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58261 new file mode 100644
58262 index 0000000..cf090b3
58263 --- /dev/null
58264 +++ b/grsecurity/grsec_sig.c
58265 @@ -0,0 +1,206 @@
58266 +#include <linux/kernel.h>
58267 +#include <linux/sched.h>
58268 +#include <linux/delay.h>
58269 +#include <linux/grsecurity.h>
58270 +#include <linux/grinternal.h>
58271 +#include <linux/hardirq.h>
58272 +
58273 +char *signames[] = {
58274 + [SIGSEGV] = "Segmentation fault",
58275 + [SIGILL] = "Illegal instruction",
58276 + [SIGABRT] = "Abort",
58277 + [SIGBUS] = "Invalid alignment/Bus error"
58278 +};
58279 +
58280 +void
58281 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58282 +{
58283 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58284 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58285 + (sig == SIGABRT) || (sig == SIGBUS))) {
58286 + if (t->pid == current->pid) {
58287 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58288 + } else {
58289 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58290 + }
58291 + }
58292 +#endif
58293 + return;
58294 +}
58295 +
58296 +int
58297 +gr_handle_signal(const struct task_struct *p, const int sig)
58298 +{
58299 +#ifdef CONFIG_GRKERNSEC
58300 + if (current->pid > 1 && gr_check_protected_task(p)) {
58301 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58302 + return -EPERM;
58303 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58304 + return -EPERM;
58305 + }
58306 +#endif
58307 + return 0;
58308 +}
58309 +
58310 +#ifdef CONFIG_GRKERNSEC
58311 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58312 +
58313 +int gr_fake_force_sig(int sig, struct task_struct *t)
58314 +{
58315 + unsigned long int flags;
58316 + int ret, blocked, ignored;
58317 + struct k_sigaction *action;
58318 +
58319 + spin_lock_irqsave(&t->sighand->siglock, flags);
58320 + action = &t->sighand->action[sig-1];
58321 + ignored = action->sa.sa_handler == SIG_IGN;
58322 + blocked = sigismember(&t->blocked, sig);
58323 + if (blocked || ignored) {
58324 + action->sa.sa_handler = SIG_DFL;
58325 + if (blocked) {
58326 + sigdelset(&t->blocked, sig);
58327 + recalc_sigpending_and_wake(t);
58328 + }
58329 + }
58330 + if (action->sa.sa_handler == SIG_DFL)
58331 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58332 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58333 +
58334 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58335 +
58336 + return ret;
58337 +}
58338 +#endif
58339 +
58340 +#ifdef CONFIG_GRKERNSEC_BRUTE
58341 +#define GR_USER_BAN_TIME (15 * 60)
58342 +
58343 +static int __get_dumpable(unsigned long mm_flags)
58344 +{
58345 + int ret;
58346 +
58347 + ret = mm_flags & MMF_DUMPABLE_MASK;
58348 + return (ret >= 2) ? 2 : ret;
58349 +}
58350 +#endif
58351 +
58352 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58353 +{
58354 +#ifdef CONFIG_GRKERNSEC_BRUTE
58355 + uid_t uid = 0;
58356 +
58357 + if (!grsec_enable_brute)
58358 + return;
58359 +
58360 + rcu_read_lock();
58361 + read_lock(&tasklist_lock);
58362 + read_lock(&grsec_exec_file_lock);
58363 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58364 + p->real_parent->brute = 1;
58365 + else {
58366 + const struct cred *cred = __task_cred(p), *cred2;
58367 + struct task_struct *tsk, *tsk2;
58368 +
58369 + if (!__get_dumpable(mm_flags) && cred->uid) {
58370 + struct user_struct *user;
58371 +
58372 + uid = cred->uid;
58373 +
58374 + /* this is put upon execution past expiration */
58375 + user = find_user(uid);
58376 + if (user == NULL)
58377 + goto unlock;
58378 + user->banned = 1;
58379 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58380 + if (user->ban_expires == ~0UL)
58381 + user->ban_expires--;
58382 +
58383 + do_each_thread(tsk2, tsk) {
58384 + cred2 = __task_cred(tsk);
58385 + if (tsk != p && cred2->uid == uid)
58386 + gr_fake_force_sig(SIGKILL, tsk);
58387 + } while_each_thread(tsk2, tsk);
58388 + }
58389 + }
58390 +unlock:
58391 + read_unlock(&grsec_exec_file_lock);
58392 + read_unlock(&tasklist_lock);
58393 + rcu_read_unlock();
58394 +
58395 + if (uid)
58396 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58397 +
58398 +#endif
58399 + return;
58400 +}
58401 +
58402 +void gr_handle_brute_check(void)
58403 +{
58404 +#ifdef CONFIG_GRKERNSEC_BRUTE
58405 + if (current->brute)
58406 + msleep(30 * 1000);
58407 +#endif
58408 + return;
58409 +}
58410 +
58411 +void gr_handle_kernel_exploit(void)
58412 +{
58413 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58414 + const struct cred *cred;
58415 + struct task_struct *tsk, *tsk2;
58416 + struct user_struct *user;
58417 + uid_t uid;
58418 +
58419 + if (in_irq() || in_serving_softirq() || in_nmi())
58420 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58421 +
58422 + uid = current_uid();
58423 +
58424 + if (uid == 0)
58425 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
58426 + else {
58427 + /* kill all the processes of this user, hold a reference
58428 + to their creds struct, and prevent them from creating
58429 + another process until system reset
58430 + */
58431 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58432 + /* we intentionally leak this ref */
58433 + user = get_uid(current->cred->user);
58434 + if (user) {
58435 + user->banned = 1;
58436 + user->ban_expires = ~0UL;
58437 + }
58438 +
58439 + read_lock(&tasklist_lock);
58440 + do_each_thread(tsk2, tsk) {
58441 + cred = __task_cred(tsk);
58442 + if (cred->uid == uid)
58443 + gr_fake_force_sig(SIGKILL, tsk);
58444 + } while_each_thread(tsk2, tsk);
58445 + read_unlock(&tasklist_lock);
58446 + }
58447 +#endif
58448 +}
58449 +
58450 +int __gr_process_user_ban(struct user_struct *user)
58451 +{
58452 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58453 + if (unlikely(user->banned)) {
58454 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58455 + user->banned = 0;
58456 + user->ban_expires = 0;
58457 + free_uid(user);
58458 + } else
58459 + return -EPERM;
58460 + }
58461 +#endif
58462 + return 0;
58463 +}
58464 +
58465 +int gr_process_user_ban(void)
58466 +{
58467 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58468 + return __gr_process_user_ban(current->cred->user);
58469 +#endif
58470 + return 0;
58471 +}
58472 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58473 new file mode 100644
58474 index 0000000..4030d57
58475 --- /dev/null
58476 +++ b/grsecurity/grsec_sock.c
58477 @@ -0,0 +1,244 @@
58478 +#include <linux/kernel.h>
58479 +#include <linux/module.h>
58480 +#include <linux/sched.h>
58481 +#include <linux/file.h>
58482 +#include <linux/net.h>
58483 +#include <linux/in.h>
58484 +#include <linux/ip.h>
58485 +#include <net/sock.h>
58486 +#include <net/inet_sock.h>
58487 +#include <linux/grsecurity.h>
58488 +#include <linux/grinternal.h>
58489 +#include <linux/gracl.h>
58490 +
58491 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58492 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58493 +
58494 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
58495 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
58496 +
58497 +#ifdef CONFIG_UNIX_MODULE
58498 +EXPORT_SYMBOL(gr_acl_handle_unix);
58499 +EXPORT_SYMBOL(gr_acl_handle_mknod);
58500 +EXPORT_SYMBOL(gr_handle_chroot_unix);
58501 +EXPORT_SYMBOL(gr_handle_create);
58502 +#endif
58503 +
58504 +#ifdef CONFIG_GRKERNSEC
58505 +#define gr_conn_table_size 32749
58506 +struct conn_table_entry {
58507 + struct conn_table_entry *next;
58508 + struct signal_struct *sig;
58509 +};
58510 +
58511 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58512 +DEFINE_SPINLOCK(gr_conn_table_lock);
58513 +
58514 +extern const char * gr_socktype_to_name(unsigned char type);
58515 +extern const char * gr_proto_to_name(unsigned char proto);
58516 +extern const char * gr_sockfamily_to_name(unsigned char family);
58517 +
58518 +static __inline__ int
58519 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58520 +{
58521 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58522 +}
58523 +
58524 +static __inline__ int
58525 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58526 + __u16 sport, __u16 dport)
58527 +{
58528 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58529 + sig->gr_sport == sport && sig->gr_dport == dport))
58530 + return 1;
58531 + else
58532 + return 0;
58533 +}
58534 +
58535 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58536 +{
58537 + struct conn_table_entry **match;
58538 + unsigned int index;
58539 +
58540 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58541 + sig->gr_sport, sig->gr_dport,
58542 + gr_conn_table_size);
58543 +
58544 + newent->sig = sig;
58545 +
58546 + match = &gr_conn_table[index];
58547 + newent->next = *match;
58548 + *match = newent;
58549 +
58550 + return;
58551 +}
58552 +
58553 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58554 +{
58555 + struct conn_table_entry *match, *last = NULL;
58556 + unsigned int index;
58557 +
58558 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58559 + sig->gr_sport, sig->gr_dport,
58560 + gr_conn_table_size);
58561 +
58562 + match = gr_conn_table[index];
58563 + while (match && !conn_match(match->sig,
58564 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58565 + sig->gr_dport)) {
58566 + last = match;
58567 + match = match->next;
58568 + }
58569 +
58570 + if (match) {
58571 + if (last)
58572 + last->next = match->next;
58573 + else
58574 + gr_conn_table[index] = NULL;
58575 + kfree(match);
58576 + }
58577 +
58578 + return;
58579 +}
58580 +
58581 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58582 + __u16 sport, __u16 dport)
58583 +{
58584 + struct conn_table_entry *match;
58585 + unsigned int index;
58586 +
58587 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58588 +
58589 + match = gr_conn_table[index];
58590 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58591 + match = match->next;
58592 +
58593 + if (match)
58594 + return match->sig;
58595 + else
58596 + return NULL;
58597 +}
58598 +
58599 +#endif
58600 +
58601 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58602 +{
58603 +#ifdef CONFIG_GRKERNSEC
58604 + struct signal_struct *sig = task->signal;
58605 + struct conn_table_entry *newent;
58606 +
58607 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58608 + if (newent == NULL)
58609 + return;
58610 + /* no bh lock needed since we are called with bh disabled */
58611 + spin_lock(&gr_conn_table_lock);
58612 + gr_del_task_from_ip_table_nolock(sig);
58613 + sig->gr_saddr = inet->inet_rcv_saddr;
58614 + sig->gr_daddr = inet->inet_daddr;
58615 + sig->gr_sport = inet->inet_sport;
58616 + sig->gr_dport = inet->inet_dport;
58617 + gr_add_to_task_ip_table_nolock(sig, newent);
58618 + spin_unlock(&gr_conn_table_lock);
58619 +#endif
58620 + return;
58621 +}
58622 +
58623 +void gr_del_task_from_ip_table(struct task_struct *task)
58624 +{
58625 +#ifdef CONFIG_GRKERNSEC
58626 + spin_lock_bh(&gr_conn_table_lock);
58627 + gr_del_task_from_ip_table_nolock(task->signal);
58628 + spin_unlock_bh(&gr_conn_table_lock);
58629 +#endif
58630 + return;
58631 +}
58632 +
58633 +void
58634 +gr_attach_curr_ip(const struct sock *sk)
58635 +{
58636 +#ifdef CONFIG_GRKERNSEC
58637 + struct signal_struct *p, *set;
58638 + const struct inet_sock *inet = inet_sk(sk);
58639 +
58640 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58641 + return;
58642 +
58643 + set = current->signal;
58644 +
58645 + spin_lock_bh(&gr_conn_table_lock);
58646 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58647 + inet->inet_dport, inet->inet_sport);
58648 + if (unlikely(p != NULL)) {
58649 + set->curr_ip = p->curr_ip;
58650 + set->used_accept = 1;
58651 + gr_del_task_from_ip_table_nolock(p);
58652 + spin_unlock_bh(&gr_conn_table_lock);
58653 + return;
58654 + }
58655 + spin_unlock_bh(&gr_conn_table_lock);
58656 +
58657 + set->curr_ip = inet->inet_daddr;
58658 + set->used_accept = 1;
58659 +#endif
58660 + return;
58661 +}
58662 +
58663 +int
58664 +gr_handle_sock_all(const int family, const int type, const int protocol)
58665 +{
58666 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58667 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58668 + (family != AF_UNIX)) {
58669 + if (family == AF_INET)
58670 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58671 + else
58672 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58673 + return -EACCES;
58674 + }
58675 +#endif
58676 + return 0;
58677 +}
58678 +
58679 +int
58680 +gr_handle_sock_server(const struct sockaddr *sck)
58681 +{
58682 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58683 + if (grsec_enable_socket_server &&
58684 + in_group_p(grsec_socket_server_gid) &&
58685 + sck && (sck->sa_family != AF_UNIX) &&
58686 + (sck->sa_family != AF_LOCAL)) {
58687 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58688 + return -EACCES;
58689 + }
58690 +#endif
58691 + return 0;
58692 +}
58693 +
58694 +int
58695 +gr_handle_sock_server_other(const struct sock *sck)
58696 +{
58697 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58698 + if (grsec_enable_socket_server &&
58699 + in_group_p(grsec_socket_server_gid) &&
58700 + sck && (sck->sk_family != AF_UNIX) &&
58701 + (sck->sk_family != AF_LOCAL)) {
58702 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58703 + return -EACCES;
58704 + }
58705 +#endif
58706 + return 0;
58707 +}
58708 +
58709 +int
58710 +gr_handle_sock_client(const struct sockaddr *sck)
58711 +{
58712 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58713 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58714 + sck && (sck->sa_family != AF_UNIX) &&
58715 + (sck->sa_family != AF_LOCAL)) {
58716 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58717 + return -EACCES;
58718 + }
58719 +#endif
58720 + return 0;
58721 +}
58722 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58723 new file mode 100644
58724 index 0000000..174668f
58725 --- /dev/null
58726 +++ b/grsecurity/grsec_sysctl.c
58727 @@ -0,0 +1,433 @@
58728 +#include <linux/kernel.h>
58729 +#include <linux/sched.h>
58730 +#include <linux/sysctl.h>
58731 +#include <linux/grsecurity.h>
58732 +#include <linux/grinternal.h>
58733 +
58734 +int
58735 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58736 +{
58737 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58738 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58739 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58740 + return -EACCES;
58741 + }
58742 +#endif
58743 + return 0;
58744 +}
58745 +
58746 +#ifdef CONFIG_GRKERNSEC_ROFS
58747 +static int __maybe_unused one = 1;
58748 +#endif
58749 +
58750 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58751 +struct ctl_table grsecurity_table[] = {
58752 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58753 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58754 +#ifdef CONFIG_GRKERNSEC_IO
58755 + {
58756 + .procname = "disable_priv_io",
58757 + .data = &grsec_disable_privio,
58758 + .maxlen = sizeof(int),
58759 + .mode = 0600,
58760 + .proc_handler = &proc_dointvec,
58761 + },
58762 +#endif
58763 +#endif
58764 +#ifdef CONFIG_GRKERNSEC_LINK
58765 + {
58766 + .procname = "linking_restrictions",
58767 + .data = &grsec_enable_link,
58768 + .maxlen = sizeof(int),
58769 + .mode = 0600,
58770 + .proc_handler = &proc_dointvec,
58771 + },
58772 +#endif
58773 +#ifdef CONFIG_GRKERNSEC_BRUTE
58774 + {
58775 + .procname = "deter_bruteforce",
58776 + .data = &grsec_enable_brute,
58777 + .maxlen = sizeof(int),
58778 + .mode = 0600,
58779 + .proc_handler = &proc_dointvec,
58780 + },
58781 +#endif
58782 +#ifdef CONFIG_GRKERNSEC_FIFO
58783 + {
58784 + .procname = "fifo_restrictions",
58785 + .data = &grsec_enable_fifo,
58786 + .maxlen = sizeof(int),
58787 + .mode = 0600,
58788 + .proc_handler = &proc_dointvec,
58789 + },
58790 +#endif
58791 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58792 + {
58793 + .procname = "ip_blackhole",
58794 + .data = &grsec_enable_blackhole,
58795 + .maxlen = sizeof(int),
58796 + .mode = 0600,
58797 + .proc_handler = &proc_dointvec,
58798 + },
58799 + {
58800 + .procname = "lastack_retries",
58801 + .data = &grsec_lastack_retries,
58802 + .maxlen = sizeof(int),
58803 + .mode = 0600,
58804 + .proc_handler = &proc_dointvec,
58805 + },
58806 +#endif
58807 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58808 + {
58809 + .procname = "exec_logging",
58810 + .data = &grsec_enable_execlog,
58811 + .maxlen = sizeof(int),
58812 + .mode = 0600,
58813 + .proc_handler = &proc_dointvec,
58814 + },
58815 +#endif
58816 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58817 + {
58818 + .procname = "rwxmap_logging",
58819 + .data = &grsec_enable_log_rwxmaps,
58820 + .maxlen = sizeof(int),
58821 + .mode = 0600,
58822 + .proc_handler = &proc_dointvec,
58823 + },
58824 +#endif
58825 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58826 + {
58827 + .procname = "signal_logging",
58828 + .data = &grsec_enable_signal,
58829 + .maxlen = sizeof(int),
58830 + .mode = 0600,
58831 + .proc_handler = &proc_dointvec,
58832 + },
58833 +#endif
58834 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58835 + {
58836 + .procname = "forkfail_logging",
58837 + .data = &grsec_enable_forkfail,
58838 + .maxlen = sizeof(int),
58839 + .mode = 0600,
58840 + .proc_handler = &proc_dointvec,
58841 + },
58842 +#endif
58843 +#ifdef CONFIG_GRKERNSEC_TIME
58844 + {
58845 + .procname = "timechange_logging",
58846 + .data = &grsec_enable_time,
58847 + .maxlen = sizeof(int),
58848 + .mode = 0600,
58849 + .proc_handler = &proc_dointvec,
58850 + },
58851 +#endif
58852 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58853 + {
58854 + .procname = "chroot_deny_shmat",
58855 + .data = &grsec_enable_chroot_shmat,
58856 + .maxlen = sizeof(int),
58857 + .mode = 0600,
58858 + .proc_handler = &proc_dointvec,
58859 + },
58860 +#endif
58861 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58862 + {
58863 + .procname = "chroot_deny_unix",
58864 + .data = &grsec_enable_chroot_unix,
58865 + .maxlen = sizeof(int),
58866 + .mode = 0600,
58867 + .proc_handler = &proc_dointvec,
58868 + },
58869 +#endif
58870 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58871 + {
58872 + .procname = "chroot_deny_mount",
58873 + .data = &grsec_enable_chroot_mount,
58874 + .maxlen = sizeof(int),
58875 + .mode = 0600,
58876 + .proc_handler = &proc_dointvec,
58877 + },
58878 +#endif
58879 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58880 + {
58881 + .procname = "chroot_deny_fchdir",
58882 + .data = &grsec_enable_chroot_fchdir,
58883 + .maxlen = sizeof(int),
58884 + .mode = 0600,
58885 + .proc_handler = &proc_dointvec,
58886 + },
58887 +#endif
58888 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58889 + {
58890 + .procname = "chroot_deny_chroot",
58891 + .data = &grsec_enable_chroot_double,
58892 + .maxlen = sizeof(int),
58893 + .mode = 0600,
58894 + .proc_handler = &proc_dointvec,
58895 + },
58896 +#endif
58897 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58898 + {
58899 + .procname = "chroot_deny_pivot",
58900 + .data = &grsec_enable_chroot_pivot,
58901 + .maxlen = sizeof(int),
58902 + .mode = 0600,
58903 + .proc_handler = &proc_dointvec,
58904 + },
58905 +#endif
58906 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58907 + {
58908 + .procname = "chroot_enforce_chdir",
58909 + .data = &grsec_enable_chroot_chdir,
58910 + .maxlen = sizeof(int),
58911 + .mode = 0600,
58912 + .proc_handler = &proc_dointvec,
58913 + },
58914 +#endif
58915 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58916 + {
58917 + .procname = "chroot_deny_chmod",
58918 + .data = &grsec_enable_chroot_chmod,
58919 + .maxlen = sizeof(int),
58920 + .mode = 0600,
58921 + .proc_handler = &proc_dointvec,
58922 + },
58923 +#endif
58924 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58925 + {
58926 + .procname = "chroot_deny_mknod",
58927 + .data = &grsec_enable_chroot_mknod,
58928 + .maxlen = sizeof(int),
58929 + .mode = 0600,
58930 + .proc_handler = &proc_dointvec,
58931 + },
58932 +#endif
58933 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58934 + {
58935 + .procname = "chroot_restrict_nice",
58936 + .data = &grsec_enable_chroot_nice,
58937 + .maxlen = sizeof(int),
58938 + .mode = 0600,
58939 + .proc_handler = &proc_dointvec,
58940 + },
58941 +#endif
58942 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58943 + {
58944 + .procname = "chroot_execlog",
58945 + .data = &grsec_enable_chroot_execlog,
58946 + .maxlen = sizeof(int),
58947 + .mode = 0600,
58948 + .proc_handler = &proc_dointvec,
58949 + },
58950 +#endif
58951 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58952 + {
58953 + .procname = "chroot_caps",
58954 + .data = &grsec_enable_chroot_caps,
58955 + .maxlen = sizeof(int),
58956 + .mode = 0600,
58957 + .proc_handler = &proc_dointvec,
58958 + },
58959 +#endif
58960 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58961 + {
58962 + .procname = "chroot_deny_sysctl",
58963 + .data = &grsec_enable_chroot_sysctl,
58964 + .maxlen = sizeof(int),
58965 + .mode = 0600,
58966 + .proc_handler = &proc_dointvec,
58967 + },
58968 +#endif
58969 +#ifdef CONFIG_GRKERNSEC_TPE
58970 + {
58971 + .procname = "tpe",
58972 + .data = &grsec_enable_tpe,
58973 + .maxlen = sizeof(int),
58974 + .mode = 0600,
58975 + .proc_handler = &proc_dointvec,
58976 + },
58977 + {
58978 + .procname = "tpe_gid",
58979 + .data = &grsec_tpe_gid,
58980 + .maxlen = sizeof(int),
58981 + .mode = 0600,
58982 + .proc_handler = &proc_dointvec,
58983 + },
58984 +#endif
58985 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58986 + {
58987 + .procname = "tpe_invert",
58988 + .data = &grsec_enable_tpe_invert,
58989 + .maxlen = sizeof(int),
58990 + .mode = 0600,
58991 + .proc_handler = &proc_dointvec,
58992 + },
58993 +#endif
58994 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58995 + {
58996 + .procname = "tpe_restrict_all",
58997 + .data = &grsec_enable_tpe_all,
58998 + .maxlen = sizeof(int),
58999 + .mode = 0600,
59000 + .proc_handler = &proc_dointvec,
59001 + },
59002 +#endif
59003 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59004 + {
59005 + .procname = "socket_all",
59006 + .data = &grsec_enable_socket_all,
59007 + .maxlen = sizeof(int),
59008 + .mode = 0600,
59009 + .proc_handler = &proc_dointvec,
59010 + },
59011 + {
59012 + .procname = "socket_all_gid",
59013 + .data = &grsec_socket_all_gid,
59014 + .maxlen = sizeof(int),
59015 + .mode = 0600,
59016 + .proc_handler = &proc_dointvec,
59017 + },
59018 +#endif
59019 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59020 + {
59021 + .procname = "socket_client",
59022 + .data = &grsec_enable_socket_client,
59023 + .maxlen = sizeof(int),
59024 + .mode = 0600,
59025 + .proc_handler = &proc_dointvec,
59026 + },
59027 + {
59028 + .procname = "socket_client_gid",
59029 + .data = &grsec_socket_client_gid,
59030 + .maxlen = sizeof(int),
59031 + .mode = 0600,
59032 + .proc_handler = &proc_dointvec,
59033 + },
59034 +#endif
59035 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59036 + {
59037 + .procname = "socket_server",
59038 + .data = &grsec_enable_socket_server,
59039 + .maxlen = sizeof(int),
59040 + .mode = 0600,
59041 + .proc_handler = &proc_dointvec,
59042 + },
59043 + {
59044 + .procname = "socket_server_gid",
59045 + .data = &grsec_socket_server_gid,
59046 + .maxlen = sizeof(int),
59047 + .mode = 0600,
59048 + .proc_handler = &proc_dointvec,
59049 + },
59050 +#endif
59051 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59052 + {
59053 + .procname = "audit_group",
59054 + .data = &grsec_enable_group,
59055 + .maxlen = sizeof(int),
59056 + .mode = 0600,
59057 + .proc_handler = &proc_dointvec,
59058 + },
59059 + {
59060 + .procname = "audit_gid",
59061 + .data = &grsec_audit_gid,
59062 + .maxlen = sizeof(int),
59063 + .mode = 0600,
59064 + .proc_handler = &proc_dointvec,
59065 + },
59066 +#endif
59067 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59068 + {
59069 + .procname = "audit_chdir",
59070 + .data = &grsec_enable_chdir,
59071 + .maxlen = sizeof(int),
59072 + .mode = 0600,
59073 + .proc_handler = &proc_dointvec,
59074 + },
59075 +#endif
59076 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59077 + {
59078 + .procname = "audit_mount",
59079 + .data = &grsec_enable_mount,
59080 + .maxlen = sizeof(int),
59081 + .mode = 0600,
59082 + .proc_handler = &proc_dointvec,
59083 + },
59084 +#endif
59085 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59086 + {
59087 + .procname = "audit_textrel",
59088 + .data = &grsec_enable_audit_textrel,
59089 + .maxlen = sizeof(int),
59090 + .mode = 0600,
59091 + .proc_handler = &proc_dointvec,
59092 + },
59093 +#endif
59094 +#ifdef CONFIG_GRKERNSEC_DMESG
59095 + {
59096 + .procname = "dmesg",
59097 + .data = &grsec_enable_dmesg,
59098 + .maxlen = sizeof(int),
59099 + .mode = 0600,
59100 + .proc_handler = &proc_dointvec,
59101 + },
59102 +#endif
59103 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59104 + {
59105 + .procname = "chroot_findtask",
59106 + .data = &grsec_enable_chroot_findtask,
59107 + .maxlen = sizeof(int),
59108 + .mode = 0600,
59109 + .proc_handler = &proc_dointvec,
59110 + },
59111 +#endif
59112 +#ifdef CONFIG_GRKERNSEC_RESLOG
59113 + {
59114 + .procname = "resource_logging",
59115 + .data = &grsec_resource_logging,
59116 + .maxlen = sizeof(int),
59117 + .mode = 0600,
59118 + .proc_handler = &proc_dointvec,
59119 + },
59120 +#endif
59121 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59122 + {
59123 + .procname = "audit_ptrace",
59124 + .data = &grsec_enable_audit_ptrace,
59125 + .maxlen = sizeof(int),
59126 + .mode = 0600,
59127 + .proc_handler = &proc_dointvec,
59128 + },
59129 +#endif
59130 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59131 + {
59132 + .procname = "harden_ptrace",
59133 + .data = &grsec_enable_harden_ptrace,
59134 + .maxlen = sizeof(int),
59135 + .mode = 0600,
59136 + .proc_handler = &proc_dointvec,
59137 + },
59138 +#endif
59139 + {
59140 + .procname = "grsec_lock",
59141 + .data = &grsec_lock,
59142 + .maxlen = sizeof(int),
59143 + .mode = 0600,
59144 + .proc_handler = &proc_dointvec,
59145 + },
59146 +#endif
59147 +#ifdef CONFIG_GRKERNSEC_ROFS
59148 + {
59149 + .procname = "romount_protect",
59150 + .data = &grsec_enable_rofs,
59151 + .maxlen = sizeof(int),
59152 + .mode = 0600,
59153 + .proc_handler = &proc_dointvec_minmax,
59154 + .extra1 = &one,
59155 + .extra2 = &one,
59156 + },
59157 +#endif
59158 + { }
59159 +};
59160 +#endif
59161 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59162 new file mode 100644
59163 index 0000000..0dc13c3
59164 --- /dev/null
59165 +++ b/grsecurity/grsec_time.c
59166 @@ -0,0 +1,16 @@
59167 +#include <linux/kernel.h>
59168 +#include <linux/sched.h>
59169 +#include <linux/grinternal.h>
59170 +#include <linux/module.h>
59171 +
59172 +void
59173 +gr_log_timechange(void)
59174 +{
59175 +#ifdef CONFIG_GRKERNSEC_TIME
59176 + if (grsec_enable_time)
59177 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59178 +#endif
59179 + return;
59180 +}
59181 +
59182 +EXPORT_SYMBOL(gr_log_timechange);
59183 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59184 new file mode 100644
59185 index 0000000..4a78774
59186 --- /dev/null
59187 +++ b/grsecurity/grsec_tpe.c
59188 @@ -0,0 +1,39 @@
59189 +#include <linux/kernel.h>
59190 +#include <linux/sched.h>
59191 +#include <linux/file.h>
59192 +#include <linux/fs.h>
59193 +#include <linux/grinternal.h>
59194 +
59195 +extern int gr_acl_tpe_check(void);
59196 +
59197 +int
59198 +gr_tpe_allow(const struct file *file)
59199 +{
59200 +#ifdef CONFIG_GRKERNSEC
59201 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59202 + const struct cred *cred = current_cred();
59203 +
59204 + if (cred->uid && ((grsec_enable_tpe &&
59205 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59206 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
59207 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
59208 +#else
59209 + in_group_p(grsec_tpe_gid)
59210 +#endif
59211 + ) || gr_acl_tpe_check()) &&
59212 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
59213 + (inode->i_mode & S_IWOTH))))) {
59214 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59215 + return 0;
59216 + }
59217 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59218 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
59219 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
59220 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
59221 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
59222 + return 0;
59223 + }
59224 +#endif
59225 +#endif
59226 + return 1;
59227 +}
59228 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59229 new file mode 100644
59230 index 0000000..9f7b1ac
59231 --- /dev/null
59232 +++ b/grsecurity/grsum.c
59233 @@ -0,0 +1,61 @@
59234 +#include <linux/err.h>
59235 +#include <linux/kernel.h>
59236 +#include <linux/sched.h>
59237 +#include <linux/mm.h>
59238 +#include <linux/scatterlist.h>
59239 +#include <linux/crypto.h>
59240 +#include <linux/gracl.h>
59241 +
59242 +
59243 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59244 +#error "crypto and sha256 must be built into the kernel"
59245 +#endif
59246 +
59247 +int
59248 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59249 +{
59250 + char *p;
59251 + struct crypto_hash *tfm;
59252 + struct hash_desc desc;
59253 + struct scatterlist sg;
59254 + unsigned char temp_sum[GR_SHA_LEN];
59255 + volatile int retval = 0;
59256 + volatile int dummy = 0;
59257 + unsigned int i;
59258 +
59259 + sg_init_table(&sg, 1);
59260 +
59261 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59262 + if (IS_ERR(tfm)) {
59263 + /* should never happen, since sha256 should be built in */
59264 + return 1;
59265 + }
59266 +
59267 + desc.tfm = tfm;
59268 + desc.flags = 0;
59269 +
59270 + crypto_hash_init(&desc);
59271 +
59272 + p = salt;
59273 + sg_set_buf(&sg, p, GR_SALT_LEN);
59274 + crypto_hash_update(&desc, &sg, sg.length);
59275 +
59276 + p = entry->pw;
59277 + sg_set_buf(&sg, p, strlen(p));
59278 +
59279 + crypto_hash_update(&desc, &sg, sg.length);
59280 +
59281 + crypto_hash_final(&desc, temp_sum);
59282 +
59283 + memset(entry->pw, 0, GR_PW_LEN);
59284 +
59285 + for (i = 0; i < GR_SHA_LEN; i++)
59286 + if (sum[i] != temp_sum[i])
59287 + retval = 1;
59288 + else
59289 + dummy = 1; // waste a cycle
59290 +
59291 + crypto_free_hash(tfm);
59292 +
59293 + return retval;
59294 +}
59295 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59296 index 6cd5b64..f620d2d 100644
59297 --- a/include/acpi/acpi_bus.h
59298 +++ b/include/acpi/acpi_bus.h
59299 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59300 acpi_op_bind bind;
59301 acpi_op_unbind unbind;
59302 acpi_op_notify notify;
59303 -};
59304 +} __no_const;
59305
59306 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59307
59308 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59309 index b7babf0..71e4e74 100644
59310 --- a/include/asm-generic/atomic-long.h
59311 +++ b/include/asm-generic/atomic-long.h
59312 @@ -22,6 +22,12 @@
59313
59314 typedef atomic64_t atomic_long_t;
59315
59316 +#ifdef CONFIG_PAX_REFCOUNT
59317 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59318 +#else
59319 +typedef atomic64_t atomic_long_unchecked_t;
59320 +#endif
59321 +
59322 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59323
59324 static inline long atomic_long_read(atomic_long_t *l)
59325 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59326 return (long)atomic64_read(v);
59327 }
59328
59329 +#ifdef CONFIG_PAX_REFCOUNT
59330 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59331 +{
59332 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59333 +
59334 + return (long)atomic64_read_unchecked(v);
59335 +}
59336 +#endif
59337 +
59338 static inline void atomic_long_set(atomic_long_t *l, long i)
59339 {
59340 atomic64_t *v = (atomic64_t *)l;
59341 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59342 atomic64_set(v, i);
59343 }
59344
59345 +#ifdef CONFIG_PAX_REFCOUNT
59346 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59347 +{
59348 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59349 +
59350 + atomic64_set_unchecked(v, i);
59351 +}
59352 +#endif
59353 +
59354 static inline void atomic_long_inc(atomic_long_t *l)
59355 {
59356 atomic64_t *v = (atomic64_t *)l;
59357 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59358 atomic64_inc(v);
59359 }
59360
59361 +#ifdef CONFIG_PAX_REFCOUNT
59362 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59363 +{
59364 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59365 +
59366 + atomic64_inc_unchecked(v);
59367 +}
59368 +#endif
59369 +
59370 static inline void atomic_long_dec(atomic_long_t *l)
59371 {
59372 atomic64_t *v = (atomic64_t *)l;
59373 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59374 atomic64_dec(v);
59375 }
59376
59377 +#ifdef CONFIG_PAX_REFCOUNT
59378 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59379 +{
59380 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59381 +
59382 + atomic64_dec_unchecked(v);
59383 +}
59384 +#endif
59385 +
59386 static inline void atomic_long_add(long i, atomic_long_t *l)
59387 {
59388 atomic64_t *v = (atomic64_t *)l;
59389 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59390 atomic64_add(i, v);
59391 }
59392
59393 +#ifdef CONFIG_PAX_REFCOUNT
59394 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59395 +{
59396 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59397 +
59398 + atomic64_add_unchecked(i, v);
59399 +}
59400 +#endif
59401 +
59402 static inline void atomic_long_sub(long i, atomic_long_t *l)
59403 {
59404 atomic64_t *v = (atomic64_t *)l;
59405 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59406 atomic64_sub(i, v);
59407 }
59408
59409 +#ifdef CONFIG_PAX_REFCOUNT
59410 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59411 +{
59412 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59413 +
59414 + atomic64_sub_unchecked(i, v);
59415 +}
59416 +#endif
59417 +
59418 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59419 {
59420 atomic64_t *v = (atomic64_t *)l;
59421 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59422 return (long)atomic64_inc_return(v);
59423 }
59424
59425 +#ifdef CONFIG_PAX_REFCOUNT
59426 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59427 +{
59428 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59429 +
59430 + return (long)atomic64_inc_return_unchecked(v);
59431 +}
59432 +#endif
59433 +
59434 static inline long atomic_long_dec_return(atomic_long_t *l)
59435 {
59436 atomic64_t *v = (atomic64_t *)l;
59437 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59438
59439 typedef atomic_t atomic_long_t;
59440
59441 +#ifdef CONFIG_PAX_REFCOUNT
59442 +typedef atomic_unchecked_t atomic_long_unchecked_t;
59443 +#else
59444 +typedef atomic_t atomic_long_unchecked_t;
59445 +#endif
59446 +
59447 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59448 static inline long atomic_long_read(atomic_long_t *l)
59449 {
59450 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59451 return (long)atomic_read(v);
59452 }
59453
59454 +#ifdef CONFIG_PAX_REFCOUNT
59455 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59456 +{
59457 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59458 +
59459 + return (long)atomic_read_unchecked(v);
59460 +}
59461 +#endif
59462 +
59463 static inline void atomic_long_set(atomic_long_t *l, long i)
59464 {
59465 atomic_t *v = (atomic_t *)l;
59466 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59467 atomic_set(v, i);
59468 }
59469
59470 +#ifdef CONFIG_PAX_REFCOUNT
59471 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59472 +{
59473 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59474 +
59475 + atomic_set_unchecked(v, i);
59476 +}
59477 +#endif
59478 +
59479 static inline void atomic_long_inc(atomic_long_t *l)
59480 {
59481 atomic_t *v = (atomic_t *)l;
59482 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59483 atomic_inc(v);
59484 }
59485
59486 +#ifdef CONFIG_PAX_REFCOUNT
59487 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59488 +{
59489 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59490 +
59491 + atomic_inc_unchecked(v);
59492 +}
59493 +#endif
59494 +
59495 static inline void atomic_long_dec(atomic_long_t *l)
59496 {
59497 atomic_t *v = (atomic_t *)l;
59498 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59499 atomic_dec(v);
59500 }
59501
59502 +#ifdef CONFIG_PAX_REFCOUNT
59503 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59504 +{
59505 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59506 +
59507 + atomic_dec_unchecked(v);
59508 +}
59509 +#endif
59510 +
59511 static inline void atomic_long_add(long i, atomic_long_t *l)
59512 {
59513 atomic_t *v = (atomic_t *)l;
59514 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59515 atomic_add(i, v);
59516 }
59517
59518 +#ifdef CONFIG_PAX_REFCOUNT
59519 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59520 +{
59521 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59522 +
59523 + atomic_add_unchecked(i, v);
59524 +}
59525 +#endif
59526 +
59527 static inline void atomic_long_sub(long i, atomic_long_t *l)
59528 {
59529 atomic_t *v = (atomic_t *)l;
59530 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59531 atomic_sub(i, v);
59532 }
59533
59534 +#ifdef CONFIG_PAX_REFCOUNT
59535 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59536 +{
59537 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59538 +
59539 + atomic_sub_unchecked(i, v);
59540 +}
59541 +#endif
59542 +
59543 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59544 {
59545 atomic_t *v = (atomic_t *)l;
59546 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59547 return (long)atomic_inc_return(v);
59548 }
59549
59550 +#ifdef CONFIG_PAX_REFCOUNT
59551 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59552 +{
59553 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59554 +
59555 + return (long)atomic_inc_return_unchecked(v);
59556 +}
59557 +#endif
59558 +
59559 static inline long atomic_long_dec_return(atomic_long_t *l)
59560 {
59561 atomic_t *v = (atomic_t *)l;
59562 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59563
59564 #endif /* BITS_PER_LONG == 64 */
59565
59566 +#ifdef CONFIG_PAX_REFCOUNT
59567 +static inline void pax_refcount_needs_these_functions(void)
59568 +{
59569 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59570 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59571 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59572 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59573 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59574 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59575 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59576 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59577 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59578 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59579 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59580 +
59581 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59582 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59583 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59584 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59585 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59586 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59587 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59588 +}
59589 +#else
59590 +#define atomic_read_unchecked(v) atomic_read(v)
59591 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59592 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59593 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59594 +#define atomic_inc_unchecked(v) atomic_inc(v)
59595 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59596 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59597 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59598 +#define atomic_dec_unchecked(v) atomic_dec(v)
59599 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59600 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59601 +
59602 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59603 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59604 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59605 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59606 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59607 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59608 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59609 +#endif
59610 +
59611 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59612 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59613 index 1bfcfe5..e04c5c9 100644
59614 --- a/include/asm-generic/cache.h
59615 +++ b/include/asm-generic/cache.h
59616 @@ -6,7 +6,7 @@
59617 * cache lines need to provide their own cache.h.
59618 */
59619
59620 -#define L1_CACHE_SHIFT 5
59621 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59622 +#define L1_CACHE_SHIFT 5UL
59623 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59624
59625 #endif /* __ASM_GENERIC_CACHE_H */
59626 diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
59627 index 1ca3efc..e3dc852 100644
59628 --- a/include/asm-generic/int-l64.h
59629 +++ b/include/asm-generic/int-l64.h
59630 @@ -46,6 +46,8 @@ typedef unsigned int u32;
59631 typedef signed long s64;
59632 typedef unsigned long u64;
59633
59634 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
59635 +
59636 #define S8_C(x) x
59637 #define U8_C(x) x ## U
59638 #define S16_C(x) x
59639 diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
59640 index f394147..b6152b9 100644
59641 --- a/include/asm-generic/int-ll64.h
59642 +++ b/include/asm-generic/int-ll64.h
59643 @@ -51,6 +51,8 @@ typedef unsigned int u32;
59644 typedef signed long long s64;
59645 typedef unsigned long long u64;
59646
59647 +typedef unsigned long long intoverflow_t;
59648 +
59649 #define S8_C(x) x
59650 #define U8_C(x) x ## U
59651 #define S16_C(x) x
59652 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59653 index 0232ccb..13d9165 100644
59654 --- a/include/asm-generic/kmap_types.h
59655 +++ b/include/asm-generic/kmap_types.h
59656 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59657 KMAP_D(17) KM_NMI,
59658 KMAP_D(18) KM_NMI_PTE,
59659 KMAP_D(19) KM_KDB,
59660 +KMAP_D(20) KM_CLEARPAGE,
59661 /*
59662 * Remember to update debug_kmap_atomic() when adding new kmap types!
59663 */
59664 -KMAP_D(20) KM_TYPE_NR
59665 +KMAP_D(21) KM_TYPE_NR
59666 };
59667
59668 #undef KMAP_D
59669 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59670 index 725612b..9cc513a 100644
59671 --- a/include/asm-generic/pgtable-nopmd.h
59672 +++ b/include/asm-generic/pgtable-nopmd.h
59673 @@ -1,14 +1,19 @@
59674 #ifndef _PGTABLE_NOPMD_H
59675 #define _PGTABLE_NOPMD_H
59676
59677 -#ifndef __ASSEMBLY__
59678 -
59679 #include <asm-generic/pgtable-nopud.h>
59680
59681 -struct mm_struct;
59682 -
59683 #define __PAGETABLE_PMD_FOLDED
59684
59685 +#define PMD_SHIFT PUD_SHIFT
59686 +#define PTRS_PER_PMD 1
59687 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59688 +#define PMD_MASK (~(PMD_SIZE-1))
59689 +
59690 +#ifndef __ASSEMBLY__
59691 +
59692 +struct mm_struct;
59693 +
59694 /*
59695 * Having the pmd type consist of a pud gets the size right, and allows
59696 * us to conceptually access the pud entry that this pmd is folded into
59697 @@ -16,11 +21,6 @@ struct mm_struct;
59698 */
59699 typedef struct { pud_t pud; } pmd_t;
59700
59701 -#define PMD_SHIFT PUD_SHIFT
59702 -#define PTRS_PER_PMD 1
59703 -#define PMD_SIZE (1UL << PMD_SHIFT)
59704 -#define PMD_MASK (~(PMD_SIZE-1))
59705 -
59706 /*
59707 * The "pud_xxx()" functions here are trivial for a folded two-level
59708 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59709 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59710 index 810431d..ccc3638 100644
59711 --- a/include/asm-generic/pgtable-nopud.h
59712 +++ b/include/asm-generic/pgtable-nopud.h
59713 @@ -1,10 +1,15 @@
59714 #ifndef _PGTABLE_NOPUD_H
59715 #define _PGTABLE_NOPUD_H
59716
59717 -#ifndef __ASSEMBLY__
59718 -
59719 #define __PAGETABLE_PUD_FOLDED
59720
59721 +#define PUD_SHIFT PGDIR_SHIFT
59722 +#define PTRS_PER_PUD 1
59723 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59724 +#define PUD_MASK (~(PUD_SIZE-1))
59725 +
59726 +#ifndef __ASSEMBLY__
59727 +
59728 /*
59729 * Having the pud type consist of a pgd gets the size right, and allows
59730 * us to conceptually access the pgd entry that this pud is folded into
59731 @@ -12,11 +17,6 @@
59732 */
59733 typedef struct { pgd_t pgd; } pud_t;
59734
59735 -#define PUD_SHIFT PGDIR_SHIFT
59736 -#define PTRS_PER_PUD 1
59737 -#define PUD_SIZE (1UL << PUD_SHIFT)
59738 -#define PUD_MASK (~(PUD_SIZE-1))
59739 -
59740 /*
59741 * The "pgd_xxx()" functions here are trivial for a folded two-level
59742 * setup: the pud is never bad, and a pud always exists (as it's folded
59743 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59744 index 76bff2b..c7a14e2 100644
59745 --- a/include/asm-generic/pgtable.h
59746 +++ b/include/asm-generic/pgtable.h
59747 @@ -443,6 +443,14 @@ static inline int pmd_write(pmd_t pmd)
59748 #endif /* __HAVE_ARCH_PMD_WRITE */
59749 #endif
59750
59751 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59752 +static inline unsigned long pax_open_kernel(void) { return 0; }
59753 +#endif
59754 +
59755 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59756 +static inline unsigned long pax_close_kernel(void) { return 0; }
59757 +#endif
59758 +
59759 #endif /* !__ASSEMBLY__ */
59760
59761 #endif /* _ASM_GENERIC_PGTABLE_H */
59762 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59763 index db22d13..1f2e3e1 100644
59764 --- a/include/asm-generic/vmlinux.lds.h
59765 +++ b/include/asm-generic/vmlinux.lds.h
59766 @@ -217,6 +217,7 @@
59767 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59768 VMLINUX_SYMBOL(__start_rodata) = .; \
59769 *(.rodata) *(.rodata.*) \
59770 + *(.data..read_only) \
59771 *(__vermagic) /* Kernel version magic */ \
59772 . = ALIGN(8); \
59773 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59774 @@ -723,17 +724,18 @@
59775 * section in the linker script will go there too. @phdr should have
59776 * a leading colon.
59777 *
59778 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59779 + * Note that this macros defines per_cpu_load as an absolute symbol.
59780 * If there is no need to put the percpu section at a predetermined
59781 * address, use PERCPU_SECTION.
59782 */
59783 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59784 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59785 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59786 + per_cpu_load = .; \
59787 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59788 - LOAD_OFFSET) { \
59789 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59790 PERCPU_INPUT(cacheline) \
59791 } phdr \
59792 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59793 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59794
59795 /**
59796 * PERCPU_SECTION - define output section for percpu area, simple version
59797 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59798 index 9b7c2bb..76b7d1e 100644
59799 --- a/include/drm/drmP.h
59800 +++ b/include/drm/drmP.h
59801 @@ -73,6 +73,7 @@
59802 #include <linux/workqueue.h>
59803 #include <linux/poll.h>
59804 #include <asm/pgalloc.h>
59805 +#include <asm/local.h>
59806 #include "drm.h"
59807
59808 #include <linux/idr.h>
59809 @@ -1035,7 +1036,7 @@ struct drm_device {
59810
59811 /** \name Usage Counters */
59812 /*@{ */
59813 - int open_count; /**< Outstanding files open */
59814 + local_t open_count; /**< Outstanding files open */
59815 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59816 atomic_t vma_count; /**< Outstanding vma areas open */
59817 int buf_use; /**< Buffers in use -- cannot alloc */
59818 @@ -1046,7 +1047,7 @@ struct drm_device {
59819 /*@{ */
59820 unsigned long counters;
59821 enum drm_stat_type types[15];
59822 - atomic_t counts[15];
59823 + atomic_unchecked_t counts[15];
59824 /*@} */
59825
59826 struct list_head filelist;
59827 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59828 index 73b0712..0b7ef2f 100644
59829 --- a/include/drm/drm_crtc_helper.h
59830 +++ b/include/drm/drm_crtc_helper.h
59831 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59832
59833 /* disable crtc when not in use - more explicit than dpms off */
59834 void (*disable)(struct drm_crtc *crtc);
59835 -};
59836 +} __no_const;
59837
59838 struct drm_encoder_helper_funcs {
59839 void (*dpms)(struct drm_encoder *encoder, int mode);
59840 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59841 struct drm_connector *connector);
59842 /* disable encoder when not in use - more explicit than dpms off */
59843 void (*disable)(struct drm_encoder *encoder);
59844 -};
59845 +} __no_const;
59846
59847 struct drm_connector_helper_funcs {
59848 int (*get_modes)(struct drm_connector *connector);
59849 diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
59850 index c4961ea..53dfa109 100644
59851 --- a/include/drm/drm_mode.h
59852 +++ b/include/drm/drm_mode.h
59853 @@ -233,6 +233,8 @@ struct drm_mode_fb_cmd {
59854 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
59855 #define DRM_MODE_FB_DIRTY_FLAGS 0x03
59856
59857 +#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
59858 +
59859 /*
59860 * Mark a region of a framebuffer as dirty.
59861 *
59862 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59863 index 26c1f78..6722682 100644
59864 --- a/include/drm/ttm/ttm_memory.h
59865 +++ b/include/drm/ttm/ttm_memory.h
59866 @@ -47,7 +47,7 @@
59867
59868 struct ttm_mem_shrink {
59869 int (*do_shrink) (struct ttm_mem_shrink *);
59870 -};
59871 +} __no_const;
59872
59873 /**
59874 * struct ttm_mem_global - Global memory accounting structure.
59875 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59876 index e86dfca..40cc55f 100644
59877 --- a/include/linux/a.out.h
59878 +++ b/include/linux/a.out.h
59879 @@ -39,6 +39,14 @@ enum machine_type {
59880 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59881 };
59882
59883 +/* Constants for the N_FLAGS field */
59884 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59885 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59886 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59887 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59888 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59889 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59890 +
59891 #if !defined (N_MAGIC)
59892 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59893 #endif
59894 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59895 index 49a83ca..df96b54 100644
59896 --- a/include/linux/atmdev.h
59897 +++ b/include/linux/atmdev.h
59898 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59899 #endif
59900
59901 struct k_atm_aal_stats {
59902 -#define __HANDLE_ITEM(i) atomic_t i
59903 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59904 __AAL_STAT_ITEMS
59905 #undef __HANDLE_ITEM
59906 };
59907 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59908 index fd88a39..f4d0bad 100644
59909 --- a/include/linux/binfmts.h
59910 +++ b/include/linux/binfmts.h
59911 @@ -88,6 +88,7 @@ struct linux_binfmt {
59912 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59913 int (*load_shlib)(struct file *);
59914 int (*core_dump)(struct coredump_params *cprm);
59915 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59916 unsigned long min_coredump; /* minimal dump size */
59917 };
59918
59919 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59920 index 7fbaa91..5e6a460 100644
59921 --- a/include/linux/blkdev.h
59922 +++ b/include/linux/blkdev.h
59923 @@ -1321,7 +1321,7 @@ struct block_device_operations {
59924 /* this callback is with swap_lock and sometimes page table lock held */
59925 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59926 struct module *owner;
59927 -};
59928 +} __do_const;
59929
59930 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59931 unsigned long);
59932 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59933 index 8e9e4bc..88bd457 100644
59934 --- a/include/linux/blktrace_api.h
59935 +++ b/include/linux/blktrace_api.h
59936 @@ -162,7 +162,7 @@ struct blk_trace {
59937 struct dentry *dir;
59938 struct dentry *dropped_file;
59939 struct dentry *msg_file;
59940 - atomic_t dropped;
59941 + atomic_unchecked_t dropped;
59942 };
59943
59944 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59945 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59946 index 83195fb..0b0f77d 100644
59947 --- a/include/linux/byteorder/little_endian.h
59948 +++ b/include/linux/byteorder/little_endian.h
59949 @@ -42,51 +42,51 @@
59950
59951 static inline __le64 __cpu_to_le64p(const __u64 *p)
59952 {
59953 - return (__force __le64)*p;
59954 + return (__force const __le64)*p;
59955 }
59956 static inline __u64 __le64_to_cpup(const __le64 *p)
59957 {
59958 - return (__force __u64)*p;
59959 + return (__force const __u64)*p;
59960 }
59961 static inline __le32 __cpu_to_le32p(const __u32 *p)
59962 {
59963 - return (__force __le32)*p;
59964 + return (__force const __le32)*p;
59965 }
59966 static inline __u32 __le32_to_cpup(const __le32 *p)
59967 {
59968 - return (__force __u32)*p;
59969 + return (__force const __u32)*p;
59970 }
59971 static inline __le16 __cpu_to_le16p(const __u16 *p)
59972 {
59973 - return (__force __le16)*p;
59974 + return (__force const __le16)*p;
59975 }
59976 static inline __u16 __le16_to_cpup(const __le16 *p)
59977 {
59978 - return (__force __u16)*p;
59979 + return (__force const __u16)*p;
59980 }
59981 static inline __be64 __cpu_to_be64p(const __u64 *p)
59982 {
59983 - return (__force __be64)__swab64p(p);
59984 + return (__force const __be64)__swab64p(p);
59985 }
59986 static inline __u64 __be64_to_cpup(const __be64 *p)
59987 {
59988 - return __swab64p((__u64 *)p);
59989 + return __swab64p((const __u64 *)p);
59990 }
59991 static inline __be32 __cpu_to_be32p(const __u32 *p)
59992 {
59993 - return (__force __be32)__swab32p(p);
59994 + return (__force const __be32)__swab32p(p);
59995 }
59996 static inline __u32 __be32_to_cpup(const __be32 *p)
59997 {
59998 - return __swab32p((__u32 *)p);
59999 + return __swab32p((const __u32 *)p);
60000 }
60001 static inline __be16 __cpu_to_be16p(const __u16 *p)
60002 {
60003 - return (__force __be16)__swab16p(p);
60004 + return (__force const __be16)__swab16p(p);
60005 }
60006 static inline __u16 __be16_to_cpup(const __be16 *p)
60007 {
60008 - return __swab16p((__u16 *)p);
60009 + return __swab16p((const __u16 *)p);
60010 }
60011 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60012 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60013 diff --git a/include/linux/cache.h b/include/linux/cache.h
60014 index 4c57065..4307975 100644
60015 --- a/include/linux/cache.h
60016 +++ b/include/linux/cache.h
60017 @@ -16,6 +16,10 @@
60018 #define __read_mostly
60019 #endif
60020
60021 +#ifndef __read_only
60022 +#define __read_only __read_mostly
60023 +#endif
60024 +
60025 #ifndef ____cacheline_aligned
60026 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60027 #endif
60028 diff --git a/include/linux/capability.h b/include/linux/capability.h
60029 index c421123..e343179 100644
60030 --- a/include/linux/capability.h
60031 +++ b/include/linux/capability.h
60032 @@ -547,6 +547,9 @@ extern bool capable(int cap);
60033 extern bool ns_capable(struct user_namespace *ns, int cap);
60034 extern bool task_ns_capable(struct task_struct *t, int cap);
60035 extern bool nsown_capable(int cap);
60036 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
60037 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60038 +extern bool capable_nolog(int cap);
60039
60040 /* audit system wants to get cap info from files as well */
60041 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60042 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60043 index 04ffb2e..6799180 100644
60044 --- a/include/linux/cleancache.h
60045 +++ b/include/linux/cleancache.h
60046 @@ -31,7 +31,7 @@ struct cleancache_ops {
60047 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60048 void (*flush_inode)(int, struct cleancache_filekey);
60049 void (*flush_fs)(int);
60050 -};
60051 +} __no_const;
60052
60053 extern struct cleancache_ops
60054 cleancache_register_ops(struct cleancache_ops *ops);
60055 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60056 index dfadc96..c0e70c1 100644
60057 --- a/include/linux/compiler-gcc4.h
60058 +++ b/include/linux/compiler-gcc4.h
60059 @@ -31,6 +31,12 @@
60060
60061
60062 #if __GNUC_MINOR__ >= 5
60063 +
60064 +#ifdef CONSTIFY_PLUGIN
60065 +#define __no_const __attribute__((no_const))
60066 +#define __do_const __attribute__((do_const))
60067 +#endif
60068 +
60069 /*
60070 * Mark a position in code as unreachable. This can be used to
60071 * suppress control flow warnings after asm blocks that transfer
60072 @@ -46,6 +52,11 @@
60073 #define __noclone __attribute__((__noclone__))
60074
60075 #endif
60076 +
60077 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60078 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60079 +#define __bos0(ptr) __bos((ptr), 0)
60080 +#define __bos1(ptr) __bos((ptr), 1)
60081 #endif
60082
60083 #if __GNUC_MINOR__ > 0
60084 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60085 index 320d6c9..8573a1c 100644
60086 --- a/include/linux/compiler.h
60087 +++ b/include/linux/compiler.h
60088 @@ -5,31 +5,62 @@
60089
60090 #ifdef __CHECKER__
60091 # define __user __attribute__((noderef, address_space(1)))
60092 +# define __force_user __force __user
60093 # define __kernel __attribute__((address_space(0)))
60094 +# define __force_kernel __force __kernel
60095 # define __safe __attribute__((safe))
60096 # define __force __attribute__((force))
60097 # define __nocast __attribute__((nocast))
60098 # define __iomem __attribute__((noderef, address_space(2)))
60099 +# define __force_iomem __force __iomem
60100 # define __acquires(x) __attribute__((context(x,0,1)))
60101 # define __releases(x) __attribute__((context(x,1,0)))
60102 # define __acquire(x) __context__(x,1)
60103 # define __release(x) __context__(x,-1)
60104 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60105 # define __percpu __attribute__((noderef, address_space(3)))
60106 +# define __force_percpu __force __percpu
60107 #ifdef CONFIG_SPARSE_RCU_POINTER
60108 # define __rcu __attribute__((noderef, address_space(4)))
60109 +# define __force_rcu __force __rcu
60110 #else
60111 # define __rcu
60112 +# define __force_rcu
60113 #endif
60114 extern void __chk_user_ptr(const volatile void __user *);
60115 extern void __chk_io_ptr(const volatile void __iomem *);
60116 +#elif defined(CHECKER_PLUGIN)
60117 +//# define __user
60118 +//# define __force_user
60119 +//# define __kernel
60120 +//# define __force_kernel
60121 +# define __safe
60122 +# define __force
60123 +# define __nocast
60124 +# define __iomem
60125 +# define __force_iomem
60126 +# define __chk_user_ptr(x) (void)0
60127 +# define __chk_io_ptr(x) (void)0
60128 +# define __builtin_warning(x, y...) (1)
60129 +# define __acquires(x)
60130 +# define __releases(x)
60131 +# define __acquire(x) (void)0
60132 +# define __release(x) (void)0
60133 +# define __cond_lock(x,c) (c)
60134 +# define __percpu
60135 +# define __force_percpu
60136 +# define __rcu
60137 +# define __force_rcu
60138 #else
60139 # define __user
60140 +# define __force_user
60141 # define __kernel
60142 +# define __force_kernel
60143 # define __safe
60144 # define __force
60145 # define __nocast
60146 # define __iomem
60147 +# define __force_iomem
60148 # define __chk_user_ptr(x) (void)0
60149 # define __chk_io_ptr(x) (void)0
60150 # define __builtin_warning(x, y...) (1)
60151 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60152 # define __release(x) (void)0
60153 # define __cond_lock(x,c) (c)
60154 # define __percpu
60155 +# define __force_percpu
60156 # define __rcu
60157 +# define __force_rcu
60158 #endif
60159
60160 #ifdef __KERNEL__
60161 @@ -264,6 +297,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60162 # define __attribute_const__ /* unimplemented */
60163 #endif
60164
60165 +#ifndef __no_const
60166 +# define __no_const
60167 +#endif
60168 +
60169 +#ifndef __do_const
60170 +# define __do_const
60171 +#endif
60172 +
60173 /*
60174 * Tell gcc if a function is cold. The compiler will assume any path
60175 * directly leading to the call is unlikely.
60176 @@ -273,6 +314,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60177 #define __cold
60178 #endif
60179
60180 +#ifndef __alloc_size
60181 +#define __alloc_size(...)
60182 +#endif
60183 +
60184 +#ifndef __bos
60185 +#define __bos(ptr, arg)
60186 +#endif
60187 +
60188 +#ifndef __bos0
60189 +#define __bos0(ptr)
60190 +#endif
60191 +
60192 +#ifndef __bos1
60193 +#define __bos1(ptr)
60194 +#endif
60195 +
60196 /* Simple shorthand for a section definition */
60197 #ifndef __section
60198 # define __section(S) __attribute__ ((__section__(#S)))
60199 @@ -306,6 +363,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60200 * use is to mediate communication between process-level code and irq/NMI
60201 * handlers, all running on the same CPU.
60202 */
60203 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60204 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60205 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60206
60207 #endif /* __LINUX_COMPILER_H */
60208 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60209 index e9eaec5..bfeb9bb 100644
60210 --- a/include/linux/cpuset.h
60211 +++ b/include/linux/cpuset.h
60212 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
60213 * nodemask.
60214 */
60215 smp_mb();
60216 - --ACCESS_ONCE(current->mems_allowed_change_disable);
60217 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60218 }
60219
60220 static inline void set_mems_allowed(nodemask_t nodemask)
60221 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60222 index e5e468e..f079672 100644
60223 --- a/include/linux/crypto.h
60224 +++ b/include/linux/crypto.h
60225 @@ -361,7 +361,7 @@ struct cipher_tfm {
60226 const u8 *key, unsigned int keylen);
60227 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60228 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60229 -};
60230 +} __no_const;
60231
60232 struct hash_tfm {
60233 int (*init)(struct hash_desc *desc);
60234 @@ -382,13 +382,13 @@ struct compress_tfm {
60235 int (*cot_decompress)(struct crypto_tfm *tfm,
60236 const u8 *src, unsigned int slen,
60237 u8 *dst, unsigned int *dlen);
60238 -};
60239 +} __no_const;
60240
60241 struct rng_tfm {
60242 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60243 unsigned int dlen);
60244 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60245 -};
60246 +} __no_const;
60247
60248 #define crt_ablkcipher crt_u.ablkcipher
60249 #define crt_aead crt_u.aead
60250 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60251 index 7925bf0..d5143d2 100644
60252 --- a/include/linux/decompress/mm.h
60253 +++ b/include/linux/decompress/mm.h
60254 @@ -77,7 +77,7 @@ static void free(void *where)
60255 * warnings when not needed (indeed large_malloc / large_free are not
60256 * needed by inflate */
60257
60258 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60259 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60260 #define free(a) kfree(a)
60261
60262 #define large_malloc(a) vmalloc(a)
60263 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60264 index 347fdc3..cd01657 100644
60265 --- a/include/linux/dma-mapping.h
60266 +++ b/include/linux/dma-mapping.h
60267 @@ -42,7 +42,7 @@ struct dma_map_ops {
60268 int (*dma_supported)(struct device *dev, u64 mask);
60269 int (*set_dma_mask)(struct device *dev, u64 mask);
60270 int is_phys;
60271 -};
60272 +} __do_const;
60273
60274 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60275
60276 diff --git a/include/linux/efi.h b/include/linux/efi.h
60277 index 2362a0b..cfaf8fcc 100644
60278 --- a/include/linux/efi.h
60279 +++ b/include/linux/efi.h
60280 @@ -446,7 +446,7 @@ struct efivar_operations {
60281 efi_get_variable_t *get_variable;
60282 efi_get_next_variable_t *get_next_variable;
60283 efi_set_variable_t *set_variable;
60284 -};
60285 +} __no_const;
60286
60287 struct efivars {
60288 /*
60289 diff --git a/include/linux/elf.h b/include/linux/elf.h
60290 index 110821c..cb14c08 100644
60291 --- a/include/linux/elf.h
60292 +++ b/include/linux/elf.h
60293 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
60294 #define PT_GNU_EH_FRAME 0x6474e550
60295
60296 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60297 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60298 +
60299 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60300 +
60301 +/* Constants for the e_flags field */
60302 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60303 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60304 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60305 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60306 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60307 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60308
60309 /*
60310 * Extended Numbering
60311 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
60312 #define DT_DEBUG 21
60313 #define DT_TEXTREL 22
60314 #define DT_JMPREL 23
60315 +#define DT_FLAGS 30
60316 + #define DF_TEXTREL 0x00000004
60317 #define DT_ENCODING 32
60318 #define OLD_DT_LOOS 0x60000000
60319 #define DT_LOOS 0x6000000d
60320 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
60321 #define PF_W 0x2
60322 #define PF_X 0x1
60323
60324 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60325 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60326 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60327 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60328 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60329 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60330 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60331 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60332 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60333 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60334 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60335 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60336 +
60337 typedef struct elf32_phdr{
60338 Elf32_Word p_type;
60339 Elf32_Off p_offset;
60340 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
60341 #define EI_OSABI 7
60342 #define EI_PAD 8
60343
60344 +#define EI_PAX 14
60345 +
60346 #define ELFMAG0 0x7f /* EI_MAG */
60347 #define ELFMAG1 'E'
60348 #define ELFMAG2 'L'
60349 @@ -422,6 +450,7 @@ extern Elf32_Dyn _DYNAMIC [];
60350 #define elf_note elf32_note
60351 #define elf_addr_t Elf32_Off
60352 #define Elf_Half Elf32_Half
60353 +#define elf_dyn Elf32_Dyn
60354
60355 #else
60356
60357 @@ -432,6 +461,7 @@ extern Elf64_Dyn _DYNAMIC [];
60358 #define elf_note elf64_note
60359 #define elf_addr_t Elf64_Off
60360 #define Elf_Half Elf64_Half
60361 +#define elf_dyn Elf64_Dyn
60362
60363 #endif
60364
60365 diff --git a/include/linux/filter.h b/include/linux/filter.h
60366 index 741956f..f02f482 100644
60367 --- a/include/linux/filter.h
60368 +++ b/include/linux/filter.h
60369 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60370
60371 struct sk_buff;
60372 struct sock;
60373 +struct bpf_jit_work;
60374
60375 struct sk_filter
60376 {
60377 @@ -141,6 +142,9 @@ struct sk_filter
60378 unsigned int len; /* Number of filter blocks */
60379 unsigned int (*bpf_func)(const struct sk_buff *skb,
60380 const struct sock_filter *filter);
60381 +#ifdef CONFIG_BPF_JIT
60382 + struct bpf_jit_work *work;
60383 +#endif
60384 struct rcu_head rcu;
60385 struct sock_filter insns[0];
60386 };
60387 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60388 index 84ccf8e..2e9b14c 100644
60389 --- a/include/linux/firewire.h
60390 +++ b/include/linux/firewire.h
60391 @@ -428,7 +428,7 @@ struct fw_iso_context {
60392 union {
60393 fw_iso_callback_t sc;
60394 fw_iso_mc_callback_t mc;
60395 - } callback;
60396 + } __no_const callback;
60397 void *callback_data;
60398 };
60399
60400 diff --git a/include/linux/fs.h b/include/linux/fs.h
60401 index 277f497..9be66a4 100644
60402 --- a/include/linux/fs.h
60403 +++ b/include/linux/fs.h
60404 @@ -1588,7 +1588,8 @@ struct file_operations {
60405 int (*setlease)(struct file *, long, struct file_lock **);
60406 long (*fallocate)(struct file *file, int mode, loff_t offset,
60407 loff_t len);
60408 -};
60409 +} __do_const;
60410 +typedef struct file_operations __no_const file_operations_no_const;
60411
60412 struct inode_operations {
60413 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60414 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60415 index 003dc0f..3c4ea97 100644
60416 --- a/include/linux/fs_struct.h
60417 +++ b/include/linux/fs_struct.h
60418 @@ -6,7 +6,7 @@
60419 #include <linux/seqlock.h>
60420
60421 struct fs_struct {
60422 - int users;
60423 + atomic_t users;
60424 spinlock_t lock;
60425 seqcount_t seq;
60426 int umask;
60427 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60428 index af095b5..cf1220c 100644
60429 --- a/include/linux/fscache-cache.h
60430 +++ b/include/linux/fscache-cache.h
60431 @@ -102,7 +102,7 @@ struct fscache_operation {
60432 fscache_operation_release_t release;
60433 };
60434
60435 -extern atomic_t fscache_op_debug_id;
60436 +extern atomic_unchecked_t fscache_op_debug_id;
60437 extern void fscache_op_work_func(struct work_struct *work);
60438
60439 extern void fscache_enqueue_operation(struct fscache_operation *);
60440 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60441 {
60442 INIT_WORK(&op->work, fscache_op_work_func);
60443 atomic_set(&op->usage, 1);
60444 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60445 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60446 op->processor = processor;
60447 op->release = release;
60448 INIT_LIST_HEAD(&op->pend_link);
60449 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60450 index 2a53f10..0187fdf 100644
60451 --- a/include/linux/fsnotify.h
60452 +++ b/include/linux/fsnotify.h
60453 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60454 */
60455 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60456 {
60457 - return kstrdup(name, GFP_KERNEL);
60458 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60459 }
60460
60461 /*
60462 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60463 index 96efa67..1261547 100644
60464 --- a/include/linux/ftrace_event.h
60465 +++ b/include/linux/ftrace_event.h
60466 @@ -97,7 +97,7 @@ struct trace_event_functions {
60467 trace_print_func raw;
60468 trace_print_func hex;
60469 trace_print_func binary;
60470 -};
60471 +} __no_const;
60472
60473 struct trace_event {
60474 struct hlist_node node;
60475 @@ -252,7 +252,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60476 extern int trace_add_event_call(struct ftrace_event_call *call);
60477 extern void trace_remove_event_call(struct ftrace_event_call *call);
60478
60479 -#define is_signed_type(type) (((type)(-1)) < 0)
60480 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60481
60482 int trace_set_clr_event(const char *system, const char *event, int set);
60483
60484 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60485 index 02fa469..a15f279 100644
60486 --- a/include/linux/genhd.h
60487 +++ b/include/linux/genhd.h
60488 @@ -184,7 +184,7 @@ struct gendisk {
60489 struct kobject *slave_dir;
60490
60491 struct timer_rand_state *random;
60492 - atomic_t sync_io; /* RAID */
60493 + atomic_unchecked_t sync_io; /* RAID */
60494 struct disk_events *ev;
60495 #ifdef CONFIG_BLK_DEV_INTEGRITY
60496 struct blk_integrity *integrity;
60497 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60498 new file mode 100644
60499 index 0000000..0dc3943
60500 --- /dev/null
60501 +++ b/include/linux/gracl.h
60502 @@ -0,0 +1,317 @@
60503 +#ifndef GR_ACL_H
60504 +#define GR_ACL_H
60505 +
60506 +#include <linux/grdefs.h>
60507 +#include <linux/resource.h>
60508 +#include <linux/capability.h>
60509 +#include <linux/dcache.h>
60510 +#include <asm/resource.h>
60511 +
60512 +/* Major status information */
60513 +
60514 +#define GR_VERSION "grsecurity 2.2.2"
60515 +#define GRSECURITY_VERSION 0x2202
60516 +
60517 +enum {
60518 + GR_SHUTDOWN = 0,
60519 + GR_ENABLE = 1,
60520 + GR_SPROLE = 2,
60521 + GR_RELOAD = 3,
60522 + GR_SEGVMOD = 4,
60523 + GR_STATUS = 5,
60524 + GR_UNSPROLE = 6,
60525 + GR_PASSSET = 7,
60526 + GR_SPROLEPAM = 8,
60527 +};
60528 +
60529 +/* Password setup definitions
60530 + * kernel/grhash.c */
60531 +enum {
60532 + GR_PW_LEN = 128,
60533 + GR_SALT_LEN = 16,
60534 + GR_SHA_LEN = 32,
60535 +};
60536 +
60537 +enum {
60538 + GR_SPROLE_LEN = 64,
60539 +};
60540 +
60541 +enum {
60542 + GR_NO_GLOB = 0,
60543 + GR_REG_GLOB,
60544 + GR_CREATE_GLOB
60545 +};
60546 +
60547 +#define GR_NLIMITS 32
60548 +
60549 +/* Begin Data Structures */
60550 +
60551 +struct sprole_pw {
60552 + unsigned char *rolename;
60553 + unsigned char salt[GR_SALT_LEN];
60554 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60555 +};
60556 +
60557 +struct name_entry {
60558 + __u32 key;
60559 + ino_t inode;
60560 + dev_t device;
60561 + char *name;
60562 + __u16 len;
60563 + __u8 deleted;
60564 + struct name_entry *prev;
60565 + struct name_entry *next;
60566 +};
60567 +
60568 +struct inodev_entry {
60569 + struct name_entry *nentry;
60570 + struct inodev_entry *prev;
60571 + struct inodev_entry *next;
60572 +};
60573 +
60574 +struct acl_role_db {
60575 + struct acl_role_label **r_hash;
60576 + __u32 r_size;
60577 +};
60578 +
60579 +struct inodev_db {
60580 + struct inodev_entry **i_hash;
60581 + __u32 i_size;
60582 +};
60583 +
60584 +struct name_db {
60585 + struct name_entry **n_hash;
60586 + __u32 n_size;
60587 +};
60588 +
60589 +struct crash_uid {
60590 + uid_t uid;
60591 + unsigned long expires;
60592 +};
60593 +
60594 +struct gr_hash_struct {
60595 + void **table;
60596 + void **nametable;
60597 + void *first;
60598 + __u32 table_size;
60599 + __u32 used_size;
60600 + int type;
60601 +};
60602 +
60603 +/* Userspace Grsecurity ACL data structures */
60604 +
60605 +struct acl_subject_label {
60606 + char *filename;
60607 + ino_t inode;
60608 + dev_t device;
60609 + __u32 mode;
60610 + kernel_cap_t cap_mask;
60611 + kernel_cap_t cap_lower;
60612 + kernel_cap_t cap_invert_audit;
60613 +
60614 + struct rlimit res[GR_NLIMITS];
60615 + __u32 resmask;
60616 +
60617 + __u8 user_trans_type;
60618 + __u8 group_trans_type;
60619 + uid_t *user_transitions;
60620 + gid_t *group_transitions;
60621 + __u16 user_trans_num;
60622 + __u16 group_trans_num;
60623 +
60624 + __u32 sock_families[2];
60625 + __u32 ip_proto[8];
60626 + __u32 ip_type;
60627 + struct acl_ip_label **ips;
60628 + __u32 ip_num;
60629 + __u32 inaddr_any_override;
60630 +
60631 + __u32 crashes;
60632 + unsigned long expires;
60633 +
60634 + struct acl_subject_label *parent_subject;
60635 + struct gr_hash_struct *hash;
60636 + struct acl_subject_label *prev;
60637 + struct acl_subject_label *next;
60638 +
60639 + struct acl_object_label **obj_hash;
60640 + __u32 obj_hash_size;
60641 + __u16 pax_flags;
60642 +};
60643 +
60644 +struct role_allowed_ip {
60645 + __u32 addr;
60646 + __u32 netmask;
60647 +
60648 + struct role_allowed_ip *prev;
60649 + struct role_allowed_ip *next;
60650 +};
60651 +
60652 +struct role_transition {
60653 + char *rolename;
60654 +
60655 + struct role_transition *prev;
60656 + struct role_transition *next;
60657 +};
60658 +
60659 +struct acl_role_label {
60660 + char *rolename;
60661 + uid_t uidgid;
60662 + __u16 roletype;
60663 +
60664 + __u16 auth_attempts;
60665 + unsigned long expires;
60666 +
60667 + struct acl_subject_label *root_label;
60668 + struct gr_hash_struct *hash;
60669 +
60670 + struct acl_role_label *prev;
60671 + struct acl_role_label *next;
60672 +
60673 + struct role_transition *transitions;
60674 + struct role_allowed_ip *allowed_ips;
60675 + uid_t *domain_children;
60676 + __u16 domain_child_num;
60677 +
60678 + struct acl_subject_label **subj_hash;
60679 + __u32 subj_hash_size;
60680 +};
60681 +
60682 +struct user_acl_role_db {
60683 + struct acl_role_label **r_table;
60684 + __u32 num_pointers; /* Number of allocations to track */
60685 + __u32 num_roles; /* Number of roles */
60686 + __u32 num_domain_children; /* Number of domain children */
60687 + __u32 num_subjects; /* Number of subjects */
60688 + __u32 num_objects; /* Number of objects */
60689 +};
60690 +
60691 +struct acl_object_label {
60692 + char *filename;
60693 + ino_t inode;
60694 + dev_t device;
60695 + __u32 mode;
60696 +
60697 + struct acl_subject_label *nested;
60698 + struct acl_object_label *globbed;
60699 +
60700 + /* next two structures not used */
60701 +
60702 + struct acl_object_label *prev;
60703 + struct acl_object_label *next;
60704 +};
60705 +
60706 +struct acl_ip_label {
60707 + char *iface;
60708 + __u32 addr;
60709 + __u32 netmask;
60710 + __u16 low, high;
60711 + __u8 mode;
60712 + __u32 type;
60713 + __u32 proto[8];
60714 +
60715 + /* next two structures not used */
60716 +
60717 + struct acl_ip_label *prev;
60718 + struct acl_ip_label *next;
60719 +};
60720 +
60721 +struct gr_arg {
60722 + struct user_acl_role_db role_db;
60723 + unsigned char pw[GR_PW_LEN];
60724 + unsigned char salt[GR_SALT_LEN];
60725 + unsigned char sum[GR_SHA_LEN];
60726 + unsigned char sp_role[GR_SPROLE_LEN];
60727 + struct sprole_pw *sprole_pws;
60728 + dev_t segv_device;
60729 + ino_t segv_inode;
60730 + uid_t segv_uid;
60731 + __u16 num_sprole_pws;
60732 + __u16 mode;
60733 +};
60734 +
60735 +struct gr_arg_wrapper {
60736 + struct gr_arg *arg;
60737 + __u32 version;
60738 + __u32 size;
60739 +};
60740 +
60741 +struct subject_map {
60742 + struct acl_subject_label *user;
60743 + struct acl_subject_label *kernel;
60744 + struct subject_map *prev;
60745 + struct subject_map *next;
60746 +};
60747 +
60748 +struct acl_subj_map_db {
60749 + struct subject_map **s_hash;
60750 + __u32 s_size;
60751 +};
60752 +
60753 +/* End Data Structures Section */
60754 +
60755 +/* Hash functions generated by empirical testing by Brad Spengler
60756 + Makes good use of the low bits of the inode. Generally 0-1 times
60757 + in loop for successful match. 0-3 for unsuccessful match.
60758 + Shift/add algorithm with modulus of table size and an XOR*/
60759 +
60760 +static __inline__ unsigned int
60761 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60762 +{
60763 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60764 +}
60765 +
60766 + static __inline__ unsigned int
60767 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60768 +{
60769 + return ((const unsigned long)userp % sz);
60770 +}
60771 +
60772 +static __inline__ unsigned int
60773 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60774 +{
60775 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60776 +}
60777 +
60778 +static __inline__ unsigned int
60779 +nhash(const char *name, const __u16 len, const unsigned int sz)
60780 +{
60781 + return full_name_hash((const unsigned char *)name, len) % sz;
60782 +}
60783 +
60784 +#define FOR_EACH_ROLE_START(role) \
60785 + role = role_list; \
60786 + while (role) {
60787 +
60788 +#define FOR_EACH_ROLE_END(role) \
60789 + role = role->prev; \
60790 + }
60791 +
60792 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60793 + subj = NULL; \
60794 + iter = 0; \
60795 + while (iter < role->subj_hash_size) { \
60796 + if (subj == NULL) \
60797 + subj = role->subj_hash[iter]; \
60798 + if (subj == NULL) { \
60799 + iter++; \
60800 + continue; \
60801 + }
60802 +
60803 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60804 + subj = subj->next; \
60805 + if (subj == NULL) \
60806 + iter++; \
60807 + }
60808 +
60809 +
60810 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60811 + subj = role->hash->first; \
60812 + while (subj != NULL) {
60813 +
60814 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60815 + subj = subj->next; \
60816 + }
60817 +
60818 +#endif
60819 +
60820 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60821 new file mode 100644
60822 index 0000000..323ecf2
60823 --- /dev/null
60824 +++ b/include/linux/gralloc.h
60825 @@ -0,0 +1,9 @@
60826 +#ifndef __GRALLOC_H
60827 +#define __GRALLOC_H
60828 +
60829 +void acl_free_all(void);
60830 +int acl_alloc_stack_init(unsigned long size);
60831 +void *acl_alloc(unsigned long len);
60832 +void *acl_alloc_num(unsigned long num, unsigned long len);
60833 +
60834 +#endif
60835 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60836 new file mode 100644
60837 index 0000000..b30e9bc
60838 --- /dev/null
60839 +++ b/include/linux/grdefs.h
60840 @@ -0,0 +1,140 @@
60841 +#ifndef GRDEFS_H
60842 +#define GRDEFS_H
60843 +
60844 +/* Begin grsecurity status declarations */
60845 +
60846 +enum {
60847 + GR_READY = 0x01,
60848 + GR_STATUS_INIT = 0x00 // disabled state
60849 +};
60850 +
60851 +/* Begin ACL declarations */
60852 +
60853 +/* Role flags */
60854 +
60855 +enum {
60856 + GR_ROLE_USER = 0x0001,
60857 + GR_ROLE_GROUP = 0x0002,
60858 + GR_ROLE_DEFAULT = 0x0004,
60859 + GR_ROLE_SPECIAL = 0x0008,
60860 + GR_ROLE_AUTH = 0x0010,
60861 + GR_ROLE_NOPW = 0x0020,
60862 + GR_ROLE_GOD = 0x0040,
60863 + GR_ROLE_LEARN = 0x0080,
60864 + GR_ROLE_TPE = 0x0100,
60865 + GR_ROLE_DOMAIN = 0x0200,
60866 + GR_ROLE_PAM = 0x0400,
60867 + GR_ROLE_PERSIST = 0x0800
60868 +};
60869 +
60870 +/* ACL Subject and Object mode flags */
60871 +enum {
60872 + GR_DELETED = 0x80000000
60873 +};
60874 +
60875 +/* ACL Object-only mode flags */
60876 +enum {
60877 + GR_READ = 0x00000001,
60878 + GR_APPEND = 0x00000002,
60879 + GR_WRITE = 0x00000004,
60880 + GR_EXEC = 0x00000008,
60881 + GR_FIND = 0x00000010,
60882 + GR_INHERIT = 0x00000020,
60883 + GR_SETID = 0x00000040,
60884 + GR_CREATE = 0x00000080,
60885 + GR_DELETE = 0x00000100,
60886 + GR_LINK = 0x00000200,
60887 + GR_AUDIT_READ = 0x00000400,
60888 + GR_AUDIT_APPEND = 0x00000800,
60889 + GR_AUDIT_WRITE = 0x00001000,
60890 + GR_AUDIT_EXEC = 0x00002000,
60891 + GR_AUDIT_FIND = 0x00004000,
60892 + GR_AUDIT_INHERIT= 0x00008000,
60893 + GR_AUDIT_SETID = 0x00010000,
60894 + GR_AUDIT_CREATE = 0x00020000,
60895 + GR_AUDIT_DELETE = 0x00040000,
60896 + GR_AUDIT_LINK = 0x00080000,
60897 + GR_PTRACERD = 0x00100000,
60898 + GR_NOPTRACE = 0x00200000,
60899 + GR_SUPPRESS = 0x00400000,
60900 + GR_NOLEARN = 0x00800000,
60901 + GR_INIT_TRANSFER= 0x01000000
60902 +};
60903 +
60904 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60905 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60906 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60907 +
60908 +/* ACL subject-only mode flags */
60909 +enum {
60910 + GR_KILL = 0x00000001,
60911 + GR_VIEW = 0x00000002,
60912 + GR_PROTECTED = 0x00000004,
60913 + GR_LEARN = 0x00000008,
60914 + GR_OVERRIDE = 0x00000010,
60915 + /* just a placeholder, this mode is only used in userspace */
60916 + GR_DUMMY = 0x00000020,
60917 + GR_PROTSHM = 0x00000040,
60918 + GR_KILLPROC = 0x00000080,
60919 + GR_KILLIPPROC = 0x00000100,
60920 + /* just a placeholder, this mode is only used in userspace */
60921 + GR_NOTROJAN = 0x00000200,
60922 + GR_PROTPROCFD = 0x00000400,
60923 + GR_PROCACCT = 0x00000800,
60924 + GR_RELAXPTRACE = 0x00001000,
60925 + GR_NESTED = 0x00002000,
60926 + GR_INHERITLEARN = 0x00004000,
60927 + GR_PROCFIND = 0x00008000,
60928 + GR_POVERRIDE = 0x00010000,
60929 + GR_KERNELAUTH = 0x00020000,
60930 + GR_ATSECURE = 0x00040000,
60931 + GR_SHMEXEC = 0x00080000
60932 +};
60933 +
60934 +enum {
60935 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60936 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60937 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60938 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60939 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60940 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60941 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60942 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60943 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60944 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60945 +};
60946 +
60947 +enum {
60948 + GR_ID_USER = 0x01,
60949 + GR_ID_GROUP = 0x02,
60950 +};
60951 +
60952 +enum {
60953 + GR_ID_ALLOW = 0x01,
60954 + GR_ID_DENY = 0x02,
60955 +};
60956 +
60957 +#define GR_CRASH_RES 31
60958 +#define GR_UIDTABLE_MAX 500
60959 +
60960 +/* begin resource learning section */
60961 +enum {
60962 + GR_RLIM_CPU_BUMP = 60,
60963 + GR_RLIM_FSIZE_BUMP = 50000,
60964 + GR_RLIM_DATA_BUMP = 10000,
60965 + GR_RLIM_STACK_BUMP = 1000,
60966 + GR_RLIM_CORE_BUMP = 10000,
60967 + GR_RLIM_RSS_BUMP = 500000,
60968 + GR_RLIM_NPROC_BUMP = 1,
60969 + GR_RLIM_NOFILE_BUMP = 5,
60970 + GR_RLIM_MEMLOCK_BUMP = 50000,
60971 + GR_RLIM_AS_BUMP = 500000,
60972 + GR_RLIM_LOCKS_BUMP = 2,
60973 + GR_RLIM_SIGPENDING_BUMP = 5,
60974 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60975 + GR_RLIM_NICE_BUMP = 1,
60976 + GR_RLIM_RTPRIO_BUMP = 1,
60977 + GR_RLIM_RTTIME_BUMP = 1000000
60978 +};
60979 +
60980 +#endif
60981 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60982 new file mode 100644
60983 index 0000000..60cda84
60984 --- /dev/null
60985 +++ b/include/linux/grinternal.h
60986 @@ -0,0 +1,220 @@
60987 +#ifndef __GRINTERNAL_H
60988 +#define __GRINTERNAL_H
60989 +
60990 +#ifdef CONFIG_GRKERNSEC
60991 +
60992 +#include <linux/fs.h>
60993 +#include <linux/mnt_namespace.h>
60994 +#include <linux/nsproxy.h>
60995 +#include <linux/gracl.h>
60996 +#include <linux/grdefs.h>
60997 +#include <linux/grmsg.h>
60998 +
60999 +void gr_add_learn_entry(const char *fmt, ...)
61000 + __attribute__ ((format (printf, 1, 2)));
61001 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61002 + const struct vfsmount *mnt);
61003 +__u32 gr_check_create(const struct dentry *new_dentry,
61004 + const struct dentry *parent,
61005 + const struct vfsmount *mnt, const __u32 mode);
61006 +int gr_check_protected_task(const struct task_struct *task);
61007 +__u32 to_gr_audit(const __u32 reqmode);
61008 +int gr_set_acls(const int type);
61009 +int gr_apply_subject_to_task(struct task_struct *task);
61010 +int gr_acl_is_enabled(void);
61011 +char gr_roletype_to_char(void);
61012 +
61013 +void gr_handle_alertkill(struct task_struct *task);
61014 +char *gr_to_filename(const struct dentry *dentry,
61015 + const struct vfsmount *mnt);
61016 +char *gr_to_filename1(const struct dentry *dentry,
61017 + const struct vfsmount *mnt);
61018 +char *gr_to_filename2(const struct dentry *dentry,
61019 + const struct vfsmount *mnt);
61020 +char *gr_to_filename3(const struct dentry *dentry,
61021 + const struct vfsmount *mnt);
61022 +
61023 +extern int grsec_enable_harden_ptrace;
61024 +extern int grsec_enable_link;
61025 +extern int grsec_enable_fifo;
61026 +extern int grsec_enable_execve;
61027 +extern int grsec_enable_shm;
61028 +extern int grsec_enable_execlog;
61029 +extern int grsec_enable_signal;
61030 +extern int grsec_enable_audit_ptrace;
61031 +extern int grsec_enable_forkfail;
61032 +extern int grsec_enable_time;
61033 +extern int grsec_enable_rofs;
61034 +extern int grsec_enable_chroot_shmat;
61035 +extern int grsec_enable_chroot_mount;
61036 +extern int grsec_enable_chroot_double;
61037 +extern int grsec_enable_chroot_pivot;
61038 +extern int grsec_enable_chroot_chdir;
61039 +extern int grsec_enable_chroot_chmod;
61040 +extern int grsec_enable_chroot_mknod;
61041 +extern int grsec_enable_chroot_fchdir;
61042 +extern int grsec_enable_chroot_nice;
61043 +extern int grsec_enable_chroot_execlog;
61044 +extern int grsec_enable_chroot_caps;
61045 +extern int grsec_enable_chroot_sysctl;
61046 +extern int grsec_enable_chroot_unix;
61047 +extern int grsec_enable_tpe;
61048 +extern int grsec_tpe_gid;
61049 +extern int grsec_enable_tpe_all;
61050 +extern int grsec_enable_tpe_invert;
61051 +extern int grsec_enable_socket_all;
61052 +extern int grsec_socket_all_gid;
61053 +extern int grsec_enable_socket_client;
61054 +extern int grsec_socket_client_gid;
61055 +extern int grsec_enable_socket_server;
61056 +extern int grsec_socket_server_gid;
61057 +extern int grsec_audit_gid;
61058 +extern int grsec_enable_group;
61059 +extern int grsec_enable_audit_textrel;
61060 +extern int grsec_enable_log_rwxmaps;
61061 +extern int grsec_enable_mount;
61062 +extern int grsec_enable_chdir;
61063 +extern int grsec_resource_logging;
61064 +extern int grsec_enable_blackhole;
61065 +extern int grsec_lastack_retries;
61066 +extern int grsec_enable_brute;
61067 +extern int grsec_lock;
61068 +
61069 +extern spinlock_t grsec_alert_lock;
61070 +extern unsigned long grsec_alert_wtime;
61071 +extern unsigned long grsec_alert_fyet;
61072 +
61073 +extern spinlock_t grsec_audit_lock;
61074 +
61075 +extern rwlock_t grsec_exec_file_lock;
61076 +
61077 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61078 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61079 + (tsk)->exec_file->f_vfsmnt) : "/")
61080 +
61081 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61082 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61083 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61084 +
61085 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61086 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61087 + (tsk)->exec_file->f_vfsmnt) : "/")
61088 +
61089 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61090 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61091 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61092 +
61093 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61094 +
61095 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61096 +
61097 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61098 + (task)->pid, (cred)->uid, \
61099 + (cred)->euid, (cred)->gid, (cred)->egid, \
61100 + gr_parent_task_fullpath(task), \
61101 + (task)->real_parent->comm, (task)->real_parent->pid, \
61102 + (pcred)->uid, (pcred)->euid, \
61103 + (pcred)->gid, (pcred)->egid
61104 +
61105 +#define GR_CHROOT_CAPS {{ \
61106 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61107 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61108 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61109 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61110 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61111 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61112 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61113 +
61114 +#define security_learn(normal_msg,args...) \
61115 +({ \
61116 + read_lock(&grsec_exec_file_lock); \
61117 + gr_add_learn_entry(normal_msg "\n", ## args); \
61118 + read_unlock(&grsec_exec_file_lock); \
61119 +})
61120 +
61121 +enum {
61122 + GR_DO_AUDIT,
61123 + GR_DONT_AUDIT,
61124 + /* used for non-audit messages that we shouldn't kill the task on */
61125 + GR_DONT_AUDIT_GOOD
61126 +};
61127 +
61128 +enum {
61129 + GR_TTYSNIFF,
61130 + GR_RBAC,
61131 + GR_RBAC_STR,
61132 + GR_STR_RBAC,
61133 + GR_RBAC_MODE2,
61134 + GR_RBAC_MODE3,
61135 + GR_FILENAME,
61136 + GR_SYSCTL_HIDDEN,
61137 + GR_NOARGS,
61138 + GR_ONE_INT,
61139 + GR_ONE_INT_TWO_STR,
61140 + GR_ONE_STR,
61141 + GR_STR_INT,
61142 + GR_TWO_STR_INT,
61143 + GR_TWO_INT,
61144 + GR_TWO_U64,
61145 + GR_THREE_INT,
61146 + GR_FIVE_INT_TWO_STR,
61147 + GR_TWO_STR,
61148 + GR_THREE_STR,
61149 + GR_FOUR_STR,
61150 + GR_STR_FILENAME,
61151 + GR_FILENAME_STR,
61152 + GR_FILENAME_TWO_INT,
61153 + GR_FILENAME_TWO_INT_STR,
61154 + GR_TEXTREL,
61155 + GR_PTRACE,
61156 + GR_RESOURCE,
61157 + GR_CAP,
61158 + GR_SIG,
61159 + GR_SIG2,
61160 + GR_CRASH1,
61161 + GR_CRASH2,
61162 + GR_PSACCT,
61163 + GR_RWXMAP
61164 +};
61165 +
61166 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61167 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61168 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61169 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61170 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61171 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61172 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61173 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61174 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61175 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61176 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61177 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61178 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61179 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61180 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61181 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61182 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61183 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61184 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61185 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61186 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61187 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61188 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61189 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61190 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61191 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61192 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61193 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61194 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61195 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61196 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61197 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61198 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61199 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61200 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61201 +
61202 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61203 +
61204 +#endif
61205 +
61206 +#endif
61207 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61208 new file mode 100644
61209 index 0000000..9d5fd4a
61210 --- /dev/null
61211 +++ b/include/linux/grmsg.h
61212 @@ -0,0 +1,108 @@
61213 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61214 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61215 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61216 +#define GR_STOPMOD_MSG "denied modification of module state by "
61217 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61218 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61219 +#define GR_IOPERM_MSG "denied use of ioperm() by "
61220 +#define GR_IOPL_MSG "denied use of iopl() by "
61221 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61222 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61223 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61224 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61225 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61226 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61227 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61228 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61229 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61230 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61231 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61232 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61233 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61234 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61235 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61236 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61237 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61238 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61239 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61240 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61241 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61242 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61243 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61244 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61245 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61246 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61247 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
61248 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61249 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61250 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61251 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61252 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61253 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61254 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61255 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61256 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
61257 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61258 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61259 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61260 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61261 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61262 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61263 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61264 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61265 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
61266 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61267 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61268 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61269 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61270 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61271 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61272 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61273 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61274 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61275 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61276 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61277 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61278 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61279 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61280 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61281 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61282 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61283 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61284 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61285 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
61286 +#define GR_NICE_CHROOT_MSG "denied priority change by "
61287 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61288 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61289 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61290 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61291 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61292 +#define GR_TIME_MSG "time set by "
61293 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61294 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61295 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61296 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61297 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61298 +#define GR_BIND_MSG "denied bind() by "
61299 +#define GR_CONNECT_MSG "denied connect() by "
61300 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61301 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61302 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61303 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61304 +#define GR_CAP_ACL_MSG "use of %s denied for "
61305 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61306 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61307 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61308 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61309 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61310 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61311 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61312 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61313 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61314 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61315 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61316 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61317 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61318 +#define GR_VM86_MSG "denied use of vm86 by "
61319 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61320 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61321 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61322 new file mode 100644
61323 index 0000000..bd25f72
61324 --- /dev/null
61325 +++ b/include/linux/grsecurity.h
61326 @@ -0,0 +1,228 @@
61327 +#ifndef GR_SECURITY_H
61328 +#define GR_SECURITY_H
61329 +#include <linux/fs.h>
61330 +#include <linux/fs_struct.h>
61331 +#include <linux/binfmts.h>
61332 +#include <linux/gracl.h>
61333 +
61334 +/* notify of brain-dead configs */
61335 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61336 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61337 +#endif
61338 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61339 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61340 +#endif
61341 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61342 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61343 +#endif
61344 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
61345 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
61346 +#endif
61347 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61348 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61349 +#endif
61350 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61351 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
61352 +#endif
61353 +
61354 +#include <linux/compat.h>
61355 +
61356 +struct user_arg_ptr {
61357 +#ifdef CONFIG_COMPAT
61358 + bool is_compat;
61359 +#endif
61360 + union {
61361 + const char __user *const __user *native;
61362 +#ifdef CONFIG_COMPAT
61363 + compat_uptr_t __user *compat;
61364 +#endif
61365 + } ptr;
61366 +};
61367 +
61368 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61369 +void gr_handle_brute_check(void);
61370 +void gr_handle_kernel_exploit(void);
61371 +int gr_process_user_ban(void);
61372 +
61373 +char gr_roletype_to_char(void);
61374 +
61375 +int gr_acl_enable_at_secure(void);
61376 +
61377 +int gr_check_user_change(int real, int effective, int fs);
61378 +int gr_check_group_change(int real, int effective, int fs);
61379 +
61380 +void gr_del_task_from_ip_table(struct task_struct *p);
61381 +
61382 +int gr_pid_is_chrooted(struct task_struct *p);
61383 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61384 +int gr_handle_chroot_nice(void);
61385 +int gr_handle_chroot_sysctl(const int op);
61386 +int gr_handle_chroot_setpriority(struct task_struct *p,
61387 + const int niceval);
61388 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61389 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61390 + const struct vfsmount *mnt);
61391 +void gr_handle_chroot_chdir(struct path *path);
61392 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61393 + const struct vfsmount *mnt, const int mode);
61394 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61395 + const struct vfsmount *mnt, const int mode);
61396 +int gr_handle_chroot_mount(const struct dentry *dentry,
61397 + const struct vfsmount *mnt,
61398 + const char *dev_name);
61399 +int gr_handle_chroot_pivot(void);
61400 +int gr_handle_chroot_unix(const pid_t pid);
61401 +
61402 +int gr_handle_rawio(const struct inode *inode);
61403 +
61404 +void gr_handle_ioperm(void);
61405 +void gr_handle_iopl(void);
61406 +
61407 +int gr_tpe_allow(const struct file *file);
61408 +
61409 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61410 +void gr_clear_chroot_entries(struct task_struct *task);
61411 +
61412 +void gr_log_forkfail(const int retval);
61413 +void gr_log_timechange(void);
61414 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61415 +void gr_log_chdir(const struct dentry *dentry,
61416 + const struct vfsmount *mnt);
61417 +void gr_log_chroot_exec(const struct dentry *dentry,
61418 + const struct vfsmount *mnt);
61419 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61420 +void gr_log_remount(const char *devname, const int retval);
61421 +void gr_log_unmount(const char *devname, const int retval);
61422 +void gr_log_mount(const char *from, const char *to, const int retval);
61423 +void gr_log_textrel(struct vm_area_struct *vma);
61424 +void gr_log_rwxmmap(struct file *file);
61425 +void gr_log_rwxmprotect(struct file *file);
61426 +
61427 +int gr_handle_follow_link(const struct inode *parent,
61428 + const struct inode *inode,
61429 + const struct dentry *dentry,
61430 + const struct vfsmount *mnt);
61431 +int gr_handle_fifo(const struct dentry *dentry,
61432 + const struct vfsmount *mnt,
61433 + const struct dentry *dir, const int flag,
61434 + const int acc_mode);
61435 +int gr_handle_hardlink(const struct dentry *dentry,
61436 + const struct vfsmount *mnt,
61437 + struct inode *inode,
61438 + const int mode, const char *to);
61439 +
61440 +int gr_is_capable(const int cap);
61441 +int gr_is_capable_nolog(const int cap);
61442 +void gr_learn_resource(const struct task_struct *task, const int limit,
61443 + const unsigned long wanted, const int gt);
61444 +void gr_copy_label(struct task_struct *tsk);
61445 +void gr_handle_crash(struct task_struct *task, const int sig);
61446 +int gr_handle_signal(const struct task_struct *p, const int sig);
61447 +int gr_check_crash_uid(const uid_t uid);
61448 +int gr_check_protected_task(const struct task_struct *task);
61449 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61450 +int gr_acl_handle_mmap(const struct file *file,
61451 + const unsigned long prot);
61452 +int gr_acl_handle_mprotect(const struct file *file,
61453 + const unsigned long prot);
61454 +int gr_check_hidden_task(const struct task_struct *tsk);
61455 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61456 + const struct vfsmount *mnt);
61457 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61458 + const struct vfsmount *mnt);
61459 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61460 + const struct vfsmount *mnt, const int fmode);
61461 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
61462 + const struct vfsmount *mnt, mode_t mode);
61463 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61464 + const struct vfsmount *mnt, mode_t mode);
61465 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61466 + const struct vfsmount *mnt);
61467 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61468 + const struct vfsmount *mnt);
61469 +int gr_handle_ptrace(struct task_struct *task, const long request);
61470 +int gr_handle_proc_ptrace(struct task_struct *task);
61471 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61472 + const struct vfsmount *mnt);
61473 +int gr_check_crash_exec(const struct file *filp);
61474 +int gr_acl_is_enabled(void);
61475 +void gr_set_kernel_label(struct task_struct *task);
61476 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61477 + const gid_t gid);
61478 +int gr_set_proc_label(const struct dentry *dentry,
61479 + const struct vfsmount *mnt,
61480 + const int unsafe_share);
61481 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61482 + const struct vfsmount *mnt);
61483 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61484 + const struct vfsmount *mnt, int acc_mode);
61485 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61486 + const struct dentry *p_dentry,
61487 + const struct vfsmount *p_mnt,
61488 + int open_flags, int acc_mode, const int imode);
61489 +void gr_handle_create(const struct dentry *dentry,
61490 + const struct vfsmount *mnt);
61491 +void gr_handle_proc_create(const struct dentry *dentry,
61492 + const struct inode *inode);
61493 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61494 + const struct dentry *parent_dentry,
61495 + const struct vfsmount *parent_mnt,
61496 + const int mode);
61497 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61498 + const struct dentry *parent_dentry,
61499 + const struct vfsmount *parent_mnt);
61500 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61501 + const struct vfsmount *mnt);
61502 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61503 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61504 + const struct vfsmount *mnt);
61505 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61506 + const struct dentry *parent_dentry,
61507 + const struct vfsmount *parent_mnt,
61508 + const char *from);
61509 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61510 + const struct dentry *parent_dentry,
61511 + const struct vfsmount *parent_mnt,
61512 + const struct dentry *old_dentry,
61513 + const struct vfsmount *old_mnt, const char *to);
61514 +int gr_acl_handle_rename(struct dentry *new_dentry,
61515 + struct dentry *parent_dentry,
61516 + const struct vfsmount *parent_mnt,
61517 + struct dentry *old_dentry,
61518 + struct inode *old_parent_inode,
61519 + struct vfsmount *old_mnt, const char *newname);
61520 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61521 + struct dentry *old_dentry,
61522 + struct dentry *new_dentry,
61523 + struct vfsmount *mnt, const __u8 replace);
61524 +__u32 gr_check_link(const struct dentry *new_dentry,
61525 + const struct dentry *parent_dentry,
61526 + const struct vfsmount *parent_mnt,
61527 + const struct dentry *old_dentry,
61528 + const struct vfsmount *old_mnt);
61529 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61530 + const unsigned int namelen, const ino_t ino);
61531 +
61532 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61533 + const struct vfsmount *mnt);
61534 +void gr_acl_handle_exit(void);
61535 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61536 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61537 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61538 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61539 +void gr_audit_ptrace(struct task_struct *task);
61540 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61541 +
61542 +#ifdef CONFIG_GRKERNSEC
61543 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61544 +void gr_handle_vm86(void);
61545 +void gr_handle_mem_readwrite(u64 from, u64 to);
61546 +
61547 +extern int grsec_enable_dmesg;
61548 +extern int grsec_disable_privio;
61549 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61550 +extern int grsec_enable_chroot_findtask;
61551 +#endif
61552 +#endif
61553 +
61554 +#endif
61555 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61556 new file mode 100644
61557 index 0000000..e7ffaaf
61558 --- /dev/null
61559 +++ b/include/linux/grsock.h
61560 @@ -0,0 +1,19 @@
61561 +#ifndef __GRSOCK_H
61562 +#define __GRSOCK_H
61563 +
61564 +extern void gr_attach_curr_ip(const struct sock *sk);
61565 +extern int gr_handle_sock_all(const int family, const int type,
61566 + const int protocol);
61567 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61568 +extern int gr_handle_sock_server_other(const struct sock *sck);
61569 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61570 +extern int gr_search_connect(struct socket * sock,
61571 + struct sockaddr_in * addr);
61572 +extern int gr_search_bind(struct socket * sock,
61573 + struct sockaddr_in * addr);
61574 +extern int gr_search_listen(struct socket * sock);
61575 +extern int gr_search_accept(struct socket * sock);
61576 +extern int gr_search_socket(const int domain, const int type,
61577 + const int protocol);
61578 +
61579 +#endif
61580 diff --git a/include/linux/hid.h b/include/linux/hid.h
61581 index 9cf8e7a..5ec94d0 100644
61582 --- a/include/linux/hid.h
61583 +++ b/include/linux/hid.h
61584 @@ -676,7 +676,7 @@ struct hid_ll_driver {
61585 unsigned int code, int value);
61586
61587 int (*parse)(struct hid_device *hdev);
61588 -};
61589 +} __no_const;
61590
61591 #define PM_HINT_FULLON 1<<5
61592 #define PM_HINT_NORMAL 1<<1
61593 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61594 index 3a93f73..b19d0b3 100644
61595 --- a/include/linux/highmem.h
61596 +++ b/include/linux/highmem.h
61597 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61598 kunmap_atomic(kaddr, KM_USER0);
61599 }
61600
61601 +static inline void sanitize_highpage(struct page *page)
61602 +{
61603 + void *kaddr;
61604 + unsigned long flags;
61605 +
61606 + local_irq_save(flags);
61607 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
61608 + clear_page(kaddr);
61609 + kunmap_atomic(kaddr, KM_CLEARPAGE);
61610 + local_irq_restore(flags);
61611 +}
61612 +
61613 static inline void zero_user_segments(struct page *page,
61614 unsigned start1, unsigned end1,
61615 unsigned start2, unsigned end2)
61616 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61617 index a6c652e..1f5878f 100644
61618 --- a/include/linux/i2c.h
61619 +++ b/include/linux/i2c.h
61620 @@ -346,6 +346,7 @@ struct i2c_algorithm {
61621 /* To determine what the adapter supports */
61622 u32 (*functionality) (struct i2c_adapter *);
61623 };
61624 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61625
61626 /*
61627 * i2c_adapter is the structure used to identify a physical i2c bus along
61628 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61629 index a6deef4..c56a7f2 100644
61630 --- a/include/linux/i2o.h
61631 +++ b/include/linux/i2o.h
61632 @@ -564,7 +564,7 @@ struct i2o_controller {
61633 struct i2o_device *exec; /* Executive */
61634 #if BITS_PER_LONG == 64
61635 spinlock_t context_list_lock; /* lock for context_list */
61636 - atomic_t context_list_counter; /* needed for unique contexts */
61637 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61638 struct list_head context_list; /* list of context id's
61639 and pointers */
61640 #endif
61641 diff --git a/include/linux/init.h b/include/linux/init.h
61642 index 9146f39..885354d 100644
61643 --- a/include/linux/init.h
61644 +++ b/include/linux/init.h
61645 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cmdline);
61646
61647 /* Each module must use one module_init(). */
61648 #define module_init(initfn) \
61649 - static inline initcall_t __inittest(void) \
61650 + static inline __used initcall_t __inittest(void) \
61651 { return initfn; } \
61652 int init_module(void) __attribute__((alias(#initfn)));
61653
61654 /* This is only required if you want to be unloadable. */
61655 #define module_exit(exitfn) \
61656 - static inline exitcall_t __exittest(void) \
61657 + static inline __used exitcall_t __exittest(void) \
61658 { return exitfn; } \
61659 void cleanup_module(void) __attribute__((alias(#exitfn)));
61660
61661 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61662 index d14e058..4162929 100644
61663 --- a/include/linux/init_task.h
61664 +++ b/include/linux/init_task.h
61665 @@ -126,6 +126,12 @@ extern struct cred init_cred;
61666 # define INIT_PERF_EVENTS(tsk)
61667 #endif
61668
61669 +#ifdef CONFIG_X86
61670 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61671 +#else
61672 +#define INIT_TASK_THREAD_INFO
61673 +#endif
61674 +
61675 /*
61676 * INIT_TASK is used to set up the first task table, touch at
61677 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61678 @@ -164,6 +170,7 @@ extern struct cred init_cred;
61679 RCU_INIT_POINTER(.cred, &init_cred), \
61680 .comm = "swapper", \
61681 .thread = INIT_THREAD, \
61682 + INIT_TASK_THREAD_INFO \
61683 .fs = &init_fs, \
61684 .files = &init_files, \
61685 .signal = &init_signals, \
61686 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61687 index 9310c69..6ebb244 100644
61688 --- a/include/linux/intel-iommu.h
61689 +++ b/include/linux/intel-iommu.h
61690 @@ -296,7 +296,7 @@ struct iommu_flush {
61691 u8 fm, u64 type);
61692 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61693 unsigned int size_order, u64 type);
61694 -};
61695 +} __no_const;
61696
61697 enum {
61698 SR_DMAR_FECTL_REG,
61699 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61700 index f51a81b..adfcb44 100644
61701 --- a/include/linux/interrupt.h
61702 +++ b/include/linux/interrupt.h
61703 @@ -425,7 +425,7 @@ enum
61704 /* map softirq index to softirq name. update 'softirq_to_name' in
61705 * kernel/softirq.c when adding a new softirq.
61706 */
61707 -extern char *softirq_to_name[NR_SOFTIRQS];
61708 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61709
61710 /* softirq mask and active fields moved to irq_cpustat_t in
61711 * asm/hardirq.h to get better cache usage. KAO
61712 @@ -433,12 +433,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61713
61714 struct softirq_action
61715 {
61716 - void (*action)(struct softirq_action *);
61717 + void (*action)(void);
61718 };
61719
61720 asmlinkage void do_softirq(void);
61721 asmlinkage void __do_softirq(void);
61722 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61723 +extern void open_softirq(int nr, void (*action)(void));
61724 extern void softirq_init(void);
61725 static inline void __raise_softirq_irqoff(unsigned int nr)
61726 {
61727 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61728 index 0df513b..fe901a2 100644
61729 --- a/include/linux/kallsyms.h
61730 +++ b/include/linux/kallsyms.h
61731 @@ -15,7 +15,8 @@
61732
61733 struct module;
61734
61735 -#ifdef CONFIG_KALLSYMS
61736 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61737 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61738 /* Lookup the address for a symbol. Returns 0 if not found. */
61739 unsigned long kallsyms_lookup_name(const char *name);
61740
61741 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61742 /* Stupid that this does nothing, but I didn't create this mess. */
61743 #define __print_symbol(fmt, addr)
61744 #endif /*CONFIG_KALLSYMS*/
61745 +#else /* when included by kallsyms.c, vsnprintf.c, or
61746 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61747 +extern void __print_symbol(const char *fmt, unsigned long address);
61748 +extern int sprint_backtrace(char *buffer, unsigned long address);
61749 +extern int sprint_symbol(char *buffer, unsigned long address);
61750 +const char *kallsyms_lookup(unsigned long addr,
61751 + unsigned long *symbolsize,
61752 + unsigned long *offset,
61753 + char **modname, char *namebuf);
61754 +#endif
61755
61756 /* This macro allows us to keep printk typechecking */
61757 static void __check_printsym_format(const char *fmt, ...)
61758 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61759 index fa39183..40160be 100644
61760 --- a/include/linux/kgdb.h
61761 +++ b/include/linux/kgdb.h
61762 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61763 extern int kgdb_io_module_registered;
61764
61765 extern atomic_t kgdb_setting_breakpoint;
61766 -extern atomic_t kgdb_cpu_doing_single_step;
61767 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61768
61769 extern struct task_struct *kgdb_usethread;
61770 extern struct task_struct *kgdb_contthread;
61771 @@ -251,7 +251,7 @@ struct kgdb_arch {
61772 void (*disable_hw_break)(struct pt_regs *regs);
61773 void (*remove_all_hw_break)(void);
61774 void (*correct_hw_break)(void);
61775 -};
61776 +} __do_const;
61777
61778 /**
61779 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61780 @@ -276,7 +276,7 @@ struct kgdb_io {
61781 void (*pre_exception) (void);
61782 void (*post_exception) (void);
61783 int is_console;
61784 -};
61785 +} __do_const;
61786
61787 extern struct kgdb_arch arch_kgdb_ops;
61788
61789 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61790 index 0da38cf..d23f05f 100644
61791 --- a/include/linux/kmod.h
61792 +++ b/include/linux/kmod.h
61793 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61794 * usually useless though. */
61795 extern int __request_module(bool wait, const char *name, ...) \
61796 __attribute__((format(printf, 2, 3)));
61797 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
61798 + __attribute__((format(printf, 3, 4)));
61799 #define request_module(mod...) __request_module(true, mod)
61800 #define request_module_nowait(mod...) __request_module(false, mod)
61801 #define try_then_request_module(x, mod...) \
61802 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61803 index eabb21a..3f030f4 100644
61804 --- a/include/linux/kvm_host.h
61805 +++ b/include/linux/kvm_host.h
61806 @@ -308,7 +308,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61807 void vcpu_load(struct kvm_vcpu *vcpu);
61808 void vcpu_put(struct kvm_vcpu *vcpu);
61809
61810 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61811 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61812 struct module *module);
61813 void kvm_exit(void);
61814
61815 @@ -454,7 +454,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61816 struct kvm_guest_debug *dbg);
61817 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61818
61819 -int kvm_arch_init(void *opaque);
61820 +int kvm_arch_init(const void *opaque);
61821 void kvm_arch_exit(void);
61822
61823 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61824 diff --git a/include/linux/libata.h b/include/linux/libata.h
61825 index efd6f98..5f5fd37 100644
61826 --- a/include/linux/libata.h
61827 +++ b/include/linux/libata.h
61828 @@ -909,7 +909,7 @@ struct ata_port_operations {
61829 * fields must be pointers.
61830 */
61831 const struct ata_port_operations *inherits;
61832 -};
61833 +} __do_const;
61834
61835 struct ata_port_info {
61836 unsigned long flags;
61837 diff --git a/include/linux/mca.h b/include/linux/mca.h
61838 index 3797270..7765ede 100644
61839 --- a/include/linux/mca.h
61840 +++ b/include/linux/mca.h
61841 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61842 int region);
61843 void * (*mca_transform_memory)(struct mca_device *,
61844 void *memory);
61845 -};
61846 +} __no_const;
61847
61848 struct mca_bus {
61849 u64 default_dma_mask;
61850 diff --git a/include/linux/memory.h b/include/linux/memory.h
61851 index 935699b..11042cc 100644
61852 --- a/include/linux/memory.h
61853 +++ b/include/linux/memory.h
61854 @@ -144,7 +144,7 @@ struct memory_accessor {
61855 size_t count);
61856 ssize_t (*write)(struct memory_accessor *, const char *buf,
61857 off_t offset, size_t count);
61858 -};
61859 +} __no_const;
61860
61861 /*
61862 * Kernel text modification mutex, used for code patching. Users of this lock
61863 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61864 index 896b5e4..1159ad0 100644
61865 --- a/include/linux/mfd/abx500.h
61866 +++ b/include/linux/mfd/abx500.h
61867 @@ -234,6 +234,7 @@ struct abx500_ops {
61868 int (*event_registers_startup_state_get) (struct device *, u8 *);
61869 int (*startup_irq_enabled) (struct device *, unsigned int);
61870 };
61871 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61872
61873 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61874 void abx500_remove_ops(struct device *dev);
61875 diff --git a/include/linux/mm.h b/include/linux/mm.h
61876 index fedc5f0..7cedb6d 100644
61877 --- a/include/linux/mm.h
61878 +++ b/include/linux/mm.h
61879 @@ -114,7 +114,14 @@ extern unsigned int kobjsize(const void *objp);
61880
61881 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61882 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61883 +
61884 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61885 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61886 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61887 +#else
61888 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61889 +#endif
61890 +
61891 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61892 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61893
61894 @@ -1011,34 +1018,6 @@ int set_page_dirty(struct page *page);
61895 int set_page_dirty_lock(struct page *page);
61896 int clear_page_dirty_for_io(struct page *page);
61897
61898 -/* Is the vma a continuation of the stack vma above it? */
61899 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61900 -{
61901 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61902 -}
61903 -
61904 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61905 - unsigned long addr)
61906 -{
61907 - return (vma->vm_flags & VM_GROWSDOWN) &&
61908 - (vma->vm_start == addr) &&
61909 - !vma_growsdown(vma->vm_prev, addr);
61910 -}
61911 -
61912 -/* Is the vma a continuation of the stack vma below it? */
61913 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61914 -{
61915 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61916 -}
61917 -
61918 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61919 - unsigned long addr)
61920 -{
61921 - return (vma->vm_flags & VM_GROWSUP) &&
61922 - (vma->vm_end == addr) &&
61923 - !vma_growsup(vma->vm_next, addr);
61924 -}
61925 -
61926 extern unsigned long move_page_tables(struct vm_area_struct *vma,
61927 unsigned long old_addr, struct vm_area_struct *new_vma,
61928 unsigned long new_addr, unsigned long len);
61929 @@ -1133,6 +1112,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
61930 }
61931 #endif
61932
61933 +#ifdef CONFIG_MMU
61934 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61935 +#else
61936 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61937 +{
61938 + return __pgprot(0);
61939 +}
61940 +#endif
61941 +
61942 int vma_wants_writenotify(struct vm_area_struct *vma);
61943
61944 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61945 @@ -1417,6 +1405,7 @@ out:
61946 }
61947
61948 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
61949 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
61950
61951 extern unsigned long do_brk(unsigned long, unsigned long);
61952
61953 @@ -1474,6 +1463,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
61954 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
61955 struct vm_area_struct **pprev);
61956
61957 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
61958 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
61959 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
61960 +
61961 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
61962 NULL if none. Assume start_addr < end_addr. */
61963 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
61964 @@ -1490,15 +1483,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
61965 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
61966 }
61967
61968 -#ifdef CONFIG_MMU
61969 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
61970 -#else
61971 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
61972 -{
61973 - return __pgprot(0);
61974 -}
61975 -#endif
61976 -
61977 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
61978 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
61979 unsigned long pfn, unsigned long size, pgprot_t);
61980 @@ -1612,7 +1596,7 @@ extern int unpoison_memory(unsigned long pfn);
61981 extern int sysctl_memory_failure_early_kill;
61982 extern int sysctl_memory_failure_recovery;
61983 extern void shake_page(struct page *p, int access);
61984 -extern atomic_long_t mce_bad_pages;
61985 +extern atomic_long_unchecked_t mce_bad_pages;
61986 extern int soft_offline_page(struct page *page, int flags);
61987
61988 extern void dump_page(struct page *page);
61989 @@ -1626,5 +1610,11 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
61990 unsigned int pages_per_huge_page);
61991 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
61992
61993 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61994 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
61995 +#else
61996 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
61997 +#endif
61998 +
61999 #endif /* __KERNEL__ */
62000 #endif /* _LINUX_MM_H */
62001 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62002 index 10a2f62..c8fa287 100644
62003 --- a/include/linux/mm_types.h
62004 +++ b/include/linux/mm_types.h
62005 @@ -230,6 +230,8 @@ struct vm_area_struct {
62006 #ifdef CONFIG_NUMA
62007 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62008 #endif
62009 +
62010 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62011 };
62012
62013 struct core_thread {
62014 @@ -362,6 +364,24 @@ struct mm_struct {
62015 #ifdef CONFIG_CPUMASK_OFFSTACK
62016 struct cpumask cpumask_allocation;
62017 #endif
62018 +
62019 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62020 + unsigned long pax_flags;
62021 +#endif
62022 +
62023 +#ifdef CONFIG_PAX_DLRESOLVE
62024 + unsigned long call_dl_resolve;
62025 +#endif
62026 +
62027 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62028 + unsigned long call_syscall;
62029 +#endif
62030 +
62031 +#ifdef CONFIG_PAX_ASLR
62032 + unsigned long delta_mmap; /* randomized offset */
62033 + unsigned long delta_stack; /* randomized offset */
62034 +#endif
62035 +
62036 };
62037
62038 static inline void mm_init_cpumask(struct mm_struct *mm)
62039 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62040 index 1d1b1e1..2a13c78 100644
62041 --- a/include/linux/mmu_notifier.h
62042 +++ b/include/linux/mmu_notifier.h
62043 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62044 */
62045 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62046 ({ \
62047 - pte_t __pte; \
62048 + pte_t ___pte; \
62049 struct vm_area_struct *___vma = __vma; \
62050 unsigned long ___address = __address; \
62051 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62052 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62053 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62054 - __pte; \
62055 + ___pte; \
62056 })
62057
62058 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62059 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62060 index be1ac8d..26868ce 100644
62061 --- a/include/linux/mmzone.h
62062 +++ b/include/linux/mmzone.h
62063 @@ -356,7 +356,7 @@ struct zone {
62064 unsigned long flags; /* zone flags, see below */
62065
62066 /* Zone statistics */
62067 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62068 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62069
62070 /*
62071 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62072 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62073 index ae28e93..1ac2233 100644
62074 --- a/include/linux/mod_devicetable.h
62075 +++ b/include/linux/mod_devicetable.h
62076 @@ -12,7 +12,7 @@
62077 typedef unsigned long kernel_ulong_t;
62078 #endif
62079
62080 -#define PCI_ANY_ID (~0)
62081 +#define PCI_ANY_ID ((__u16)~0)
62082
62083 struct pci_device_id {
62084 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62085 @@ -131,7 +131,7 @@ struct usb_device_id {
62086 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62087 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62088
62089 -#define HID_ANY_ID (~0)
62090 +#define HID_ANY_ID (~0U)
62091
62092 struct hid_device_id {
62093 __u16 bus;
62094 diff --git a/include/linux/module.h b/include/linux/module.h
62095 index 1c30087..fc2a442 100644
62096 --- a/include/linux/module.h
62097 +++ b/include/linux/module.h
62098 @@ -16,6 +16,7 @@
62099 #include <linux/kobject.h>
62100 #include <linux/moduleparam.h>
62101 #include <linux/tracepoint.h>
62102 +#include <linux/fs.h>
62103
62104 #include <linux/percpu.h>
62105 #include <asm/module.h>
62106 @@ -327,19 +328,16 @@ struct module
62107 int (*init)(void);
62108
62109 /* If this is non-NULL, vfree after init() returns */
62110 - void *module_init;
62111 + void *module_init_rx, *module_init_rw;
62112
62113 /* Here is the actual code + data, vfree'd on unload. */
62114 - void *module_core;
62115 + void *module_core_rx, *module_core_rw;
62116
62117 /* Here are the sizes of the init and core sections */
62118 - unsigned int init_size, core_size;
62119 + unsigned int init_size_rw, core_size_rw;
62120
62121 /* The size of the executable code in each section. */
62122 - unsigned int init_text_size, core_text_size;
62123 -
62124 - /* Size of RO sections of the module (text+rodata) */
62125 - unsigned int init_ro_size, core_ro_size;
62126 + unsigned int init_size_rx, core_size_rx;
62127
62128 /* Arch-specific module values */
62129 struct mod_arch_specific arch;
62130 @@ -395,6 +393,10 @@ struct module
62131 #ifdef CONFIG_EVENT_TRACING
62132 struct ftrace_event_call **trace_events;
62133 unsigned int num_trace_events;
62134 + struct file_operations trace_id;
62135 + struct file_operations trace_enable;
62136 + struct file_operations trace_format;
62137 + struct file_operations trace_filter;
62138 #endif
62139 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62140 unsigned int num_ftrace_callsites;
62141 @@ -445,16 +447,46 @@ bool is_module_address(unsigned long addr);
62142 bool is_module_percpu_address(unsigned long addr);
62143 bool is_module_text_address(unsigned long addr);
62144
62145 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62146 +{
62147 +
62148 +#ifdef CONFIG_PAX_KERNEXEC
62149 + if (ktla_ktva(addr) >= (unsigned long)start &&
62150 + ktla_ktva(addr) < (unsigned long)start + size)
62151 + return 1;
62152 +#endif
62153 +
62154 + return ((void *)addr >= start && (void *)addr < start + size);
62155 +}
62156 +
62157 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62158 +{
62159 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62160 +}
62161 +
62162 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62163 +{
62164 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62165 +}
62166 +
62167 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62168 +{
62169 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62170 +}
62171 +
62172 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62173 +{
62174 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62175 +}
62176 +
62177 static inline int within_module_core(unsigned long addr, struct module *mod)
62178 {
62179 - return (unsigned long)mod->module_core <= addr &&
62180 - addr < (unsigned long)mod->module_core + mod->core_size;
62181 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62182 }
62183
62184 static inline int within_module_init(unsigned long addr, struct module *mod)
62185 {
62186 - return (unsigned long)mod->module_init <= addr &&
62187 - addr < (unsigned long)mod->module_init + mod->init_size;
62188 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62189 }
62190
62191 /* Search for module by name: must hold module_mutex. */
62192 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62193 index b2be02e..6a9fdb1 100644
62194 --- a/include/linux/moduleloader.h
62195 +++ b/include/linux/moduleloader.h
62196 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62197 sections. Returns NULL on failure. */
62198 void *module_alloc(unsigned long size);
62199
62200 +#ifdef CONFIG_PAX_KERNEXEC
62201 +void *module_alloc_exec(unsigned long size);
62202 +#else
62203 +#define module_alloc_exec(x) module_alloc(x)
62204 +#endif
62205 +
62206 /* Free memory returned from module_alloc. */
62207 void module_free(struct module *mod, void *module_region);
62208
62209 +#ifdef CONFIG_PAX_KERNEXEC
62210 +void module_free_exec(struct module *mod, void *module_region);
62211 +#else
62212 +#define module_free_exec(x, y) module_free((x), (y))
62213 +#endif
62214 +
62215 /* Apply the given relocation to the (simplified) ELF. Return -error
62216 or 0. */
62217 int apply_relocate(Elf_Shdr *sechdrs,
62218 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62219 index ddaae98..3c70938 100644
62220 --- a/include/linux/moduleparam.h
62221 +++ b/include/linux/moduleparam.h
62222 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock(void)
62223 * @len is usually just sizeof(string).
62224 */
62225 #define module_param_string(name, string, len, perm) \
62226 - static const struct kparam_string __param_string_##name \
62227 + static const struct kparam_string __param_string_##name __used \
62228 = { len, string }; \
62229 __module_param_call(MODULE_PARAM_PREFIX, name, \
62230 &param_ops_string, \
62231 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
62232 * module_param_named() for why this might be necessary.
62233 */
62234 #define module_param_array_named(name, array, type, nump, perm) \
62235 - static const struct kparam_array __param_arr_##name \
62236 + static const struct kparam_array __param_arr_##name __used \
62237 = { .max = ARRAY_SIZE(array), .num = nump, \
62238 .ops = &param_ops_##type, \
62239 .elemsize = sizeof(array[0]), .elem = array }; \
62240 diff --git a/include/linux/namei.h b/include/linux/namei.h
62241 index ffc0213..2c1f2cb 100644
62242 --- a/include/linux/namei.h
62243 +++ b/include/linux/namei.h
62244 @@ -24,7 +24,7 @@ struct nameidata {
62245 unsigned seq;
62246 int last_type;
62247 unsigned depth;
62248 - char *saved_names[MAX_NESTED_LINKS + 1];
62249 + const char *saved_names[MAX_NESTED_LINKS + 1];
62250
62251 /* Intent data */
62252 union {
62253 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62254 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62255 extern void unlock_rename(struct dentry *, struct dentry *);
62256
62257 -static inline void nd_set_link(struct nameidata *nd, char *path)
62258 +static inline void nd_set_link(struct nameidata *nd, const char *path)
62259 {
62260 nd->saved_names[nd->depth] = path;
62261 }
62262
62263 -static inline char *nd_get_link(struct nameidata *nd)
62264 +static inline const char *nd_get_link(const struct nameidata *nd)
62265 {
62266 return nd->saved_names[nd->depth];
62267 }
62268 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62269 index ddee79b..67af106 100644
62270 --- a/include/linux/netdevice.h
62271 +++ b/include/linux/netdevice.h
62272 @@ -944,6 +944,7 @@ struct net_device_ops {
62273 int (*ndo_set_features)(struct net_device *dev,
62274 u32 features);
62275 };
62276 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62277
62278 /*
62279 * The DEVICE structure.
62280 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62281 new file mode 100644
62282 index 0000000..33f4af8
62283 --- /dev/null
62284 +++ b/include/linux/netfilter/xt_gradm.h
62285 @@ -0,0 +1,9 @@
62286 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62287 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62288 +
62289 +struct xt_gradm_mtinfo {
62290 + __u16 flags;
62291 + __u16 invflags;
62292 +};
62293 +
62294 +#endif
62295 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62296 index c65a18a..0c05f3a 100644
62297 --- a/include/linux/of_pdt.h
62298 +++ b/include/linux/of_pdt.h
62299 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62300
62301 /* return 0 on success; fill in 'len' with number of bytes in path */
62302 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62303 -};
62304 +} __no_const;
62305
62306 extern void *prom_early_alloc(unsigned long size);
62307
62308 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62309 index 49c8727..34d2ae1 100644
62310 --- a/include/linux/oprofile.h
62311 +++ b/include/linux/oprofile.h
62312 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62313 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62314 char const * name, ulong * val);
62315
62316 -/** Create a file for read-only access to an atomic_t. */
62317 +/** Create a file for read-only access to an atomic_unchecked_t. */
62318 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62319 - char const * name, atomic_t * val);
62320 + char const * name, atomic_unchecked_t * val);
62321
62322 /** create a directory */
62323 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62324 diff --git a/include/linux/padata.h b/include/linux/padata.h
62325 index 4633b2f..988bc08 100644
62326 --- a/include/linux/padata.h
62327 +++ b/include/linux/padata.h
62328 @@ -129,7 +129,7 @@ struct parallel_data {
62329 struct padata_instance *pinst;
62330 struct padata_parallel_queue __percpu *pqueue;
62331 struct padata_serial_queue __percpu *squeue;
62332 - atomic_t seq_nr;
62333 + atomic_unchecked_t seq_nr;
62334 atomic_t reorder_objects;
62335 atomic_t refcnt;
62336 unsigned int max_seq_nr;
62337 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62338 index c816075..cd28c4d 100644
62339 --- a/include/linux/perf_event.h
62340 +++ b/include/linux/perf_event.h
62341 @@ -745,8 +745,8 @@ struct perf_event {
62342
62343 enum perf_event_active_state state;
62344 unsigned int attach_state;
62345 - local64_t count;
62346 - atomic64_t child_count;
62347 + local64_t count; /* PaX: fix it one day */
62348 + atomic64_unchecked_t child_count;
62349
62350 /*
62351 * These are the total time in nanoseconds that the event
62352 @@ -797,8 +797,8 @@ struct perf_event {
62353 * These accumulate total time (in nanoseconds) that children
62354 * events have been enabled and running, respectively.
62355 */
62356 - atomic64_t child_total_time_enabled;
62357 - atomic64_t child_total_time_running;
62358 + atomic64_unchecked_t child_total_time_enabled;
62359 + atomic64_unchecked_t child_total_time_running;
62360
62361 /*
62362 * Protect attach/detach and child_list:
62363 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62364 index 77257c9..51d473a 100644
62365 --- a/include/linux/pipe_fs_i.h
62366 +++ b/include/linux/pipe_fs_i.h
62367 @@ -46,9 +46,9 @@ struct pipe_buffer {
62368 struct pipe_inode_info {
62369 wait_queue_head_t wait;
62370 unsigned int nrbufs, curbuf, buffers;
62371 - unsigned int readers;
62372 - unsigned int writers;
62373 - unsigned int waiting_writers;
62374 + atomic_t readers;
62375 + atomic_t writers;
62376 + atomic_t waiting_writers;
62377 unsigned int r_counter;
62378 unsigned int w_counter;
62379 struct page *tmp_page;
62380 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62381 index daac05d..c6802ce 100644
62382 --- a/include/linux/pm_runtime.h
62383 +++ b/include/linux/pm_runtime.h
62384 @@ -99,7 +99,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62385
62386 static inline void pm_runtime_mark_last_busy(struct device *dev)
62387 {
62388 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62389 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62390 }
62391
62392 #else /* !CONFIG_PM_RUNTIME */
62393 diff --git a/include/linux/poison.h b/include/linux/poison.h
62394 index 79159de..f1233a9 100644
62395 --- a/include/linux/poison.h
62396 +++ b/include/linux/poison.h
62397 @@ -19,8 +19,8 @@
62398 * under normal circumstances, used to verify that nobody uses
62399 * non-initialized list entries.
62400 */
62401 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62402 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62403 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62404 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62405
62406 /********** include/linux/timer.h **********/
62407 /*
62408 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62409 index 58969b2..ead129b 100644
62410 --- a/include/linux/preempt.h
62411 +++ b/include/linux/preempt.h
62412 @@ -123,7 +123,7 @@ struct preempt_ops {
62413 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62414 void (*sched_out)(struct preempt_notifier *notifier,
62415 struct task_struct *next);
62416 -};
62417 +} __no_const;
62418
62419 /**
62420 * preempt_notifier - key for installing preemption notifiers
62421 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62422 index 643b96c..ef55a9c 100644
62423 --- a/include/linux/proc_fs.h
62424 +++ b/include/linux/proc_fs.h
62425 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
62426 return proc_create_data(name, mode, parent, proc_fops, NULL);
62427 }
62428
62429 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
62430 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62431 +{
62432 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62433 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62434 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62435 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62436 +#else
62437 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62438 +#endif
62439 +}
62440 +
62441 +
62442 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62443 mode_t mode, struct proc_dir_entry *base,
62444 read_proc_t *read_proc, void * data)
62445 @@ -258,7 +271,7 @@ union proc_op {
62446 int (*proc_show)(struct seq_file *m,
62447 struct pid_namespace *ns, struct pid *pid,
62448 struct task_struct *task);
62449 -};
62450 +} __no_const;
62451
62452 struct ctl_table_header;
62453 struct ctl_table;
62454 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62455 index 800f113..af90cc8 100644
62456 --- a/include/linux/ptrace.h
62457 +++ b/include/linux/ptrace.h
62458 @@ -129,10 +129,10 @@ extern void __ptrace_unlink(struct task_struct *child);
62459 extern void exit_ptrace(struct task_struct *tracer);
62460 #define PTRACE_MODE_READ 1
62461 #define PTRACE_MODE_ATTACH 2
62462 -/* Returns 0 on success, -errno on denial. */
62463 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
62464 /* Returns true on success, false on denial. */
62465 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
62466 +/* Returns true on success, false on denial. */
62467 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
62468
62469 static inline int ptrace_reparented(struct task_struct *child)
62470 {
62471 diff --git a/include/linux/random.h b/include/linux/random.h
62472 index d13059f..2eaafaa 100644
62473 --- a/include/linux/random.h
62474 +++ b/include/linux/random.h
62475 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
62476
62477 u32 prandom32(struct rnd_state *);
62478
62479 +static inline unsigned long pax_get_random_long(void)
62480 +{
62481 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62482 +}
62483 +
62484 /*
62485 * Handle minimum values for seeds
62486 */
62487 static inline u32 __seed(u32 x, u32 m)
62488 {
62489 - return (x < m) ? x + m : x;
62490 + return (x <= m) ? x + m + 1 : x;
62491 }
62492
62493 /**
62494 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62495 index e0879a7..a12f962 100644
62496 --- a/include/linux/reboot.h
62497 +++ b/include/linux/reboot.h
62498 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62499 * Architecture-specific implementations of sys_reboot commands.
62500 */
62501
62502 -extern void machine_restart(char *cmd);
62503 -extern void machine_halt(void);
62504 -extern void machine_power_off(void);
62505 +extern void machine_restart(char *cmd) __noreturn;
62506 +extern void machine_halt(void) __noreturn;
62507 +extern void machine_power_off(void) __noreturn;
62508
62509 extern void machine_shutdown(void);
62510 struct pt_regs;
62511 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62512 */
62513
62514 extern void kernel_restart_prepare(char *cmd);
62515 -extern void kernel_restart(char *cmd);
62516 -extern void kernel_halt(void);
62517 -extern void kernel_power_off(void);
62518 +extern void kernel_restart(char *cmd) __noreturn;
62519 +extern void kernel_halt(void) __noreturn;
62520 +extern void kernel_power_off(void) __noreturn;
62521
62522 extern int C_A_D; /* for sysctl */
62523 void ctrl_alt_del(void);
62524 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62525 * Emergency restart, callable from an interrupt handler.
62526 */
62527
62528 -extern void emergency_restart(void);
62529 +extern void emergency_restart(void) __noreturn;
62530 #include <asm/emergency-restart.h>
62531
62532 #endif
62533 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
62534 index 96d465f..b084e05 100644
62535 --- a/include/linux/reiserfs_fs.h
62536 +++ b/include/linux/reiserfs_fs.h
62537 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
62538 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
62539
62540 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62541 -#define get_generation(s) atomic_read (&fs_generation(s))
62542 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62543 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62544 #define __fs_changed(gen,s) (gen != get_generation (s))
62545 #define fs_changed(gen,s) \
62546 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62547 index 52c83b6..18ed7eb 100644
62548 --- a/include/linux/reiserfs_fs_sb.h
62549 +++ b/include/linux/reiserfs_fs_sb.h
62550 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
62551 /* Comment? -Hans */
62552 wait_queue_head_t s_wait;
62553 /* To be obsoleted soon by per buffer seals.. -Hans */
62554 - atomic_t s_generation_counter; // increased by one every time the
62555 + atomic_unchecked_t s_generation_counter; // increased by one every time the
62556 // tree gets re-balanced
62557 unsigned long s_properties; /* File system properties. Currently holds
62558 on-disk FS format */
62559 diff --git a/include/linux/relay.h b/include/linux/relay.h
62560 index 14a86bc..17d0700 100644
62561 --- a/include/linux/relay.h
62562 +++ b/include/linux/relay.h
62563 @@ -159,7 +159,7 @@ struct rchan_callbacks
62564 * The callback should return 0 if successful, negative if not.
62565 */
62566 int (*remove_buf_file)(struct dentry *dentry);
62567 -};
62568 +} __no_const;
62569
62570 /*
62571 * CONFIG_RELAY kernel API, kernel/relay.c
62572 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62573 index c6c6084..5bf1212 100644
62574 --- a/include/linux/rfkill.h
62575 +++ b/include/linux/rfkill.h
62576 @@ -147,6 +147,7 @@ struct rfkill_ops {
62577 void (*query)(struct rfkill *rfkill, void *data);
62578 int (*set_block)(void *data, bool blocked);
62579 };
62580 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62581
62582 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62583 /**
62584 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62585 index 2148b12..519b820 100644
62586 --- a/include/linux/rmap.h
62587 +++ b/include/linux/rmap.h
62588 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62589 void anon_vma_init(void); /* create anon_vma_cachep */
62590 int anon_vma_prepare(struct vm_area_struct *);
62591 void unlink_anon_vmas(struct vm_area_struct *);
62592 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62593 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62594 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62595 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62596 void __anon_vma_link(struct vm_area_struct *);
62597
62598 static inline void anon_vma_merge(struct vm_area_struct *vma,
62599 diff --git a/include/linux/sched.h b/include/linux/sched.h
62600 index 41d0237..5a64056 100644
62601 --- a/include/linux/sched.h
62602 +++ b/include/linux/sched.h
62603 @@ -100,6 +100,7 @@ struct bio_list;
62604 struct fs_struct;
62605 struct perf_event_context;
62606 struct blk_plug;
62607 +struct linux_binprm;
62608
62609 /*
62610 * List of flags we want to share for kernel threads,
62611 @@ -380,10 +381,13 @@ struct user_namespace;
62612 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62613
62614 extern int sysctl_max_map_count;
62615 +extern unsigned long sysctl_heap_stack_gap;
62616
62617 #include <linux/aio.h>
62618
62619 #ifdef CONFIG_MMU
62620 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62621 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62622 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62623 extern unsigned long
62624 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62625 @@ -629,6 +633,17 @@ struct signal_struct {
62626 #ifdef CONFIG_TASKSTATS
62627 struct taskstats *stats;
62628 #endif
62629 +
62630 +#ifdef CONFIG_GRKERNSEC
62631 + u32 curr_ip;
62632 + u32 saved_ip;
62633 + u32 gr_saddr;
62634 + u32 gr_daddr;
62635 + u16 gr_sport;
62636 + u16 gr_dport;
62637 + u8 used_accept:1;
62638 +#endif
62639 +
62640 #ifdef CONFIG_AUDIT
62641 unsigned audit_tty;
62642 struct tty_audit_buf *tty_audit_buf;
62643 @@ -710,6 +725,11 @@ struct user_struct {
62644 struct key *session_keyring; /* UID's default session keyring */
62645 #endif
62646
62647 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62648 + unsigned int banned;
62649 + unsigned long ban_expires;
62650 +#endif
62651 +
62652 /* Hash table maintenance information */
62653 struct hlist_node uidhash_node;
62654 uid_t uid;
62655 @@ -1340,8 +1360,8 @@ struct task_struct {
62656 struct list_head thread_group;
62657
62658 struct completion *vfork_done; /* for vfork() */
62659 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62660 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62661 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62662 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62663
62664 cputime_t utime, stime, utimescaled, stimescaled;
62665 cputime_t gtime;
62666 @@ -1357,13 +1377,6 @@ struct task_struct {
62667 struct task_cputime cputime_expires;
62668 struct list_head cpu_timers[3];
62669
62670 -/* process credentials */
62671 - const struct cred __rcu *real_cred; /* objective and real subjective task
62672 - * credentials (COW) */
62673 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62674 - * credentials (COW) */
62675 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62676 -
62677 char comm[TASK_COMM_LEN]; /* executable name excluding path
62678 - access with [gs]et_task_comm (which lock
62679 it with task_lock())
62680 @@ -1380,8 +1393,16 @@ struct task_struct {
62681 #endif
62682 /* CPU-specific state of this task */
62683 struct thread_struct thread;
62684 +/* thread_info moved to task_struct */
62685 +#ifdef CONFIG_X86
62686 + struct thread_info tinfo;
62687 +#endif
62688 /* filesystem information */
62689 struct fs_struct *fs;
62690 +
62691 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62692 + * credentials (COW) */
62693 +
62694 /* open file information */
62695 struct files_struct *files;
62696 /* namespaces */
62697 @@ -1428,6 +1449,11 @@ struct task_struct {
62698 struct rt_mutex_waiter *pi_blocked_on;
62699 #endif
62700
62701 +/* process credentials */
62702 + const struct cred __rcu *real_cred; /* objective and real subjective task
62703 + * credentials (COW) */
62704 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62705 +
62706 #ifdef CONFIG_DEBUG_MUTEXES
62707 /* mutex deadlock detection */
62708 struct mutex_waiter *blocked_on;
62709 @@ -1537,6 +1563,21 @@ struct task_struct {
62710 unsigned long default_timer_slack_ns;
62711
62712 struct list_head *scm_work_list;
62713 +
62714 +#ifdef CONFIG_GRKERNSEC
62715 + /* grsecurity */
62716 + struct dentry *gr_chroot_dentry;
62717 + struct acl_subject_label *acl;
62718 + struct acl_role_label *role;
62719 + struct file *exec_file;
62720 + u16 acl_role_id;
62721 + /* is this the task that authenticated to the special role */
62722 + u8 acl_sp_role;
62723 + u8 is_writable;
62724 + u8 brute;
62725 + u8 gr_is_chrooted;
62726 +#endif
62727 +
62728 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62729 /* Index of current stored address in ret_stack */
62730 int curr_ret_stack;
62731 @@ -1571,6 +1612,57 @@ struct task_struct {
62732 #endif
62733 };
62734
62735 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62736 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62737 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62738 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62739 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62740 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62741 +
62742 +#ifdef CONFIG_PAX_SOFTMODE
62743 +extern int pax_softmode;
62744 +#endif
62745 +
62746 +extern int pax_check_flags(unsigned long *);
62747 +
62748 +/* if tsk != current then task_lock must be held on it */
62749 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62750 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62751 +{
62752 + if (likely(tsk->mm))
62753 + return tsk->mm->pax_flags;
62754 + else
62755 + return 0UL;
62756 +}
62757 +
62758 +/* if tsk != current then task_lock must be held on it */
62759 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62760 +{
62761 + if (likely(tsk->mm)) {
62762 + tsk->mm->pax_flags = flags;
62763 + return 0;
62764 + }
62765 + return -EINVAL;
62766 +}
62767 +#endif
62768 +
62769 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62770 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62771 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62772 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62773 +#endif
62774 +
62775 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62776 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62777 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62778 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
62779 +
62780 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
62781 +extern void pax_track_stack(void);
62782 +#else
62783 +static inline void pax_track_stack(void) {}
62784 +#endif
62785 +
62786 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62787 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62788
62789 @@ -2074,7 +2166,9 @@ void yield(void);
62790 extern struct exec_domain default_exec_domain;
62791
62792 union thread_union {
62793 +#ifndef CONFIG_X86
62794 struct thread_info thread_info;
62795 +#endif
62796 unsigned long stack[THREAD_SIZE/sizeof(long)];
62797 };
62798
62799 @@ -2107,6 +2201,7 @@ extern struct pid_namespace init_pid_ns;
62800 */
62801
62802 extern struct task_struct *find_task_by_vpid(pid_t nr);
62803 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62804 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62805 struct pid_namespace *ns);
62806
62807 @@ -2243,7 +2338,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62808 extern void exit_itimers(struct signal_struct *);
62809 extern void flush_itimer_signals(void);
62810
62811 -extern NORET_TYPE void do_group_exit(int);
62812 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
62813
62814 extern void daemonize(const char *, ...);
62815 extern int allow_signal(int);
62816 @@ -2408,13 +2503,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62817
62818 #endif
62819
62820 -static inline int object_is_on_stack(void *obj)
62821 +static inline int object_starts_on_stack(void *obj)
62822 {
62823 - void *stack = task_stack_page(current);
62824 + const void *stack = task_stack_page(current);
62825
62826 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62827 }
62828
62829 +#ifdef CONFIG_PAX_USERCOPY
62830 +extern int object_is_on_stack(const void *obj, unsigned long len);
62831 +#endif
62832 +
62833 extern void thread_info_cache_init(void);
62834
62835 #ifdef CONFIG_DEBUG_STACK_USAGE
62836 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62837 index 899fbb4..1cb4138 100644
62838 --- a/include/linux/screen_info.h
62839 +++ b/include/linux/screen_info.h
62840 @@ -43,7 +43,8 @@ struct screen_info {
62841 __u16 pages; /* 0x32 */
62842 __u16 vesa_attributes; /* 0x34 */
62843 __u32 capabilities; /* 0x36 */
62844 - __u8 _reserved[6]; /* 0x3a */
62845 + __u16 vesapm_size; /* 0x3a */
62846 + __u8 _reserved[4]; /* 0x3c */
62847 } __attribute__((packed));
62848
62849 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62850 diff --git a/include/linux/security.h b/include/linux/security.h
62851 index ebd2a53..2d949ae 100644
62852 --- a/include/linux/security.h
62853 +++ b/include/linux/security.h
62854 @@ -36,6 +36,7 @@
62855 #include <linux/key.h>
62856 #include <linux/xfrm.h>
62857 #include <linux/slab.h>
62858 +#include <linux/grsecurity.h>
62859 #include <net/flow.h>
62860
62861 /* Maximum number of letters for an LSM name string */
62862 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62863 index be720cd..a0e1b94 100644
62864 --- a/include/linux/seq_file.h
62865 +++ b/include/linux/seq_file.h
62866 @@ -33,6 +33,7 @@ struct seq_operations {
62867 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
62868 int (*show) (struct seq_file *m, void *v);
62869 };
62870 +typedef struct seq_operations __no_const seq_operations_no_const;
62871
62872 #define SEQ_SKIP 1
62873
62874 diff --git a/include/linux/shm.h b/include/linux/shm.h
62875 index 92808b8..c28cac4 100644
62876 --- a/include/linux/shm.h
62877 +++ b/include/linux/shm.h
62878 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
62879
62880 /* The task created the shm object. NULL if the task is dead. */
62881 struct task_struct *shm_creator;
62882 +#ifdef CONFIG_GRKERNSEC
62883 + time_t shm_createtime;
62884 + pid_t shm_lapid;
62885 +#endif
62886 };
62887
62888 /* shm_mode upper byte flags */
62889 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
62890 index 0f96646..cfb757a 100644
62891 --- a/include/linux/skbuff.h
62892 +++ b/include/linux/skbuff.h
62893 @@ -610,7 +610,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
62894 */
62895 static inline int skb_queue_empty(const struct sk_buff_head *list)
62896 {
62897 - return list->next == (struct sk_buff *)list;
62898 + return list->next == (const struct sk_buff *)list;
62899 }
62900
62901 /**
62902 @@ -623,7 +623,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
62903 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62904 const struct sk_buff *skb)
62905 {
62906 - return skb->next == (struct sk_buff *)list;
62907 + return skb->next == (const struct sk_buff *)list;
62908 }
62909
62910 /**
62911 @@ -636,7 +636,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
62912 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
62913 const struct sk_buff *skb)
62914 {
62915 - return skb->prev == (struct sk_buff *)list;
62916 + return skb->prev == (const struct sk_buff *)list;
62917 }
62918
62919 /**
62920 @@ -1458,7 +1458,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
62921 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
62922 */
62923 #ifndef NET_SKB_PAD
62924 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
62925 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
62926 #endif
62927
62928 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
62929 diff --git a/include/linux/slab.h b/include/linux/slab.h
62930 index 573c809..e84c132 100644
62931 --- a/include/linux/slab.h
62932 +++ b/include/linux/slab.h
62933 @@ -11,12 +11,20 @@
62934
62935 #include <linux/gfp.h>
62936 #include <linux/types.h>
62937 +#include <linux/err.h>
62938
62939 /*
62940 * Flags to pass to kmem_cache_create().
62941 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
62942 */
62943 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
62944 +
62945 +#ifdef CONFIG_PAX_USERCOPY
62946 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
62947 +#else
62948 +#define SLAB_USERCOPY 0x00000000UL
62949 +#endif
62950 +
62951 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
62952 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
62953 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
62954 @@ -87,10 +95,13 @@
62955 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
62956 * Both make kfree a no-op.
62957 */
62958 -#define ZERO_SIZE_PTR ((void *)16)
62959 +#define ZERO_SIZE_PTR \
62960 +({ \
62961 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
62962 + (void *)(-MAX_ERRNO-1L); \
62963 +})
62964
62965 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
62966 - (unsigned long)ZERO_SIZE_PTR)
62967 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
62968
62969 /*
62970 * struct kmem_cache related prototypes
62971 @@ -161,6 +172,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
62972 void kfree(const void *);
62973 void kzfree(const void *);
62974 size_t ksize(const void *);
62975 +void check_object_size(const void *ptr, unsigned long n, bool to);
62976
62977 /*
62978 * Allocator specific definitions. These are mainly used to establish optimized
62979 @@ -353,4 +365,59 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
62980
62981 void __init kmem_cache_init_late(void);
62982
62983 +#define kmalloc(x, y) \
62984 +({ \
62985 + void *___retval; \
62986 + intoverflow_t ___x = (intoverflow_t)x; \
62987 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
62988 + ___retval = NULL; \
62989 + else \
62990 + ___retval = kmalloc((size_t)___x, (y)); \
62991 + ___retval; \
62992 +})
62993 +
62994 +#define kmalloc_node(x, y, z) \
62995 +({ \
62996 + void *___retval; \
62997 + intoverflow_t ___x = (intoverflow_t)x; \
62998 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
62999 + ___retval = NULL; \
63000 + else \
63001 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
63002 + ___retval; \
63003 +})
63004 +
63005 +#define kzalloc(x, y) \
63006 +({ \
63007 + void *___retval; \
63008 + intoverflow_t ___x = (intoverflow_t)x; \
63009 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
63010 + ___retval = NULL; \
63011 + else \
63012 + ___retval = kzalloc((size_t)___x, (y)); \
63013 + ___retval; \
63014 +})
63015 +
63016 +#define __krealloc(x, y, z) \
63017 +({ \
63018 + void *___retval; \
63019 + intoverflow_t ___y = (intoverflow_t)y; \
63020 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
63021 + ___retval = NULL; \
63022 + else \
63023 + ___retval = __krealloc((x), (size_t)___y, (z)); \
63024 + ___retval; \
63025 +})
63026 +
63027 +#define krealloc(x, y, z) \
63028 +({ \
63029 + void *___retval; \
63030 + intoverflow_t ___y = (intoverflow_t)y; \
63031 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
63032 + ___retval = NULL; \
63033 + else \
63034 + ___retval = krealloc((x), (size_t)___y, (z)); \
63035 + ___retval; \
63036 +})
63037 +
63038 #endif /* _LINUX_SLAB_H */
63039 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63040 index d00e0ba..1b3bf7b 100644
63041 --- a/include/linux/slab_def.h
63042 +++ b/include/linux/slab_def.h
63043 @@ -68,10 +68,10 @@ struct kmem_cache {
63044 unsigned long node_allocs;
63045 unsigned long node_frees;
63046 unsigned long node_overflow;
63047 - atomic_t allochit;
63048 - atomic_t allocmiss;
63049 - atomic_t freehit;
63050 - atomic_t freemiss;
63051 + atomic_unchecked_t allochit;
63052 + atomic_unchecked_t allocmiss;
63053 + atomic_unchecked_t freehit;
63054 + atomic_unchecked_t freemiss;
63055
63056 /*
63057 * If debugging is enabled, then the allocator can add additional
63058 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63059 index f58d641..c56bf9c 100644
63060 --- a/include/linux/slub_def.h
63061 +++ b/include/linux/slub_def.h
63062 @@ -85,7 +85,7 @@ struct kmem_cache {
63063 struct kmem_cache_order_objects max;
63064 struct kmem_cache_order_objects min;
63065 gfp_t allocflags; /* gfp flags to use on each alloc */
63066 - int refcount; /* Refcount for slab cache destroy */
63067 + atomic_t refcount; /* Refcount for slab cache destroy */
63068 void (*ctor)(void *);
63069 int inuse; /* Offset to metadata */
63070 int align; /* Alignment */
63071 @@ -211,7 +211,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63072 }
63073
63074 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63075 -void *__kmalloc(size_t size, gfp_t flags);
63076 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
63077
63078 static __always_inline void *
63079 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63080 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63081 index de8832d..0147b46 100644
63082 --- a/include/linux/sonet.h
63083 +++ b/include/linux/sonet.h
63084 @@ -61,7 +61,7 @@ struct sonet_stats {
63085 #include <linux/atomic.h>
63086
63087 struct k_sonet_stats {
63088 -#define __HANDLE_ITEM(i) atomic_t i
63089 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63090 __SONET_ITEMS
63091 #undef __HANDLE_ITEM
63092 };
63093 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63094 index db7bcaf..1aca77e 100644
63095 --- a/include/linux/sunrpc/clnt.h
63096 +++ b/include/linux/sunrpc/clnt.h
63097 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63098 {
63099 switch (sap->sa_family) {
63100 case AF_INET:
63101 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
63102 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63103 case AF_INET6:
63104 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63105 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63106 }
63107 return 0;
63108 }
63109 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63110 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63111 const struct sockaddr *src)
63112 {
63113 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63114 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63115 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63116
63117 dsin->sin_family = ssin->sin_family;
63118 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63119 if (sa->sa_family != AF_INET6)
63120 return 0;
63121
63122 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63123 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63124 }
63125
63126 #endif /* __KERNEL__ */
63127 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63128 index e775689..9e206d9 100644
63129 --- a/include/linux/sunrpc/sched.h
63130 +++ b/include/linux/sunrpc/sched.h
63131 @@ -105,6 +105,7 @@ struct rpc_call_ops {
63132 void (*rpc_call_done)(struct rpc_task *, void *);
63133 void (*rpc_release)(void *);
63134 };
63135 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63136
63137 struct rpc_task_setup {
63138 struct rpc_task *task;
63139 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63140 index c14fe86..393245e 100644
63141 --- a/include/linux/sunrpc/svc_rdma.h
63142 +++ b/include/linux/sunrpc/svc_rdma.h
63143 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63144 extern unsigned int svcrdma_max_requests;
63145 extern unsigned int svcrdma_max_req_size;
63146
63147 -extern atomic_t rdma_stat_recv;
63148 -extern atomic_t rdma_stat_read;
63149 -extern atomic_t rdma_stat_write;
63150 -extern atomic_t rdma_stat_sq_starve;
63151 -extern atomic_t rdma_stat_rq_starve;
63152 -extern atomic_t rdma_stat_rq_poll;
63153 -extern atomic_t rdma_stat_rq_prod;
63154 -extern atomic_t rdma_stat_sq_poll;
63155 -extern atomic_t rdma_stat_sq_prod;
63156 +extern atomic_unchecked_t rdma_stat_recv;
63157 +extern atomic_unchecked_t rdma_stat_read;
63158 +extern atomic_unchecked_t rdma_stat_write;
63159 +extern atomic_unchecked_t rdma_stat_sq_starve;
63160 +extern atomic_unchecked_t rdma_stat_rq_starve;
63161 +extern atomic_unchecked_t rdma_stat_rq_poll;
63162 +extern atomic_unchecked_t rdma_stat_rq_prod;
63163 +extern atomic_unchecked_t rdma_stat_sq_poll;
63164 +extern atomic_unchecked_t rdma_stat_sq_prod;
63165
63166 #define RPCRDMA_VERSION 1
63167
63168 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63169 index 11684d9..0d245eb 100644
63170 --- a/include/linux/sysctl.h
63171 +++ b/include/linux/sysctl.h
63172 @@ -155,7 +155,11 @@ enum
63173 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63174 };
63175
63176 -
63177 +#ifdef CONFIG_PAX_SOFTMODE
63178 +enum {
63179 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63180 +};
63181 +#endif
63182
63183 /* CTL_VM names: */
63184 enum
63185 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63186
63187 extern int proc_dostring(struct ctl_table *, int,
63188 void __user *, size_t *, loff_t *);
63189 +extern int proc_dostring_modpriv(struct ctl_table *, int,
63190 + void __user *, size_t *, loff_t *);
63191 extern int proc_dointvec(struct ctl_table *, int,
63192 void __user *, size_t *, loff_t *);
63193 extern int proc_dointvec_minmax(struct ctl_table *, int,
63194 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63195 index ff7dc08..893e1bd 100644
63196 --- a/include/linux/tty_ldisc.h
63197 +++ b/include/linux/tty_ldisc.h
63198 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63199
63200 struct module *owner;
63201
63202 - int refcount;
63203 + atomic_t refcount;
63204 };
63205
63206 struct tty_ldisc {
63207 diff --git a/include/linux/types.h b/include/linux/types.h
63208 index 176da8c..e45e473 100644
63209 --- a/include/linux/types.h
63210 +++ b/include/linux/types.h
63211 @@ -213,10 +213,26 @@ typedef struct {
63212 int counter;
63213 } atomic_t;
63214
63215 +#ifdef CONFIG_PAX_REFCOUNT
63216 +typedef struct {
63217 + int counter;
63218 +} atomic_unchecked_t;
63219 +#else
63220 +typedef atomic_t atomic_unchecked_t;
63221 +#endif
63222 +
63223 #ifdef CONFIG_64BIT
63224 typedef struct {
63225 long counter;
63226 } atomic64_t;
63227 +
63228 +#ifdef CONFIG_PAX_REFCOUNT
63229 +typedef struct {
63230 + long counter;
63231 +} atomic64_unchecked_t;
63232 +#else
63233 +typedef atomic64_t atomic64_unchecked_t;
63234 +#endif
63235 #endif
63236
63237 struct list_head {
63238 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63239 index 5ca0951..ab496a5 100644
63240 --- a/include/linux/uaccess.h
63241 +++ b/include/linux/uaccess.h
63242 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63243 long ret; \
63244 mm_segment_t old_fs = get_fs(); \
63245 \
63246 - set_fs(KERNEL_DS); \
63247 pagefault_disable(); \
63248 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63249 - pagefault_enable(); \
63250 + set_fs(KERNEL_DS); \
63251 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63252 set_fs(old_fs); \
63253 + pagefault_enable(); \
63254 ret; \
63255 })
63256
63257 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63258 index 99c1b4d..bb94261 100644
63259 --- a/include/linux/unaligned/access_ok.h
63260 +++ b/include/linux/unaligned/access_ok.h
63261 @@ -6,32 +6,32 @@
63262
63263 static inline u16 get_unaligned_le16(const void *p)
63264 {
63265 - return le16_to_cpup((__le16 *)p);
63266 + return le16_to_cpup((const __le16 *)p);
63267 }
63268
63269 static inline u32 get_unaligned_le32(const void *p)
63270 {
63271 - return le32_to_cpup((__le32 *)p);
63272 + return le32_to_cpup((const __le32 *)p);
63273 }
63274
63275 static inline u64 get_unaligned_le64(const void *p)
63276 {
63277 - return le64_to_cpup((__le64 *)p);
63278 + return le64_to_cpup((const __le64 *)p);
63279 }
63280
63281 static inline u16 get_unaligned_be16(const void *p)
63282 {
63283 - return be16_to_cpup((__be16 *)p);
63284 + return be16_to_cpup((const __be16 *)p);
63285 }
63286
63287 static inline u32 get_unaligned_be32(const void *p)
63288 {
63289 - return be32_to_cpup((__be32 *)p);
63290 + return be32_to_cpup((const __be32 *)p);
63291 }
63292
63293 static inline u64 get_unaligned_be64(const void *p)
63294 {
63295 - return be64_to_cpup((__be64 *)p);
63296 + return be64_to_cpup((const __be64 *)p);
63297 }
63298
63299 static inline void put_unaligned_le16(u16 val, void *p)
63300 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63301 index cf97b5b..40ebc87 100644
63302 --- a/include/linux/vermagic.h
63303 +++ b/include/linux/vermagic.h
63304 @@ -26,9 +26,35 @@
63305 #define MODULE_ARCH_VERMAGIC ""
63306 #endif
63307
63308 +#ifdef CONFIG_PAX_REFCOUNT
63309 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63310 +#else
63311 +#define MODULE_PAX_REFCOUNT ""
63312 +#endif
63313 +
63314 +#ifdef CONSTIFY_PLUGIN
63315 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63316 +#else
63317 +#define MODULE_CONSTIFY_PLUGIN ""
63318 +#endif
63319 +
63320 +#ifdef STACKLEAK_PLUGIN
63321 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63322 +#else
63323 +#define MODULE_STACKLEAK_PLUGIN ""
63324 +#endif
63325 +
63326 +#ifdef CONFIG_GRKERNSEC
63327 +#define MODULE_GRSEC "GRSEC "
63328 +#else
63329 +#define MODULE_GRSEC ""
63330 +#endif
63331 +
63332 #define VERMAGIC_STRING \
63333 UTS_RELEASE " " \
63334 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63335 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63336 - MODULE_ARCH_VERMAGIC
63337 + MODULE_ARCH_VERMAGIC \
63338 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63339 + MODULE_GRSEC
63340
63341 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63342 index 687fb11..b342358 100644
63343 --- a/include/linux/vmalloc.h
63344 +++ b/include/linux/vmalloc.h
63345 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63346 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63347 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63348 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63349 +
63350 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63351 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63352 +#endif
63353 +
63354 /* bits [20..32] reserved for arch specific ioremap internals */
63355
63356 /*
63357 @@ -156,4 +161,103 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
63358 # endif
63359 #endif
63360
63361 +#define vmalloc(x) \
63362 +({ \
63363 + void *___retval; \
63364 + intoverflow_t ___x = (intoverflow_t)x; \
63365 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
63366 + ___retval = NULL; \
63367 + else \
63368 + ___retval = vmalloc((unsigned long)___x); \
63369 + ___retval; \
63370 +})
63371 +
63372 +#define vzalloc(x) \
63373 +({ \
63374 + void *___retval; \
63375 + intoverflow_t ___x = (intoverflow_t)x; \
63376 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
63377 + ___retval = NULL; \
63378 + else \
63379 + ___retval = vzalloc((unsigned long)___x); \
63380 + ___retval; \
63381 +})
63382 +
63383 +#define __vmalloc(x, y, z) \
63384 +({ \
63385 + void *___retval; \
63386 + intoverflow_t ___x = (intoverflow_t)x; \
63387 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
63388 + ___retval = NULL; \
63389 + else \
63390 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
63391 + ___retval; \
63392 +})
63393 +
63394 +#define vmalloc_user(x) \
63395 +({ \
63396 + void *___retval; \
63397 + intoverflow_t ___x = (intoverflow_t)x; \
63398 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
63399 + ___retval = NULL; \
63400 + else \
63401 + ___retval = vmalloc_user((unsigned long)___x); \
63402 + ___retval; \
63403 +})
63404 +
63405 +#define vmalloc_exec(x) \
63406 +({ \
63407 + void *___retval; \
63408 + intoverflow_t ___x = (intoverflow_t)x; \
63409 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
63410 + ___retval = NULL; \
63411 + else \
63412 + ___retval = vmalloc_exec((unsigned long)___x); \
63413 + ___retval; \
63414 +})
63415 +
63416 +#define vmalloc_node(x, y) \
63417 +({ \
63418 + void *___retval; \
63419 + intoverflow_t ___x = (intoverflow_t)x; \
63420 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
63421 + ___retval = NULL; \
63422 + else \
63423 + ___retval = vmalloc_node((unsigned long)___x, (y));\
63424 + ___retval; \
63425 +})
63426 +
63427 +#define vzalloc_node(x, y) \
63428 +({ \
63429 + void *___retval; \
63430 + intoverflow_t ___x = (intoverflow_t)x; \
63431 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
63432 + ___retval = NULL; \
63433 + else \
63434 + ___retval = vzalloc_node((unsigned long)___x, (y));\
63435 + ___retval; \
63436 +})
63437 +
63438 +#define vmalloc_32(x) \
63439 +({ \
63440 + void *___retval; \
63441 + intoverflow_t ___x = (intoverflow_t)x; \
63442 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
63443 + ___retval = NULL; \
63444 + else \
63445 + ___retval = vmalloc_32((unsigned long)___x); \
63446 + ___retval; \
63447 +})
63448 +
63449 +#define vmalloc_32_user(x) \
63450 +({ \
63451 +void *___retval; \
63452 + intoverflow_t ___x = (intoverflow_t)x; \
63453 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
63454 + ___retval = NULL; \
63455 + else \
63456 + ___retval = vmalloc_32_user((unsigned long)___x);\
63457 + ___retval; \
63458 +})
63459 +
63460 #endif /* _LINUX_VMALLOC_H */
63461 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63462 index 65efb92..137adbb 100644
63463 --- a/include/linux/vmstat.h
63464 +++ b/include/linux/vmstat.h
63465 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63466 /*
63467 * Zone based page accounting with per cpu differentials.
63468 */
63469 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63470 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63471
63472 static inline void zone_page_state_add(long x, struct zone *zone,
63473 enum zone_stat_item item)
63474 {
63475 - atomic_long_add(x, &zone->vm_stat[item]);
63476 - atomic_long_add(x, &vm_stat[item]);
63477 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63478 + atomic_long_add_unchecked(x, &vm_stat[item]);
63479 }
63480
63481 static inline unsigned long global_page_state(enum zone_stat_item item)
63482 {
63483 - long x = atomic_long_read(&vm_stat[item]);
63484 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63485 #ifdef CONFIG_SMP
63486 if (x < 0)
63487 x = 0;
63488 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63489 static inline unsigned long zone_page_state(struct zone *zone,
63490 enum zone_stat_item item)
63491 {
63492 - long x = atomic_long_read(&zone->vm_stat[item]);
63493 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63494 #ifdef CONFIG_SMP
63495 if (x < 0)
63496 x = 0;
63497 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63498 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63499 enum zone_stat_item item)
63500 {
63501 - long x = atomic_long_read(&zone->vm_stat[item]);
63502 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63503
63504 #ifdef CONFIG_SMP
63505 int cpu;
63506 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63507
63508 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63509 {
63510 - atomic_long_inc(&zone->vm_stat[item]);
63511 - atomic_long_inc(&vm_stat[item]);
63512 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63513 + atomic_long_inc_unchecked(&vm_stat[item]);
63514 }
63515
63516 static inline void __inc_zone_page_state(struct page *page,
63517 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63518
63519 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63520 {
63521 - atomic_long_dec(&zone->vm_stat[item]);
63522 - atomic_long_dec(&vm_stat[item]);
63523 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63524 + atomic_long_dec_unchecked(&vm_stat[item]);
63525 }
63526
63527 static inline void __dec_zone_page_state(struct page *page,
63528 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63529 index 4aeff96..b378cdc 100644
63530 --- a/include/media/saa7146_vv.h
63531 +++ b/include/media/saa7146_vv.h
63532 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63533 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63534
63535 /* the extension can override this */
63536 - struct v4l2_ioctl_ops ops;
63537 + v4l2_ioctl_ops_no_const ops;
63538 /* pointer to the saa7146 core ops */
63539 const struct v4l2_ioctl_ops *core_ops;
63540
63541 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63542 index c7c40f1..4f01585 100644
63543 --- a/include/media/v4l2-dev.h
63544 +++ b/include/media/v4l2-dev.h
63545 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63546
63547
63548 struct v4l2_file_operations {
63549 - struct module *owner;
63550 + struct module * const owner;
63551 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63552 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63553 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63554 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
63555 int (*open) (struct file *);
63556 int (*release) (struct file *);
63557 };
63558 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63559
63560 /*
63561 * Newer version of video_device, handled by videodev2.c
63562 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63563 index dd9f1e7..8c4dd86 100644
63564 --- a/include/media/v4l2-ioctl.h
63565 +++ b/include/media/v4l2-ioctl.h
63566 @@ -272,7 +272,7 @@ struct v4l2_ioctl_ops {
63567 long (*vidioc_default) (struct file *file, void *fh,
63568 bool valid_prio, int cmd, void *arg);
63569 };
63570 -
63571 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63572
63573 /* v4l debugging and diagnostics */
63574
63575 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63576 index c5dedd8..a93b07b 100644
63577 --- a/include/net/caif/caif_hsi.h
63578 +++ b/include/net/caif/caif_hsi.h
63579 @@ -94,7 +94,7 @@ struct cfhsi_drv {
63580 void (*rx_done_cb) (struct cfhsi_drv *drv);
63581 void (*wake_up_cb) (struct cfhsi_drv *drv);
63582 void (*wake_down_cb) (struct cfhsi_drv *drv);
63583 -};
63584 +} __no_const;
63585
63586 /* Structure implemented by HSI device. */
63587 struct cfhsi_dev {
63588 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63589 index 9e5425b..8136ffc 100644
63590 --- a/include/net/caif/cfctrl.h
63591 +++ b/include/net/caif/cfctrl.h
63592 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63593 void (*radioset_rsp)(void);
63594 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63595 struct cflayer *client_layer);
63596 -};
63597 +} __no_const;
63598
63599 /* Link Setup Parameters for CAIF-Links. */
63600 struct cfctrl_link_param {
63601 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63602 struct cfctrl {
63603 struct cfsrvl serv;
63604 struct cfctrl_rsp res;
63605 - atomic_t req_seq_no;
63606 - atomic_t rsp_seq_no;
63607 + atomic_unchecked_t req_seq_no;
63608 + atomic_unchecked_t rsp_seq_no;
63609 struct list_head list;
63610 /* Protects from simultaneous access to first_req list */
63611 spinlock_t info_list_lock;
63612 diff --git a/include/net/flow.h b/include/net/flow.h
63613 index a094477..bc91db1 100644
63614 --- a/include/net/flow.h
63615 +++ b/include/net/flow.h
63616 @@ -207,6 +207,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63617 u8 dir, flow_resolve_t resolver, void *ctx);
63618
63619 extern void flow_cache_flush(void);
63620 -extern atomic_t flow_cache_genid;
63621 +extern atomic_unchecked_t flow_cache_genid;
63622
63623 #endif
63624 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63625 index 78c83e6..db3518d 100644
63626 --- a/include/net/inetpeer.h
63627 +++ b/include/net/inetpeer.h
63628 @@ -47,8 +47,8 @@ struct inet_peer {
63629 */
63630 union {
63631 struct {
63632 - atomic_t rid; /* Frag reception counter */
63633 - atomic_t ip_id_count; /* IP ID for the next packet */
63634 + atomic_unchecked_t rid; /* Frag reception counter */
63635 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63636 __u32 tcp_ts;
63637 __u32 tcp_ts_stamp;
63638 };
63639 @@ -112,11 +112,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63640 more++;
63641 inet_peer_refcheck(p);
63642 do {
63643 - old = atomic_read(&p->ip_id_count);
63644 + old = atomic_read_unchecked(&p->ip_id_count);
63645 new = old + more;
63646 if (!new)
63647 new = 1;
63648 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63649 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63650 return new;
63651 }
63652
63653 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63654 index 10422ef..662570f 100644
63655 --- a/include/net/ip_fib.h
63656 +++ b/include/net/ip_fib.h
63657 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63658
63659 #define FIB_RES_SADDR(net, res) \
63660 ((FIB_RES_NH(res).nh_saddr_genid == \
63661 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63662 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63663 FIB_RES_NH(res).nh_saddr : \
63664 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63665 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63666 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63667 index 8fa4430..05dd772 100644
63668 --- a/include/net/ip_vs.h
63669 +++ b/include/net/ip_vs.h
63670 @@ -509,7 +509,7 @@ struct ip_vs_conn {
63671 struct ip_vs_conn *control; /* Master control connection */
63672 atomic_t n_control; /* Number of controlled ones */
63673 struct ip_vs_dest *dest; /* real server */
63674 - atomic_t in_pkts; /* incoming packet counter */
63675 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63676
63677 /* packet transmitter for different forwarding methods. If it
63678 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63679 @@ -647,7 +647,7 @@ struct ip_vs_dest {
63680 __be16 port; /* port number of the server */
63681 union nf_inet_addr addr; /* IP address of the server */
63682 volatile unsigned flags; /* dest status flags */
63683 - atomic_t conn_flags; /* flags to copy to conn */
63684 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63685 atomic_t weight; /* server weight */
63686
63687 atomic_t refcnt; /* reference counter */
63688 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63689 index 69b610a..fe3962c 100644
63690 --- a/include/net/irda/ircomm_core.h
63691 +++ b/include/net/irda/ircomm_core.h
63692 @@ -51,7 +51,7 @@ typedef struct {
63693 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63694 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63695 struct ircomm_info *);
63696 -} call_t;
63697 +} __no_const call_t;
63698
63699 struct ircomm_cb {
63700 irda_queue_t queue;
63701 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63702 index 59ba38bc..d515662 100644
63703 --- a/include/net/irda/ircomm_tty.h
63704 +++ b/include/net/irda/ircomm_tty.h
63705 @@ -35,6 +35,7 @@
63706 #include <linux/termios.h>
63707 #include <linux/timer.h>
63708 #include <linux/tty.h> /* struct tty_struct */
63709 +#include <asm/local.h>
63710
63711 #include <net/irda/irias_object.h>
63712 #include <net/irda/ircomm_core.h>
63713 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63714 unsigned short close_delay;
63715 unsigned short closing_wait; /* time to wait before closing */
63716
63717 - int open_count;
63718 - int blocked_open; /* # of blocked opens */
63719 + local_t open_count;
63720 + local_t blocked_open; /* # of blocked opens */
63721
63722 /* Protect concurent access to :
63723 * o self->open_count
63724 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63725 index f82a1e8..82d81e8 100644
63726 --- a/include/net/iucv/af_iucv.h
63727 +++ b/include/net/iucv/af_iucv.h
63728 @@ -87,7 +87,7 @@ struct iucv_sock {
63729 struct iucv_sock_list {
63730 struct hlist_head head;
63731 rwlock_t lock;
63732 - atomic_t autobind_name;
63733 + atomic_unchecked_t autobind_name;
63734 };
63735
63736 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63737 diff --git a/include/net/lapb.h b/include/net/lapb.h
63738 index 96cb5dd..25e8d4f 100644
63739 --- a/include/net/lapb.h
63740 +++ b/include/net/lapb.h
63741 @@ -95,7 +95,7 @@ struct lapb_cb {
63742 struct sk_buff_head write_queue;
63743 struct sk_buff_head ack_queue;
63744 unsigned char window;
63745 - struct lapb_register_struct callbacks;
63746 + struct lapb_register_struct *callbacks;
63747
63748 /* FRMR control information */
63749 struct lapb_frame frmr_data;
63750 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63751 index 2720884..3aa5c25 100644
63752 --- a/include/net/neighbour.h
63753 +++ b/include/net/neighbour.h
63754 @@ -122,7 +122,7 @@ struct neigh_ops {
63755 void (*error_report)(struct neighbour *, struct sk_buff *);
63756 int (*output)(struct neighbour *, struct sk_buff *);
63757 int (*connected_output)(struct neighbour *, struct sk_buff *);
63758 -};
63759 +} __do_const;
63760
63761 struct pneigh_entry {
63762 struct pneigh_entry *next;
63763 diff --git a/include/net/netlink.h b/include/net/netlink.h
63764 index 98c1854..d4add7b 100644
63765 --- a/include/net/netlink.h
63766 +++ b/include/net/netlink.h
63767 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63768 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63769 {
63770 if (mark)
63771 - skb_trim(skb, (unsigned char *) mark - skb->data);
63772 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63773 }
63774
63775 /**
63776 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63777 index d786b4f..4c3dd41 100644
63778 --- a/include/net/netns/ipv4.h
63779 +++ b/include/net/netns/ipv4.h
63780 @@ -56,8 +56,8 @@ struct netns_ipv4 {
63781
63782 unsigned int sysctl_ping_group_range[2];
63783
63784 - atomic_t rt_genid;
63785 - atomic_t dev_addr_genid;
63786 + atomic_unchecked_t rt_genid;
63787 + atomic_unchecked_t dev_addr_genid;
63788
63789 #ifdef CONFIG_IP_MROUTE
63790 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63791 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63792 index 6a72a58..e6a127d 100644
63793 --- a/include/net/sctp/sctp.h
63794 +++ b/include/net/sctp/sctp.h
63795 @@ -318,9 +318,9 @@ do { \
63796
63797 #else /* SCTP_DEBUG */
63798
63799 -#define SCTP_DEBUG_PRINTK(whatever...)
63800 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63801 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63802 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63803 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63804 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63805 #define SCTP_ENABLE_DEBUG
63806 #define SCTP_DISABLE_DEBUG
63807 #define SCTP_ASSERT(expr, str, func)
63808 diff --git a/include/net/sock.h b/include/net/sock.h
63809 index 8e4062f..77b041e 100644
63810 --- a/include/net/sock.h
63811 +++ b/include/net/sock.h
63812 @@ -278,7 +278,7 @@ struct sock {
63813 #ifdef CONFIG_RPS
63814 __u32 sk_rxhash;
63815 #endif
63816 - atomic_t sk_drops;
63817 + atomic_unchecked_t sk_drops;
63818 int sk_rcvbuf;
63819
63820 struct sk_filter __rcu *sk_filter;
63821 @@ -1391,7 +1391,7 @@ static inline void sk_nocaps_add(struct sock *sk, int flags)
63822 }
63823
63824 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63825 - char __user *from, char *to,
63826 + char __user *from, unsigned char *to,
63827 int copy, int offset)
63828 {
63829 if (skb->ip_summed == CHECKSUM_NONE) {
63830 diff --git a/include/net/tcp.h b/include/net/tcp.h
63831 index acc620a..f4d99c6 100644
63832 --- a/include/net/tcp.h
63833 +++ b/include/net/tcp.h
63834 @@ -1401,8 +1401,8 @@ enum tcp_seq_states {
63835 struct tcp_seq_afinfo {
63836 char *name;
63837 sa_family_t family;
63838 - struct file_operations seq_fops;
63839 - struct seq_operations seq_ops;
63840 + file_operations_no_const seq_fops;
63841 + seq_operations_no_const seq_ops;
63842 };
63843
63844 struct tcp_iter_state {
63845 diff --git a/include/net/udp.h b/include/net/udp.h
63846 index 67ea6fc..e42aee8 100644
63847 --- a/include/net/udp.h
63848 +++ b/include/net/udp.h
63849 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
63850 char *name;
63851 sa_family_t family;
63852 struct udp_table *udp_table;
63853 - struct file_operations seq_fops;
63854 - struct seq_operations seq_ops;
63855 + file_operations_no_const seq_fops;
63856 + seq_operations_no_const seq_ops;
63857 };
63858
63859 struct udp_iter_state {
63860 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
63861 index b203e14..1df3991 100644
63862 --- a/include/net/xfrm.h
63863 +++ b/include/net/xfrm.h
63864 @@ -505,7 +505,7 @@ struct xfrm_policy {
63865 struct timer_list timer;
63866
63867 struct flow_cache_object flo;
63868 - atomic_t genid;
63869 + atomic_unchecked_t genid;
63870 u32 priority;
63871 u32 index;
63872 struct xfrm_mark mark;
63873 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
63874 index 2d0191c..a55797d 100644
63875 --- a/include/rdma/iw_cm.h
63876 +++ b/include/rdma/iw_cm.h
63877 @@ -120,7 +120,7 @@ struct iw_cm_verbs {
63878 int backlog);
63879
63880 int (*destroy_listen)(struct iw_cm_id *cm_id);
63881 -};
63882 +} __no_const;
63883
63884 /**
63885 * iw_create_cm_id - Create an IW CM identifier.
63886 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
63887 index 7d96829..4ba78d3 100644
63888 --- a/include/scsi/libfc.h
63889 +++ b/include/scsi/libfc.h
63890 @@ -758,6 +758,7 @@ struct libfc_function_template {
63891 */
63892 void (*disc_stop_final) (struct fc_lport *);
63893 };
63894 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
63895
63896 /**
63897 * struct fc_disc - Discovery context
63898 @@ -861,7 +862,7 @@ struct fc_lport {
63899 struct fc_vport *vport;
63900
63901 /* Operational Information */
63902 - struct libfc_function_template tt;
63903 + libfc_function_template_no_const tt;
63904 u8 link_up;
63905 u8 qfull;
63906 enum fc_lport_state state;
63907 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
63908 index d371c3c..e228a8c 100644
63909 --- a/include/scsi/scsi_device.h
63910 +++ b/include/scsi/scsi_device.h
63911 @@ -161,9 +161,9 @@ struct scsi_device {
63912 unsigned int max_device_blocked; /* what device_blocked counts down from */
63913 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
63914
63915 - atomic_t iorequest_cnt;
63916 - atomic_t iodone_cnt;
63917 - atomic_t ioerr_cnt;
63918 + atomic_unchecked_t iorequest_cnt;
63919 + atomic_unchecked_t iodone_cnt;
63920 + atomic_unchecked_t ioerr_cnt;
63921
63922 struct device sdev_gendev,
63923 sdev_dev;
63924 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
63925 index 2a65167..91e01f8 100644
63926 --- a/include/scsi/scsi_transport_fc.h
63927 +++ b/include/scsi/scsi_transport_fc.h
63928 @@ -711,7 +711,7 @@ struct fc_function_template {
63929 unsigned long show_host_system_hostname:1;
63930
63931 unsigned long disable_target_scan:1;
63932 -};
63933 +} __do_const;
63934
63935
63936 /**
63937 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
63938 index 030b87c..98a6954 100644
63939 --- a/include/sound/ak4xxx-adda.h
63940 +++ b/include/sound/ak4xxx-adda.h
63941 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
63942 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
63943 unsigned char val);
63944 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
63945 -};
63946 +} __no_const;
63947
63948 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
63949
63950 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
63951 index 8c05e47..2b5df97 100644
63952 --- a/include/sound/hwdep.h
63953 +++ b/include/sound/hwdep.h
63954 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
63955 struct snd_hwdep_dsp_status *status);
63956 int (*dsp_load)(struct snd_hwdep *hw,
63957 struct snd_hwdep_dsp_image *image);
63958 -};
63959 +} __no_const;
63960
63961 struct snd_hwdep {
63962 struct snd_card *card;
63963 diff --git a/include/sound/info.h b/include/sound/info.h
63964 index 4e94cf1..76748b1 100644
63965 --- a/include/sound/info.h
63966 +++ b/include/sound/info.h
63967 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
63968 struct snd_info_buffer *buffer);
63969 void (*write)(struct snd_info_entry *entry,
63970 struct snd_info_buffer *buffer);
63971 -};
63972 +} __no_const;
63973
63974 struct snd_info_entry_ops {
63975 int (*open)(struct snd_info_entry *entry,
63976 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
63977 index 57e71fa..a2c7534 100644
63978 --- a/include/sound/pcm.h
63979 +++ b/include/sound/pcm.h
63980 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
63981 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
63982 int (*ack)(struct snd_pcm_substream *substream);
63983 };
63984 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
63985
63986 /*
63987 *
63988 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
63989 index af1b49e..a5d55a5 100644
63990 --- a/include/sound/sb16_csp.h
63991 +++ b/include/sound/sb16_csp.h
63992 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
63993 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
63994 int (*csp_stop) (struct snd_sb_csp * p);
63995 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
63996 -};
63997 +} __no_const;
63998
63999 /*
64000 * CSP private data
64001 diff --git a/include/sound/soc.h b/include/sound/soc.h
64002 index aa19f5a..a5b8208 100644
64003 --- a/include/sound/soc.h
64004 +++ b/include/sound/soc.h
64005 @@ -676,7 +676,7 @@ struct snd_soc_platform_driver {
64006 /* platform IO - used for platform DAPM */
64007 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64008 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64009 -};
64010 +} __do_const;
64011
64012 struct snd_soc_platform {
64013 const char *name;
64014 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64015 index 444cd6b..3327cc5 100644
64016 --- a/include/sound/ymfpci.h
64017 +++ b/include/sound/ymfpci.h
64018 @@ -358,7 +358,7 @@ struct snd_ymfpci {
64019 spinlock_t reg_lock;
64020 spinlock_t voice_lock;
64021 wait_queue_head_t interrupt_sleep;
64022 - atomic_t interrupt_sleep_count;
64023 + atomic_unchecked_t interrupt_sleep_count;
64024 struct snd_info_entry *proc_entry;
64025 const struct firmware *dsp_microcode;
64026 const struct firmware *controller_microcode;
64027 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64028 index 2704065..e10f3ef 100644
64029 --- a/include/target/target_core_base.h
64030 +++ b/include/target/target_core_base.h
64031 @@ -356,7 +356,7 @@ struct t10_reservation_ops {
64032 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64033 int (*t10_pr_register)(struct se_cmd *);
64034 int (*t10_pr_clear)(struct se_cmd *);
64035 -};
64036 +} __no_const;
64037
64038 struct t10_reservation {
64039 /* Reservation effects all target ports */
64040 @@ -496,8 +496,8 @@ struct se_cmd {
64041 atomic_t t_task_cdbs_left;
64042 atomic_t t_task_cdbs_ex_left;
64043 atomic_t t_task_cdbs_timeout_left;
64044 - atomic_t t_task_cdbs_sent;
64045 - atomic_t t_transport_aborted;
64046 + atomic_unchecked_t t_task_cdbs_sent;
64047 + atomic_unchecked_t t_transport_aborted;
64048 atomic_t t_transport_active;
64049 atomic_t t_transport_complete;
64050 atomic_t t_transport_queue_active;
64051 @@ -744,7 +744,7 @@ struct se_device {
64052 atomic_t active_cmds;
64053 atomic_t simple_cmds;
64054 atomic_t depth_left;
64055 - atomic_t dev_ordered_id;
64056 + atomic_unchecked_t dev_ordered_id;
64057 atomic_t dev_tur_active;
64058 atomic_t execute_tasks;
64059 atomic_t dev_status_thr_count;
64060 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64061 index 1c09820..7f5ec79 100644
64062 --- a/include/trace/events/irq.h
64063 +++ b/include/trace/events/irq.h
64064 @@ -36,7 +36,7 @@ struct softirq_action;
64065 */
64066 TRACE_EVENT(irq_handler_entry,
64067
64068 - TP_PROTO(int irq, struct irqaction *action),
64069 + TP_PROTO(int irq, const struct irqaction *action),
64070
64071 TP_ARGS(irq, action),
64072
64073 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64074 */
64075 TRACE_EVENT(irq_handler_exit,
64076
64077 - TP_PROTO(int irq, struct irqaction *action, int ret),
64078 + TP_PROTO(int irq, const struct irqaction *action, int ret),
64079
64080 TP_ARGS(irq, action, ret),
64081
64082 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64083 index 69d485a..dd0bee7 100644
64084 --- a/include/video/udlfb.h
64085 +++ b/include/video/udlfb.h
64086 @@ -51,10 +51,10 @@ struct dlfb_data {
64087 int base8;
64088 u32 pseudo_palette[256];
64089 /* blit-only rendering path metrics, exposed through sysfs */
64090 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64091 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64092 - atomic_t bytes_sent; /* to usb, after compression including overhead */
64093 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64094 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64095 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64096 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64097 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64098 };
64099
64100 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64101 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64102 index 0993a22..32ba2fe 100644
64103 --- a/include/video/uvesafb.h
64104 +++ b/include/video/uvesafb.h
64105 @@ -177,6 +177,7 @@ struct uvesafb_par {
64106 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64107 u8 pmi_setpal; /* PMI for palette changes */
64108 u16 *pmi_base; /* protected mode interface location */
64109 + u8 *pmi_code; /* protected mode code location */
64110 void *pmi_start;
64111 void *pmi_pal;
64112 u8 *vbe_state_orig; /*
64113 diff --git a/init/Kconfig b/init/Kconfig
64114 index d627783..693a9f3 100644
64115 --- a/init/Kconfig
64116 +++ b/init/Kconfig
64117 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
64118
64119 config COMPAT_BRK
64120 bool "Disable heap randomization"
64121 - default y
64122 + default n
64123 help
64124 Randomizing heap placement makes heap exploits harder, but it
64125 also breaks ancient binaries (including anything libc5 based).
64126 diff --git a/init/do_mounts.c b/init/do_mounts.c
64127 index c0851a8..4f8977d 100644
64128 --- a/init/do_mounts.c
64129 +++ b/init/do_mounts.c
64130 @@ -287,11 +287,11 @@ static void __init get_fs_names(char *page)
64131
64132 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64133 {
64134 - int err = sys_mount(name, "/root", fs, flags, data);
64135 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64136 if (err)
64137 return err;
64138
64139 - sys_chdir((const char __user __force *)"/root");
64140 + sys_chdir((const char __force_user*)"/root");
64141 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
64142 printk(KERN_INFO
64143 "VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
64144 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...)
64145 va_start(args, fmt);
64146 vsprintf(buf, fmt, args);
64147 va_end(args);
64148 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64149 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64150 if (fd >= 0) {
64151 sys_ioctl(fd, FDEJECT, 0);
64152 sys_close(fd);
64153 }
64154 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64155 - fd = sys_open("/dev/console", O_RDWR, 0);
64156 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64157 if (fd >= 0) {
64158 sys_ioctl(fd, TCGETS, (long)&termios);
64159 termios.c_lflag &= ~ICANON;
64160 sys_ioctl(fd, TCSETSF, (long)&termios);
64161 - sys_read(fd, &c, 1);
64162 + sys_read(fd, (char __user *)&c, 1);
64163 termios.c_lflag |= ICANON;
64164 sys_ioctl(fd, TCSETSF, (long)&termios);
64165 sys_close(fd);
64166 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
64167 mount_root();
64168 out:
64169 devtmpfs_mount("dev");
64170 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64171 - sys_chroot((const char __user __force *)".");
64172 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64173 + sys_chroot((const char __force_user *)".");
64174 }
64175 diff --git a/init/do_mounts.h b/init/do_mounts.h
64176 index f5b978a..69dbfe8 100644
64177 --- a/init/do_mounts.h
64178 +++ b/init/do_mounts.h
64179 @@ -15,15 +15,15 @@ extern int root_mountflags;
64180
64181 static inline int create_dev(char *name, dev_t dev)
64182 {
64183 - sys_unlink(name);
64184 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64185 + sys_unlink((char __force_user *)name);
64186 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64187 }
64188
64189 #if BITS_PER_LONG == 32
64190 static inline u32 bstat(char *name)
64191 {
64192 struct stat64 stat;
64193 - if (sys_stat64(name, &stat) != 0)
64194 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64195 return 0;
64196 if (!S_ISBLK(stat.st_mode))
64197 return 0;
64198 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64199 static inline u32 bstat(char *name)
64200 {
64201 struct stat stat;
64202 - if (sys_newstat(name, &stat) != 0)
64203 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64204 return 0;
64205 if (!S_ISBLK(stat.st_mode))
64206 return 0;
64207 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64208 index 3098a38..253064e 100644
64209 --- a/init/do_mounts_initrd.c
64210 +++ b/init/do_mounts_initrd.c
64211 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
64212 create_dev("/dev/root.old", Root_RAM0);
64213 /* mount initrd on rootfs' /root */
64214 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64215 - sys_mkdir("/old", 0700);
64216 - root_fd = sys_open("/", 0, 0);
64217 - old_fd = sys_open("/old", 0, 0);
64218 + sys_mkdir((const char __force_user *)"/old", 0700);
64219 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64220 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64221 /* move initrd over / and chdir/chroot in initrd root */
64222 - sys_chdir("/root");
64223 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64224 - sys_chroot(".");
64225 + sys_chdir((const char __force_user *)"/root");
64226 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64227 + sys_chroot((const char __force_user *)".");
64228
64229 /*
64230 * In case that a resume from disk is carried out by linuxrc or one of
64231 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
64232
64233 /* move initrd to rootfs' /old */
64234 sys_fchdir(old_fd);
64235 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64236 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64237 /* switch root and cwd back to / of rootfs */
64238 sys_fchdir(root_fd);
64239 - sys_chroot(".");
64240 + sys_chroot((const char __force_user *)".");
64241 sys_close(old_fd);
64242 sys_close(root_fd);
64243
64244 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64245 - sys_chdir("/old");
64246 + sys_chdir((const char __force_user *)"/old");
64247 return;
64248 }
64249
64250 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
64251 mount_root();
64252
64253 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64254 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64255 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64256 if (!error)
64257 printk("okay\n");
64258 else {
64259 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64260 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64261 if (error == -ENOENT)
64262 printk("/initrd does not exist. Ignored.\n");
64263 else
64264 printk("failed\n");
64265 printk(KERN_NOTICE "Unmounting old root\n");
64266 - sys_umount("/old", MNT_DETACH);
64267 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64268 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64269 if (fd < 0) {
64270 error = fd;
64271 @@ -116,11 +116,11 @@ int __init initrd_load(void)
64272 * mounted in the normal path.
64273 */
64274 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64275 - sys_unlink("/initrd.image");
64276 + sys_unlink((const char __force_user *)"/initrd.image");
64277 handle_initrd();
64278 return 1;
64279 }
64280 }
64281 - sys_unlink("/initrd.image");
64282 + sys_unlink((const char __force_user *)"/initrd.image");
64283 return 0;
64284 }
64285 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64286 index 32c4799..c27ee74 100644
64287 --- a/init/do_mounts_md.c
64288 +++ b/init/do_mounts_md.c
64289 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64290 partitioned ? "_d" : "", minor,
64291 md_setup_args[ent].device_names);
64292
64293 - fd = sys_open(name, 0, 0);
64294 + fd = sys_open((char __force_user *)name, 0, 0);
64295 if (fd < 0) {
64296 printk(KERN_ERR "md: open failed - cannot start "
64297 "array %s\n", name);
64298 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64299 * array without it
64300 */
64301 sys_close(fd);
64302 - fd = sys_open(name, 0, 0);
64303 + fd = sys_open((char __force_user *)name, 0, 0);
64304 sys_ioctl(fd, BLKRRPART, 0);
64305 }
64306 sys_close(fd);
64307 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64308
64309 wait_for_device_probe();
64310
64311 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64312 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64313 if (fd >= 0) {
64314 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64315 sys_close(fd);
64316 diff --git a/init/initramfs.c b/init/initramfs.c
64317 index 2531811..040d4d4 100644
64318 --- a/init/initramfs.c
64319 +++ b/init/initramfs.c
64320 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64321 }
64322 }
64323
64324 -static long __init do_utime(char __user *filename, time_t mtime)
64325 +static long __init do_utime(__force char __user *filename, time_t mtime)
64326 {
64327 struct timespec t[2];
64328
64329 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64330 struct dir_entry *de, *tmp;
64331 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64332 list_del(&de->list);
64333 - do_utime(de->name, de->mtime);
64334 + do_utime((char __force_user *)de->name, de->mtime);
64335 kfree(de->name);
64336 kfree(de);
64337 }
64338 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64339 if (nlink >= 2) {
64340 char *old = find_link(major, minor, ino, mode, collected);
64341 if (old)
64342 - return (sys_link(old, collected) < 0) ? -1 : 1;
64343 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64344 }
64345 return 0;
64346 }
64347 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
64348 {
64349 struct stat st;
64350
64351 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64352 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64353 if (S_ISDIR(st.st_mode))
64354 - sys_rmdir(path);
64355 + sys_rmdir((char __force_user *)path);
64356 else
64357 - sys_unlink(path);
64358 + sys_unlink((char __force_user *)path);
64359 }
64360 }
64361
64362 @@ -305,7 +305,7 @@ static int __init do_name(void)
64363 int openflags = O_WRONLY|O_CREAT;
64364 if (ml != 1)
64365 openflags |= O_TRUNC;
64366 - wfd = sys_open(collected, openflags, mode);
64367 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64368
64369 if (wfd >= 0) {
64370 sys_fchown(wfd, uid, gid);
64371 @@ -317,17 +317,17 @@ static int __init do_name(void)
64372 }
64373 }
64374 } else if (S_ISDIR(mode)) {
64375 - sys_mkdir(collected, mode);
64376 - sys_chown(collected, uid, gid);
64377 - sys_chmod(collected, mode);
64378 + sys_mkdir((char __force_user *)collected, mode);
64379 + sys_chown((char __force_user *)collected, uid, gid);
64380 + sys_chmod((char __force_user *)collected, mode);
64381 dir_add(collected, mtime);
64382 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64383 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64384 if (maybe_link() == 0) {
64385 - sys_mknod(collected, mode, rdev);
64386 - sys_chown(collected, uid, gid);
64387 - sys_chmod(collected, mode);
64388 - do_utime(collected, mtime);
64389 + sys_mknod((char __force_user *)collected, mode, rdev);
64390 + sys_chown((char __force_user *)collected, uid, gid);
64391 + sys_chmod((char __force_user *)collected, mode);
64392 + do_utime((char __force_user *)collected, mtime);
64393 }
64394 }
64395 return 0;
64396 @@ -336,15 +336,15 @@ static int __init do_name(void)
64397 static int __init do_copy(void)
64398 {
64399 if (count >= body_len) {
64400 - sys_write(wfd, victim, body_len);
64401 + sys_write(wfd, (char __force_user *)victim, body_len);
64402 sys_close(wfd);
64403 - do_utime(vcollected, mtime);
64404 + do_utime((char __force_user *)vcollected, mtime);
64405 kfree(vcollected);
64406 eat(body_len);
64407 state = SkipIt;
64408 return 0;
64409 } else {
64410 - sys_write(wfd, victim, count);
64411 + sys_write(wfd, (char __force_user *)victim, count);
64412 body_len -= count;
64413 eat(count);
64414 return 1;
64415 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64416 {
64417 collected[N_ALIGN(name_len) + body_len] = '\0';
64418 clean_path(collected, 0);
64419 - sys_symlink(collected + N_ALIGN(name_len), collected);
64420 - sys_lchown(collected, uid, gid);
64421 - do_utime(collected, mtime);
64422 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64423 + sys_lchown((char __force_user *)collected, uid, gid);
64424 + do_utime((char __force_user *)collected, mtime);
64425 state = SkipIt;
64426 next_state = Reset;
64427 return 0;
64428 diff --git a/init/main.c b/init/main.c
64429 index 03b408d..5777f59 100644
64430 --- a/init/main.c
64431 +++ b/init/main.c
64432 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
64433 extern void tc_init(void);
64434 #endif
64435
64436 +extern void grsecurity_init(void);
64437 +
64438 /*
64439 * Debug helper: via this flag we know that we are in 'early bootup code'
64440 * where only the boot processor is running with IRQ disabled. This means
64441 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
64442
64443 __setup("reset_devices", set_reset_devices);
64444
64445 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64446 +extern char pax_enter_kernel_user[];
64447 +extern char pax_exit_kernel_user[];
64448 +extern pgdval_t clone_pgd_mask;
64449 +#endif
64450 +
64451 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64452 +static int __init setup_pax_nouderef(char *str)
64453 +{
64454 +#ifdef CONFIG_X86_32
64455 + unsigned int cpu;
64456 + struct desc_struct *gdt;
64457 +
64458 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
64459 + gdt = get_cpu_gdt_table(cpu);
64460 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64461 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64462 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64463 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64464 + }
64465 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64466 +#else
64467 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64468 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64469 + clone_pgd_mask = ~(pgdval_t)0UL;
64470 +#endif
64471 +
64472 + return 0;
64473 +}
64474 +early_param("pax_nouderef", setup_pax_nouderef);
64475 +#endif
64476 +
64477 +#ifdef CONFIG_PAX_SOFTMODE
64478 +int pax_softmode;
64479 +
64480 +static int __init setup_pax_softmode(char *str)
64481 +{
64482 + get_option(&str, &pax_softmode);
64483 + return 1;
64484 +}
64485 +__setup("pax_softmode=", setup_pax_softmode);
64486 +#endif
64487 +
64488 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64489 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64490 static const char *panic_later, *panic_param;
64491 @@ -678,6 +723,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64492 {
64493 int count = preempt_count();
64494 int ret;
64495 + const char *msg1 = "", *msg2 = "";
64496
64497 if (initcall_debug)
64498 ret = do_one_initcall_debug(fn);
64499 @@ -690,15 +736,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64500 sprintf(msgbuf, "error code %d ", ret);
64501
64502 if (preempt_count() != count) {
64503 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64504 + msg1 = " preemption imbalance";
64505 preempt_count() = count;
64506 }
64507 if (irqs_disabled()) {
64508 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64509 + msg2 = " disabled interrupts";
64510 local_irq_enable();
64511 }
64512 - if (msgbuf[0]) {
64513 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64514 + if (msgbuf[0] || *msg1 || *msg2) {
64515 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64516 }
64517
64518 return ret;
64519 @@ -817,7 +863,7 @@ static int __init kernel_init(void * unused)
64520 do_basic_setup();
64521
64522 /* Open the /dev/console on the rootfs, this should never fail */
64523 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64524 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64525 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64526
64527 (void) sys_dup(0);
64528 @@ -830,11 +876,13 @@ static int __init kernel_init(void * unused)
64529 if (!ramdisk_execute_command)
64530 ramdisk_execute_command = "/init";
64531
64532 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64533 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64534 ramdisk_execute_command = NULL;
64535 prepare_namespace();
64536 }
64537
64538 + grsecurity_init();
64539 +
64540 /*
64541 * Ok, we have completed the initial bootup, and
64542 * we're essentially up and running. Get rid of the
64543 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64544 index ed049ea..6442f7f 100644
64545 --- a/ipc/mqueue.c
64546 +++ b/ipc/mqueue.c
64547 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64548 mq_bytes = (mq_msg_tblsz +
64549 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64550
64551 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64552 spin_lock(&mq_lock);
64553 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64554 u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
64555 diff --git a/ipc/msg.c b/ipc/msg.c
64556 index 7385de2..a8180e0 100644
64557 --- a/ipc/msg.c
64558 +++ b/ipc/msg.c
64559 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64560 return security_msg_queue_associate(msq, msgflg);
64561 }
64562
64563 +static struct ipc_ops msg_ops = {
64564 + .getnew = newque,
64565 + .associate = msg_security,
64566 + .more_checks = NULL
64567 +};
64568 +
64569 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64570 {
64571 struct ipc_namespace *ns;
64572 - struct ipc_ops msg_ops;
64573 struct ipc_params msg_params;
64574
64575 ns = current->nsproxy->ipc_ns;
64576
64577 - msg_ops.getnew = newque;
64578 - msg_ops.associate = msg_security;
64579 - msg_ops.more_checks = NULL;
64580 -
64581 msg_params.key = key;
64582 msg_params.flg = msgflg;
64583
64584 diff --git a/ipc/sem.c b/ipc/sem.c
64585 index c8e00f8..1135c4e 100644
64586 --- a/ipc/sem.c
64587 +++ b/ipc/sem.c
64588 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64589 return 0;
64590 }
64591
64592 +static struct ipc_ops sem_ops = {
64593 + .getnew = newary,
64594 + .associate = sem_security,
64595 + .more_checks = sem_more_checks
64596 +};
64597 +
64598 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64599 {
64600 struct ipc_namespace *ns;
64601 - struct ipc_ops sem_ops;
64602 struct ipc_params sem_params;
64603
64604 ns = current->nsproxy->ipc_ns;
64605 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64606 if (nsems < 0 || nsems > ns->sc_semmsl)
64607 return -EINVAL;
64608
64609 - sem_ops.getnew = newary;
64610 - sem_ops.associate = sem_security;
64611 - sem_ops.more_checks = sem_more_checks;
64612 -
64613 sem_params.key = key;
64614 sem_params.flg = semflg;
64615 sem_params.u.nsems = nsems;
64616 @@ -848,6 +849,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
64617 int nsems;
64618 struct list_head tasks;
64619
64620 + pax_track_stack();
64621 +
64622 sma = sem_lock_check(ns, semid);
64623 if (IS_ERR(sma))
64624 return PTR_ERR(sma);
64625 @@ -1295,6 +1298,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
64626 struct ipc_namespace *ns;
64627 struct list_head tasks;
64628
64629 + pax_track_stack();
64630 +
64631 ns = current->nsproxy->ipc_ns;
64632
64633 if (nsops < 1 || semid < 0)
64634 diff --git a/ipc/shm.c b/ipc/shm.c
64635 index 02ecf2c..c8f5627 100644
64636 --- a/ipc/shm.c
64637 +++ b/ipc/shm.c
64638 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64639 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64640 #endif
64641
64642 +#ifdef CONFIG_GRKERNSEC
64643 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64644 + const time_t shm_createtime, const uid_t cuid,
64645 + const int shmid);
64646 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64647 + const time_t shm_createtime);
64648 +#endif
64649 +
64650 void shm_init_ns(struct ipc_namespace *ns)
64651 {
64652 ns->shm_ctlmax = SHMMAX;
64653 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64654 shp->shm_lprid = 0;
64655 shp->shm_atim = shp->shm_dtim = 0;
64656 shp->shm_ctim = get_seconds();
64657 +#ifdef CONFIG_GRKERNSEC
64658 + {
64659 + struct timespec timeval;
64660 + do_posix_clock_monotonic_gettime(&timeval);
64661 +
64662 + shp->shm_createtime = timeval.tv_sec;
64663 + }
64664 +#endif
64665 shp->shm_segsz = size;
64666 shp->shm_nattch = 0;
64667 shp->shm_file = file;
64668 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64669 return 0;
64670 }
64671
64672 +static struct ipc_ops shm_ops = {
64673 + .getnew = newseg,
64674 + .associate = shm_security,
64675 + .more_checks = shm_more_checks
64676 +};
64677 +
64678 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64679 {
64680 struct ipc_namespace *ns;
64681 - struct ipc_ops shm_ops;
64682 struct ipc_params shm_params;
64683
64684 ns = current->nsproxy->ipc_ns;
64685
64686 - shm_ops.getnew = newseg;
64687 - shm_ops.associate = shm_security;
64688 - shm_ops.more_checks = shm_more_checks;
64689 -
64690 shm_params.key = key;
64691 shm_params.flg = shmflg;
64692 shm_params.u.size = size;
64693 @@ -870,8 +887,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
64694 case SHM_LOCK:
64695 case SHM_UNLOCK:
64696 {
64697 - struct file *uninitialized_var(shm_file);
64698 -
64699 lru_add_drain_all(); /* drain pagevecs to lru lists */
64700
64701 shp = shm_lock_check(ns, shmid);
64702 @@ -1004,9 +1019,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64703 if (err)
64704 goto out_unlock;
64705
64706 +#ifdef CONFIG_GRKERNSEC
64707 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64708 + shp->shm_perm.cuid, shmid) ||
64709 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64710 + err = -EACCES;
64711 + goto out_unlock;
64712 + }
64713 +#endif
64714 +
64715 path = shp->shm_file->f_path;
64716 path_get(&path);
64717 shp->shm_nattch++;
64718 +#ifdef CONFIG_GRKERNSEC
64719 + shp->shm_lapid = current->pid;
64720 +#endif
64721 size = i_size_read(path.dentry->d_inode);
64722 shm_unlock(shp);
64723
64724 diff --git a/kernel/acct.c b/kernel/acct.c
64725 index fa7eb3d..7faf116 100644
64726 --- a/kernel/acct.c
64727 +++ b/kernel/acct.c
64728 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64729 */
64730 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64731 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64732 - file->f_op->write(file, (char *)&ac,
64733 + file->f_op->write(file, (char __force_user *)&ac,
64734 sizeof(acct_t), &file->f_pos);
64735 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64736 set_fs(fs);
64737 diff --git a/kernel/audit.c b/kernel/audit.c
64738 index 0a1355c..dca420f 100644
64739 --- a/kernel/audit.c
64740 +++ b/kernel/audit.c
64741 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64742 3) suppressed due to audit_rate_limit
64743 4) suppressed due to audit_backlog_limit
64744 */
64745 -static atomic_t audit_lost = ATOMIC_INIT(0);
64746 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
64747
64748 /* The netlink socket. */
64749 static struct sock *audit_sock;
64750 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
64751 unsigned long now;
64752 int print;
64753
64754 - atomic_inc(&audit_lost);
64755 + atomic_inc_unchecked(&audit_lost);
64756
64757 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
64758
64759 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
64760 printk(KERN_WARNING
64761 "audit: audit_lost=%d audit_rate_limit=%d "
64762 "audit_backlog_limit=%d\n",
64763 - atomic_read(&audit_lost),
64764 + atomic_read_unchecked(&audit_lost),
64765 audit_rate_limit,
64766 audit_backlog_limit);
64767 audit_panic(message);
64768 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
64769 status_set.pid = audit_pid;
64770 status_set.rate_limit = audit_rate_limit;
64771 status_set.backlog_limit = audit_backlog_limit;
64772 - status_set.lost = atomic_read(&audit_lost);
64773 + status_set.lost = atomic_read_unchecked(&audit_lost);
64774 status_set.backlog = skb_queue_len(&audit_skb_queue);
64775 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
64776 &status_set, sizeof(status_set));
64777 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
64778 index ce4b054..8139ed7 100644
64779 --- a/kernel/auditsc.c
64780 +++ b/kernel/auditsc.c
64781 @@ -2118,7 +2118,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
64782 }
64783
64784 /* global counter which is incremented every time something logs in */
64785 -static atomic_t session_id = ATOMIC_INIT(0);
64786 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
64787
64788 /**
64789 * audit_set_loginuid - set a task's audit_context loginuid
64790 @@ -2131,7 +2131,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
64791 */
64792 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
64793 {
64794 - unsigned int sessionid = atomic_inc_return(&session_id);
64795 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
64796 struct audit_context *context = task->audit_context;
64797
64798 if (context && context->in_syscall) {
64799 diff --git a/kernel/capability.c b/kernel/capability.c
64800 index 283c529..36ac81e 100644
64801 --- a/kernel/capability.c
64802 +++ b/kernel/capability.c
64803 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
64804 * before modification is attempted and the application
64805 * fails.
64806 */
64807 + if (tocopy > ARRAY_SIZE(kdata))
64808 + return -EFAULT;
64809 +
64810 if (copy_to_user(dataptr, kdata, tocopy
64811 * sizeof(struct __user_cap_data_struct))) {
64812 return -EFAULT;
64813 @@ -374,7 +377,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
64814 BUG();
64815 }
64816
64817 - if (security_capable(ns, current_cred(), cap) == 0) {
64818 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
64819 current->flags |= PF_SUPERPRIV;
64820 return true;
64821 }
64822 @@ -382,6 +385,27 @@ bool ns_capable(struct user_namespace *ns, int cap)
64823 }
64824 EXPORT_SYMBOL(ns_capable);
64825
64826 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
64827 +{
64828 + if (unlikely(!cap_valid(cap))) {
64829 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
64830 + BUG();
64831 + }
64832 +
64833 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
64834 + current->flags |= PF_SUPERPRIV;
64835 + return true;
64836 + }
64837 + return false;
64838 +}
64839 +EXPORT_SYMBOL(ns_capable_nolog);
64840 +
64841 +bool capable_nolog(int cap)
64842 +{
64843 + return ns_capable_nolog(&init_user_ns, cap);
64844 +}
64845 +EXPORT_SYMBOL(capable_nolog);
64846 +
64847 /**
64848 * task_ns_capable - Determine whether current task has a superior
64849 * capability targeted at a specific task's user namespace.
64850 @@ -396,6 +420,12 @@ bool task_ns_capable(struct task_struct *t, int cap)
64851 }
64852 EXPORT_SYMBOL(task_ns_capable);
64853
64854 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
64855 +{
64856 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
64857 +}
64858 +EXPORT_SYMBOL(task_ns_capable_nolog);
64859 +
64860 /**
64861 * nsown_capable - Check superior capability to one's own user_ns
64862 * @cap: The capability in question
64863 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
64864 index 1d2b6ce..87bf267 100644
64865 --- a/kernel/cgroup.c
64866 +++ b/kernel/cgroup.c
64867 @@ -595,6 +595,8 @@ static struct css_set *find_css_set(
64868 struct hlist_head *hhead;
64869 struct cg_cgroup_link *link;
64870
64871 + pax_track_stack();
64872 +
64873 /* First see if we already have a cgroup group that matches
64874 * the desired set */
64875 read_lock(&css_set_lock);
64876 diff --git a/kernel/compat.c b/kernel/compat.c
64877 index e2435ee..8e82199 100644
64878 --- a/kernel/compat.c
64879 +++ b/kernel/compat.c
64880 @@ -13,6 +13,7 @@
64881
64882 #include <linux/linkage.h>
64883 #include <linux/compat.h>
64884 +#include <linux/module.h>
64885 #include <linux/errno.h>
64886 #include <linux/time.h>
64887 #include <linux/signal.h>
64888 @@ -167,7 +168,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
64889 mm_segment_t oldfs;
64890 long ret;
64891
64892 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
64893 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
64894 oldfs = get_fs();
64895 set_fs(KERNEL_DS);
64896 ret = hrtimer_nanosleep_restart(restart);
64897 @@ -199,7 +200,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
64898 oldfs = get_fs();
64899 set_fs(KERNEL_DS);
64900 ret = hrtimer_nanosleep(&tu,
64901 - rmtp ? (struct timespec __user *)&rmt : NULL,
64902 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
64903 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
64904 set_fs(oldfs);
64905
64906 @@ -308,7 +309,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
64907 mm_segment_t old_fs = get_fs();
64908
64909 set_fs(KERNEL_DS);
64910 - ret = sys_sigpending((old_sigset_t __user *) &s);
64911 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
64912 set_fs(old_fs);
64913 if (ret == 0)
64914 ret = put_user(s, set);
64915 @@ -331,8 +332,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
64916 old_fs = get_fs();
64917 set_fs(KERNEL_DS);
64918 ret = sys_sigprocmask(how,
64919 - set ? (old_sigset_t __user *) &s : NULL,
64920 - oset ? (old_sigset_t __user *) &s : NULL);
64921 + set ? (old_sigset_t __force_user *) &s : NULL,
64922 + oset ? (old_sigset_t __force_user *) &s : NULL);
64923 set_fs(old_fs);
64924 if (ret == 0)
64925 if (oset)
64926 @@ -369,7 +370,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
64927 mm_segment_t old_fs = get_fs();
64928
64929 set_fs(KERNEL_DS);
64930 - ret = sys_old_getrlimit(resource, &r);
64931 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
64932 set_fs(old_fs);
64933
64934 if (!ret) {
64935 @@ -441,7 +442,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
64936 mm_segment_t old_fs = get_fs();
64937
64938 set_fs(KERNEL_DS);
64939 - ret = sys_getrusage(who, (struct rusage __user *) &r);
64940 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
64941 set_fs(old_fs);
64942
64943 if (ret)
64944 @@ -468,8 +469,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
64945 set_fs (KERNEL_DS);
64946 ret = sys_wait4(pid,
64947 (stat_addr ?
64948 - (unsigned int __user *) &status : NULL),
64949 - options, (struct rusage __user *) &r);
64950 + (unsigned int __force_user *) &status : NULL),
64951 + options, (struct rusage __force_user *) &r);
64952 set_fs (old_fs);
64953
64954 if (ret > 0) {
64955 @@ -494,8 +495,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
64956 memset(&info, 0, sizeof(info));
64957
64958 set_fs(KERNEL_DS);
64959 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
64960 - uru ? (struct rusage __user *)&ru : NULL);
64961 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
64962 + uru ? (struct rusage __force_user *)&ru : NULL);
64963 set_fs(old_fs);
64964
64965 if ((ret < 0) || (info.si_signo == 0))
64966 @@ -625,8 +626,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
64967 oldfs = get_fs();
64968 set_fs(KERNEL_DS);
64969 err = sys_timer_settime(timer_id, flags,
64970 - (struct itimerspec __user *) &newts,
64971 - (struct itimerspec __user *) &oldts);
64972 + (struct itimerspec __force_user *) &newts,
64973 + (struct itimerspec __force_user *) &oldts);
64974 set_fs(oldfs);
64975 if (!err && old && put_compat_itimerspec(old, &oldts))
64976 return -EFAULT;
64977 @@ -643,7 +644,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
64978 oldfs = get_fs();
64979 set_fs(KERNEL_DS);
64980 err = sys_timer_gettime(timer_id,
64981 - (struct itimerspec __user *) &ts);
64982 + (struct itimerspec __force_user *) &ts);
64983 set_fs(oldfs);
64984 if (!err && put_compat_itimerspec(setting, &ts))
64985 return -EFAULT;
64986 @@ -662,7 +663,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
64987 oldfs = get_fs();
64988 set_fs(KERNEL_DS);
64989 err = sys_clock_settime(which_clock,
64990 - (struct timespec __user *) &ts);
64991 + (struct timespec __force_user *) &ts);
64992 set_fs(oldfs);
64993 return err;
64994 }
64995 @@ -677,7 +678,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
64996 oldfs = get_fs();
64997 set_fs(KERNEL_DS);
64998 err = sys_clock_gettime(which_clock,
64999 - (struct timespec __user *) &ts);
65000 + (struct timespec __force_user *) &ts);
65001 set_fs(oldfs);
65002 if (!err && put_compat_timespec(&ts, tp))
65003 return -EFAULT;
65004 @@ -697,7 +698,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65005
65006 oldfs = get_fs();
65007 set_fs(KERNEL_DS);
65008 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65009 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65010 set_fs(oldfs);
65011
65012 err = compat_put_timex(utp, &txc);
65013 @@ -717,7 +718,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65014 oldfs = get_fs();
65015 set_fs(KERNEL_DS);
65016 err = sys_clock_getres(which_clock,
65017 - (struct timespec __user *) &ts);
65018 + (struct timespec __force_user *) &ts);
65019 set_fs(oldfs);
65020 if (!err && tp && put_compat_timespec(&ts, tp))
65021 return -EFAULT;
65022 @@ -729,9 +730,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65023 long err;
65024 mm_segment_t oldfs;
65025 struct timespec tu;
65026 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65027 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65028
65029 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65030 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65031 oldfs = get_fs();
65032 set_fs(KERNEL_DS);
65033 err = clock_nanosleep_restart(restart);
65034 @@ -763,8 +764,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65035 oldfs = get_fs();
65036 set_fs(KERNEL_DS);
65037 err = sys_clock_nanosleep(which_clock, flags,
65038 - (struct timespec __user *) &in,
65039 - (struct timespec __user *) &out);
65040 + (struct timespec __force_user *) &in,
65041 + (struct timespec __force_user *) &out);
65042 set_fs(oldfs);
65043
65044 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65045 diff --git a/kernel/configs.c b/kernel/configs.c
65046 index 42e8fa0..9e7406b 100644
65047 --- a/kernel/configs.c
65048 +++ b/kernel/configs.c
65049 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65050 struct proc_dir_entry *entry;
65051
65052 /* create the current config file */
65053 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65054 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65055 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65056 + &ikconfig_file_ops);
65057 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65058 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65059 + &ikconfig_file_ops);
65060 +#endif
65061 +#else
65062 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65063 &ikconfig_file_ops);
65064 +#endif
65065 +
65066 if (!entry)
65067 return -ENOMEM;
65068
65069 diff --git a/kernel/cred.c b/kernel/cred.c
65070 index 8ef31f5..f63d997 100644
65071 --- a/kernel/cred.c
65072 +++ b/kernel/cred.c
65073 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
65074 */
65075 void __put_cred(struct cred *cred)
65076 {
65077 + pax_track_stack();
65078 +
65079 kdebug("__put_cred(%p{%d,%d})", cred,
65080 atomic_read(&cred->usage),
65081 read_cred_subscribers(cred));
65082 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
65083 {
65084 struct cred *cred;
65085
65086 + pax_track_stack();
65087 +
65088 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
65089 atomic_read(&tsk->cred->usage),
65090 read_cred_subscribers(tsk->cred));
65091 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct task_struct *task)
65092 {
65093 const struct cred *cred;
65094
65095 + pax_track_stack();
65096 +
65097 rcu_read_lock();
65098
65099 do {
65100 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
65101 {
65102 struct cred *new;
65103
65104 + pax_track_stack();
65105 +
65106 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
65107 if (!new)
65108 return NULL;
65109 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
65110 const struct cred *old;
65111 struct cred *new;
65112
65113 + pax_track_stack();
65114 +
65115 validate_process_creds();
65116
65117 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65118 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
65119 struct thread_group_cred *tgcred = NULL;
65120 struct cred *new;
65121
65122 + pax_track_stack();
65123 +
65124 #ifdef CONFIG_KEYS
65125 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
65126 if (!tgcred)
65127 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
65128 struct cred *new;
65129 int ret;
65130
65131 + pax_track_stack();
65132 +
65133 if (
65134 #ifdef CONFIG_KEYS
65135 !p->cred->thread_keyring &&
65136 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
65137 struct task_struct *task = current;
65138 const struct cred *old = task->real_cred;
65139
65140 + pax_track_stack();
65141 +
65142 kdebug("commit_creds(%p{%d,%d})", new,
65143 atomic_read(&new->usage),
65144 read_cred_subscribers(new));
65145 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
65146
65147 get_cred(new); /* we will require a ref for the subj creds too */
65148
65149 + gr_set_role_label(task, new->uid, new->gid);
65150 +
65151 /* dumpability changes */
65152 if (old->euid != new->euid ||
65153 old->egid != new->egid ||
65154 @@ -549,6 +567,8 @@ EXPORT_SYMBOL(commit_creds);
65155 */
65156 void abort_creds(struct cred *new)
65157 {
65158 + pax_track_stack();
65159 +
65160 kdebug("abort_creds(%p{%d,%d})", new,
65161 atomic_read(&new->usage),
65162 read_cred_subscribers(new));
65163 @@ -572,6 +592,8 @@ const struct cred *override_creds(const struct cred *new)
65164 {
65165 const struct cred *old = current->cred;
65166
65167 + pax_track_stack();
65168 +
65169 kdebug("override_creds(%p{%d,%d})", new,
65170 atomic_read(&new->usage),
65171 read_cred_subscribers(new));
65172 @@ -601,6 +623,8 @@ void revert_creds(const struct cred *old)
65173 {
65174 const struct cred *override = current->cred;
65175
65176 + pax_track_stack();
65177 +
65178 kdebug("revert_creds(%p{%d,%d})", old,
65179 atomic_read(&old->usage),
65180 read_cred_subscribers(old));
65181 @@ -647,6 +671,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
65182 const struct cred *old;
65183 struct cred *new;
65184
65185 + pax_track_stack();
65186 +
65187 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
65188 if (!new)
65189 return NULL;
65190 @@ -701,6 +727,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
65191 */
65192 int set_security_override(struct cred *new, u32 secid)
65193 {
65194 + pax_track_stack();
65195 +
65196 return security_kernel_act_as(new, secid);
65197 }
65198 EXPORT_SYMBOL(set_security_override);
65199 @@ -720,6 +748,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
65200 u32 secid;
65201 int ret;
65202
65203 + pax_track_stack();
65204 +
65205 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
65206 if (ret < 0)
65207 return ret;
65208 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65209 index 0d7c087..01b8cef 100644
65210 --- a/kernel/debug/debug_core.c
65211 +++ b/kernel/debug/debug_core.c
65212 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65213 */
65214 static atomic_t masters_in_kgdb;
65215 static atomic_t slaves_in_kgdb;
65216 -static atomic_t kgdb_break_tasklet_var;
65217 +static atomic_unchecked_t kgdb_break_tasklet_var;
65218 atomic_t kgdb_setting_breakpoint;
65219
65220 struct task_struct *kgdb_usethread;
65221 @@ -129,7 +129,7 @@ int kgdb_single_step;
65222 static pid_t kgdb_sstep_pid;
65223
65224 /* to keep track of the CPU which is doing the single stepping*/
65225 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65226 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65227
65228 /*
65229 * If you are debugging a problem where roundup (the collection of
65230 @@ -542,7 +542,7 @@ return_normal:
65231 * kernel will only try for the value of sstep_tries before
65232 * giving up and continuing on.
65233 */
65234 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65235 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65236 (kgdb_info[cpu].task &&
65237 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65238 atomic_set(&kgdb_active, -1);
65239 @@ -636,8 +636,8 @@ cpu_master_loop:
65240 }
65241
65242 kgdb_restore:
65243 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65244 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65245 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65246 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65247 if (kgdb_info[sstep_cpu].task)
65248 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65249 else
65250 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
65251 static void kgdb_tasklet_bpt(unsigned long ing)
65252 {
65253 kgdb_breakpoint();
65254 - atomic_set(&kgdb_break_tasklet_var, 0);
65255 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65256 }
65257
65258 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65259
65260 void kgdb_schedule_breakpoint(void)
65261 {
65262 - if (atomic_read(&kgdb_break_tasklet_var) ||
65263 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65264 atomic_read(&kgdb_active) != -1 ||
65265 atomic_read(&kgdb_setting_breakpoint))
65266 return;
65267 - atomic_inc(&kgdb_break_tasklet_var);
65268 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65269 tasklet_schedule(&kgdb_tasklet_breakpoint);
65270 }
65271 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65272 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65273 index 63786e7..0780cac 100644
65274 --- a/kernel/debug/kdb/kdb_main.c
65275 +++ b/kernel/debug/kdb/kdb_main.c
65276 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
65277 list_for_each_entry(mod, kdb_modules, list) {
65278
65279 kdb_printf("%-20s%8u 0x%p ", mod->name,
65280 - mod->core_size, (void *)mod);
65281 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65282 #ifdef CONFIG_MODULE_UNLOAD
65283 kdb_printf("%4d ", module_refcount(mod));
65284 #endif
65285 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
65286 kdb_printf(" (Loading)");
65287 else
65288 kdb_printf(" (Live)");
65289 - kdb_printf(" 0x%p", mod->module_core);
65290 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65291
65292 #ifdef CONFIG_MODULE_UNLOAD
65293 {
65294 diff --git a/kernel/events/core.c b/kernel/events/core.c
65295 index 0f85778..0d43716 100644
65296 --- a/kernel/events/core.c
65297 +++ b/kernel/events/core.c
65298 @@ -172,7 +172,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65299 return 0;
65300 }
65301
65302 -static atomic64_t perf_event_id;
65303 +static atomic64_unchecked_t perf_event_id;
65304
65305 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65306 enum event_type_t event_type);
65307 @@ -2535,7 +2535,7 @@ static void __perf_event_read(void *info)
65308
65309 static inline u64 perf_event_count(struct perf_event *event)
65310 {
65311 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65312 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65313 }
65314
65315 static u64 perf_event_read(struct perf_event *event)
65316 @@ -3060,9 +3060,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65317 mutex_lock(&event->child_mutex);
65318 total += perf_event_read(event);
65319 *enabled += event->total_time_enabled +
65320 - atomic64_read(&event->child_total_time_enabled);
65321 + atomic64_read_unchecked(&event->child_total_time_enabled);
65322 *running += event->total_time_running +
65323 - atomic64_read(&event->child_total_time_running);
65324 + atomic64_read_unchecked(&event->child_total_time_running);
65325
65326 list_for_each_entry(child, &event->child_list, child_list) {
65327 total += perf_event_read(child);
65328 @@ -3448,10 +3448,10 @@ void perf_event_update_userpage(struct perf_event *event)
65329 userpg->offset -= local64_read(&event->hw.prev_count);
65330
65331 userpg->time_enabled = enabled +
65332 - atomic64_read(&event->child_total_time_enabled);
65333 + atomic64_read_unchecked(&event->child_total_time_enabled);
65334
65335 userpg->time_running = running +
65336 - atomic64_read(&event->child_total_time_running);
65337 + atomic64_read_unchecked(&event->child_total_time_running);
65338
65339 barrier();
65340 ++userpg->lock;
65341 @@ -3822,11 +3822,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65342 values[n++] = perf_event_count(event);
65343 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65344 values[n++] = enabled +
65345 - atomic64_read(&event->child_total_time_enabled);
65346 + atomic64_read_unchecked(&event->child_total_time_enabled);
65347 }
65348 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65349 values[n++] = running +
65350 - atomic64_read(&event->child_total_time_running);
65351 + atomic64_read_unchecked(&event->child_total_time_running);
65352 }
65353 if (read_format & PERF_FORMAT_ID)
65354 values[n++] = primary_event_id(event);
65355 @@ -4477,12 +4477,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65356 * need to add enough zero bytes after the string to handle
65357 * the 64bit alignment we do later.
65358 */
65359 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65360 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65361 if (!buf) {
65362 name = strncpy(tmp, "//enomem", sizeof(tmp));
65363 goto got_name;
65364 }
65365 - name = d_path(&file->f_path, buf, PATH_MAX);
65366 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65367 if (IS_ERR(name)) {
65368 name = strncpy(tmp, "//toolong", sizeof(tmp));
65369 goto got_name;
65370 @@ -5833,7 +5833,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65371 event->parent = parent_event;
65372
65373 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65374 - event->id = atomic64_inc_return(&perf_event_id);
65375 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65376
65377 event->state = PERF_EVENT_STATE_INACTIVE;
65378
65379 @@ -6355,10 +6355,10 @@ static void sync_child_event(struct perf_event *child_event,
65380 /*
65381 * Add back the child's count to the parent's count:
65382 */
65383 - atomic64_add(child_val, &parent_event->child_count);
65384 - atomic64_add(child_event->total_time_enabled,
65385 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65386 + atomic64_add_unchecked(child_event->total_time_enabled,
65387 &parent_event->child_total_time_enabled);
65388 - atomic64_add(child_event->total_time_running,
65389 + atomic64_add_unchecked(child_event->total_time_running,
65390 &parent_event->child_total_time_running);
65391
65392 /*
65393 diff --git a/kernel/exit.c b/kernel/exit.c
65394 index 2913b35..86c7364 100644
65395 --- a/kernel/exit.c
65396 +++ b/kernel/exit.c
65397 @@ -57,6 +57,10 @@
65398 #include <asm/pgtable.h>
65399 #include <asm/mmu_context.h>
65400
65401 +#ifdef CONFIG_GRKERNSEC
65402 +extern rwlock_t grsec_exec_file_lock;
65403 +#endif
65404 +
65405 static void exit_mm(struct task_struct * tsk);
65406
65407 static void __unhash_process(struct task_struct *p, bool group_dead)
65408 @@ -168,6 +172,10 @@ void release_task(struct task_struct * p)
65409 struct task_struct *leader;
65410 int zap_leader;
65411 repeat:
65412 +#ifdef CONFIG_NET
65413 + gr_del_task_from_ip_table(p);
65414 +#endif
65415 +
65416 /* don't need to get the RCU readlock here - the process is dead and
65417 * can't be modifying its own credentials. But shut RCU-lockdep up */
65418 rcu_read_lock();
65419 @@ -324,11 +332,22 @@ static void reparent_to_kthreadd(void)
65420 {
65421 write_lock_irq(&tasklist_lock);
65422
65423 +#ifdef CONFIG_GRKERNSEC
65424 + write_lock(&grsec_exec_file_lock);
65425 + if (current->exec_file) {
65426 + fput(current->exec_file);
65427 + current->exec_file = NULL;
65428 + }
65429 + write_unlock(&grsec_exec_file_lock);
65430 +#endif
65431 +
65432 ptrace_unlink(current);
65433 /* Reparent to init */
65434 current->real_parent = current->parent = kthreadd_task;
65435 list_move_tail(&current->sibling, &current->real_parent->children);
65436
65437 + gr_set_kernel_label(current);
65438 +
65439 /* Set the exit signal to SIGCHLD so we signal init on exit */
65440 current->exit_signal = SIGCHLD;
65441
65442 @@ -380,7 +399,7 @@ int allow_signal(int sig)
65443 * know it'll be handled, so that they don't get converted to
65444 * SIGKILL or just silently dropped.
65445 */
65446 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65447 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65448 recalc_sigpending();
65449 spin_unlock_irq(&current->sighand->siglock);
65450 return 0;
65451 @@ -416,6 +435,17 @@ void daemonize(const char *name, ...)
65452 vsnprintf(current->comm, sizeof(current->comm), name, args);
65453 va_end(args);
65454
65455 +#ifdef CONFIG_GRKERNSEC
65456 + write_lock(&grsec_exec_file_lock);
65457 + if (current->exec_file) {
65458 + fput(current->exec_file);
65459 + current->exec_file = NULL;
65460 + }
65461 + write_unlock(&grsec_exec_file_lock);
65462 +#endif
65463 +
65464 + gr_set_kernel_label(current);
65465 +
65466 /*
65467 * If we were started as result of loading a module, close all of the
65468 * user space pages. We don't need them, and if we didn't close them
65469 @@ -895,6 +925,8 @@ NORET_TYPE void do_exit(long code)
65470 struct task_struct *tsk = current;
65471 int group_dead;
65472
65473 + set_fs(USER_DS);
65474 +
65475 profile_task_exit(tsk);
65476
65477 WARN_ON(blk_needs_flush_plug(tsk));
65478 @@ -911,7 +943,6 @@ NORET_TYPE void do_exit(long code)
65479 * mm_release()->clear_child_tid() from writing to a user-controlled
65480 * kernel address.
65481 */
65482 - set_fs(USER_DS);
65483
65484 ptrace_event(PTRACE_EVENT_EXIT, code);
65485
65486 @@ -973,6 +1004,9 @@ NORET_TYPE void do_exit(long code)
65487 tsk->exit_code = code;
65488 taskstats_exit(tsk, group_dead);
65489
65490 + gr_acl_handle_psacct(tsk, code);
65491 + gr_acl_handle_exit();
65492 +
65493 exit_mm(tsk);
65494
65495 if (group_dead)
65496 diff --git a/kernel/fork.c b/kernel/fork.c
65497 index 8e6b6f4..9dccf00 100644
65498 --- a/kernel/fork.c
65499 +++ b/kernel/fork.c
65500 @@ -285,7 +285,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65501 *stackend = STACK_END_MAGIC; /* for overflow detection */
65502
65503 #ifdef CONFIG_CC_STACKPROTECTOR
65504 - tsk->stack_canary = get_random_int();
65505 + tsk->stack_canary = pax_get_random_long();
65506 #endif
65507
65508 /*
65509 @@ -309,13 +309,77 @@ out:
65510 }
65511
65512 #ifdef CONFIG_MMU
65513 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
65514 +{
65515 + struct vm_area_struct *tmp;
65516 + unsigned long charge;
65517 + struct mempolicy *pol;
65518 + struct file *file;
65519 +
65520 + charge = 0;
65521 + if (mpnt->vm_flags & VM_ACCOUNT) {
65522 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65523 + if (security_vm_enough_memory(len))
65524 + goto fail_nomem;
65525 + charge = len;
65526 + }
65527 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65528 + if (!tmp)
65529 + goto fail_nomem;
65530 + *tmp = *mpnt;
65531 + tmp->vm_mm = mm;
65532 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65533 + pol = mpol_dup(vma_policy(mpnt));
65534 + if (IS_ERR(pol))
65535 + goto fail_nomem_policy;
65536 + vma_set_policy(tmp, pol);
65537 + if (anon_vma_fork(tmp, mpnt))
65538 + goto fail_nomem_anon_vma_fork;
65539 + tmp->vm_flags &= ~VM_LOCKED;
65540 + tmp->vm_next = tmp->vm_prev = NULL;
65541 + tmp->vm_mirror = NULL;
65542 + file = tmp->vm_file;
65543 + if (file) {
65544 + struct inode *inode = file->f_path.dentry->d_inode;
65545 + struct address_space *mapping = file->f_mapping;
65546 +
65547 + get_file(file);
65548 + if (tmp->vm_flags & VM_DENYWRITE)
65549 + atomic_dec(&inode->i_writecount);
65550 + mutex_lock(&mapping->i_mmap_mutex);
65551 + if (tmp->vm_flags & VM_SHARED)
65552 + mapping->i_mmap_writable++;
65553 + flush_dcache_mmap_lock(mapping);
65554 + /* insert tmp into the share list, just after mpnt */
65555 + vma_prio_tree_add(tmp, mpnt);
65556 + flush_dcache_mmap_unlock(mapping);
65557 + mutex_unlock(&mapping->i_mmap_mutex);
65558 + }
65559 +
65560 + /*
65561 + * Clear hugetlb-related page reserves for children. This only
65562 + * affects MAP_PRIVATE mappings. Faults generated by the child
65563 + * are not guaranteed to succeed, even if read-only
65564 + */
65565 + if (is_vm_hugetlb_page(tmp))
65566 + reset_vma_resv_huge_pages(tmp);
65567 +
65568 + return tmp;
65569 +
65570 +fail_nomem_anon_vma_fork:
65571 + mpol_put(pol);
65572 +fail_nomem_policy:
65573 + kmem_cache_free(vm_area_cachep, tmp);
65574 +fail_nomem:
65575 + vm_unacct_memory(charge);
65576 + return NULL;
65577 +}
65578 +
65579 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65580 {
65581 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65582 struct rb_node **rb_link, *rb_parent;
65583 int retval;
65584 - unsigned long charge;
65585 - struct mempolicy *pol;
65586
65587 down_write(&oldmm->mmap_sem);
65588 flush_cache_dup_mm(oldmm);
65589 @@ -327,8 +391,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65590 mm->locked_vm = 0;
65591 mm->mmap = NULL;
65592 mm->mmap_cache = NULL;
65593 - mm->free_area_cache = oldmm->mmap_base;
65594 - mm->cached_hole_size = ~0UL;
65595 + mm->free_area_cache = oldmm->free_area_cache;
65596 + mm->cached_hole_size = oldmm->cached_hole_size;
65597 mm->map_count = 0;
65598 cpumask_clear(mm_cpumask(mm));
65599 mm->mm_rb = RB_ROOT;
65600 @@ -344,8 +408,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65601
65602 prev = NULL;
65603 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65604 - struct file *file;
65605 -
65606 if (mpnt->vm_flags & VM_DONTCOPY) {
65607 long pages = vma_pages(mpnt);
65608 mm->total_vm -= pages;
65609 @@ -353,55 +415,13 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65610 -pages);
65611 continue;
65612 }
65613 - charge = 0;
65614 - if (mpnt->vm_flags & VM_ACCOUNT) {
65615 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65616 - if (security_vm_enough_memory(len))
65617 - goto fail_nomem;
65618 - charge = len;
65619 - }
65620 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65621 - if (!tmp)
65622 - goto fail_nomem;
65623 - *tmp = *mpnt;
65624 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65625 - pol = mpol_dup(vma_policy(mpnt));
65626 - retval = PTR_ERR(pol);
65627 - if (IS_ERR(pol))
65628 - goto fail_nomem_policy;
65629 - vma_set_policy(tmp, pol);
65630 - tmp->vm_mm = mm;
65631 - if (anon_vma_fork(tmp, mpnt))
65632 - goto fail_nomem_anon_vma_fork;
65633 - tmp->vm_flags &= ~VM_LOCKED;
65634 - tmp->vm_next = tmp->vm_prev = NULL;
65635 - file = tmp->vm_file;
65636 - if (file) {
65637 - struct inode *inode = file->f_path.dentry->d_inode;
65638 - struct address_space *mapping = file->f_mapping;
65639 -
65640 - get_file(file);
65641 - if (tmp->vm_flags & VM_DENYWRITE)
65642 - atomic_dec(&inode->i_writecount);
65643 - mutex_lock(&mapping->i_mmap_mutex);
65644 - if (tmp->vm_flags & VM_SHARED)
65645 - mapping->i_mmap_writable++;
65646 - flush_dcache_mmap_lock(mapping);
65647 - /* insert tmp into the share list, just after mpnt */
65648 - vma_prio_tree_add(tmp, mpnt);
65649 - flush_dcache_mmap_unlock(mapping);
65650 - mutex_unlock(&mapping->i_mmap_mutex);
65651 + tmp = dup_vma(mm, mpnt);
65652 + if (!tmp) {
65653 + retval = -ENOMEM;
65654 + goto out;
65655 }
65656
65657 /*
65658 - * Clear hugetlb-related page reserves for children. This only
65659 - * affects MAP_PRIVATE mappings. Faults generated by the child
65660 - * are not guaranteed to succeed, even if read-only
65661 - */
65662 - if (is_vm_hugetlb_page(tmp))
65663 - reset_vma_resv_huge_pages(tmp);
65664 -
65665 - /*
65666 * Link in the new vma and copy the page table entries.
65667 */
65668 *pprev = tmp;
65669 @@ -422,6 +442,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65670 if (retval)
65671 goto out;
65672 }
65673 +
65674 +#ifdef CONFIG_PAX_SEGMEXEC
65675 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65676 + struct vm_area_struct *mpnt_m;
65677 +
65678 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65679 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65680 +
65681 + if (!mpnt->vm_mirror)
65682 + continue;
65683 +
65684 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65685 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65686 + mpnt->vm_mirror = mpnt_m;
65687 + } else {
65688 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65689 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65690 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65691 + mpnt->vm_mirror->vm_mirror = mpnt;
65692 + }
65693 + }
65694 + BUG_ON(mpnt_m);
65695 + }
65696 +#endif
65697 +
65698 /* a new mm has just been created */
65699 arch_dup_mmap(oldmm, mm);
65700 retval = 0;
65701 @@ -430,14 +475,6 @@ out:
65702 flush_tlb_mm(oldmm);
65703 up_write(&oldmm->mmap_sem);
65704 return retval;
65705 -fail_nomem_anon_vma_fork:
65706 - mpol_put(pol);
65707 -fail_nomem_policy:
65708 - kmem_cache_free(vm_area_cachep, tmp);
65709 -fail_nomem:
65710 - retval = -ENOMEM;
65711 - vm_unacct_memory(charge);
65712 - goto out;
65713 }
65714
65715 static inline int mm_alloc_pgd(struct mm_struct *mm)
65716 @@ -837,13 +874,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65717 spin_unlock(&fs->lock);
65718 return -EAGAIN;
65719 }
65720 - fs->users++;
65721 + atomic_inc(&fs->users);
65722 spin_unlock(&fs->lock);
65723 return 0;
65724 }
65725 tsk->fs = copy_fs_struct(fs);
65726 if (!tsk->fs)
65727 return -ENOMEM;
65728 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65729 return 0;
65730 }
65731
65732 @@ -1105,6 +1143,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65733 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
65734 #endif
65735 retval = -EAGAIN;
65736 +
65737 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
65738 +
65739 if (atomic_read(&p->real_cred->user->processes) >=
65740 task_rlimit(p, RLIMIT_NPROC)) {
65741 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
65742 @@ -1264,6 +1305,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
65743 if (clone_flags & CLONE_THREAD)
65744 p->tgid = current->tgid;
65745
65746 + gr_copy_label(p);
65747 +
65748 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
65749 /*
65750 * Clear TID on mm_release()?
65751 @@ -1428,6 +1471,8 @@ bad_fork_cleanup_count:
65752 bad_fork_free:
65753 free_task(p);
65754 fork_out:
65755 + gr_log_forkfail(retval);
65756 +
65757 return ERR_PTR(retval);
65758 }
65759
65760 @@ -1528,6 +1573,8 @@ long do_fork(unsigned long clone_flags,
65761 if (clone_flags & CLONE_PARENT_SETTID)
65762 put_user(nr, parent_tidptr);
65763
65764 + gr_handle_brute_check();
65765 +
65766 if (clone_flags & CLONE_VFORK) {
65767 p->vfork_done = &vfork;
65768 init_completion(&vfork);
65769 @@ -1637,7 +1684,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
65770 return 0;
65771
65772 /* don't need lock here; in the worst case we'll do useless copy */
65773 - if (fs->users == 1)
65774 + if (atomic_read(&fs->users) == 1)
65775 return 0;
65776
65777 *new_fsp = copy_fs_struct(fs);
65778 @@ -1726,7 +1773,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
65779 fs = current->fs;
65780 spin_lock(&fs->lock);
65781 current->fs = new_fs;
65782 - if (--fs->users)
65783 + gr_set_chroot_entries(current, &current->fs->root);
65784 + if (atomic_dec_return(&fs->users))
65785 new_fs = NULL;
65786 else
65787 new_fs = fs;
65788 diff --git a/kernel/futex.c b/kernel/futex.c
65789 index 11cbe05..9ff191b 100644
65790 --- a/kernel/futex.c
65791 +++ b/kernel/futex.c
65792 @@ -54,6 +54,7 @@
65793 #include <linux/mount.h>
65794 #include <linux/pagemap.h>
65795 #include <linux/syscalls.h>
65796 +#include <linux/ptrace.h>
65797 #include <linux/signal.h>
65798 #include <linux/module.h>
65799 #include <linux/magic.h>
65800 @@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
65801 struct page *page, *page_head;
65802 int err, ro = 0;
65803
65804 +#ifdef CONFIG_PAX_SEGMEXEC
65805 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
65806 + return -EFAULT;
65807 +#endif
65808 +
65809 /*
65810 * The futex address must be "naturally" aligned.
65811 */
65812 @@ -1863,6 +1869,8 @@ static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
65813 struct futex_q q = futex_q_init;
65814 int ret;
65815
65816 + pax_track_stack();
65817 +
65818 if (!bitset)
65819 return -EINVAL;
65820 q.bitset = bitset;
65821 @@ -2259,6 +2267,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
65822 struct futex_q q = futex_q_init;
65823 int res, ret;
65824
65825 + pax_track_stack();
65826 +
65827 if (!bitset)
65828 return -EINVAL;
65829
65830 @@ -2431,7 +2441,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65831 {
65832 struct robust_list_head __user *head;
65833 unsigned long ret;
65834 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
65835 const struct cred *cred = current_cred(), *pcred;
65836 +#endif
65837
65838 if (!futex_cmpxchg_enabled)
65839 return -ENOSYS;
65840 @@ -2447,6 +2459,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65841 if (!p)
65842 goto err_unlock;
65843 ret = -EPERM;
65844 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65845 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65846 + goto err_unlock;
65847 +#else
65848 pcred = __task_cred(p);
65849 /* If victim is in different user_ns, then uids are not
65850 comparable, so we must have CAP_SYS_PTRACE */
65851 @@ -2461,6 +2477,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
65852 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
65853 goto err_unlock;
65854 ok:
65855 +#endif
65856 head = p->robust_list;
65857 rcu_read_unlock();
65858 }
65859 @@ -2712,6 +2729,7 @@ static int __init futex_init(void)
65860 {
65861 u32 curval;
65862 int i;
65863 + mm_segment_t oldfs;
65864
65865 /*
65866 * This will fail and we want it. Some arch implementations do
65867 @@ -2723,8 +2741,11 @@ static int __init futex_init(void)
65868 * implementation, the non-functional ones will return
65869 * -ENOSYS.
65870 */
65871 + oldfs = get_fs();
65872 + set_fs(USER_DS);
65873 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
65874 futex_cmpxchg_enabled = 1;
65875 + set_fs(oldfs);
65876
65877 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
65878 plist_head_init(&futex_queues[i].chain);
65879 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
65880 index 5f9e689..03afa21 100644
65881 --- a/kernel/futex_compat.c
65882 +++ b/kernel/futex_compat.c
65883 @@ -10,6 +10,7 @@
65884 #include <linux/compat.h>
65885 #include <linux/nsproxy.h>
65886 #include <linux/futex.h>
65887 +#include <linux/ptrace.h>
65888
65889 #include <asm/uaccess.h>
65890
65891 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65892 {
65893 struct compat_robust_list_head __user *head;
65894 unsigned long ret;
65895 - const struct cred *cred = current_cred(), *pcred;
65896 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
65897 + const struct cred *cred = current_cred();
65898 + const struct cred *pcred;
65899 +#endif
65900
65901 if (!futex_cmpxchg_enabled)
65902 return -ENOSYS;
65903 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65904 if (!p)
65905 goto err_unlock;
65906 ret = -EPERM;
65907 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65908 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
65909 + goto err_unlock;
65910 +#else
65911 pcred = __task_cred(p);
65912 /* If victim is in different user_ns, then uids are not
65913 comparable, so we must have CAP_SYS_PTRACE */
65914 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
65915 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
65916 goto err_unlock;
65917 ok:
65918 +#endif
65919 head = p->compat_robust_list;
65920 rcu_read_unlock();
65921 }
65922 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
65923 index 9b22d03..6295b62 100644
65924 --- a/kernel/gcov/base.c
65925 +++ b/kernel/gcov/base.c
65926 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
65927 }
65928
65929 #ifdef CONFIG_MODULES
65930 -static inline int within(void *addr, void *start, unsigned long size)
65931 -{
65932 - return ((addr >= start) && (addr < start + size));
65933 -}
65934 -
65935 /* Update list and generate events when modules are unloaded. */
65936 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65937 void *data)
65938 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
65939 prev = NULL;
65940 /* Remove entries located in module from linked list. */
65941 for (info = gcov_info_head; info; info = info->next) {
65942 - if (within(info, mod->module_core, mod->core_size)) {
65943 + if (within_module_core_rw((unsigned long)info, mod)) {
65944 if (prev)
65945 prev->next = info->next;
65946 else
65947 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
65948 index a9205e3..1c6f5c0 100644
65949 --- a/kernel/hrtimer.c
65950 +++ b/kernel/hrtimer.c
65951 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
65952 local_irq_restore(flags);
65953 }
65954
65955 -static void run_hrtimer_softirq(struct softirq_action *h)
65956 +static void run_hrtimer_softirq(void)
65957 {
65958 hrtimer_peek_ahead_timers();
65959 }
65960 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
65961 index a8ce450..5519bce 100644
65962 --- a/kernel/jump_label.c
65963 +++ b/kernel/jump_label.c
65964 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
65965
65966 size = (((unsigned long)stop - (unsigned long)start)
65967 / sizeof(struct jump_entry));
65968 + pax_open_kernel();
65969 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
65970 + pax_close_kernel();
65971 }
65972
65973 static void jump_label_update(struct jump_label_key *key, int enable);
65974 @@ -297,10 +299,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
65975 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
65976 struct jump_entry *iter;
65977
65978 + pax_open_kernel();
65979 for (iter = iter_start; iter < iter_stop; iter++) {
65980 if (within_module_init(iter->code, mod))
65981 iter->code = 0;
65982 }
65983 + pax_close_kernel();
65984 }
65985
65986 static int
65987 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
65988 index 079f1d3..a407562 100644
65989 --- a/kernel/kallsyms.c
65990 +++ b/kernel/kallsyms.c
65991 @@ -11,6 +11,9 @@
65992 * Changed the compression method from stem compression to "table lookup"
65993 * compression (see scripts/kallsyms.c for a more complete description)
65994 */
65995 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65996 +#define __INCLUDED_BY_HIDESYM 1
65997 +#endif
65998 #include <linux/kallsyms.h>
65999 #include <linux/module.h>
66000 #include <linux/init.h>
66001 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66002
66003 static inline int is_kernel_inittext(unsigned long addr)
66004 {
66005 + if (system_state != SYSTEM_BOOTING)
66006 + return 0;
66007 +
66008 if (addr >= (unsigned long)_sinittext
66009 && addr <= (unsigned long)_einittext)
66010 return 1;
66011 return 0;
66012 }
66013
66014 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66015 +#ifdef CONFIG_MODULES
66016 +static inline int is_module_text(unsigned long addr)
66017 +{
66018 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66019 + return 1;
66020 +
66021 + addr = ktla_ktva(addr);
66022 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66023 +}
66024 +#else
66025 +static inline int is_module_text(unsigned long addr)
66026 +{
66027 + return 0;
66028 +}
66029 +#endif
66030 +#endif
66031 +
66032 static inline int is_kernel_text(unsigned long addr)
66033 {
66034 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66035 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66036
66037 static inline int is_kernel(unsigned long addr)
66038 {
66039 +
66040 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66041 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
66042 + return 1;
66043 +
66044 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66045 +#else
66046 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66047 +#endif
66048 +
66049 return 1;
66050 return in_gate_area_no_mm(addr);
66051 }
66052
66053 static int is_ksym_addr(unsigned long addr)
66054 {
66055 +
66056 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66057 + if (is_module_text(addr))
66058 + return 0;
66059 +#endif
66060 +
66061 if (all_var)
66062 return is_kernel(addr);
66063
66064 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66065
66066 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66067 {
66068 - iter->name[0] = '\0';
66069 iter->nameoff = get_symbol_offset(new_pos);
66070 iter->pos = new_pos;
66071 }
66072 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66073 {
66074 struct kallsym_iter *iter = m->private;
66075
66076 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66077 + if (current_uid())
66078 + return 0;
66079 +#endif
66080 +
66081 /* Some debugging symbols have no name. Ignore them. */
66082 if (!iter->name[0])
66083 return 0;
66084 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66085 struct kallsym_iter *iter;
66086 int ret;
66087
66088 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66089 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66090 if (!iter)
66091 return -ENOMEM;
66092 reset_iter(iter, 0);
66093 diff --git a/kernel/kexec.c b/kernel/kexec.c
66094 index 296fbc8..84cb857 100644
66095 --- a/kernel/kexec.c
66096 +++ b/kernel/kexec.c
66097 @@ -1033,7 +1033,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66098 unsigned long flags)
66099 {
66100 struct compat_kexec_segment in;
66101 - struct kexec_segment out, __user *ksegments;
66102 + struct kexec_segment out;
66103 + struct kexec_segment __user *ksegments;
66104 unsigned long i, result;
66105
66106 /* Don't allow clients that don't understand the native
66107 diff --git a/kernel/kmod.c b/kernel/kmod.c
66108 index a4bea97..7a1ae9a 100644
66109 --- a/kernel/kmod.c
66110 +++ b/kernel/kmod.c
66111 @@ -73,13 +73,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
66112 * If module auto-loading support is disabled then this function
66113 * becomes a no-operation.
66114 */
66115 -int __request_module(bool wait, const char *fmt, ...)
66116 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66117 {
66118 - va_list args;
66119 char module_name[MODULE_NAME_LEN];
66120 unsigned int max_modprobes;
66121 int ret;
66122 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
66123 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
66124 static char *envp[] = { "HOME=/",
66125 "TERM=linux",
66126 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
66127 @@ -88,9 +87,7 @@ int __request_module(bool wait, const char *fmt, ...)
66128 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66129 static int kmod_loop_msg;
66130
66131 - va_start(args, fmt);
66132 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66133 - va_end(args);
66134 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66135 if (ret >= MODULE_NAME_LEN)
66136 return -ENAMETOOLONG;
66137
66138 @@ -98,6 +95,20 @@ int __request_module(bool wait, const char *fmt, ...)
66139 if (ret)
66140 return ret;
66141
66142 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66143 + if (!current_uid()) {
66144 + /* hack to workaround consolekit/udisks stupidity */
66145 + read_lock(&tasklist_lock);
66146 + if (!strcmp(current->comm, "mount") &&
66147 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66148 + read_unlock(&tasklist_lock);
66149 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66150 + return -EPERM;
66151 + }
66152 + read_unlock(&tasklist_lock);
66153 + }
66154 +#endif
66155 +
66156 /* If modprobe needs a service that is in a module, we get a recursive
66157 * loop. Limit the number of running kmod threads to max_threads/2 or
66158 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66159 @@ -133,6 +144,47 @@ int __request_module(bool wait, const char *fmt, ...)
66160 atomic_dec(&kmod_concurrent);
66161 return ret;
66162 }
66163 +
66164 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66165 +{
66166 + va_list args;
66167 + int ret;
66168 +
66169 + va_start(args, fmt);
66170 + ret = ____request_module(wait, module_param, fmt, args);
66171 + va_end(args);
66172 +
66173 + return ret;
66174 +}
66175 +
66176 +int __request_module(bool wait, const char *fmt, ...)
66177 +{
66178 + va_list args;
66179 + int ret;
66180 +
66181 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66182 + if (current_uid()) {
66183 + char module_param[MODULE_NAME_LEN];
66184 +
66185 + memset(module_param, 0, sizeof(module_param));
66186 +
66187 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66188 +
66189 + va_start(args, fmt);
66190 + ret = ____request_module(wait, module_param, fmt, args);
66191 + va_end(args);
66192 +
66193 + return ret;
66194 + }
66195 +#endif
66196 +
66197 + va_start(args, fmt);
66198 + ret = ____request_module(wait, NULL, fmt, args);
66199 + va_end(args);
66200 +
66201 + return ret;
66202 +}
66203 +
66204 EXPORT_SYMBOL(__request_module);
66205 #endif /* CONFIG_MODULES */
66206
66207 @@ -222,7 +274,7 @@ static int wait_for_helper(void *data)
66208 *
66209 * Thus the __user pointer cast is valid here.
66210 */
66211 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
66212 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66213
66214 /*
66215 * If ret is 0, either ____call_usermodehelper failed and the
66216 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66217 index b30fd54..11821ec 100644
66218 --- a/kernel/kprobes.c
66219 +++ b/kernel/kprobes.c
66220 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66221 * kernel image and loaded module images reside. This is required
66222 * so x86_64 can correctly handle the %rip-relative fixups.
66223 */
66224 - kip->insns = module_alloc(PAGE_SIZE);
66225 + kip->insns = module_alloc_exec(PAGE_SIZE);
66226 if (!kip->insns) {
66227 kfree(kip);
66228 return NULL;
66229 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66230 */
66231 if (!list_is_singular(&kip->list)) {
66232 list_del(&kip->list);
66233 - module_free(NULL, kip->insns);
66234 + module_free_exec(NULL, kip->insns);
66235 kfree(kip);
66236 }
66237 return 1;
66238 @@ -1949,7 +1949,7 @@ static int __init init_kprobes(void)
66239 {
66240 int i, err = 0;
66241 unsigned long offset = 0, size = 0;
66242 - char *modname, namebuf[128];
66243 + char *modname, namebuf[KSYM_NAME_LEN];
66244 const char *symbol_name;
66245 void *addr;
66246 struct kprobe_blackpoint *kb;
66247 @@ -2075,7 +2075,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66248 const char *sym = NULL;
66249 unsigned int i = *(loff_t *) v;
66250 unsigned long offset = 0;
66251 - char *modname, namebuf[128];
66252 + char *modname, namebuf[KSYM_NAME_LEN];
66253
66254 head = &kprobe_table[i];
66255 preempt_disable();
66256 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66257 index 91d67ce..ac259df 100644
66258 --- a/kernel/lockdep.c
66259 +++ b/kernel/lockdep.c
66260 @@ -583,6 +583,10 @@ static int static_obj(void *obj)
66261 end = (unsigned long) &_end,
66262 addr = (unsigned long) obj;
66263
66264 +#ifdef CONFIG_PAX_KERNEXEC
66265 + start = ktla_ktva(start);
66266 +#endif
66267 +
66268 /*
66269 * static variable?
66270 */
66271 @@ -718,6 +722,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66272 if (!static_obj(lock->key)) {
66273 debug_locks_off();
66274 printk("INFO: trying to register non-static key.\n");
66275 + printk("lock:%pS key:%pS.\n", lock, lock->key);
66276 printk("the code is fine but needs lockdep annotation.\n");
66277 printk("turning off the locking correctness validator.\n");
66278 dump_stack();
66279 @@ -2948,7 +2953,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66280 if (!class)
66281 return 0;
66282 }
66283 - atomic_inc((atomic_t *)&class->ops);
66284 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66285 if (very_verbose(class)) {
66286 printk("\nacquire class [%p] %s", class->key, class->name);
66287 if (class->name_version > 1)
66288 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66289 index 71edd2f..e0542a5 100644
66290 --- a/kernel/lockdep_proc.c
66291 +++ b/kernel/lockdep_proc.c
66292 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66293
66294 static void print_name(struct seq_file *m, struct lock_class *class)
66295 {
66296 - char str[128];
66297 + char str[KSYM_NAME_LEN];
66298 const char *name = class->name;
66299
66300 if (!name) {
66301 diff --git a/kernel/module.c b/kernel/module.c
66302 index 04379f92..fba2faf 100644
66303 --- a/kernel/module.c
66304 +++ b/kernel/module.c
66305 @@ -58,6 +58,7 @@
66306 #include <linux/jump_label.h>
66307 #include <linux/pfn.h>
66308 #include <linux/bsearch.h>
66309 +#include <linux/grsecurity.h>
66310
66311 #define CREATE_TRACE_POINTS
66312 #include <trace/events/module.h>
66313 @@ -119,7 +120,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66314
66315 /* Bounds of module allocation, for speeding __module_address.
66316 * Protected by module_mutex. */
66317 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66318 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66319 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66320
66321 int register_module_notifier(struct notifier_block * nb)
66322 {
66323 @@ -284,7 +286,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66324 return true;
66325
66326 list_for_each_entry_rcu(mod, &modules, list) {
66327 - struct symsearch arr[] = {
66328 + struct symsearch modarr[] = {
66329 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66330 NOT_GPL_ONLY, false },
66331 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66332 @@ -306,7 +308,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66333 #endif
66334 };
66335
66336 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66337 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66338 return true;
66339 }
66340 return false;
66341 @@ -438,7 +440,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66342 static int percpu_modalloc(struct module *mod,
66343 unsigned long size, unsigned long align)
66344 {
66345 - if (align > PAGE_SIZE) {
66346 + if (align-1 >= PAGE_SIZE) {
66347 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66348 mod->name, align, PAGE_SIZE);
66349 align = PAGE_SIZE;
66350 @@ -1183,7 +1185,7 @@ resolve_symbol_wait(struct module *mod,
66351 */
66352 #ifdef CONFIG_SYSFS
66353
66354 -#ifdef CONFIG_KALLSYMS
66355 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66356 static inline bool sect_empty(const Elf_Shdr *sect)
66357 {
66358 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66359 @@ -1649,21 +1651,21 @@ static void set_section_ro_nx(void *base,
66360
66361 static void unset_module_core_ro_nx(struct module *mod)
66362 {
66363 - set_page_attributes(mod->module_core + mod->core_text_size,
66364 - mod->module_core + mod->core_size,
66365 + set_page_attributes(mod->module_core_rw,
66366 + mod->module_core_rw + mod->core_size_rw,
66367 set_memory_x);
66368 - set_page_attributes(mod->module_core,
66369 - mod->module_core + mod->core_ro_size,
66370 + set_page_attributes(mod->module_core_rx,
66371 + mod->module_core_rx + mod->core_size_rx,
66372 set_memory_rw);
66373 }
66374
66375 static void unset_module_init_ro_nx(struct module *mod)
66376 {
66377 - set_page_attributes(mod->module_init + mod->init_text_size,
66378 - mod->module_init + mod->init_size,
66379 + set_page_attributes(mod->module_init_rw,
66380 + mod->module_init_rw + mod->init_size_rw,
66381 set_memory_x);
66382 - set_page_attributes(mod->module_init,
66383 - mod->module_init + mod->init_ro_size,
66384 + set_page_attributes(mod->module_init_rx,
66385 + mod->module_init_rx + mod->init_size_rx,
66386 set_memory_rw);
66387 }
66388
66389 @@ -1674,14 +1676,14 @@ void set_all_modules_text_rw(void)
66390
66391 mutex_lock(&module_mutex);
66392 list_for_each_entry_rcu(mod, &modules, list) {
66393 - if ((mod->module_core) && (mod->core_text_size)) {
66394 - set_page_attributes(mod->module_core,
66395 - mod->module_core + mod->core_text_size,
66396 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66397 + set_page_attributes(mod->module_core_rx,
66398 + mod->module_core_rx + mod->core_size_rx,
66399 set_memory_rw);
66400 }
66401 - if ((mod->module_init) && (mod->init_text_size)) {
66402 - set_page_attributes(mod->module_init,
66403 - mod->module_init + mod->init_text_size,
66404 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66405 + set_page_attributes(mod->module_init_rx,
66406 + mod->module_init_rx + mod->init_size_rx,
66407 set_memory_rw);
66408 }
66409 }
66410 @@ -1695,14 +1697,14 @@ void set_all_modules_text_ro(void)
66411
66412 mutex_lock(&module_mutex);
66413 list_for_each_entry_rcu(mod, &modules, list) {
66414 - if ((mod->module_core) && (mod->core_text_size)) {
66415 - set_page_attributes(mod->module_core,
66416 - mod->module_core + mod->core_text_size,
66417 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66418 + set_page_attributes(mod->module_core_rx,
66419 + mod->module_core_rx + mod->core_size_rx,
66420 set_memory_ro);
66421 }
66422 - if ((mod->module_init) && (mod->init_text_size)) {
66423 - set_page_attributes(mod->module_init,
66424 - mod->module_init + mod->init_text_size,
66425 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66426 + set_page_attributes(mod->module_init_rx,
66427 + mod->module_init_rx + mod->init_size_rx,
66428 set_memory_ro);
66429 }
66430 }
66431 @@ -1748,16 +1750,19 @@ static void free_module(struct module *mod)
66432
66433 /* This may be NULL, but that's OK */
66434 unset_module_init_ro_nx(mod);
66435 - module_free(mod, mod->module_init);
66436 + module_free(mod, mod->module_init_rw);
66437 + module_free_exec(mod, mod->module_init_rx);
66438 kfree(mod->args);
66439 percpu_modfree(mod);
66440
66441 /* Free lock-classes: */
66442 - lockdep_free_key_range(mod->module_core, mod->core_size);
66443 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66444 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66445
66446 /* Finally, free the core (containing the module structure) */
66447 unset_module_core_ro_nx(mod);
66448 - module_free(mod, mod->module_core);
66449 + module_free_exec(mod, mod->module_core_rx);
66450 + module_free(mod, mod->module_core_rw);
66451
66452 #ifdef CONFIG_MPU
66453 update_protections(current->mm);
66454 @@ -1826,10 +1831,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66455 unsigned int i;
66456 int ret = 0;
66457 const struct kernel_symbol *ksym;
66458 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66459 + int is_fs_load = 0;
66460 + int register_filesystem_found = 0;
66461 + char *p;
66462 +
66463 + p = strstr(mod->args, "grsec_modharden_fs");
66464 + if (p) {
66465 + char *endptr = p + strlen("grsec_modharden_fs");
66466 + /* copy \0 as well */
66467 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66468 + is_fs_load = 1;
66469 + }
66470 +#endif
66471
66472 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66473 const char *name = info->strtab + sym[i].st_name;
66474
66475 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66476 + /* it's a real shame this will never get ripped and copied
66477 + upstream! ;(
66478 + */
66479 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66480 + register_filesystem_found = 1;
66481 +#endif
66482 +
66483 switch (sym[i].st_shndx) {
66484 case SHN_COMMON:
66485 /* We compiled with -fno-common. These are not
66486 @@ -1850,7 +1876,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66487 ksym = resolve_symbol_wait(mod, info, name);
66488 /* Ok if resolved. */
66489 if (ksym && !IS_ERR(ksym)) {
66490 + pax_open_kernel();
66491 sym[i].st_value = ksym->value;
66492 + pax_close_kernel();
66493 break;
66494 }
66495
66496 @@ -1869,11 +1897,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66497 secbase = (unsigned long)mod_percpu(mod);
66498 else
66499 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66500 + pax_open_kernel();
66501 sym[i].st_value += secbase;
66502 + pax_close_kernel();
66503 break;
66504 }
66505 }
66506
66507 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66508 + if (is_fs_load && !register_filesystem_found) {
66509 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66510 + ret = -EPERM;
66511 + }
66512 +#endif
66513 +
66514 return ret;
66515 }
66516
66517 @@ -1977,22 +2014,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66518 || s->sh_entsize != ~0UL
66519 || strstarts(sname, ".init"))
66520 continue;
66521 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66522 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66523 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66524 + else
66525 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66526 DEBUGP("\t%s\n", name);
66527 }
66528 - switch (m) {
66529 - case 0: /* executable */
66530 - mod->core_size = debug_align(mod->core_size);
66531 - mod->core_text_size = mod->core_size;
66532 - break;
66533 - case 1: /* RO: text and ro-data */
66534 - mod->core_size = debug_align(mod->core_size);
66535 - mod->core_ro_size = mod->core_size;
66536 - break;
66537 - case 3: /* whole core */
66538 - mod->core_size = debug_align(mod->core_size);
66539 - break;
66540 - }
66541 }
66542
66543 DEBUGP("Init section allocation order:\n");
66544 @@ -2006,23 +2033,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66545 || s->sh_entsize != ~0UL
66546 || !strstarts(sname, ".init"))
66547 continue;
66548 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66549 - | INIT_OFFSET_MASK);
66550 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66551 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66552 + else
66553 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66554 + s->sh_entsize |= INIT_OFFSET_MASK;
66555 DEBUGP("\t%s\n", sname);
66556 }
66557 - switch (m) {
66558 - case 0: /* executable */
66559 - mod->init_size = debug_align(mod->init_size);
66560 - mod->init_text_size = mod->init_size;
66561 - break;
66562 - case 1: /* RO: text and ro-data */
66563 - mod->init_size = debug_align(mod->init_size);
66564 - mod->init_ro_size = mod->init_size;
66565 - break;
66566 - case 3: /* whole init */
66567 - mod->init_size = debug_align(mod->init_size);
66568 - break;
66569 - }
66570 }
66571 }
66572
66573 @@ -2187,7 +2204,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66574
66575 /* Put symbol section at end of init part of module. */
66576 symsect->sh_flags |= SHF_ALLOC;
66577 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66578 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66579 info->index.sym) | INIT_OFFSET_MASK;
66580 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
66581
66582 @@ -2204,19 +2221,19 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66583 }
66584
66585 /* Append room for core symbols at end of core part. */
66586 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66587 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66588 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66589 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66590
66591 /* Put string table section at end of init part of module. */
66592 strsect->sh_flags |= SHF_ALLOC;
66593 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66594 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66595 info->index.str) | INIT_OFFSET_MASK;
66596 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
66597
66598 /* Append room for core symbols' strings at end of core part. */
66599 - info->stroffs = mod->core_size;
66600 + info->stroffs = mod->core_size_rx;
66601 __set_bit(0, info->strmap);
66602 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
66603 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
66604 }
66605
66606 static void add_kallsyms(struct module *mod, const struct load_info *info)
66607 @@ -2232,11 +2249,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66608 /* Make sure we get permanent strtab: don't use info->strtab. */
66609 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66610
66611 + pax_open_kernel();
66612 +
66613 /* Set types up while we still have access to sections. */
66614 for (i = 0; i < mod->num_symtab; i++)
66615 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66616
66617 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66618 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66619 src = mod->symtab;
66620 *dst = *src;
66621 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
66622 @@ -2249,10 +2268,12 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66623 }
66624 mod->core_num_syms = ndst;
66625
66626 - mod->core_strtab = s = mod->module_core + info->stroffs;
66627 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66628 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
66629 if (test_bit(i, info->strmap))
66630 *++s = mod->strtab[i];
66631 +
66632 + pax_close_kernel();
66633 }
66634 #else
66635 static inline void layout_symtab(struct module *mod, struct load_info *info)
66636 @@ -2286,17 +2307,33 @@ void * __weak module_alloc(unsigned long size)
66637 return size == 0 ? NULL : vmalloc_exec(size);
66638 }
66639
66640 -static void *module_alloc_update_bounds(unsigned long size)
66641 +static void *module_alloc_update_bounds_rw(unsigned long size)
66642 {
66643 void *ret = module_alloc(size);
66644
66645 if (ret) {
66646 mutex_lock(&module_mutex);
66647 /* Update module bounds. */
66648 - if ((unsigned long)ret < module_addr_min)
66649 - module_addr_min = (unsigned long)ret;
66650 - if ((unsigned long)ret + size > module_addr_max)
66651 - module_addr_max = (unsigned long)ret + size;
66652 + if ((unsigned long)ret < module_addr_min_rw)
66653 + module_addr_min_rw = (unsigned long)ret;
66654 + if ((unsigned long)ret + size > module_addr_max_rw)
66655 + module_addr_max_rw = (unsigned long)ret + size;
66656 + mutex_unlock(&module_mutex);
66657 + }
66658 + return ret;
66659 +}
66660 +
66661 +static void *module_alloc_update_bounds_rx(unsigned long size)
66662 +{
66663 + void *ret = module_alloc_exec(size);
66664 +
66665 + if (ret) {
66666 + mutex_lock(&module_mutex);
66667 + /* Update module bounds. */
66668 + if ((unsigned long)ret < module_addr_min_rx)
66669 + module_addr_min_rx = (unsigned long)ret;
66670 + if ((unsigned long)ret + size > module_addr_max_rx)
66671 + module_addr_max_rx = (unsigned long)ret + size;
66672 mutex_unlock(&module_mutex);
66673 }
66674 return ret;
66675 @@ -2474,8 +2511,14 @@ static struct module *setup_load_info(struct load_info *info)
66676 static int check_modinfo(struct module *mod, struct load_info *info)
66677 {
66678 const char *modmagic = get_modinfo(info, "vermagic");
66679 + const char *license = get_modinfo(info, "license");
66680 int err;
66681
66682 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66683 + if (!license || !license_is_gpl_compatible(license))
66684 + return -ENOEXEC;
66685 +#endif
66686 +
66687 /* This is allowed: modprobe --force will invalidate it. */
66688 if (!modmagic) {
66689 err = try_to_force_load(mod, "bad vermagic");
66690 @@ -2495,7 +2538,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66691 }
66692
66693 /* Set up license info based on the info section */
66694 - set_license(mod, get_modinfo(info, "license"));
66695 + set_license(mod, license);
66696
66697 return 0;
66698 }
66699 @@ -2589,7 +2632,7 @@ static int move_module(struct module *mod, struct load_info *info)
66700 void *ptr;
66701
66702 /* Do the allocs. */
66703 - ptr = module_alloc_update_bounds(mod->core_size);
66704 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66705 /*
66706 * The pointer to this block is stored in the module structure
66707 * which is inside the block. Just mark it as not being a
66708 @@ -2599,23 +2642,50 @@ static int move_module(struct module *mod, struct load_info *info)
66709 if (!ptr)
66710 return -ENOMEM;
66711
66712 - memset(ptr, 0, mod->core_size);
66713 - mod->module_core = ptr;
66714 + memset(ptr, 0, mod->core_size_rw);
66715 + mod->module_core_rw = ptr;
66716
66717 - ptr = module_alloc_update_bounds(mod->init_size);
66718 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66719 /*
66720 * The pointer to this block is stored in the module structure
66721 * which is inside the block. This block doesn't need to be
66722 * scanned as it contains data and code that will be freed
66723 * after the module is initialized.
66724 */
66725 - kmemleak_ignore(ptr);
66726 - if (!ptr && mod->init_size) {
66727 - module_free(mod, mod->module_core);
66728 + kmemleak_not_leak(ptr);
66729 + if (!ptr && mod->init_size_rw) {
66730 + module_free(mod, mod->module_core_rw);
66731 + return -ENOMEM;
66732 + }
66733 + memset(ptr, 0, mod->init_size_rw);
66734 + mod->module_init_rw = ptr;
66735 +
66736 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66737 + kmemleak_not_leak(ptr);
66738 + if (!ptr) {
66739 + module_free(mod, mod->module_init_rw);
66740 + module_free(mod, mod->module_core_rw);
66741 return -ENOMEM;
66742 }
66743 - memset(ptr, 0, mod->init_size);
66744 - mod->module_init = ptr;
66745 +
66746 + pax_open_kernel();
66747 + memset(ptr, 0, mod->core_size_rx);
66748 + pax_close_kernel();
66749 + mod->module_core_rx = ptr;
66750 +
66751 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
66752 + kmemleak_not_leak(ptr);
66753 + if (!ptr && mod->init_size_rx) {
66754 + module_free_exec(mod, mod->module_core_rx);
66755 + module_free(mod, mod->module_init_rw);
66756 + module_free(mod, mod->module_core_rw);
66757 + return -ENOMEM;
66758 + }
66759 +
66760 + pax_open_kernel();
66761 + memset(ptr, 0, mod->init_size_rx);
66762 + pax_close_kernel();
66763 + mod->module_init_rx = ptr;
66764
66765 /* Transfer each section which specifies SHF_ALLOC */
66766 DEBUGP("final section addresses:\n");
66767 @@ -2626,16 +2696,45 @@ static int move_module(struct module *mod, struct load_info *info)
66768 if (!(shdr->sh_flags & SHF_ALLOC))
66769 continue;
66770
66771 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
66772 - dest = mod->module_init
66773 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66774 - else
66775 - dest = mod->module_core + shdr->sh_entsize;
66776 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
66777 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66778 + dest = mod->module_init_rw
66779 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66780 + else
66781 + dest = mod->module_init_rx
66782 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
66783 + } else {
66784 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
66785 + dest = mod->module_core_rw + shdr->sh_entsize;
66786 + else
66787 + dest = mod->module_core_rx + shdr->sh_entsize;
66788 + }
66789 +
66790 + if (shdr->sh_type != SHT_NOBITS) {
66791 +
66792 +#ifdef CONFIG_PAX_KERNEXEC
66793 +#ifdef CONFIG_X86_64
66794 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
66795 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
66796 +#endif
66797 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
66798 + pax_open_kernel();
66799 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66800 + pax_close_kernel();
66801 + } else
66802 +#endif
66803
66804 - if (shdr->sh_type != SHT_NOBITS)
66805 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
66806 + }
66807 /* Update sh_addr to point to copy in image. */
66808 - shdr->sh_addr = (unsigned long)dest;
66809 +
66810 +#ifdef CONFIG_PAX_KERNEXEC
66811 + if (shdr->sh_flags & SHF_EXECINSTR)
66812 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
66813 + else
66814 +#endif
66815 +
66816 + shdr->sh_addr = (unsigned long)dest;
66817 DEBUGP("\t0x%lx %s\n",
66818 shdr->sh_addr, info->secstrings + shdr->sh_name);
66819 }
66820 @@ -2686,12 +2785,12 @@ static void flush_module_icache(const struct module *mod)
66821 * Do it before processing of module parameters, so the module
66822 * can provide parameter accessor functions of its own.
66823 */
66824 - if (mod->module_init)
66825 - flush_icache_range((unsigned long)mod->module_init,
66826 - (unsigned long)mod->module_init
66827 - + mod->init_size);
66828 - flush_icache_range((unsigned long)mod->module_core,
66829 - (unsigned long)mod->module_core + mod->core_size);
66830 + if (mod->module_init_rx)
66831 + flush_icache_range((unsigned long)mod->module_init_rx,
66832 + (unsigned long)mod->module_init_rx
66833 + + mod->init_size_rx);
66834 + flush_icache_range((unsigned long)mod->module_core_rx,
66835 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
66836
66837 set_fs(old_fs);
66838 }
66839 @@ -2771,8 +2870,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
66840 {
66841 kfree(info->strmap);
66842 percpu_modfree(mod);
66843 - module_free(mod, mod->module_init);
66844 - module_free(mod, mod->module_core);
66845 + module_free_exec(mod, mod->module_init_rx);
66846 + module_free_exec(mod, mod->module_core_rx);
66847 + module_free(mod, mod->module_init_rw);
66848 + module_free(mod, mod->module_core_rw);
66849 }
66850
66851 int __weak module_finalize(const Elf_Ehdr *hdr,
66852 @@ -2836,9 +2937,38 @@ static struct module *load_module(void __user *umod,
66853 if (err)
66854 goto free_unload;
66855
66856 + /* Now copy in args */
66857 + mod->args = strndup_user(uargs, ~0UL >> 1);
66858 + if (IS_ERR(mod->args)) {
66859 + err = PTR_ERR(mod->args);
66860 + goto free_unload;
66861 + }
66862 +
66863 /* Set up MODINFO_ATTR fields */
66864 setup_modinfo(mod, &info);
66865
66866 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66867 + {
66868 + char *p, *p2;
66869 +
66870 + if (strstr(mod->args, "grsec_modharden_netdev")) {
66871 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
66872 + err = -EPERM;
66873 + goto free_modinfo;
66874 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
66875 + p += strlen("grsec_modharden_normal");
66876 + p2 = strstr(p, "_");
66877 + if (p2) {
66878 + *p2 = '\0';
66879 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
66880 + *p2 = '_';
66881 + }
66882 + err = -EPERM;
66883 + goto free_modinfo;
66884 + }
66885 + }
66886 +#endif
66887 +
66888 /* Fix up syms, so that st_value is a pointer to location. */
66889 err = simplify_symbols(mod, &info);
66890 if (err < 0)
66891 @@ -2854,13 +2984,6 @@ static struct module *load_module(void __user *umod,
66892
66893 flush_module_icache(mod);
66894
66895 - /* Now copy in args */
66896 - mod->args = strndup_user(uargs, ~0UL >> 1);
66897 - if (IS_ERR(mod->args)) {
66898 - err = PTR_ERR(mod->args);
66899 - goto free_arch_cleanup;
66900 - }
66901 -
66902 /* Mark state as coming so strong_try_module_get() ignores us. */
66903 mod->state = MODULE_STATE_COMING;
66904
66905 @@ -2920,11 +3043,10 @@ static struct module *load_module(void __user *umod,
66906 unlock:
66907 mutex_unlock(&module_mutex);
66908 synchronize_sched();
66909 - kfree(mod->args);
66910 - free_arch_cleanup:
66911 module_arch_cleanup(mod);
66912 free_modinfo:
66913 free_modinfo(mod);
66914 + kfree(mod->args);
66915 free_unload:
66916 module_unload_free(mod);
66917 free_module:
66918 @@ -2965,16 +3087,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66919 MODULE_STATE_COMING, mod);
66920
66921 /* Set RO and NX regions for core */
66922 - set_section_ro_nx(mod->module_core,
66923 - mod->core_text_size,
66924 - mod->core_ro_size,
66925 - mod->core_size);
66926 + set_section_ro_nx(mod->module_core_rx,
66927 + mod->core_size_rx,
66928 + mod->core_size_rx,
66929 + mod->core_size_rx);
66930
66931 /* Set RO and NX regions for init */
66932 - set_section_ro_nx(mod->module_init,
66933 - mod->init_text_size,
66934 - mod->init_ro_size,
66935 - mod->init_size);
66936 + set_section_ro_nx(mod->module_init_rx,
66937 + mod->init_size_rx,
66938 + mod->init_size_rx,
66939 + mod->init_size_rx);
66940
66941 do_mod_ctors(mod);
66942 /* Start the module */
66943 @@ -3020,11 +3142,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
66944 mod->strtab = mod->core_strtab;
66945 #endif
66946 unset_module_init_ro_nx(mod);
66947 - module_free(mod, mod->module_init);
66948 - mod->module_init = NULL;
66949 - mod->init_size = 0;
66950 - mod->init_ro_size = 0;
66951 - mod->init_text_size = 0;
66952 + module_free(mod, mod->module_init_rw);
66953 + module_free_exec(mod, mod->module_init_rx);
66954 + mod->module_init_rw = NULL;
66955 + mod->module_init_rx = NULL;
66956 + mod->init_size_rw = 0;
66957 + mod->init_size_rx = 0;
66958 mutex_unlock(&module_mutex);
66959
66960 return 0;
66961 @@ -3055,10 +3178,16 @@ static const char *get_ksymbol(struct module *mod,
66962 unsigned long nextval;
66963
66964 /* At worse, next value is at end of module */
66965 - if (within_module_init(addr, mod))
66966 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
66967 + if (within_module_init_rx(addr, mod))
66968 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
66969 + else if (within_module_init_rw(addr, mod))
66970 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
66971 + else if (within_module_core_rx(addr, mod))
66972 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
66973 + else if (within_module_core_rw(addr, mod))
66974 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
66975 else
66976 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
66977 + return NULL;
66978
66979 /* Scan for closest preceding symbol, and next symbol. (ELF
66980 starts real symbols at 1). */
66981 @@ -3304,7 +3433,7 @@ static int m_show(struct seq_file *m, void *p)
66982 char buf[8];
66983
66984 seq_printf(m, "%s %u",
66985 - mod->name, mod->init_size + mod->core_size);
66986 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
66987 print_unload_info(m, mod);
66988
66989 /* Informative for users. */
66990 @@ -3313,7 +3442,7 @@ static int m_show(struct seq_file *m, void *p)
66991 mod->state == MODULE_STATE_COMING ? "Loading":
66992 "Live");
66993 /* Used by oprofile and other similar tools. */
66994 - seq_printf(m, " 0x%pK", mod->module_core);
66995 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
66996
66997 /* Taints info */
66998 if (mod->taints)
66999 @@ -3349,7 +3478,17 @@ static const struct file_operations proc_modules_operations = {
67000
67001 static int __init proc_modules_init(void)
67002 {
67003 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67004 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67005 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67006 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67007 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67008 +#else
67009 proc_create("modules", 0, NULL, &proc_modules_operations);
67010 +#endif
67011 +#else
67012 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67013 +#endif
67014 return 0;
67015 }
67016 module_init(proc_modules_init);
67017 @@ -3408,12 +3547,12 @@ struct module *__module_address(unsigned long addr)
67018 {
67019 struct module *mod;
67020
67021 - if (addr < module_addr_min || addr > module_addr_max)
67022 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67023 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
67024 return NULL;
67025
67026 list_for_each_entry_rcu(mod, &modules, list)
67027 - if (within_module_core(addr, mod)
67028 - || within_module_init(addr, mod))
67029 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
67030 return mod;
67031 return NULL;
67032 }
67033 @@ -3447,11 +3586,20 @@ bool is_module_text_address(unsigned long addr)
67034 */
67035 struct module *__module_text_address(unsigned long addr)
67036 {
67037 - struct module *mod = __module_address(addr);
67038 + struct module *mod;
67039 +
67040 +#ifdef CONFIG_X86_32
67041 + addr = ktla_ktva(addr);
67042 +#endif
67043 +
67044 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67045 + return NULL;
67046 +
67047 + mod = __module_address(addr);
67048 +
67049 if (mod) {
67050 /* Make sure it's within the text section. */
67051 - if (!within(addr, mod->module_init, mod->init_text_size)
67052 - && !within(addr, mod->module_core, mod->core_text_size))
67053 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67054 mod = NULL;
67055 }
67056 return mod;
67057 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67058 index 73da83a..fe46e99 100644
67059 --- a/kernel/mutex-debug.c
67060 +++ b/kernel/mutex-debug.c
67061 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67062 }
67063
67064 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67065 - struct thread_info *ti)
67066 + struct task_struct *task)
67067 {
67068 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67069
67070 /* Mark the current thread as blocked on the lock: */
67071 - ti->task->blocked_on = waiter;
67072 + task->blocked_on = waiter;
67073 }
67074
67075 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67076 - struct thread_info *ti)
67077 + struct task_struct *task)
67078 {
67079 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67080 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67081 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67082 - ti->task->blocked_on = NULL;
67083 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
67084 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67085 + task->blocked_on = NULL;
67086
67087 list_del_init(&waiter->list);
67088 waiter->task = NULL;
67089 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67090 index 0799fd3..d06ae3b 100644
67091 --- a/kernel/mutex-debug.h
67092 +++ b/kernel/mutex-debug.h
67093 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67094 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67095 extern void debug_mutex_add_waiter(struct mutex *lock,
67096 struct mutex_waiter *waiter,
67097 - struct thread_info *ti);
67098 + struct task_struct *task);
67099 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67100 - struct thread_info *ti);
67101 + struct task_struct *task);
67102 extern void debug_mutex_unlock(struct mutex *lock);
67103 extern void debug_mutex_init(struct mutex *lock, const char *name,
67104 struct lock_class_key *key);
67105 diff --git a/kernel/mutex.c b/kernel/mutex.c
67106 index d607ed5..58d0a52 100644
67107 --- a/kernel/mutex.c
67108 +++ b/kernel/mutex.c
67109 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67110 spin_lock_mutex(&lock->wait_lock, flags);
67111
67112 debug_mutex_lock_common(lock, &waiter);
67113 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67114 + debug_mutex_add_waiter(lock, &waiter, task);
67115
67116 /* add waiting tasks to the end of the waitqueue (FIFO): */
67117 list_add_tail(&waiter.list, &lock->wait_list);
67118 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67119 * TASK_UNINTERRUPTIBLE case.)
67120 */
67121 if (unlikely(signal_pending_state(state, task))) {
67122 - mutex_remove_waiter(lock, &waiter,
67123 - task_thread_info(task));
67124 + mutex_remove_waiter(lock, &waiter, task);
67125 mutex_release(&lock->dep_map, 1, ip);
67126 spin_unlock_mutex(&lock->wait_lock, flags);
67127
67128 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67129 done:
67130 lock_acquired(&lock->dep_map, ip);
67131 /* got the lock - rejoice! */
67132 - mutex_remove_waiter(lock, &waiter, current_thread_info());
67133 + mutex_remove_waiter(lock, &waiter, task);
67134 mutex_set_owner(lock);
67135
67136 /* set it to 0 if there are no waiters left: */
67137 diff --git a/kernel/padata.c b/kernel/padata.c
67138 index b91941d..0871d60 100644
67139 --- a/kernel/padata.c
67140 +++ b/kernel/padata.c
67141 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
67142 padata->pd = pd;
67143 padata->cb_cpu = cb_cpu;
67144
67145 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
67146 - atomic_set(&pd->seq_nr, -1);
67147 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
67148 + atomic_set_unchecked(&pd->seq_nr, -1);
67149
67150 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
67151 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
67152
67153 target_cpu = padata_cpu_hash(padata);
67154 queue = per_cpu_ptr(pd->pqueue, target_cpu);
67155 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
67156 padata_init_pqueues(pd);
67157 padata_init_squeues(pd);
67158 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
67159 - atomic_set(&pd->seq_nr, -1);
67160 + atomic_set_unchecked(&pd->seq_nr, -1);
67161 atomic_set(&pd->reorder_objects, 0);
67162 atomic_set(&pd->refcnt, 0);
67163 pd->pinst = pinst;
67164 diff --git a/kernel/panic.c b/kernel/panic.c
67165 index d7bb697..9ef9f19 100644
67166 --- a/kernel/panic.c
67167 +++ b/kernel/panic.c
67168 @@ -371,7 +371,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67169 const char *board;
67170
67171 printk(KERN_WARNING "------------[ cut here ]------------\n");
67172 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67173 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67174 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67175 if (board)
67176 printk(KERN_WARNING "Hardware name: %s\n", board);
67177 @@ -426,7 +426,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67178 */
67179 void __stack_chk_fail(void)
67180 {
67181 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
67182 + dump_stack();
67183 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67184 __builtin_return_address(0));
67185 }
67186 EXPORT_SYMBOL(__stack_chk_fail);
67187 diff --git a/kernel/pid.c b/kernel/pid.c
67188 index e432057..a2b2ac5 100644
67189 --- a/kernel/pid.c
67190 +++ b/kernel/pid.c
67191 @@ -33,6 +33,7 @@
67192 #include <linux/rculist.h>
67193 #include <linux/bootmem.h>
67194 #include <linux/hash.h>
67195 +#include <linux/security.h>
67196 #include <linux/pid_namespace.h>
67197 #include <linux/init_task.h>
67198 #include <linux/syscalls.h>
67199 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67200
67201 int pid_max = PID_MAX_DEFAULT;
67202
67203 -#define RESERVED_PIDS 300
67204 +#define RESERVED_PIDS 500
67205
67206 int pid_max_min = RESERVED_PIDS + 1;
67207 int pid_max_max = PID_MAX_LIMIT;
67208 @@ -418,8 +419,15 @@ EXPORT_SYMBOL(pid_task);
67209 */
67210 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67211 {
67212 + struct task_struct *task;
67213 +
67214 rcu_lockdep_assert(rcu_read_lock_held());
67215 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67216 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67217 +
67218 + if (gr_pid_is_chrooted(task))
67219 + return NULL;
67220 +
67221 + return task;
67222 }
67223
67224 struct task_struct *find_task_by_vpid(pid_t vnr)
67225 @@ -427,6 +435,12 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67226 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67227 }
67228
67229 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67230 +{
67231 + rcu_lockdep_assert(rcu_read_lock_held());
67232 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67233 +}
67234 +
67235 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67236 {
67237 struct pid *pid;
67238 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67239 index 640ded8..3dafb85 100644
67240 --- a/kernel/posix-cpu-timers.c
67241 +++ b/kernel/posix-cpu-timers.c
67242 @@ -6,6 +6,7 @@
67243 #include <linux/posix-timers.h>
67244 #include <linux/errno.h>
67245 #include <linux/math64.h>
67246 +#include <linux/security.h>
67247 #include <asm/uaccess.h>
67248 #include <linux/kernel_stat.h>
67249 #include <trace/events/timer.h>
67250 @@ -1606,14 +1607,14 @@ struct k_clock clock_posix_cpu = {
67251
67252 static __init int init_posix_cpu_timers(void)
67253 {
67254 - struct k_clock process = {
67255 + static struct k_clock process = {
67256 .clock_getres = process_cpu_clock_getres,
67257 .clock_get = process_cpu_clock_get,
67258 .timer_create = process_cpu_timer_create,
67259 .nsleep = process_cpu_nsleep,
67260 .nsleep_restart = process_cpu_nsleep_restart,
67261 };
67262 - struct k_clock thread = {
67263 + static struct k_clock thread = {
67264 .clock_getres = thread_cpu_clock_getres,
67265 .clock_get = thread_cpu_clock_get,
67266 .timer_create = thread_cpu_timer_create,
67267 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67268 index 4556182..9335419 100644
67269 --- a/kernel/posix-timers.c
67270 +++ b/kernel/posix-timers.c
67271 @@ -43,6 +43,7 @@
67272 #include <linux/idr.h>
67273 #include <linux/posix-clock.h>
67274 #include <linux/posix-timers.h>
67275 +#include <linux/grsecurity.h>
67276 #include <linux/syscalls.h>
67277 #include <linux/wait.h>
67278 #include <linux/workqueue.h>
67279 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67280 * which we beg off on and pass to do_sys_settimeofday().
67281 */
67282
67283 -static struct k_clock posix_clocks[MAX_CLOCKS];
67284 +static struct k_clock *posix_clocks[MAX_CLOCKS];
67285
67286 /*
67287 * These ones are defined below.
67288 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67289 */
67290 static __init int init_posix_timers(void)
67291 {
67292 - struct k_clock clock_realtime = {
67293 + static struct k_clock clock_realtime = {
67294 .clock_getres = hrtimer_get_res,
67295 .clock_get = posix_clock_realtime_get,
67296 .clock_set = posix_clock_realtime_set,
67297 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67298 .timer_get = common_timer_get,
67299 .timer_del = common_timer_del,
67300 };
67301 - struct k_clock clock_monotonic = {
67302 + static struct k_clock clock_monotonic = {
67303 .clock_getres = hrtimer_get_res,
67304 .clock_get = posix_ktime_get_ts,
67305 .nsleep = common_nsleep,
67306 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67307 .timer_get = common_timer_get,
67308 .timer_del = common_timer_del,
67309 };
67310 - struct k_clock clock_monotonic_raw = {
67311 + static struct k_clock clock_monotonic_raw = {
67312 .clock_getres = hrtimer_get_res,
67313 .clock_get = posix_get_monotonic_raw,
67314 };
67315 - struct k_clock clock_realtime_coarse = {
67316 + static struct k_clock clock_realtime_coarse = {
67317 .clock_getres = posix_get_coarse_res,
67318 .clock_get = posix_get_realtime_coarse,
67319 };
67320 - struct k_clock clock_monotonic_coarse = {
67321 + static struct k_clock clock_monotonic_coarse = {
67322 .clock_getres = posix_get_coarse_res,
67323 .clock_get = posix_get_monotonic_coarse,
67324 };
67325 - struct k_clock clock_boottime = {
67326 + static struct k_clock clock_boottime = {
67327 .clock_getres = hrtimer_get_res,
67328 .clock_get = posix_get_boottime,
67329 .nsleep = common_nsleep,
67330 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void)
67331 .timer_del = common_timer_del,
67332 };
67333
67334 + pax_track_stack();
67335 +
67336 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
67337 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
67338 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
67339 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67340 return;
67341 }
67342
67343 - posix_clocks[clock_id] = *new_clock;
67344 + posix_clocks[clock_id] = new_clock;
67345 }
67346 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67347
67348 @@ -519,9 +522,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67349 return (id & CLOCKFD_MASK) == CLOCKFD ?
67350 &clock_posix_dynamic : &clock_posix_cpu;
67351
67352 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67353 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67354 return NULL;
67355 - return &posix_clocks[id];
67356 + return posix_clocks[id];
67357 }
67358
67359 static int common_timer_create(struct k_itimer *new_timer)
67360 @@ -959,6 +962,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67361 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67362 return -EFAULT;
67363
67364 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67365 + have their clock_set fptr set to a nosettime dummy function
67366 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67367 + call common_clock_set, which calls do_sys_settimeofday, which
67368 + we hook
67369 + */
67370 +
67371 return kc->clock_set(which_clock, &new_tp);
67372 }
67373
67374 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67375 index d523593..68197a4 100644
67376 --- a/kernel/power/poweroff.c
67377 +++ b/kernel/power/poweroff.c
67378 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67379 .enable_mask = SYSRQ_ENABLE_BOOT,
67380 };
67381
67382 -static int pm_sysrq_init(void)
67383 +static int __init pm_sysrq_init(void)
67384 {
67385 register_sysrq_key('o', &sysrq_poweroff_op);
67386 return 0;
67387 diff --git a/kernel/power/process.c b/kernel/power/process.c
67388 index 0cf3a27..5481be4 100644
67389 --- a/kernel/power/process.c
67390 +++ b/kernel/power/process.c
67391 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_only)
67392 u64 elapsed_csecs64;
67393 unsigned int elapsed_csecs;
67394 bool wakeup = false;
67395 + bool timedout = false;
67396
67397 do_gettimeofday(&start);
67398
67399 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_only)
67400
67401 while (true) {
67402 todo = 0;
67403 + if (time_after(jiffies, end_time))
67404 + timedout = true;
67405 read_lock(&tasklist_lock);
67406 do_each_thread(g, p) {
67407 if (frozen(p) || !freezable(p))
67408 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_only)
67409 * try_to_stop() after schedule() in ptrace/signal
67410 * stop sees TIF_FREEZE.
67411 */
67412 - if (!task_is_stopped_or_traced(p) &&
67413 - !freezer_should_skip(p))
67414 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67415 todo++;
67416 + if (timedout) {
67417 + printk(KERN_ERR "Task refusing to freeze:\n");
67418 + sched_show_task(p);
67419 + }
67420 + }
67421 } while_each_thread(g, p);
67422 read_unlock(&tasklist_lock);
67423
67424 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_only)
67425 todo += wq_busy;
67426 }
67427
67428 - if (!todo || time_after(jiffies, end_time))
67429 + if (!todo || timedout)
67430 break;
67431
67432 if (pm_wakeup_pending()) {
67433 diff --git a/kernel/printk.c b/kernel/printk.c
67434 index 28a40d8..2411bec 100644
67435 --- a/kernel/printk.c
67436 +++ b/kernel/printk.c
67437 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
67438 if (from_file && type != SYSLOG_ACTION_OPEN)
67439 return 0;
67440
67441 +#ifdef CONFIG_GRKERNSEC_DMESG
67442 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67443 + return -EPERM;
67444 +#endif
67445 +
67446 if (syslog_action_restricted(type)) {
67447 if (capable(CAP_SYSLOG))
67448 return 0;
67449 diff --git a/kernel/profile.c b/kernel/profile.c
67450 index 961b389..c451353 100644
67451 --- a/kernel/profile.c
67452 +++ b/kernel/profile.c
67453 @@ -39,7 +39,7 @@ struct profile_hit {
67454 /* Oprofile timer tick hook */
67455 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67456
67457 -static atomic_t *prof_buffer;
67458 +static atomic_unchecked_t *prof_buffer;
67459 static unsigned long prof_len, prof_shift;
67460
67461 int prof_on __read_mostly;
67462 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67463 hits[i].pc = 0;
67464 continue;
67465 }
67466 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67467 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67468 hits[i].hits = hits[i].pc = 0;
67469 }
67470 }
67471 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67472 * Add the current hit(s) and flush the write-queue out
67473 * to the global buffer:
67474 */
67475 - atomic_add(nr_hits, &prof_buffer[pc]);
67476 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67477 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67478 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67479 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67480 hits[i].pc = hits[i].hits = 0;
67481 }
67482 out:
67483 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67484 {
67485 unsigned long pc;
67486 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67487 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67488 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67489 }
67490 #endif /* !CONFIG_SMP */
67491
67492 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67493 return -EFAULT;
67494 buf++; p++; count--; read++;
67495 }
67496 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67497 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67498 if (copy_to_user(buf, (void *)pnt, count))
67499 return -EFAULT;
67500 read += count;
67501 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67502 }
67503 #endif
67504 profile_discard_flip_buffers();
67505 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67506 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67507 return count;
67508 }
67509
67510 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67511 index a70d2a5..cbd4b4f 100644
67512 --- a/kernel/ptrace.c
67513 +++ b/kernel/ptrace.c
67514 @@ -161,7 +161,8 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
67515 return ret;
67516 }
67517
67518 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67519 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
67520 + unsigned int log)
67521 {
67522 const struct cred *cred = current_cred(), *tcred;
67523
67524 @@ -187,7 +188,8 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
67525 cred->gid == tcred->sgid &&
67526 cred->gid == tcred->gid))
67527 goto ok;
67528 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
67529 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
67530 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
67531 goto ok;
67532 rcu_read_unlock();
67533 return -EPERM;
67534 @@ -196,7 +198,9 @@ ok:
67535 smp_rmb();
67536 if (task->mm)
67537 dumpable = get_dumpable(task->mm);
67538 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
67539 + if (!dumpable &&
67540 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
67541 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
67542 return -EPERM;
67543
67544 return security_ptrace_access_check(task, mode);
67545 @@ -206,7 +210,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
67546 {
67547 int err;
67548 task_lock(task);
67549 - err = __ptrace_may_access(task, mode);
67550 + err = __ptrace_may_access(task, mode, 0);
67551 + task_unlock(task);
67552 + return !err;
67553 +}
67554 +
67555 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
67556 +{
67557 + int err;
67558 + task_lock(task);
67559 + err = __ptrace_may_access(task, mode, 1);
67560 task_unlock(task);
67561 return !err;
67562 }
67563 @@ -251,7 +264,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67564 goto out;
67565
67566 task_lock(task);
67567 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
67568 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
67569 task_unlock(task);
67570 if (retval)
67571 goto unlock_creds;
67572 @@ -266,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67573 task->ptrace = PT_PTRACED;
67574 if (seize)
67575 task->ptrace |= PT_SEIZED;
67576 - if (task_ns_capable(task, CAP_SYS_PTRACE))
67577 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
67578 task->ptrace |= PT_PTRACE_CAP;
67579
67580 __ptrace_link(task, current);
67581 @@ -461,6 +474,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67582 {
67583 int copied = 0;
67584
67585 + pax_track_stack();
67586 +
67587 while (len > 0) {
67588 char buf[128];
67589 int this_len, retval;
67590 @@ -472,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67591 break;
67592 return -EIO;
67593 }
67594 - if (copy_to_user(dst, buf, retval))
67595 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67596 return -EFAULT;
67597 copied += retval;
67598 src += retval;
67599 @@ -486,6 +501,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
67600 {
67601 int copied = 0;
67602
67603 + pax_track_stack();
67604 +
67605 while (len > 0) {
67606 char buf[128];
67607 int this_len, retval;
67608 @@ -669,10 +686,12 @@ int ptrace_request(struct task_struct *child, long request,
67609 bool seized = child->ptrace & PT_SEIZED;
67610 int ret = -EIO;
67611 siginfo_t siginfo, *si;
67612 - void __user *datavp = (void __user *) data;
67613 + void __user *datavp = (__force void __user *) data;
67614 unsigned long __user *datalp = datavp;
67615 unsigned long flags;
67616
67617 + pax_track_stack();
67618 +
67619 switch (request) {
67620 case PTRACE_PEEKTEXT:
67621 case PTRACE_PEEKDATA:
67622 @@ -871,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67623 goto out;
67624 }
67625
67626 + if (gr_handle_ptrace(child, request)) {
67627 + ret = -EPERM;
67628 + goto out_put_task_struct;
67629 + }
67630 +
67631 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67632 ret = ptrace_attach(child, request, data);
67633 /*
67634 * Some architectures need to do book-keeping after
67635 * a ptrace attach.
67636 */
67637 - if (!ret)
67638 + if (!ret) {
67639 arch_ptrace_attach(child);
67640 + gr_audit_ptrace(child);
67641 + }
67642 goto out_put_task_struct;
67643 }
67644
67645 @@ -904,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67646 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67647 if (copied != sizeof(tmp))
67648 return -EIO;
67649 - return put_user(tmp, (unsigned long __user *)data);
67650 + return put_user(tmp, (__force unsigned long __user *)data);
67651 }
67652
67653 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67654 @@ -927,6 +953,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
67655 siginfo_t siginfo;
67656 int ret;
67657
67658 + pax_track_stack();
67659 +
67660 switch (request) {
67661 case PTRACE_PEEKTEXT:
67662 case PTRACE_PEEKDATA:
67663 @@ -1014,14 +1042,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67664 goto out;
67665 }
67666
67667 + if (gr_handle_ptrace(child, request)) {
67668 + ret = -EPERM;
67669 + goto out_put_task_struct;
67670 + }
67671 +
67672 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67673 ret = ptrace_attach(child, request, data);
67674 /*
67675 * Some architectures need to do book-keeping after
67676 * a ptrace attach.
67677 */
67678 - if (!ret)
67679 + if (!ret) {
67680 arch_ptrace_attach(child);
67681 + gr_audit_ptrace(child);
67682 + }
67683 goto out_put_task_struct;
67684 }
67685
67686 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67687 index 98f51b1..30b950c 100644
67688 --- a/kernel/rcutorture.c
67689 +++ b/kernel/rcutorture.c
67690 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67691 { 0 };
67692 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67693 { 0 };
67694 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67695 -static atomic_t n_rcu_torture_alloc;
67696 -static atomic_t n_rcu_torture_alloc_fail;
67697 -static atomic_t n_rcu_torture_free;
67698 -static atomic_t n_rcu_torture_mberror;
67699 -static atomic_t n_rcu_torture_error;
67700 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67701 +static atomic_unchecked_t n_rcu_torture_alloc;
67702 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67703 +static atomic_unchecked_t n_rcu_torture_free;
67704 +static atomic_unchecked_t n_rcu_torture_mberror;
67705 +static atomic_unchecked_t n_rcu_torture_error;
67706 static long n_rcu_torture_boost_ktrerror;
67707 static long n_rcu_torture_boost_rterror;
67708 static long n_rcu_torture_boost_failure;
67709 @@ -223,11 +223,11 @@ rcu_torture_alloc(void)
67710
67711 spin_lock_bh(&rcu_torture_lock);
67712 if (list_empty(&rcu_torture_freelist)) {
67713 - atomic_inc(&n_rcu_torture_alloc_fail);
67714 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67715 spin_unlock_bh(&rcu_torture_lock);
67716 return NULL;
67717 }
67718 - atomic_inc(&n_rcu_torture_alloc);
67719 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67720 p = rcu_torture_freelist.next;
67721 list_del_init(p);
67722 spin_unlock_bh(&rcu_torture_lock);
67723 @@ -240,7 +240,7 @@ rcu_torture_alloc(void)
67724 static void
67725 rcu_torture_free(struct rcu_torture *p)
67726 {
67727 - atomic_inc(&n_rcu_torture_free);
67728 + atomic_inc_unchecked(&n_rcu_torture_free);
67729 spin_lock_bh(&rcu_torture_lock);
67730 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67731 spin_unlock_bh(&rcu_torture_lock);
67732 @@ -360,7 +360,7 @@ rcu_torture_cb(struct rcu_head *p)
67733 i = rp->rtort_pipe_count;
67734 if (i > RCU_TORTURE_PIPE_LEN)
67735 i = RCU_TORTURE_PIPE_LEN;
67736 - atomic_inc(&rcu_torture_wcount[i]);
67737 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67738 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67739 rp->rtort_mbtest = 0;
67740 rcu_torture_free(rp);
67741 @@ -407,7 +407,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67742 i = rp->rtort_pipe_count;
67743 if (i > RCU_TORTURE_PIPE_LEN)
67744 i = RCU_TORTURE_PIPE_LEN;
67745 - atomic_inc(&rcu_torture_wcount[i]);
67746 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67747 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67748 rp->rtort_mbtest = 0;
67749 list_del(&rp->rtort_free);
67750 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
67751 i = old_rp->rtort_pipe_count;
67752 if (i > RCU_TORTURE_PIPE_LEN)
67753 i = RCU_TORTURE_PIPE_LEN;
67754 - atomic_inc(&rcu_torture_wcount[i]);
67755 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67756 old_rp->rtort_pipe_count++;
67757 cur_ops->deferred_free(old_rp);
67758 }
67759 @@ -950,7 +950,7 @@ static void rcu_torture_timer(unsigned long unused)
67760 return;
67761 }
67762 if (p->rtort_mbtest == 0)
67763 - atomic_inc(&n_rcu_torture_mberror);
67764 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67765 spin_lock(&rand_lock);
67766 cur_ops->read_delay(&rand);
67767 n_rcu_torture_timers++;
67768 @@ -1011,7 +1011,7 @@ rcu_torture_reader(void *arg)
67769 continue;
67770 }
67771 if (p->rtort_mbtest == 0)
67772 - atomic_inc(&n_rcu_torture_mberror);
67773 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67774 cur_ops->read_delay(&rand);
67775 preempt_disable();
67776 pipe_count = p->rtort_pipe_count;
67777 @@ -1070,16 +1070,16 @@ rcu_torture_printk(char *page)
67778 rcu_torture_current,
67779 rcu_torture_current_version,
67780 list_empty(&rcu_torture_freelist),
67781 - atomic_read(&n_rcu_torture_alloc),
67782 - atomic_read(&n_rcu_torture_alloc_fail),
67783 - atomic_read(&n_rcu_torture_free),
67784 - atomic_read(&n_rcu_torture_mberror),
67785 + atomic_read_unchecked(&n_rcu_torture_alloc),
67786 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67787 + atomic_read_unchecked(&n_rcu_torture_free),
67788 + atomic_read_unchecked(&n_rcu_torture_mberror),
67789 n_rcu_torture_boost_ktrerror,
67790 n_rcu_torture_boost_rterror,
67791 n_rcu_torture_boost_failure,
67792 n_rcu_torture_boosts,
67793 n_rcu_torture_timers);
67794 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67795 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67796 n_rcu_torture_boost_ktrerror != 0 ||
67797 n_rcu_torture_boost_rterror != 0 ||
67798 n_rcu_torture_boost_failure != 0)
67799 @@ -1087,7 +1087,7 @@ rcu_torture_printk(char *page)
67800 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67801 if (i > 1) {
67802 cnt += sprintf(&page[cnt], "!!! ");
67803 - atomic_inc(&n_rcu_torture_error);
67804 + atomic_inc_unchecked(&n_rcu_torture_error);
67805 WARN_ON_ONCE(1);
67806 }
67807 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67808 @@ -1101,7 +1101,7 @@ rcu_torture_printk(char *page)
67809 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67810 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67811 cnt += sprintf(&page[cnt], " %d",
67812 - atomic_read(&rcu_torture_wcount[i]));
67813 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67814 }
67815 cnt += sprintf(&page[cnt], "\n");
67816 if (cur_ops->stats)
67817 @@ -1410,7 +1410,7 @@ rcu_torture_cleanup(void)
67818
67819 if (cur_ops->cleanup)
67820 cur_ops->cleanup();
67821 - if (atomic_read(&n_rcu_torture_error))
67822 + if (atomic_read_unchecked(&n_rcu_torture_error))
67823 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
67824 else
67825 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
67826 @@ -1474,17 +1474,17 @@ rcu_torture_init(void)
67827
67828 rcu_torture_current = NULL;
67829 rcu_torture_current_version = 0;
67830 - atomic_set(&n_rcu_torture_alloc, 0);
67831 - atomic_set(&n_rcu_torture_alloc_fail, 0);
67832 - atomic_set(&n_rcu_torture_free, 0);
67833 - atomic_set(&n_rcu_torture_mberror, 0);
67834 - atomic_set(&n_rcu_torture_error, 0);
67835 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
67836 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
67837 + atomic_set_unchecked(&n_rcu_torture_free, 0);
67838 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
67839 + atomic_set_unchecked(&n_rcu_torture_error, 0);
67840 n_rcu_torture_boost_ktrerror = 0;
67841 n_rcu_torture_boost_rterror = 0;
67842 n_rcu_torture_boost_failure = 0;
67843 n_rcu_torture_boosts = 0;
67844 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
67845 - atomic_set(&rcu_torture_wcount[i], 0);
67846 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
67847 for_each_possible_cpu(cpu) {
67848 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67849 per_cpu(rcu_torture_count, cpu)[i] = 0;
67850 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
67851 index ba06207..85d8ba8 100644
67852 --- a/kernel/rcutree.c
67853 +++ b/kernel/rcutree.c
67854 @@ -356,9 +356,9 @@ void rcu_enter_nohz(void)
67855 }
67856 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67857 smp_mb__before_atomic_inc(); /* See above. */
67858 - atomic_inc(&rdtp->dynticks);
67859 + atomic_inc_unchecked(&rdtp->dynticks);
67860 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
67861 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67862 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67863 local_irq_restore(flags);
67864
67865 /* If the interrupt queued a callback, get out of dyntick mode. */
67866 @@ -387,10 +387,10 @@ void rcu_exit_nohz(void)
67867 return;
67868 }
67869 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
67870 - atomic_inc(&rdtp->dynticks);
67871 + atomic_inc_unchecked(&rdtp->dynticks);
67872 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67873 smp_mb__after_atomic_inc(); /* See above. */
67874 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67875 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67876 local_irq_restore(flags);
67877 }
67878
67879 @@ -406,14 +406,14 @@ void rcu_nmi_enter(void)
67880 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
67881
67882 if (rdtp->dynticks_nmi_nesting == 0 &&
67883 - (atomic_read(&rdtp->dynticks) & 0x1))
67884 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
67885 return;
67886 rdtp->dynticks_nmi_nesting++;
67887 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
67888 - atomic_inc(&rdtp->dynticks);
67889 + atomic_inc_unchecked(&rdtp->dynticks);
67890 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
67891 smp_mb__after_atomic_inc(); /* See above. */
67892 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
67893 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
67894 }
67895
67896 /**
67897 @@ -432,9 +432,9 @@ void rcu_nmi_exit(void)
67898 return;
67899 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
67900 smp_mb__before_atomic_inc(); /* See above. */
67901 - atomic_inc(&rdtp->dynticks);
67902 + atomic_inc_unchecked(&rdtp->dynticks);
67903 smp_mb__after_atomic_inc(); /* Force delay to next write. */
67904 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
67905 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
67906 }
67907
67908 /**
67909 @@ -469,7 +469,7 @@ void rcu_irq_exit(void)
67910 */
67911 static int dyntick_save_progress_counter(struct rcu_data *rdp)
67912 {
67913 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
67914 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67915 return 0;
67916 }
67917
67918 @@ -484,7 +484,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
67919 unsigned long curr;
67920 unsigned long snap;
67921
67922 - curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
67923 + curr = (unsigned long)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
67924 snap = (unsigned long)rdp->dynticks_snap;
67925
67926 /*
67927 @@ -1470,7 +1470,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
67928 /*
67929 * Do softirq processing for the current CPU.
67930 */
67931 -static void rcu_process_callbacks(struct softirq_action *unused)
67932 +static void rcu_process_callbacks(void)
67933 {
67934 __rcu_process_callbacks(&rcu_sched_state,
67935 &__get_cpu_var(rcu_sched_data));
67936 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
67937 index 01b2ccd..4f5d80a 100644
67938 --- a/kernel/rcutree.h
67939 +++ b/kernel/rcutree.h
67940 @@ -86,7 +86,7 @@
67941 struct rcu_dynticks {
67942 int dynticks_nesting; /* Track irq/process nesting level. */
67943 int dynticks_nmi_nesting; /* Track NMI nesting level. */
67944 - atomic_t dynticks; /* Even value for dynticks-idle, else odd. */
67945 + atomic_unchecked_t dynticks; /* Even value for dynticks-idle, else odd. */
67946 };
67947
67948 /* RCU's kthread states for tracing. */
67949 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
67950 index 8aafbb8..2fca109 100644
67951 --- a/kernel/rcutree_plugin.h
67952 +++ b/kernel/rcutree_plugin.h
67953 @@ -822,7 +822,7 @@ void synchronize_rcu_expedited(void)
67954
67955 /* Clean up and exit. */
67956 smp_mb(); /* ensure expedited GP seen before counter increment. */
67957 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
67958 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
67959 unlock_mb_ret:
67960 mutex_unlock(&sync_rcu_preempt_exp_mutex);
67961 mb_ret:
67962 @@ -1774,8 +1774,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
67963
67964 #else /* #ifndef CONFIG_SMP */
67965
67966 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
67967 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
67968 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
67969 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
67970
67971 static int synchronize_sched_expedited_cpu_stop(void *data)
67972 {
67973 @@ -1830,7 +1830,7 @@ void synchronize_sched_expedited(void)
67974 int firstsnap, s, snap, trycount = 0;
67975
67976 /* Note that atomic_inc_return() implies full memory barrier. */
67977 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
67978 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
67979 get_online_cpus();
67980
67981 /*
67982 @@ -1851,7 +1851,7 @@ void synchronize_sched_expedited(void)
67983 }
67984
67985 /* Check to see if someone else did our work for us. */
67986 - s = atomic_read(&sync_sched_expedited_done);
67987 + s = atomic_read_unchecked(&sync_sched_expedited_done);
67988 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
67989 smp_mb(); /* ensure test happens before caller kfree */
67990 return;
67991 @@ -1866,7 +1866,7 @@ void synchronize_sched_expedited(void)
67992 * grace period works for us.
67993 */
67994 get_online_cpus();
67995 - snap = atomic_read(&sync_sched_expedited_started) - 1;
67996 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
67997 smp_mb(); /* ensure read is before try_stop_cpus(). */
67998 }
67999
68000 @@ -1877,12 +1877,12 @@ void synchronize_sched_expedited(void)
68001 * than we did beat us to the punch.
68002 */
68003 do {
68004 - s = atomic_read(&sync_sched_expedited_done);
68005 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68006 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68007 smp_mb(); /* ensure test happens before caller kfree */
68008 break;
68009 }
68010 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68011 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68012
68013 put_online_cpus();
68014 }
68015 @@ -1953,7 +1953,7 @@ int rcu_needs_cpu(int cpu)
68016 for_each_online_cpu(thatcpu) {
68017 if (thatcpu == cpu)
68018 continue;
68019 - snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
68020 + snap = atomic_add_return_unchecked(0, &per_cpu(rcu_dynticks,
68021 thatcpu).dynticks);
68022 smp_mb(); /* Order sampling of snap with end of grace period. */
68023 if ((snap & 0x1) != 0) {
68024 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68025 index 3b0c098..43ba2d8 100644
68026 --- a/kernel/rcutree_trace.c
68027 +++ b/kernel/rcutree_trace.c
68028 @@ -74,7 +74,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68029 rdp->qs_pending);
68030 #ifdef CONFIG_NO_HZ
68031 seq_printf(m, " dt=%d/%d/%d df=%lu",
68032 - atomic_read(&rdp->dynticks->dynticks),
68033 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68034 rdp->dynticks->dynticks_nesting,
68035 rdp->dynticks->dynticks_nmi_nesting,
68036 rdp->dynticks_fqs);
68037 @@ -148,7 +148,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68038 rdp->qs_pending);
68039 #ifdef CONFIG_NO_HZ
68040 seq_printf(m, ",%d,%d,%d,%lu",
68041 - atomic_read(&rdp->dynticks->dynticks),
68042 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68043 rdp->dynticks->dynticks_nesting,
68044 rdp->dynticks->dynticks_nmi_nesting,
68045 rdp->dynticks_fqs);
68046 diff --git a/kernel/relay.c b/kernel/relay.c
68047 index 859ea5a..096e2fe 100644
68048 --- a/kernel/relay.c
68049 +++ b/kernel/relay.c
68050 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
68051 };
68052 ssize_t ret;
68053
68054 + pax_track_stack();
68055 +
68056 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
68057 return 0;
68058 if (splice_grow_spd(pipe, &spd))
68059 diff --git a/kernel/resource.c b/kernel/resource.c
68060 index c8dc249..f1e2359 100644
68061 --- a/kernel/resource.c
68062 +++ b/kernel/resource.c
68063 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68064
68065 static int __init ioresources_init(void)
68066 {
68067 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68068 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68069 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68070 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68071 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68072 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68073 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68074 +#endif
68075 +#else
68076 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68077 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68078 +#endif
68079 return 0;
68080 }
68081 __initcall(ioresources_init);
68082 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68083 index 5c9ccd3..a35e22b 100644
68084 --- a/kernel/rtmutex-tester.c
68085 +++ b/kernel/rtmutex-tester.c
68086 @@ -20,7 +20,7 @@
68087 #define MAX_RT_TEST_MUTEXES 8
68088
68089 static spinlock_t rttest_lock;
68090 -static atomic_t rttest_event;
68091 +static atomic_unchecked_t rttest_event;
68092
68093 struct test_thread_data {
68094 int opcode;
68095 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68096
68097 case RTTEST_LOCKCONT:
68098 td->mutexes[td->opdata] = 1;
68099 - td->event = atomic_add_return(1, &rttest_event);
68100 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68101 return 0;
68102
68103 case RTTEST_RESET:
68104 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68105 return 0;
68106
68107 case RTTEST_RESETEVENT:
68108 - atomic_set(&rttest_event, 0);
68109 + atomic_set_unchecked(&rttest_event, 0);
68110 return 0;
68111
68112 default:
68113 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68114 return ret;
68115
68116 td->mutexes[id] = 1;
68117 - td->event = atomic_add_return(1, &rttest_event);
68118 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68119 rt_mutex_lock(&mutexes[id]);
68120 - td->event = atomic_add_return(1, &rttest_event);
68121 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68122 td->mutexes[id] = 4;
68123 return 0;
68124
68125 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68126 return ret;
68127
68128 td->mutexes[id] = 1;
68129 - td->event = atomic_add_return(1, &rttest_event);
68130 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68131 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68132 - td->event = atomic_add_return(1, &rttest_event);
68133 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68134 td->mutexes[id] = ret ? 0 : 4;
68135 return ret ? -EINTR : 0;
68136
68137 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68138 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68139 return ret;
68140
68141 - td->event = atomic_add_return(1, &rttest_event);
68142 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68143 rt_mutex_unlock(&mutexes[id]);
68144 - td->event = atomic_add_return(1, &rttest_event);
68145 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68146 td->mutexes[id] = 0;
68147 return 0;
68148
68149 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68150 break;
68151
68152 td->mutexes[dat] = 2;
68153 - td->event = atomic_add_return(1, &rttest_event);
68154 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68155 break;
68156
68157 default:
68158 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68159 return;
68160
68161 td->mutexes[dat] = 3;
68162 - td->event = atomic_add_return(1, &rttest_event);
68163 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68164 break;
68165
68166 case RTTEST_LOCKNOWAIT:
68167 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68168 return;
68169
68170 td->mutexes[dat] = 1;
68171 - td->event = atomic_add_return(1, &rttest_event);
68172 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68173 return;
68174
68175 default:
68176 diff --git a/kernel/sched.c b/kernel/sched.c
68177 index b50b0f0..1c6c591 100644
68178 --- a/kernel/sched.c
68179 +++ b/kernel/sched.c
68180 @@ -4264,6 +4264,8 @@ static void __sched __schedule(void)
68181 struct rq *rq;
68182 int cpu;
68183
68184 + pax_track_stack();
68185 +
68186 need_resched:
68187 preempt_disable();
68188 cpu = smp_processor_id();
68189 @@ -4950,6 +4952,8 @@ int can_nice(const struct task_struct *p, const int nice)
68190 /* convert nice value [19,-20] to rlimit style value [1,40] */
68191 int nice_rlim = 20 - nice;
68192
68193 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68194 +
68195 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68196 capable(CAP_SYS_NICE));
68197 }
68198 @@ -4983,7 +4987,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68199 if (nice > 19)
68200 nice = 19;
68201
68202 - if (increment < 0 && !can_nice(current, nice))
68203 + if (increment < 0 && (!can_nice(current, nice) ||
68204 + gr_handle_chroot_nice()))
68205 return -EPERM;
68206
68207 retval = security_task_setnice(current, nice);
68208 @@ -5127,6 +5132,7 @@ recheck:
68209 unsigned long rlim_rtprio =
68210 task_rlimit(p, RLIMIT_RTPRIO);
68211
68212 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68213 /* can't set/change the rt policy */
68214 if (policy != p->policy && !rlim_rtprio)
68215 return -EPERM;
68216 diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
68217 index 429242f..d7cca82 100644
68218 --- a/kernel/sched_autogroup.c
68219 +++ b/kernel/sched_autogroup.c
68220 @@ -7,7 +7,7 @@
68221
68222 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68223 static struct autogroup autogroup_default;
68224 -static atomic_t autogroup_seq_nr;
68225 +static atomic_unchecked_t autogroup_seq_nr;
68226
68227 static void __init autogroup_init(struct task_struct *init_task)
68228 {
68229 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68230
68231 kref_init(&ag->kref);
68232 init_rwsem(&ag->lock);
68233 - ag->id = atomic_inc_return(&autogroup_seq_nr);
68234 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68235 ag->tg = tg;
68236 #ifdef CONFIG_RT_GROUP_SCHED
68237 /*
68238 diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
68239 index bc8ee99..b6f6492 100644
68240 --- a/kernel/sched_fair.c
68241 +++ b/kernel/sched_fair.c
68242 @@ -4062,7 +4062,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68243 * run_rebalance_domains is triggered when needed from the scheduler tick.
68244 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68245 */
68246 -static void run_rebalance_domains(struct softirq_action *h)
68247 +static void run_rebalance_domains(void)
68248 {
68249 int this_cpu = smp_processor_id();
68250 struct rq *this_rq = cpu_rq(this_cpu);
68251 diff --git a/kernel/signal.c b/kernel/signal.c
68252 index 291c970..304bd03 100644
68253 --- a/kernel/signal.c
68254 +++ b/kernel/signal.c
68255 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cachep;
68256
68257 int print_fatal_signals __read_mostly;
68258
68259 -static void __user *sig_handler(struct task_struct *t, int sig)
68260 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
68261 {
68262 return t->sighand->action[sig - 1].sa.sa_handler;
68263 }
68264
68265 -static int sig_handler_ignored(void __user *handler, int sig)
68266 +static int sig_handler_ignored(__sighandler_t handler, int sig)
68267 {
68268 /* Is it explicitly or implicitly ignored? */
68269 return handler == SIG_IGN ||
68270 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68271 static int sig_task_ignored(struct task_struct *t, int sig,
68272 int from_ancestor_ns)
68273 {
68274 - void __user *handler;
68275 + __sighandler_t handler;
68276
68277 handler = sig_handler(t, sig);
68278
68279 @@ -364,6 +364,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68280 atomic_inc(&user->sigpending);
68281 rcu_read_unlock();
68282
68283 + if (!override_rlimit)
68284 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68285 +
68286 if (override_rlimit ||
68287 atomic_read(&user->sigpending) <=
68288 task_rlimit(t, RLIMIT_SIGPENDING)) {
68289 @@ -488,7 +491,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68290
68291 int unhandled_signal(struct task_struct *tsk, int sig)
68292 {
68293 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68294 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68295 if (is_global_init(tsk))
68296 return 1;
68297 if (handler != SIG_IGN && handler != SIG_DFL)
68298 @@ -815,6 +818,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68299 }
68300 }
68301
68302 + /* allow glibc communication via tgkill to other threads in our
68303 + thread group */
68304 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68305 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68306 + && gr_handle_signal(t, sig))
68307 + return -EPERM;
68308 +
68309 return security_task_kill(t, info, sig, 0);
68310 }
68311
68312 @@ -1165,7 +1175,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68313 return send_signal(sig, info, p, 1);
68314 }
68315
68316 -static int
68317 +int
68318 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68319 {
68320 return send_signal(sig, info, t, 0);
68321 @@ -1202,6 +1212,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68322 unsigned long int flags;
68323 int ret, blocked, ignored;
68324 struct k_sigaction *action;
68325 + int is_unhandled = 0;
68326
68327 spin_lock_irqsave(&t->sighand->siglock, flags);
68328 action = &t->sighand->action[sig-1];
68329 @@ -1216,9 +1227,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68330 }
68331 if (action->sa.sa_handler == SIG_DFL)
68332 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68333 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68334 + is_unhandled = 1;
68335 ret = specific_send_sig_info(sig, info, t);
68336 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68337
68338 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
68339 + normal operation */
68340 + if (is_unhandled) {
68341 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68342 + gr_handle_crash(t, sig);
68343 + }
68344 +
68345 return ret;
68346 }
68347
68348 @@ -1285,8 +1305,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68349 ret = check_kill_permission(sig, info, p);
68350 rcu_read_unlock();
68351
68352 - if (!ret && sig)
68353 + if (!ret && sig) {
68354 ret = do_send_sig_info(sig, info, p, true);
68355 + if (!ret)
68356 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68357 + }
68358
68359 return ret;
68360 }
68361 @@ -1909,6 +1932,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
68362 {
68363 siginfo_t info;
68364
68365 + pax_track_stack();
68366 +
68367 memset(&info, 0, sizeof info);
68368 info.si_signo = signr;
68369 info.si_code = exit_code;
68370 @@ -2748,7 +2773,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68371 int error = -ESRCH;
68372
68373 rcu_read_lock();
68374 - p = find_task_by_vpid(pid);
68375 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68376 + /* allow glibc communication via tgkill to other threads in our
68377 + thread group */
68378 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68379 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68380 + p = find_task_by_vpid_unrestricted(pid);
68381 + else
68382 +#endif
68383 + p = find_task_by_vpid(pid);
68384 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68385 error = check_kill_permission(sig, info, p);
68386 /*
68387 diff --git a/kernel/smp.c b/kernel/smp.c
68388 index fb67dfa..f819e2e 100644
68389 --- a/kernel/smp.c
68390 +++ b/kernel/smp.c
68391 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68392 }
68393 EXPORT_SYMBOL(smp_call_function);
68394
68395 -void ipi_call_lock(void)
68396 +void ipi_call_lock(void) __acquires(call_function.lock)
68397 {
68398 raw_spin_lock(&call_function.lock);
68399 }
68400
68401 -void ipi_call_unlock(void)
68402 +void ipi_call_unlock(void) __releases(call_function.lock)
68403 {
68404 raw_spin_unlock(&call_function.lock);
68405 }
68406
68407 -void ipi_call_lock_irq(void)
68408 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68409 {
68410 raw_spin_lock_irq(&call_function.lock);
68411 }
68412
68413 -void ipi_call_unlock_irq(void)
68414 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68415 {
68416 raw_spin_unlock_irq(&call_function.lock);
68417 }
68418 diff --git a/kernel/softirq.c b/kernel/softirq.c
68419 index fca82c3..1db9690 100644
68420 --- a/kernel/softirq.c
68421 +++ b/kernel/softirq.c
68422 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68423
68424 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68425
68426 -char *softirq_to_name[NR_SOFTIRQS] = {
68427 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68428 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68429 "TASKLET", "SCHED", "HRTIMER", "RCU"
68430 };
68431 @@ -235,7 +235,7 @@ restart:
68432 kstat_incr_softirqs_this_cpu(vec_nr);
68433
68434 trace_softirq_entry(vec_nr);
68435 - h->action(h);
68436 + h->action();
68437 trace_softirq_exit(vec_nr);
68438 if (unlikely(prev_count != preempt_count())) {
68439 printk(KERN_ERR "huh, entered softirq %u %s %p"
68440 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
68441 local_irq_restore(flags);
68442 }
68443
68444 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68445 +void open_softirq(int nr, void (*action)(void))
68446 {
68447 - softirq_vec[nr].action = action;
68448 + pax_open_kernel();
68449 + *(void **)&softirq_vec[nr].action = action;
68450 + pax_close_kernel();
68451 }
68452
68453 /*
68454 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68455
68456 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68457
68458 -static void tasklet_action(struct softirq_action *a)
68459 +static void tasklet_action(void)
68460 {
68461 struct tasklet_struct *list;
68462
68463 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
68464 }
68465 }
68466
68467 -static void tasklet_hi_action(struct softirq_action *a)
68468 +static void tasklet_hi_action(void)
68469 {
68470 struct tasklet_struct *list;
68471
68472 diff --git a/kernel/sys.c b/kernel/sys.c
68473 index 1dbbe69..e96e1dd 100644
68474 --- a/kernel/sys.c
68475 +++ b/kernel/sys.c
68476 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68477 error = -EACCES;
68478 goto out;
68479 }
68480 +
68481 + if (gr_handle_chroot_setpriority(p, niceval)) {
68482 + error = -EACCES;
68483 + goto out;
68484 + }
68485 +
68486 no_nice = security_task_setnice(p, niceval);
68487 if (no_nice) {
68488 error = no_nice;
68489 @@ -571,6 +577,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68490 goto error;
68491 }
68492
68493 + if (gr_check_group_change(new->gid, new->egid, -1))
68494 + goto error;
68495 +
68496 if (rgid != (gid_t) -1 ||
68497 (egid != (gid_t) -1 && egid != old->gid))
68498 new->sgid = new->egid;
68499 @@ -600,6 +609,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68500 old = current_cred();
68501
68502 retval = -EPERM;
68503 +
68504 + if (gr_check_group_change(gid, gid, gid))
68505 + goto error;
68506 +
68507 if (nsown_capable(CAP_SETGID))
68508 new->gid = new->egid = new->sgid = new->fsgid = gid;
68509 else if (gid == old->gid || gid == old->sgid)
68510 @@ -687,6 +700,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68511 goto error;
68512 }
68513
68514 + if (gr_check_user_change(new->uid, new->euid, -1))
68515 + goto error;
68516 +
68517 if (new->uid != old->uid) {
68518 retval = set_user(new);
68519 if (retval < 0)
68520 @@ -731,6 +747,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68521 old = current_cred();
68522
68523 retval = -EPERM;
68524 +
68525 + if (gr_check_crash_uid(uid))
68526 + goto error;
68527 + if (gr_check_user_change(uid, uid, uid))
68528 + goto error;
68529 +
68530 if (nsown_capable(CAP_SETUID)) {
68531 new->suid = new->uid = uid;
68532 if (uid != old->uid) {
68533 @@ -785,6 +807,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68534 goto error;
68535 }
68536
68537 + if (gr_check_user_change(ruid, euid, -1))
68538 + goto error;
68539 +
68540 if (ruid != (uid_t) -1) {
68541 new->uid = ruid;
68542 if (ruid != old->uid) {
68543 @@ -849,6 +874,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68544 goto error;
68545 }
68546
68547 + if (gr_check_group_change(rgid, egid, -1))
68548 + goto error;
68549 +
68550 if (rgid != (gid_t) -1)
68551 new->gid = rgid;
68552 if (egid != (gid_t) -1)
68553 @@ -895,6 +923,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68554 old = current_cred();
68555 old_fsuid = old->fsuid;
68556
68557 + if (gr_check_user_change(-1, -1, uid))
68558 + goto error;
68559 +
68560 if (uid == old->uid || uid == old->euid ||
68561 uid == old->suid || uid == old->fsuid ||
68562 nsown_capable(CAP_SETUID)) {
68563 @@ -905,6 +936,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68564 }
68565 }
68566
68567 +error:
68568 abort_creds(new);
68569 return old_fsuid;
68570
68571 @@ -931,12 +963,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68572 if (gid == old->gid || gid == old->egid ||
68573 gid == old->sgid || gid == old->fsgid ||
68574 nsown_capable(CAP_SETGID)) {
68575 + if (gr_check_group_change(-1, -1, gid))
68576 + goto error;
68577 +
68578 if (gid != old_fsgid) {
68579 new->fsgid = gid;
68580 goto change_okay;
68581 }
68582 }
68583
68584 +error:
68585 abort_creds(new);
68586 return old_fsgid;
68587
68588 @@ -1188,7 +1224,10 @@ static int override_release(char __user *release, int len)
68589 }
68590 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68591 snprintf(buf, len, "2.6.%u%s", v, rest);
68592 - ret = copy_to_user(release, buf, len);
68593 + if (len > sizeof(buf))
68594 + ret = -EFAULT;
68595 + else
68596 + ret = copy_to_user(release, buf, len);
68597 }
68598 return ret;
68599 }
68600 @@ -1242,19 +1281,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68601 return -EFAULT;
68602
68603 down_read(&uts_sem);
68604 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68605 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68606 __OLD_UTS_LEN);
68607 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68608 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68609 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68610 __OLD_UTS_LEN);
68611 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68612 - error |= __copy_to_user(&name->release, &utsname()->release,
68613 + error |= __copy_to_user(name->release, &utsname()->release,
68614 __OLD_UTS_LEN);
68615 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68616 - error |= __copy_to_user(&name->version, &utsname()->version,
68617 + error |= __copy_to_user(name->version, &utsname()->version,
68618 __OLD_UTS_LEN);
68619 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68620 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68621 + error |= __copy_to_user(name->machine, &utsname()->machine,
68622 __OLD_UTS_LEN);
68623 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68624 up_read(&uts_sem);
68625 @@ -1717,7 +1756,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68626 error = get_dumpable(me->mm);
68627 break;
68628 case PR_SET_DUMPABLE:
68629 - if (arg2 < 0 || arg2 > 1) {
68630 + if (arg2 > 1) {
68631 error = -EINVAL;
68632 break;
68633 }
68634 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68635 index 11d65b5..6957b37 100644
68636 --- a/kernel/sysctl.c
68637 +++ b/kernel/sysctl.c
68638 @@ -85,6 +85,13 @@
68639
68640
68641 #if defined(CONFIG_SYSCTL)
68642 +#include <linux/grsecurity.h>
68643 +#include <linux/grinternal.h>
68644 +
68645 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
68646 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
68647 + const int op);
68648 +extern int gr_handle_chroot_sysctl(const int op);
68649
68650 /* External variables not in a header file. */
68651 extern int sysctl_overcommit_memory;
68652 @@ -197,6 +204,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68653 }
68654
68655 #endif
68656 +extern struct ctl_table grsecurity_table[];
68657
68658 static struct ctl_table root_table[];
68659 static struct ctl_table_root sysctl_table_root;
68660 @@ -226,6 +234,20 @@ extern struct ctl_table epoll_table[];
68661 int sysctl_legacy_va_layout;
68662 #endif
68663
68664 +#ifdef CONFIG_PAX_SOFTMODE
68665 +static ctl_table pax_table[] = {
68666 + {
68667 + .procname = "softmode",
68668 + .data = &pax_softmode,
68669 + .maxlen = sizeof(unsigned int),
68670 + .mode = 0600,
68671 + .proc_handler = &proc_dointvec,
68672 + },
68673 +
68674 + { }
68675 +};
68676 +#endif
68677 +
68678 /* The default sysctl tables: */
68679
68680 static struct ctl_table root_table[] = {
68681 @@ -272,6 +294,22 @@ static int max_extfrag_threshold = 1000;
68682 #endif
68683
68684 static struct ctl_table kern_table[] = {
68685 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68686 + {
68687 + .procname = "grsecurity",
68688 + .mode = 0500,
68689 + .child = grsecurity_table,
68690 + },
68691 +#endif
68692 +
68693 +#ifdef CONFIG_PAX_SOFTMODE
68694 + {
68695 + .procname = "pax",
68696 + .mode = 0500,
68697 + .child = pax_table,
68698 + },
68699 +#endif
68700 +
68701 {
68702 .procname = "sched_child_runs_first",
68703 .data = &sysctl_sched_child_runs_first,
68704 @@ -546,7 +584,7 @@ static struct ctl_table kern_table[] = {
68705 .data = &modprobe_path,
68706 .maxlen = KMOD_PATH_LEN,
68707 .mode = 0644,
68708 - .proc_handler = proc_dostring,
68709 + .proc_handler = proc_dostring_modpriv,
68710 },
68711 {
68712 .procname = "modules_disabled",
68713 @@ -713,16 +751,20 @@ static struct ctl_table kern_table[] = {
68714 .extra1 = &zero,
68715 .extra2 = &one,
68716 },
68717 +#endif
68718 {
68719 .procname = "kptr_restrict",
68720 .data = &kptr_restrict,
68721 .maxlen = sizeof(int),
68722 .mode = 0644,
68723 .proc_handler = proc_dmesg_restrict,
68724 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68725 + .extra1 = &two,
68726 +#else
68727 .extra1 = &zero,
68728 +#endif
68729 .extra2 = &two,
68730 },
68731 -#endif
68732 {
68733 .procname = "ngroups_max",
68734 .data = &ngroups_max,
68735 @@ -1205,6 +1247,13 @@ static struct ctl_table vm_table[] = {
68736 .proc_handler = proc_dointvec_minmax,
68737 .extra1 = &zero,
68738 },
68739 + {
68740 + .procname = "heap_stack_gap",
68741 + .data = &sysctl_heap_stack_gap,
68742 + .maxlen = sizeof(sysctl_heap_stack_gap),
68743 + .mode = 0644,
68744 + .proc_handler = proc_doulongvec_minmax,
68745 + },
68746 #else
68747 {
68748 .procname = "nr_trim_pages",
68749 @@ -1709,6 +1758,17 @@ static int test_perm(int mode, int op)
68750 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
68751 {
68752 int mode;
68753 + int error;
68754 +
68755 + if (table->parent != NULL && table->parent->procname != NULL &&
68756 + table->procname != NULL &&
68757 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
68758 + return -EACCES;
68759 + if (gr_handle_chroot_sysctl(op))
68760 + return -EACCES;
68761 + error = gr_handle_sysctl(table, op);
68762 + if (error)
68763 + return error;
68764
68765 if (root->permissions)
68766 mode = root->permissions(root, current->nsproxy, table);
68767 @@ -2113,6 +2173,16 @@ int proc_dostring(struct ctl_table *table, int write,
68768 buffer, lenp, ppos);
68769 }
68770
68771 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68772 + void __user *buffer, size_t *lenp, loff_t *ppos)
68773 +{
68774 + if (write && !capable(CAP_SYS_MODULE))
68775 + return -EPERM;
68776 +
68777 + return _proc_do_string(table->data, table->maxlen, write,
68778 + buffer, lenp, ppos);
68779 +}
68780 +
68781 static size_t proc_skip_spaces(char **buf)
68782 {
68783 size_t ret;
68784 @@ -2218,6 +2288,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68785 len = strlen(tmp);
68786 if (len > *size)
68787 len = *size;
68788 + if (len > sizeof(tmp))
68789 + len = sizeof(tmp);
68790 if (copy_to_user(*buf, tmp, len))
68791 return -EFAULT;
68792 *size -= len;
68793 @@ -2534,8 +2606,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68794 *i = val;
68795 } else {
68796 val = convdiv * (*i) / convmul;
68797 - if (!first)
68798 + if (!first) {
68799 err = proc_put_char(&buffer, &left, '\t');
68800 + if (err)
68801 + break;
68802 + }
68803 err = proc_put_long(&buffer, &left, val, false);
68804 if (err)
68805 break;
68806 @@ -2930,6 +3005,12 @@ int proc_dostring(struct ctl_table *table, int write,
68807 return -ENOSYS;
68808 }
68809
68810 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68811 + void __user *buffer, size_t *lenp, loff_t *ppos)
68812 +{
68813 + return -ENOSYS;
68814 +}
68815 +
68816 int proc_dointvec(struct ctl_table *table, int write,
68817 void __user *buffer, size_t *lenp, loff_t *ppos)
68818 {
68819 @@ -2986,6 +3067,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
68820 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
68821 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
68822 EXPORT_SYMBOL(proc_dostring);
68823 +EXPORT_SYMBOL(proc_dostring_modpriv);
68824 EXPORT_SYMBOL(proc_doulongvec_minmax);
68825 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
68826 EXPORT_SYMBOL(register_sysctl_table);
68827 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
68828 index e8bffbe..2344401 100644
68829 --- a/kernel/sysctl_binary.c
68830 +++ b/kernel/sysctl_binary.c
68831 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
68832 int i;
68833
68834 set_fs(KERNEL_DS);
68835 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68836 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68837 set_fs(old_fs);
68838 if (result < 0)
68839 goto out_kfree;
68840 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
68841 }
68842
68843 set_fs(KERNEL_DS);
68844 - result = vfs_write(file, buffer, str - buffer, &pos);
68845 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68846 set_fs(old_fs);
68847 if (result < 0)
68848 goto out_kfree;
68849 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
68850 int i;
68851
68852 set_fs(KERNEL_DS);
68853 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
68854 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
68855 set_fs(old_fs);
68856 if (result < 0)
68857 goto out_kfree;
68858 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
68859 }
68860
68861 set_fs(KERNEL_DS);
68862 - result = vfs_write(file, buffer, str - buffer, &pos);
68863 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
68864 set_fs(old_fs);
68865 if (result < 0)
68866 goto out_kfree;
68867 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
68868 int i;
68869
68870 set_fs(KERNEL_DS);
68871 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68872 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68873 set_fs(old_fs);
68874 if (result < 0)
68875 goto out;
68876 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68877 __le16 dnaddr;
68878
68879 set_fs(KERNEL_DS);
68880 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
68881 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
68882 set_fs(old_fs);
68883 if (result < 0)
68884 goto out;
68885 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
68886 le16_to_cpu(dnaddr) & 0x3ff);
68887
68888 set_fs(KERNEL_DS);
68889 - result = vfs_write(file, buf, len, &pos);
68890 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
68891 set_fs(old_fs);
68892 if (result < 0)
68893 goto out;
68894 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
68895 index 362da65..ab8ef8c 100644
68896 --- a/kernel/sysctl_check.c
68897 +++ b/kernel/sysctl_check.c
68898 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
68899 set_fail(&fail, table, "Directory with extra2");
68900 } else {
68901 if ((table->proc_handler == proc_dostring) ||
68902 + (table->proc_handler == proc_dostring_modpriv) ||
68903 (table->proc_handler == proc_dointvec) ||
68904 (table->proc_handler == proc_dointvec_minmax) ||
68905 (table->proc_handler == proc_dointvec_jiffies) ||
68906 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
68907 index e660464..c8b9e67 100644
68908 --- a/kernel/taskstats.c
68909 +++ b/kernel/taskstats.c
68910 @@ -27,9 +27,12 @@
68911 #include <linux/cgroup.h>
68912 #include <linux/fs.h>
68913 #include <linux/file.h>
68914 +#include <linux/grsecurity.h>
68915 #include <net/genetlink.h>
68916 #include <linux/atomic.h>
68917
68918 +extern int gr_is_taskstats_denied(int pid);
68919 +
68920 /*
68921 * Maximum length of a cpumask that can be specified in
68922 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
68923 @@ -556,6 +559,9 @@ err:
68924
68925 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
68926 {
68927 + if (gr_is_taskstats_denied(current->pid))
68928 + return -EACCES;
68929 +
68930 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
68931 return cmd_attr_register_cpumask(info);
68932 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
68933 diff --git a/kernel/time.c b/kernel/time.c
68934 index d776062..fa8d186 100644
68935 --- a/kernel/time.c
68936 +++ b/kernel/time.c
68937 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
68938 return error;
68939
68940 if (tz) {
68941 + /* we log in do_settimeofday called below, so don't log twice
68942 + */
68943 + if (!tv)
68944 + gr_log_timechange();
68945 +
68946 /* SMP safe, global irq locking makes it work. */
68947 sys_tz = *tz;
68948 update_vsyscall_tz();
68949 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
68950 index ea5e1a9..8b8df07 100644
68951 --- a/kernel/time/alarmtimer.c
68952 +++ b/kernel/time/alarmtimer.c
68953 @@ -693,7 +693,7 @@ static int __init alarmtimer_init(void)
68954 {
68955 int error = 0;
68956 int i;
68957 - struct k_clock alarm_clock = {
68958 + static struct k_clock alarm_clock = {
68959 .clock_getres = alarm_clock_getres,
68960 .clock_get = alarm_clock_get,
68961 .timer_create = alarm_timer_create,
68962 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
68963 index c7218d1..5f4ecc6 100644
68964 --- a/kernel/time/tick-broadcast.c
68965 +++ b/kernel/time/tick-broadcast.c
68966 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
68967 * then clear the broadcast bit.
68968 */
68969 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
68970 - int cpu = smp_processor_id();
68971 + cpu = smp_processor_id();
68972
68973 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
68974 tick_broadcast_clear_oneshot(cpu);
68975 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
68976 index 2b021b0e..b673a32 100644
68977 --- a/kernel/time/timekeeping.c
68978 +++ b/kernel/time/timekeeping.c
68979 @@ -14,6 +14,7 @@
68980 #include <linux/init.h>
68981 #include <linux/mm.h>
68982 #include <linux/sched.h>
68983 +#include <linux/grsecurity.h>
68984 #include <linux/syscore_ops.h>
68985 #include <linux/clocksource.h>
68986 #include <linux/jiffies.h>
68987 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespec *tv)
68988 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
68989 return -EINVAL;
68990
68991 + gr_log_timechange();
68992 +
68993 write_seqlock_irqsave(&xtime_lock, flags);
68994
68995 timekeeping_forward_now();
68996 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
68997 index 3258455..f35227d 100644
68998 --- a/kernel/time/timer_list.c
68999 +++ b/kernel/time/timer_list.c
69000 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69001
69002 static void print_name_offset(struct seq_file *m, void *sym)
69003 {
69004 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69005 + SEQ_printf(m, "<%p>", NULL);
69006 +#else
69007 char symname[KSYM_NAME_LEN];
69008
69009 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69010 SEQ_printf(m, "<%pK>", sym);
69011 else
69012 SEQ_printf(m, "%s", symname);
69013 +#endif
69014 }
69015
69016 static void
69017 @@ -112,7 +116,11 @@ next_one:
69018 static void
69019 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69020 {
69021 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69022 + SEQ_printf(m, " .base: %p\n", NULL);
69023 +#else
69024 SEQ_printf(m, " .base: %pK\n", base);
69025 +#endif
69026 SEQ_printf(m, " .index: %d\n",
69027 base->index);
69028 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69029 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69030 {
69031 struct proc_dir_entry *pe;
69032
69033 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69034 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69035 +#else
69036 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69037 +#endif
69038 if (!pe)
69039 return -ENOMEM;
69040 return 0;
69041 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69042 index a5d0a3a..60c7948 100644
69043 --- a/kernel/time/timer_stats.c
69044 +++ b/kernel/time/timer_stats.c
69045 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69046 static unsigned long nr_entries;
69047 static struct entry entries[MAX_ENTRIES];
69048
69049 -static atomic_t overflow_count;
69050 +static atomic_unchecked_t overflow_count;
69051
69052 /*
69053 * The entries are in a hash-table, for fast lookup:
69054 @@ -140,7 +140,7 @@ static void reset_entries(void)
69055 nr_entries = 0;
69056 memset(entries, 0, sizeof(entries));
69057 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69058 - atomic_set(&overflow_count, 0);
69059 + atomic_set_unchecked(&overflow_count, 0);
69060 }
69061
69062 static struct entry *alloc_entry(void)
69063 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69064 if (likely(entry))
69065 entry->count++;
69066 else
69067 - atomic_inc(&overflow_count);
69068 + atomic_inc_unchecked(&overflow_count);
69069
69070 out_unlock:
69071 raw_spin_unlock_irqrestore(lock, flags);
69072 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69073
69074 static void print_name_offset(struct seq_file *m, unsigned long addr)
69075 {
69076 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69077 + seq_printf(m, "<%p>", NULL);
69078 +#else
69079 char symname[KSYM_NAME_LEN];
69080
69081 if (lookup_symbol_name(addr, symname) < 0)
69082 seq_printf(m, "<%p>", (void *)addr);
69083 else
69084 seq_printf(m, "%s", symname);
69085 +#endif
69086 }
69087
69088 static int tstats_show(struct seq_file *m, void *v)
69089 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69090
69091 seq_puts(m, "Timer Stats Version: v0.2\n");
69092 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69093 - if (atomic_read(&overflow_count))
69094 + if (atomic_read_unchecked(&overflow_count))
69095 seq_printf(m, "Overflow: %d entries\n",
69096 - atomic_read(&overflow_count));
69097 + atomic_read_unchecked(&overflow_count));
69098
69099 for (i = 0; i < nr_entries; i++) {
69100 entry = entries + i;
69101 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69102 {
69103 struct proc_dir_entry *pe;
69104
69105 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69106 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69107 +#else
69108 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69109 +#endif
69110 if (!pe)
69111 return -ENOMEM;
69112 return 0;
69113 diff --git a/kernel/timer.c b/kernel/timer.c
69114 index 8cff361..0fb5cd8 100644
69115 --- a/kernel/timer.c
69116 +++ b/kernel/timer.c
69117 @@ -1304,7 +1304,7 @@ void update_process_times(int user_tick)
69118 /*
69119 * This function runs timers and the timer-tq in bottom half context.
69120 */
69121 -static void run_timer_softirq(struct softirq_action *h)
69122 +static void run_timer_softirq(void)
69123 {
69124 struct tvec_base *base = __this_cpu_read(tvec_bases);
69125
69126 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69127 index 7c910a5..8b72104 100644
69128 --- a/kernel/trace/blktrace.c
69129 +++ b/kernel/trace/blktrace.c
69130 @@ -323,7 +323,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69131 struct blk_trace *bt = filp->private_data;
69132 char buf[16];
69133
69134 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69135 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69136
69137 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69138 }
69139 @@ -388,7 +388,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69140 return 1;
69141
69142 bt = buf->chan->private_data;
69143 - atomic_inc(&bt->dropped);
69144 + atomic_inc_unchecked(&bt->dropped);
69145 return 0;
69146 }
69147
69148 @@ -489,7 +489,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69149
69150 bt->dir = dir;
69151 bt->dev = dev;
69152 - atomic_set(&bt->dropped, 0);
69153 + atomic_set_unchecked(&bt->dropped, 0);
69154
69155 ret = -EIO;
69156 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69157 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69158 index c3e4575..cd9c767 100644
69159 --- a/kernel/trace/ftrace.c
69160 +++ b/kernel/trace/ftrace.c
69161 @@ -1585,12 +1585,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69162 if (unlikely(ftrace_disabled))
69163 return 0;
69164
69165 + ret = ftrace_arch_code_modify_prepare();
69166 + FTRACE_WARN_ON(ret);
69167 + if (ret)
69168 + return 0;
69169 +
69170 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69171 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69172 if (ret) {
69173 ftrace_bug(ret, ip);
69174 - return 0;
69175 }
69176 - return 1;
69177 + return ret ? 0 : 1;
69178 }
69179
69180 /*
69181 @@ -2607,7 +2612,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69182
69183 int
69184 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69185 - void *data)
69186 + void *data)
69187 {
69188 struct ftrace_func_probe *entry;
69189 struct ftrace_page *pg;
69190 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69191 index 17a2d44..85907e2 100644
69192 --- a/kernel/trace/trace.c
69193 +++ b/kernel/trace/trace.c
69194 @@ -3451,6 +3451,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
69195 size_t rem;
69196 unsigned int i;
69197
69198 + pax_track_stack();
69199 +
69200 if (splice_grow_spd(pipe, &spd))
69201 return -ENOMEM;
69202
69203 @@ -3926,6 +3928,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
69204 int entries, size, i;
69205 size_t ret;
69206
69207 + pax_track_stack();
69208 +
69209 if (splice_grow_spd(pipe, &spd))
69210 return -ENOMEM;
69211
69212 @@ -4093,10 +4097,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69213 };
69214 #endif
69215
69216 -static struct dentry *d_tracer;
69217 -
69218 struct dentry *tracing_init_dentry(void)
69219 {
69220 + static struct dentry *d_tracer;
69221 static int once;
69222
69223 if (d_tracer)
69224 @@ -4116,10 +4119,9 @@ struct dentry *tracing_init_dentry(void)
69225 return d_tracer;
69226 }
69227
69228 -static struct dentry *d_percpu;
69229 -
69230 struct dentry *tracing_dentry_percpu(void)
69231 {
69232 + static struct dentry *d_percpu;
69233 static int once;
69234 struct dentry *d_tracer;
69235
69236 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69237 index 581876f..a91e569 100644
69238 --- a/kernel/trace/trace_events.c
69239 +++ b/kernel/trace/trace_events.c
69240 @@ -1300,10 +1300,6 @@ static LIST_HEAD(ftrace_module_file_list);
69241 struct ftrace_module_file_ops {
69242 struct list_head list;
69243 struct module *mod;
69244 - struct file_operations id;
69245 - struct file_operations enable;
69246 - struct file_operations format;
69247 - struct file_operations filter;
69248 };
69249
69250 static struct ftrace_module_file_ops *
69251 @@ -1324,17 +1320,12 @@ trace_create_file_ops(struct module *mod)
69252
69253 file_ops->mod = mod;
69254
69255 - file_ops->id = ftrace_event_id_fops;
69256 - file_ops->id.owner = mod;
69257 -
69258 - file_ops->enable = ftrace_enable_fops;
69259 - file_ops->enable.owner = mod;
69260 -
69261 - file_ops->filter = ftrace_event_filter_fops;
69262 - file_ops->filter.owner = mod;
69263 -
69264 - file_ops->format = ftrace_event_format_fops;
69265 - file_ops->format.owner = mod;
69266 + pax_open_kernel();
69267 + *(void **)&mod->trace_id.owner = mod;
69268 + *(void **)&mod->trace_enable.owner = mod;
69269 + *(void **)&mod->trace_filter.owner = mod;
69270 + *(void **)&mod->trace_format.owner = mod;
69271 + pax_close_kernel();
69272
69273 list_add(&file_ops->list, &ftrace_module_file_list);
69274
69275 @@ -1358,8 +1349,8 @@ static void trace_module_add_events(struct module *mod)
69276
69277 for_each_event(call, start, end) {
69278 __trace_add_event_call(*call, mod,
69279 - &file_ops->id, &file_ops->enable,
69280 - &file_ops->filter, &file_ops->format);
69281 + &mod->trace_id, &mod->trace_enable,
69282 + &mod->trace_filter, &mod->trace_format);
69283 }
69284 }
69285
69286 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69287 index 00d527c..7c5b1a3 100644
69288 --- a/kernel/trace/trace_kprobe.c
69289 +++ b/kernel/trace/trace_kprobe.c
69290 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69291 long ret;
69292 int maxlen = get_rloc_len(*(u32 *)dest);
69293 u8 *dst = get_rloc_data(dest);
69294 - u8 *src = addr;
69295 + const u8 __user *src = (const u8 __force_user *)addr;
69296 mm_segment_t old_fs = get_fs();
69297 if (!maxlen)
69298 return;
69299 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69300 pagefault_disable();
69301 do
69302 ret = __copy_from_user_inatomic(dst++, src++, 1);
69303 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69304 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69305 dst[-1] = '\0';
69306 pagefault_enable();
69307 set_fs(old_fs);
69308 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69309 ((u8 *)get_rloc_data(dest))[0] = '\0';
69310 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69311 } else
69312 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69313 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69314 get_rloc_offs(*(u32 *)dest));
69315 }
69316 /* Return the length of string -- including null terminal byte */
69317 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69318 set_fs(KERNEL_DS);
69319 pagefault_disable();
69320 do {
69321 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69322 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69323 len++;
69324 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69325 pagefault_enable();
69326 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69327 index fd3c8aa..5f324a6 100644
69328 --- a/kernel/trace/trace_mmiotrace.c
69329 +++ b/kernel/trace/trace_mmiotrace.c
69330 @@ -24,7 +24,7 @@ struct header_iter {
69331 static struct trace_array *mmio_trace_array;
69332 static bool overrun_detected;
69333 static unsigned long prev_overruns;
69334 -static atomic_t dropped_count;
69335 +static atomic_unchecked_t dropped_count;
69336
69337 static void mmio_reset_data(struct trace_array *tr)
69338 {
69339 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69340
69341 static unsigned long count_overruns(struct trace_iterator *iter)
69342 {
69343 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
69344 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69345 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69346
69347 if (over > prev_overruns)
69348 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69349 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69350 sizeof(*entry), 0, pc);
69351 if (!event) {
69352 - atomic_inc(&dropped_count);
69353 + atomic_inc_unchecked(&dropped_count);
69354 return;
69355 }
69356 entry = ring_buffer_event_data(event);
69357 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69358 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69359 sizeof(*entry), 0, pc);
69360 if (!event) {
69361 - atomic_inc(&dropped_count);
69362 + atomic_inc_unchecked(&dropped_count);
69363 return;
69364 }
69365 entry = ring_buffer_event_data(event);
69366 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69367 index 5199930..26c73a0 100644
69368 --- a/kernel/trace/trace_output.c
69369 +++ b/kernel/trace/trace_output.c
69370 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
69371
69372 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69373 if (!IS_ERR(p)) {
69374 - p = mangle_path(s->buffer + s->len, p, "\n");
69375 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69376 if (p) {
69377 s->len = p - s->buffer;
69378 return 1;
69379 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69380 index 77575b3..6e623d1 100644
69381 --- a/kernel/trace/trace_stack.c
69382 +++ b/kernel/trace/trace_stack.c
69383 @@ -50,7 +50,7 @@ static inline void check_stack(void)
69384 return;
69385
69386 /* we do not handle interrupt stacks yet */
69387 - if (!object_is_on_stack(&this_size))
69388 + if (!object_starts_on_stack(&this_size))
69389 return;
69390
69391 local_irq_save(flags);
69392 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69393 index 209b379..7f76423 100644
69394 --- a/kernel/trace/trace_workqueue.c
69395 +++ b/kernel/trace/trace_workqueue.c
69396 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69397 int cpu;
69398 pid_t pid;
69399 /* Can be inserted from interrupt or user context, need to be atomic */
69400 - atomic_t inserted;
69401 + atomic_unchecked_t inserted;
69402 /*
69403 * Don't need to be atomic, works are serialized in a single workqueue thread
69404 * on a single CPU.
69405 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69406 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69407 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69408 if (node->pid == wq_thread->pid) {
69409 - atomic_inc(&node->inserted);
69410 + atomic_inc_unchecked(&node->inserted);
69411 goto found;
69412 }
69413 }
69414 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69415 tsk = get_pid_task(pid, PIDTYPE_PID);
69416 if (tsk) {
69417 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69418 - atomic_read(&cws->inserted), cws->executed,
69419 + atomic_read_unchecked(&cws->inserted), cws->executed,
69420 tsk->comm);
69421 put_task_struct(tsk);
69422 }
69423 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69424 index c0cb9c4..f33aa89 100644
69425 --- a/lib/Kconfig.debug
69426 +++ b/lib/Kconfig.debug
69427 @@ -1091,6 +1091,7 @@ config LATENCYTOP
69428 depends on DEBUG_KERNEL
69429 depends on STACKTRACE_SUPPORT
69430 depends on PROC_FS
69431 + depends on !GRKERNSEC_HIDESYM
69432 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
69433 select KALLSYMS
69434 select KALLSYMS_ALL
69435 diff --git a/lib/bitmap.c b/lib/bitmap.c
69436 index 2f4412e..a557e27 100644
69437 --- a/lib/bitmap.c
69438 +++ b/lib/bitmap.c
69439 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69440 {
69441 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69442 u32 chunk;
69443 - const char __user *ubuf = buf;
69444 + const char __user *ubuf = (const char __force_user *)buf;
69445
69446 bitmap_zero(maskp, nmaskbits);
69447
69448 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
69449 {
69450 if (!access_ok(VERIFY_READ, ubuf, ulen))
69451 return -EFAULT;
69452 - return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
69453 + return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
69454 }
69455 EXPORT_SYMBOL(bitmap_parse_user);
69456
69457 @@ -594,7 +594,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69458 {
69459 unsigned a, b;
69460 int c, old_c, totaldigits;
69461 - const char __user *ubuf = buf;
69462 + const char __user *ubuf = (const char __force_user *)buf;
69463 int exp_digit, in_range;
69464
69465 totaldigits = c = 0;
69466 @@ -694,7 +694,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69467 {
69468 if (!access_ok(VERIFY_READ, ubuf, ulen))
69469 return -EFAULT;
69470 - return __bitmap_parselist((const char *)ubuf,
69471 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69472 ulen, 1, maskp, nmaskbits);
69473 }
69474 EXPORT_SYMBOL(bitmap_parselist_user);
69475 diff --git a/lib/bug.c b/lib/bug.c
69476 index 1955209..cbbb2ad 100644
69477 --- a/lib/bug.c
69478 +++ b/lib/bug.c
69479 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69480 return BUG_TRAP_TYPE_NONE;
69481
69482 bug = find_bug(bugaddr);
69483 + if (!bug)
69484 + return BUG_TRAP_TYPE_NONE;
69485
69486 file = NULL;
69487 line = 0;
69488 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69489 index a78b7c6..2c73084 100644
69490 --- a/lib/debugobjects.c
69491 +++ b/lib/debugobjects.c
69492 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69493 if (limit > 4)
69494 return;
69495
69496 - is_on_stack = object_is_on_stack(addr);
69497 + is_on_stack = object_starts_on_stack(addr);
69498 if (is_on_stack == onstack)
69499 return;
69500
69501 diff --git a/lib/devres.c b/lib/devres.c
69502 index 7c0e953..f642b5c 100644
69503 --- a/lib/devres.c
69504 +++ b/lib/devres.c
69505 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69506 void devm_iounmap(struct device *dev, void __iomem *addr)
69507 {
69508 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69509 - (void *)addr));
69510 + (void __force *)addr));
69511 iounmap(addr);
69512 }
69513 EXPORT_SYMBOL(devm_iounmap);
69514 @@ -141,7 +141,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69515 {
69516 ioport_unmap(addr);
69517 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69518 - devm_ioport_map_match, (void *)addr));
69519 + devm_ioport_map_match, (void __force *)addr));
69520 }
69521 EXPORT_SYMBOL(devm_ioport_unmap);
69522
69523 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69524 index db07bfd..719b5ab 100644
69525 --- a/lib/dma-debug.c
69526 +++ b/lib/dma-debug.c
69527 @@ -870,7 +870,7 @@ out:
69528
69529 static void check_for_stack(struct device *dev, void *addr)
69530 {
69531 - if (object_is_on_stack(addr))
69532 + if (object_starts_on_stack(addr))
69533 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69534 "stack [addr=%p]\n", addr);
69535 }
69536 diff --git a/lib/extable.c b/lib/extable.c
69537 index 4cac81e..63e9b8f 100644
69538 --- a/lib/extable.c
69539 +++ b/lib/extable.c
69540 @@ -13,6 +13,7 @@
69541 #include <linux/init.h>
69542 #include <linux/sort.h>
69543 #include <asm/uaccess.h>
69544 +#include <asm/pgtable.h>
69545
69546 #ifndef ARCH_HAS_SORT_EXTABLE
69547 /*
69548 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69549 void sort_extable(struct exception_table_entry *start,
69550 struct exception_table_entry *finish)
69551 {
69552 + pax_open_kernel();
69553 sort(start, finish - start, sizeof(struct exception_table_entry),
69554 cmp_ex, NULL);
69555 + pax_close_kernel();
69556 }
69557
69558 #ifdef CONFIG_MODULES
69559 diff --git a/lib/inflate.c b/lib/inflate.c
69560 index 013a761..c28f3fc 100644
69561 --- a/lib/inflate.c
69562 +++ b/lib/inflate.c
69563 @@ -269,7 +269,7 @@ static void free(void *where)
69564 malloc_ptr = free_mem_ptr;
69565 }
69566 #else
69567 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69568 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69569 #define free(a) kfree(a)
69570 #endif
69571
69572 diff --git a/lib/kref.c b/lib/kref.c
69573 index 3efb882..8492f4c 100644
69574 --- a/lib/kref.c
69575 +++ b/lib/kref.c
69576 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
69577 */
69578 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
69579 {
69580 - WARN_ON(release == NULL);
69581 + BUG_ON(release == NULL);
69582 WARN_ON(release == (void (*)(struct kref *))kfree);
69583
69584 if (atomic_dec_and_test(&kref->refcount)) {
69585 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69586 index a2f9da5..3bcadb6 100644
69587 --- a/lib/radix-tree.c
69588 +++ b/lib/radix-tree.c
69589 @@ -80,7 +80,7 @@ struct radix_tree_preload {
69590 int nr;
69591 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69592 };
69593 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69594 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69595
69596 static inline void *ptr_to_indirect(void *ptr)
69597 {
69598 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69599 index d7222a9..2172edc 100644
69600 --- a/lib/vsprintf.c
69601 +++ b/lib/vsprintf.c
69602 @@ -16,6 +16,9 @@
69603 * - scnprintf and vscnprintf
69604 */
69605
69606 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69607 +#define __INCLUDED_BY_HIDESYM 1
69608 +#endif
69609 #include <stdarg.h>
69610 #include <linux/module.h>
69611 #include <linux/types.h>
69612 @@ -432,7 +435,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69613 char sym[KSYM_SYMBOL_LEN];
69614 if (ext == 'B')
69615 sprint_backtrace(sym, value);
69616 - else if (ext != 'f' && ext != 's')
69617 + else if (ext != 'f' && ext != 's' && ext != 'a')
69618 sprint_symbol(sym, value);
69619 else
69620 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69621 @@ -796,7 +799,11 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
69622 return string(buf, end, uuid, spec);
69623 }
69624
69625 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69626 +int kptr_restrict __read_mostly = 2;
69627 +#else
69628 int kptr_restrict __read_mostly;
69629 +#endif
69630
69631 /*
69632 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69633 @@ -810,6 +817,8 @@ int kptr_restrict __read_mostly;
69634 * - 'S' For symbolic direct pointers with offset
69635 * - 's' For symbolic direct pointers without offset
69636 * - 'B' For backtraced symbolic direct pointers with offset
69637 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69638 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69639 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69640 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69641 * - 'M' For a 6-byte MAC address, it prints the address in the
69642 @@ -854,12 +863,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69643 {
69644 if (!ptr && *fmt != 'K') {
69645 /*
69646 - * Print (null) with the same width as a pointer so it makes
69647 + * Print (nil) with the same width as a pointer so it makes
69648 * tabular output look nice.
69649 */
69650 if (spec.field_width == -1)
69651 spec.field_width = 2 * sizeof(void *);
69652 - return string(buf, end, "(null)", spec);
69653 + return string(buf, end, "(nil)", spec);
69654 }
69655
69656 switch (*fmt) {
69657 @@ -869,6 +878,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69658 /* Fallthrough */
69659 case 'S':
69660 case 's':
69661 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69662 + break;
69663 +#else
69664 + return symbol_string(buf, end, ptr, spec, *fmt);
69665 +#endif
69666 + case 'A':
69667 + case 'a':
69668 case 'B':
69669 return symbol_string(buf, end, ptr, spec, *fmt);
69670 case 'R':
69671 @@ -1627,11 +1643,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69672 typeof(type) value; \
69673 if (sizeof(type) == 8) { \
69674 args = PTR_ALIGN(args, sizeof(u32)); \
69675 - *(u32 *)&value = *(u32 *)args; \
69676 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69677 + *(u32 *)&value = *(const u32 *)args; \
69678 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69679 } else { \
69680 args = PTR_ALIGN(args, sizeof(type)); \
69681 - value = *(typeof(type) *)args; \
69682 + value = *(const typeof(type) *)args; \
69683 } \
69684 args += sizeof(type); \
69685 value; \
69686 @@ -1694,7 +1710,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69687 case FORMAT_TYPE_STR: {
69688 const char *str_arg = args;
69689 args += strlen(str_arg) + 1;
69690 - str = string(str, end, (char *)str_arg, spec);
69691 + str = string(str, end, str_arg, spec);
69692 break;
69693 }
69694
69695 diff --git a/localversion-grsec b/localversion-grsec
69696 new file mode 100644
69697 index 0000000..7cd6065
69698 --- /dev/null
69699 +++ b/localversion-grsec
69700 @@ -0,0 +1 @@
69701 +-grsec
69702 diff --git a/mm/Kconfig b/mm/Kconfig
69703 index f2f1ca1..0645f06 100644
69704 --- a/mm/Kconfig
69705 +++ b/mm/Kconfig
69706 @@ -238,10 +238,10 @@ config KSM
69707 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69708
69709 config DEFAULT_MMAP_MIN_ADDR
69710 - int "Low address space to protect from user allocation"
69711 + int "Low address space to protect from user allocation"
69712 depends on MMU
69713 - default 4096
69714 - help
69715 + default 65536
69716 + help
69717 This is the portion of low virtual memory which should be protected
69718 from userspace allocation. Keeping a user from writing to low pages
69719 can help reduce the impact of kernel NULL pointer bugs.
69720 diff --git a/mm/filemap.c b/mm/filemap.c
69721 index 7771871..91bcdb4 100644
69722 --- a/mm/filemap.c
69723 +++ b/mm/filemap.c
69724 @@ -1784,7 +1784,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69725 struct address_space *mapping = file->f_mapping;
69726
69727 if (!mapping->a_ops->readpage)
69728 - return -ENOEXEC;
69729 + return -ENODEV;
69730 file_accessed(file);
69731 vma->vm_ops = &generic_file_vm_ops;
69732 vma->vm_flags |= VM_CAN_NONLINEAR;
69733 @@ -2190,6 +2190,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69734 *pos = i_size_read(inode);
69735
69736 if (limit != RLIM_INFINITY) {
69737 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69738 if (*pos >= limit) {
69739 send_sig(SIGXFSZ, current, 0);
69740 return -EFBIG;
69741 diff --git a/mm/fremap.c b/mm/fremap.c
69742 index b8e0e2d..076e171 100644
69743 --- a/mm/fremap.c
69744 +++ b/mm/fremap.c
69745 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69746 retry:
69747 vma = find_vma(mm, start);
69748
69749 +#ifdef CONFIG_PAX_SEGMEXEC
69750 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69751 + goto out;
69752 +#endif
69753 +
69754 /*
69755 * Make sure the vma is shared, that it supports prefaulting,
69756 * and that the remapped range is valid and fully within
69757 diff --git a/mm/highmem.c b/mm/highmem.c
69758 index 5ef672c..d7660f4 100644
69759 --- a/mm/highmem.c
69760 +++ b/mm/highmem.c
69761 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69762 * So no dangers, even with speculative execution.
69763 */
69764 page = pte_page(pkmap_page_table[i]);
69765 + pax_open_kernel();
69766 pte_clear(&init_mm, (unsigned long)page_address(page),
69767 &pkmap_page_table[i]);
69768 -
69769 + pax_close_kernel();
69770 set_page_address(page, NULL);
69771 need_flush = 1;
69772 }
69773 @@ -186,9 +187,11 @@ start:
69774 }
69775 }
69776 vaddr = PKMAP_ADDR(last_pkmap_nr);
69777 +
69778 + pax_open_kernel();
69779 set_pte_at(&init_mm, vaddr,
69780 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
69781 -
69782 + pax_close_kernel();
69783 pkmap_count[last_pkmap_nr] = 1;
69784 set_page_address(page, (void *)vaddr);
69785
69786 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
69787 index d819d93..468e18f 100644
69788 --- a/mm/huge_memory.c
69789 +++ b/mm/huge_memory.c
69790 @@ -702,7 +702,7 @@ out:
69791 * run pte_offset_map on the pmd, if an huge pmd could
69792 * materialize from under us from a different thread.
69793 */
69794 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
69795 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
69796 return VM_FAULT_OOM;
69797 /* if an huge pmd materialized from under us just retry later */
69798 if (unlikely(pmd_trans_huge(*pmd)))
69799 @@ -829,7 +829,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
69800
69801 for (i = 0; i < HPAGE_PMD_NR; i++) {
69802 copy_user_highpage(pages[i], page + i,
69803 - haddr + PAGE_SHIFT*i, vma);
69804 + haddr + PAGE_SIZE*i, vma);
69805 __SetPageUptodate(pages[i]);
69806 cond_resched();
69807 }
69808 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
69809 index dae27ba..e8d42be 100644
69810 --- a/mm/hugetlb.c
69811 +++ b/mm/hugetlb.c
69812 @@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
69813 return 1;
69814 }
69815
69816 +#ifdef CONFIG_PAX_SEGMEXEC
69817 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
69818 +{
69819 + struct mm_struct *mm = vma->vm_mm;
69820 + struct vm_area_struct *vma_m;
69821 + unsigned long address_m;
69822 + pte_t *ptep_m;
69823 +
69824 + vma_m = pax_find_mirror_vma(vma);
69825 + if (!vma_m)
69826 + return;
69827 +
69828 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
69829 + address_m = address + SEGMEXEC_TASK_SIZE;
69830 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
69831 + get_page(page_m);
69832 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
69833 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
69834 +}
69835 +#endif
69836 +
69837 /*
69838 * Hugetlb_cow() should be called with page lock of the original hugepage held.
69839 */
69840 @@ -2447,6 +2468,11 @@ retry_avoidcopy:
69841 make_huge_pte(vma, new_page, 1));
69842 page_remove_rmap(old_page);
69843 hugepage_add_new_anon_rmap(new_page, vma, address);
69844 +
69845 +#ifdef CONFIG_PAX_SEGMEXEC
69846 + pax_mirror_huge_pte(vma, address, new_page);
69847 +#endif
69848 +
69849 /* Make the old page be freed below */
69850 new_page = old_page;
69851 mmu_notifier_invalidate_range_end(mm,
69852 @@ -2598,6 +2624,10 @@ retry:
69853 && (vma->vm_flags & VM_SHARED)));
69854 set_huge_pte_at(mm, address, ptep, new_pte);
69855
69856 +#ifdef CONFIG_PAX_SEGMEXEC
69857 + pax_mirror_huge_pte(vma, address, page);
69858 +#endif
69859 +
69860 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
69861 /* Optimization, do the COW without a second fault */
69862 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
69863 @@ -2627,6 +2657,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69864 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
69865 struct hstate *h = hstate_vma(vma);
69866
69867 +#ifdef CONFIG_PAX_SEGMEXEC
69868 + struct vm_area_struct *vma_m;
69869 +#endif
69870 +
69871 ptep = huge_pte_offset(mm, address);
69872 if (ptep) {
69873 entry = huge_ptep_get(ptep);
69874 @@ -2638,6 +2672,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
69875 VM_FAULT_SET_HINDEX(h - hstates);
69876 }
69877
69878 +#ifdef CONFIG_PAX_SEGMEXEC
69879 + vma_m = pax_find_mirror_vma(vma);
69880 + if (vma_m) {
69881 + unsigned long address_m;
69882 +
69883 + if (vma->vm_start > vma_m->vm_start) {
69884 + address_m = address;
69885 + address -= SEGMEXEC_TASK_SIZE;
69886 + vma = vma_m;
69887 + h = hstate_vma(vma);
69888 + } else
69889 + address_m = address + SEGMEXEC_TASK_SIZE;
69890 +
69891 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
69892 + return VM_FAULT_OOM;
69893 + address_m &= HPAGE_MASK;
69894 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
69895 + }
69896 +#endif
69897 +
69898 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
69899 if (!ptep)
69900 return VM_FAULT_OOM;
69901 diff --git a/mm/internal.h b/mm/internal.h
69902 index 2189af4..f2ca332 100644
69903 --- a/mm/internal.h
69904 +++ b/mm/internal.h
69905 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
69906 * in mm/page_alloc.c
69907 */
69908 extern void __free_pages_bootmem(struct page *page, unsigned int order);
69909 +extern void free_compound_page(struct page *page);
69910 extern void prep_compound_page(struct page *page, unsigned long order);
69911 #ifdef CONFIG_MEMORY_FAILURE
69912 extern bool is_free_buddy_page(struct page *page);
69913 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
69914 index d6880f5..ed77913 100644
69915 --- a/mm/kmemleak.c
69916 +++ b/mm/kmemleak.c
69917 @@ -357,7 +357,7 @@ static void print_unreferenced(struct seq_file *seq,
69918
69919 for (i = 0; i < object->trace_len; i++) {
69920 void *ptr = (void *)object->trace[i];
69921 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
69922 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
69923 }
69924 }
69925
69926 diff --git a/mm/maccess.c b/mm/maccess.c
69927 index 4cee182..e00511d 100644
69928 --- a/mm/maccess.c
69929 +++ b/mm/maccess.c
69930 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
69931 set_fs(KERNEL_DS);
69932 pagefault_disable();
69933 ret = __copy_from_user_inatomic(dst,
69934 - (__force const void __user *)src, size);
69935 + (const void __force_user *)src, size);
69936 pagefault_enable();
69937 set_fs(old_fs);
69938
69939 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
69940
69941 set_fs(KERNEL_DS);
69942 pagefault_disable();
69943 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
69944 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
69945 pagefault_enable();
69946 set_fs(old_fs);
69947
69948 diff --git a/mm/madvise.c b/mm/madvise.c
69949 index 74bf193..feb6fd3 100644
69950 --- a/mm/madvise.c
69951 +++ b/mm/madvise.c
69952 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
69953 pgoff_t pgoff;
69954 unsigned long new_flags = vma->vm_flags;
69955
69956 +#ifdef CONFIG_PAX_SEGMEXEC
69957 + struct vm_area_struct *vma_m;
69958 +#endif
69959 +
69960 switch (behavior) {
69961 case MADV_NORMAL:
69962 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
69963 @@ -110,6 +114,13 @@ success:
69964 /*
69965 * vm_flags is protected by the mmap_sem held in write mode.
69966 */
69967 +
69968 +#ifdef CONFIG_PAX_SEGMEXEC
69969 + vma_m = pax_find_mirror_vma(vma);
69970 + if (vma_m)
69971 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
69972 +#endif
69973 +
69974 vma->vm_flags = new_flags;
69975
69976 out:
69977 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69978 struct vm_area_struct ** prev,
69979 unsigned long start, unsigned long end)
69980 {
69981 +
69982 +#ifdef CONFIG_PAX_SEGMEXEC
69983 + struct vm_area_struct *vma_m;
69984 +#endif
69985 +
69986 *prev = vma;
69987 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
69988 return -EINVAL;
69989 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
69990 zap_page_range(vma, start, end - start, &details);
69991 } else
69992 zap_page_range(vma, start, end - start, NULL);
69993 +
69994 +#ifdef CONFIG_PAX_SEGMEXEC
69995 + vma_m = pax_find_mirror_vma(vma);
69996 + if (vma_m) {
69997 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
69998 + struct zap_details details = {
69999 + .nonlinear_vma = vma_m,
70000 + .last_index = ULONG_MAX,
70001 + };
70002 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70003 + } else
70004 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70005 + }
70006 +#endif
70007 +
70008 return 0;
70009 }
70010
70011 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70012 if (end < start)
70013 goto out;
70014
70015 +#ifdef CONFIG_PAX_SEGMEXEC
70016 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70017 + if (end > SEGMEXEC_TASK_SIZE)
70018 + goto out;
70019 + } else
70020 +#endif
70021 +
70022 + if (end > TASK_SIZE)
70023 + goto out;
70024 +
70025 error = 0;
70026 if (end == start)
70027 goto out;
70028 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70029 index 2b43ba0..fc09657 100644
70030 --- a/mm/memory-failure.c
70031 +++ b/mm/memory-failure.c
70032 @@ -60,7 +60,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70033
70034 int sysctl_memory_failure_recovery __read_mostly = 1;
70035
70036 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70037 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70038
70039 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70040
70041 @@ -201,7 +201,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70042 si.si_signo = SIGBUS;
70043 si.si_errno = 0;
70044 si.si_code = BUS_MCEERR_AO;
70045 - si.si_addr = (void *)addr;
70046 + si.si_addr = (void __user *)addr;
70047 #ifdef __ARCH_SI_TRAPNO
70048 si.si_trapno = trapno;
70049 #endif
70050 @@ -1009,7 +1009,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70051 }
70052
70053 nr_pages = 1 << compound_trans_order(hpage);
70054 - atomic_long_add(nr_pages, &mce_bad_pages);
70055 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70056
70057 /*
70058 * We need/can do nothing about count=0 pages.
70059 @@ -1039,7 +1039,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70060 if (!PageHWPoison(hpage)
70061 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70062 || (p != hpage && TestSetPageHWPoison(hpage))) {
70063 - atomic_long_sub(nr_pages, &mce_bad_pages);
70064 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70065 return 0;
70066 }
70067 set_page_hwpoison_huge_page(hpage);
70068 @@ -1097,7 +1097,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70069 }
70070 if (hwpoison_filter(p)) {
70071 if (TestClearPageHWPoison(p))
70072 - atomic_long_sub(nr_pages, &mce_bad_pages);
70073 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70074 unlock_page(hpage);
70075 put_page(hpage);
70076 return 0;
70077 @@ -1314,7 +1314,7 @@ int unpoison_memory(unsigned long pfn)
70078 return 0;
70079 }
70080 if (TestClearPageHWPoison(p))
70081 - atomic_long_sub(nr_pages, &mce_bad_pages);
70082 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70083 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70084 return 0;
70085 }
70086 @@ -1328,7 +1328,7 @@ int unpoison_memory(unsigned long pfn)
70087 */
70088 if (TestClearPageHWPoison(page)) {
70089 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70090 - atomic_long_sub(nr_pages, &mce_bad_pages);
70091 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70092 freeit = 1;
70093 if (PageHuge(page))
70094 clear_page_hwpoison_huge_page(page);
70095 @@ -1441,7 +1441,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70096 }
70097 done:
70098 if (!PageHWPoison(hpage))
70099 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70100 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70101 set_page_hwpoison_huge_page(hpage);
70102 dequeue_hwpoisoned_huge_page(hpage);
70103 /* keep elevated page count for bad page */
70104 @@ -1572,7 +1572,7 @@ int soft_offline_page(struct page *page, int flags)
70105 return ret;
70106
70107 done:
70108 - atomic_long_add(1, &mce_bad_pages);
70109 + atomic_long_add_unchecked(1, &mce_bad_pages);
70110 SetPageHWPoison(page);
70111 /* keep elevated page count for bad page */
70112 return ret;
70113 diff --git a/mm/memory.c b/mm/memory.c
70114 index b2b8731..6080174 100644
70115 --- a/mm/memory.c
70116 +++ b/mm/memory.c
70117 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70118 return;
70119
70120 pmd = pmd_offset(pud, start);
70121 +
70122 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70123 pud_clear(pud);
70124 pmd_free_tlb(tlb, pmd, start);
70125 +#endif
70126 +
70127 }
70128
70129 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70130 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70131 if (end - 1 > ceiling - 1)
70132 return;
70133
70134 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70135 pud = pud_offset(pgd, start);
70136 pgd_clear(pgd);
70137 pud_free_tlb(tlb, pud, start);
70138 +#endif
70139 +
70140 }
70141
70142 /*
70143 @@ -1566,12 +1573,6 @@ no_page_table:
70144 return page;
70145 }
70146
70147 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70148 -{
70149 - return stack_guard_page_start(vma, addr) ||
70150 - stack_guard_page_end(vma, addr+PAGE_SIZE);
70151 -}
70152 -
70153 /**
70154 * __get_user_pages() - pin user pages in memory
70155 * @tsk: task_struct of target task
70156 @@ -1644,10 +1645,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70157 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70158 i = 0;
70159
70160 - do {
70161 + while (nr_pages) {
70162 struct vm_area_struct *vma;
70163
70164 - vma = find_extend_vma(mm, start);
70165 + vma = find_vma(mm, start);
70166 if (!vma && in_gate_area(mm, start)) {
70167 unsigned long pg = start & PAGE_MASK;
70168 pgd_t *pgd;
70169 @@ -1695,7 +1696,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70170 goto next_page;
70171 }
70172
70173 - if (!vma ||
70174 + if (!vma || start < vma->vm_start ||
70175 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70176 !(vm_flags & vma->vm_flags))
70177 return i ? : -EFAULT;
70178 @@ -1722,11 +1723,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70179 int ret;
70180 unsigned int fault_flags = 0;
70181
70182 - /* For mlock, just skip the stack guard page. */
70183 - if (foll_flags & FOLL_MLOCK) {
70184 - if (stack_guard_page(vma, start))
70185 - goto next_page;
70186 - }
70187 if (foll_flags & FOLL_WRITE)
70188 fault_flags |= FAULT_FLAG_WRITE;
70189 if (nonblocking)
70190 @@ -1800,7 +1796,7 @@ next_page:
70191 start += PAGE_SIZE;
70192 nr_pages--;
70193 } while (nr_pages && start < vma->vm_end);
70194 - } while (nr_pages);
70195 + }
70196 return i;
70197 }
70198 EXPORT_SYMBOL(__get_user_pages);
70199 @@ -2007,6 +2003,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70200 page_add_file_rmap(page);
70201 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70202
70203 +#ifdef CONFIG_PAX_SEGMEXEC
70204 + pax_mirror_file_pte(vma, addr, page, ptl);
70205 +#endif
70206 +
70207 retval = 0;
70208 pte_unmap_unlock(pte, ptl);
70209 return retval;
70210 @@ -2041,10 +2041,22 @@ out:
70211 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70212 struct page *page)
70213 {
70214 +
70215 +#ifdef CONFIG_PAX_SEGMEXEC
70216 + struct vm_area_struct *vma_m;
70217 +#endif
70218 +
70219 if (addr < vma->vm_start || addr >= vma->vm_end)
70220 return -EFAULT;
70221 if (!page_count(page))
70222 return -EINVAL;
70223 +
70224 +#ifdef CONFIG_PAX_SEGMEXEC
70225 + vma_m = pax_find_mirror_vma(vma);
70226 + if (vma_m)
70227 + vma_m->vm_flags |= VM_INSERTPAGE;
70228 +#endif
70229 +
70230 vma->vm_flags |= VM_INSERTPAGE;
70231 return insert_page(vma, addr, page, vma->vm_page_prot);
70232 }
70233 @@ -2130,6 +2142,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70234 unsigned long pfn)
70235 {
70236 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70237 + BUG_ON(vma->vm_mirror);
70238
70239 if (addr < vma->vm_start || addr >= vma->vm_end)
70240 return -EFAULT;
70241 @@ -2445,6 +2458,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70242 copy_user_highpage(dst, src, va, vma);
70243 }
70244
70245 +#ifdef CONFIG_PAX_SEGMEXEC
70246 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70247 +{
70248 + struct mm_struct *mm = vma->vm_mm;
70249 + spinlock_t *ptl;
70250 + pte_t *pte, entry;
70251 +
70252 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70253 + entry = *pte;
70254 + if (!pte_present(entry)) {
70255 + if (!pte_none(entry)) {
70256 + BUG_ON(pte_file(entry));
70257 + free_swap_and_cache(pte_to_swp_entry(entry));
70258 + pte_clear_not_present_full(mm, address, pte, 0);
70259 + }
70260 + } else {
70261 + struct page *page;
70262 +
70263 + flush_cache_page(vma, address, pte_pfn(entry));
70264 + entry = ptep_clear_flush(vma, address, pte);
70265 + BUG_ON(pte_dirty(entry));
70266 + page = vm_normal_page(vma, address, entry);
70267 + if (page) {
70268 + update_hiwater_rss(mm);
70269 + if (PageAnon(page))
70270 + dec_mm_counter_fast(mm, MM_ANONPAGES);
70271 + else
70272 + dec_mm_counter_fast(mm, MM_FILEPAGES);
70273 + page_remove_rmap(page);
70274 + page_cache_release(page);
70275 + }
70276 + }
70277 + pte_unmap_unlock(pte, ptl);
70278 +}
70279 +
70280 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
70281 + *
70282 + * the ptl of the lower mapped page is held on entry and is not released on exit
70283 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70284 + */
70285 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70286 +{
70287 + struct mm_struct *mm = vma->vm_mm;
70288 + unsigned long address_m;
70289 + spinlock_t *ptl_m;
70290 + struct vm_area_struct *vma_m;
70291 + pmd_t *pmd_m;
70292 + pte_t *pte_m, entry_m;
70293 +
70294 + BUG_ON(!page_m || !PageAnon(page_m));
70295 +
70296 + vma_m = pax_find_mirror_vma(vma);
70297 + if (!vma_m)
70298 + return;
70299 +
70300 + BUG_ON(!PageLocked(page_m));
70301 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70302 + address_m = address + SEGMEXEC_TASK_SIZE;
70303 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70304 + pte_m = pte_offset_map(pmd_m, address_m);
70305 + ptl_m = pte_lockptr(mm, pmd_m);
70306 + if (ptl != ptl_m) {
70307 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70308 + if (!pte_none(*pte_m))
70309 + goto out;
70310 + }
70311 +
70312 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70313 + page_cache_get(page_m);
70314 + page_add_anon_rmap(page_m, vma_m, address_m);
70315 + inc_mm_counter_fast(mm, MM_ANONPAGES);
70316 + set_pte_at(mm, address_m, pte_m, entry_m);
70317 + update_mmu_cache(vma_m, address_m, entry_m);
70318 +out:
70319 + if (ptl != ptl_m)
70320 + spin_unlock(ptl_m);
70321 + pte_unmap(pte_m);
70322 + unlock_page(page_m);
70323 +}
70324 +
70325 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70326 +{
70327 + struct mm_struct *mm = vma->vm_mm;
70328 + unsigned long address_m;
70329 + spinlock_t *ptl_m;
70330 + struct vm_area_struct *vma_m;
70331 + pmd_t *pmd_m;
70332 + pte_t *pte_m, entry_m;
70333 +
70334 + BUG_ON(!page_m || PageAnon(page_m));
70335 +
70336 + vma_m = pax_find_mirror_vma(vma);
70337 + if (!vma_m)
70338 + return;
70339 +
70340 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70341 + address_m = address + SEGMEXEC_TASK_SIZE;
70342 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70343 + pte_m = pte_offset_map(pmd_m, address_m);
70344 + ptl_m = pte_lockptr(mm, pmd_m);
70345 + if (ptl != ptl_m) {
70346 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70347 + if (!pte_none(*pte_m))
70348 + goto out;
70349 + }
70350 +
70351 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70352 + page_cache_get(page_m);
70353 + page_add_file_rmap(page_m);
70354 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70355 + set_pte_at(mm, address_m, pte_m, entry_m);
70356 + update_mmu_cache(vma_m, address_m, entry_m);
70357 +out:
70358 + if (ptl != ptl_m)
70359 + spin_unlock(ptl_m);
70360 + pte_unmap(pte_m);
70361 +}
70362 +
70363 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70364 +{
70365 + struct mm_struct *mm = vma->vm_mm;
70366 + unsigned long address_m;
70367 + spinlock_t *ptl_m;
70368 + struct vm_area_struct *vma_m;
70369 + pmd_t *pmd_m;
70370 + pte_t *pte_m, entry_m;
70371 +
70372 + vma_m = pax_find_mirror_vma(vma);
70373 + if (!vma_m)
70374 + return;
70375 +
70376 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70377 + address_m = address + SEGMEXEC_TASK_SIZE;
70378 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70379 + pte_m = pte_offset_map(pmd_m, address_m);
70380 + ptl_m = pte_lockptr(mm, pmd_m);
70381 + if (ptl != ptl_m) {
70382 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70383 + if (!pte_none(*pte_m))
70384 + goto out;
70385 + }
70386 +
70387 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70388 + set_pte_at(mm, address_m, pte_m, entry_m);
70389 +out:
70390 + if (ptl != ptl_m)
70391 + spin_unlock(ptl_m);
70392 + pte_unmap(pte_m);
70393 +}
70394 +
70395 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70396 +{
70397 + struct page *page_m;
70398 + pte_t entry;
70399 +
70400 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70401 + goto out;
70402 +
70403 + entry = *pte;
70404 + page_m = vm_normal_page(vma, address, entry);
70405 + if (!page_m)
70406 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70407 + else if (PageAnon(page_m)) {
70408 + if (pax_find_mirror_vma(vma)) {
70409 + pte_unmap_unlock(pte, ptl);
70410 + lock_page(page_m);
70411 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70412 + if (pte_same(entry, *pte))
70413 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70414 + else
70415 + unlock_page(page_m);
70416 + }
70417 + } else
70418 + pax_mirror_file_pte(vma, address, page_m, ptl);
70419 +
70420 +out:
70421 + pte_unmap_unlock(pte, ptl);
70422 +}
70423 +#endif
70424 +
70425 /*
70426 * This routine handles present pages, when users try to write
70427 * to a shared page. It is done by copying the page to a new address
70428 @@ -2656,6 +2849,12 @@ gotten:
70429 */
70430 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70431 if (likely(pte_same(*page_table, orig_pte))) {
70432 +
70433 +#ifdef CONFIG_PAX_SEGMEXEC
70434 + if (pax_find_mirror_vma(vma))
70435 + BUG_ON(!trylock_page(new_page));
70436 +#endif
70437 +
70438 if (old_page) {
70439 if (!PageAnon(old_page)) {
70440 dec_mm_counter_fast(mm, MM_FILEPAGES);
70441 @@ -2707,6 +2906,10 @@ gotten:
70442 page_remove_rmap(old_page);
70443 }
70444
70445 +#ifdef CONFIG_PAX_SEGMEXEC
70446 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70447 +#endif
70448 +
70449 /* Free the old page.. */
70450 new_page = old_page;
70451 ret |= VM_FAULT_WRITE;
70452 @@ -2986,6 +3189,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70453 swap_free(entry);
70454 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70455 try_to_free_swap(page);
70456 +
70457 +#ifdef CONFIG_PAX_SEGMEXEC
70458 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70459 +#endif
70460 +
70461 unlock_page(page);
70462 if (swapcache) {
70463 /*
70464 @@ -3009,6 +3217,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70465
70466 /* No need to invalidate - it was non-present before */
70467 update_mmu_cache(vma, address, page_table);
70468 +
70469 +#ifdef CONFIG_PAX_SEGMEXEC
70470 + pax_mirror_anon_pte(vma, address, page, ptl);
70471 +#endif
70472 +
70473 unlock:
70474 pte_unmap_unlock(page_table, ptl);
70475 out:
70476 @@ -3028,40 +3241,6 @@ out_release:
70477 }
70478
70479 /*
70480 - * This is like a special single-page "expand_{down|up}wards()",
70481 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70482 - * doesn't hit another vma.
70483 - */
70484 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70485 -{
70486 - address &= PAGE_MASK;
70487 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70488 - struct vm_area_struct *prev = vma->vm_prev;
70489 -
70490 - /*
70491 - * Is there a mapping abutting this one below?
70492 - *
70493 - * That's only ok if it's the same stack mapping
70494 - * that has gotten split..
70495 - */
70496 - if (prev && prev->vm_end == address)
70497 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70498 -
70499 - expand_downwards(vma, address - PAGE_SIZE);
70500 - }
70501 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70502 - struct vm_area_struct *next = vma->vm_next;
70503 -
70504 - /* As VM_GROWSDOWN but s/below/above/ */
70505 - if (next && next->vm_start == address + PAGE_SIZE)
70506 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70507 -
70508 - expand_upwards(vma, address + PAGE_SIZE);
70509 - }
70510 - return 0;
70511 -}
70512 -
70513 -/*
70514 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70515 * but allow concurrent faults), and pte mapped but not yet locked.
70516 * We return with mmap_sem still held, but pte unmapped and unlocked.
70517 @@ -3070,27 +3249,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70518 unsigned long address, pte_t *page_table, pmd_t *pmd,
70519 unsigned int flags)
70520 {
70521 - struct page *page;
70522 + struct page *page = NULL;
70523 spinlock_t *ptl;
70524 pte_t entry;
70525
70526 - pte_unmap(page_table);
70527 -
70528 - /* Check if we need to add a guard page to the stack */
70529 - if (check_stack_guard_page(vma, address) < 0)
70530 - return VM_FAULT_SIGBUS;
70531 -
70532 - /* Use the zero-page for reads */
70533 if (!(flags & FAULT_FLAG_WRITE)) {
70534 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70535 vma->vm_page_prot));
70536 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70537 + ptl = pte_lockptr(mm, pmd);
70538 + spin_lock(ptl);
70539 if (!pte_none(*page_table))
70540 goto unlock;
70541 goto setpte;
70542 }
70543
70544 /* Allocate our own private page. */
70545 + pte_unmap(page_table);
70546 +
70547 if (unlikely(anon_vma_prepare(vma)))
70548 goto oom;
70549 page = alloc_zeroed_user_highpage_movable(vma, address);
70550 @@ -3109,6 +3284,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70551 if (!pte_none(*page_table))
70552 goto release;
70553
70554 +#ifdef CONFIG_PAX_SEGMEXEC
70555 + if (pax_find_mirror_vma(vma))
70556 + BUG_ON(!trylock_page(page));
70557 +#endif
70558 +
70559 inc_mm_counter_fast(mm, MM_ANONPAGES);
70560 page_add_new_anon_rmap(page, vma, address);
70561 setpte:
70562 @@ -3116,6 +3296,12 @@ setpte:
70563
70564 /* No need to invalidate - it was non-present before */
70565 update_mmu_cache(vma, address, page_table);
70566 +
70567 +#ifdef CONFIG_PAX_SEGMEXEC
70568 + if (page)
70569 + pax_mirror_anon_pte(vma, address, page, ptl);
70570 +#endif
70571 +
70572 unlock:
70573 pte_unmap_unlock(page_table, ptl);
70574 return 0;
70575 @@ -3259,6 +3445,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70576 */
70577 /* Only go through if we didn't race with anybody else... */
70578 if (likely(pte_same(*page_table, orig_pte))) {
70579 +
70580 +#ifdef CONFIG_PAX_SEGMEXEC
70581 + if (anon && pax_find_mirror_vma(vma))
70582 + BUG_ON(!trylock_page(page));
70583 +#endif
70584 +
70585 flush_icache_page(vma, page);
70586 entry = mk_pte(page, vma->vm_page_prot);
70587 if (flags & FAULT_FLAG_WRITE)
70588 @@ -3278,6 +3470,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70589
70590 /* no need to invalidate: a not-present page won't be cached */
70591 update_mmu_cache(vma, address, page_table);
70592 +
70593 +#ifdef CONFIG_PAX_SEGMEXEC
70594 + if (anon)
70595 + pax_mirror_anon_pte(vma, address, page, ptl);
70596 + else
70597 + pax_mirror_file_pte(vma, address, page, ptl);
70598 +#endif
70599 +
70600 } else {
70601 if (cow_page)
70602 mem_cgroup_uncharge_page(cow_page);
70603 @@ -3431,6 +3631,12 @@ int handle_pte_fault(struct mm_struct *mm,
70604 if (flags & FAULT_FLAG_WRITE)
70605 flush_tlb_fix_spurious_fault(vma, address);
70606 }
70607 +
70608 +#ifdef CONFIG_PAX_SEGMEXEC
70609 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70610 + return 0;
70611 +#endif
70612 +
70613 unlock:
70614 pte_unmap_unlock(pte, ptl);
70615 return 0;
70616 @@ -3447,6 +3653,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70617 pmd_t *pmd;
70618 pte_t *pte;
70619
70620 +#ifdef CONFIG_PAX_SEGMEXEC
70621 + struct vm_area_struct *vma_m;
70622 +#endif
70623 +
70624 __set_current_state(TASK_RUNNING);
70625
70626 count_vm_event(PGFAULT);
70627 @@ -3458,6 +3668,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70628 if (unlikely(is_vm_hugetlb_page(vma)))
70629 return hugetlb_fault(mm, vma, address, flags);
70630
70631 +#ifdef CONFIG_PAX_SEGMEXEC
70632 + vma_m = pax_find_mirror_vma(vma);
70633 + if (vma_m) {
70634 + unsigned long address_m;
70635 + pgd_t *pgd_m;
70636 + pud_t *pud_m;
70637 + pmd_t *pmd_m;
70638 +
70639 + if (vma->vm_start > vma_m->vm_start) {
70640 + address_m = address;
70641 + address -= SEGMEXEC_TASK_SIZE;
70642 + vma = vma_m;
70643 + } else
70644 + address_m = address + SEGMEXEC_TASK_SIZE;
70645 +
70646 + pgd_m = pgd_offset(mm, address_m);
70647 + pud_m = pud_alloc(mm, pgd_m, address_m);
70648 + if (!pud_m)
70649 + return VM_FAULT_OOM;
70650 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70651 + if (!pmd_m)
70652 + return VM_FAULT_OOM;
70653 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70654 + return VM_FAULT_OOM;
70655 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70656 + }
70657 +#endif
70658 +
70659 pgd = pgd_offset(mm, address);
70660 pud = pud_alloc(mm, pgd, address);
70661 if (!pud)
70662 @@ -3487,7 +3725,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70663 * run pte_offset_map on the pmd, if an huge pmd could
70664 * materialize from under us from a different thread.
70665 */
70666 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70667 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70668 return VM_FAULT_OOM;
70669 /* if an huge pmd materialized from under us just retry later */
70670 if (unlikely(pmd_trans_huge(*pmd)))
70671 @@ -3591,7 +3829,7 @@ static int __init gate_vma_init(void)
70672 gate_vma.vm_start = FIXADDR_USER_START;
70673 gate_vma.vm_end = FIXADDR_USER_END;
70674 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70675 - gate_vma.vm_page_prot = __P101;
70676 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70677 /*
70678 * Make sure the vDSO gets into every core dump.
70679 * Dumping its contents makes post-mortem fully interpretable later
70680 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70681 index 9c51f9f..a9416cf 100644
70682 --- a/mm/mempolicy.c
70683 +++ b/mm/mempolicy.c
70684 @@ -639,6 +639,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70685 unsigned long vmstart;
70686 unsigned long vmend;
70687
70688 +#ifdef CONFIG_PAX_SEGMEXEC
70689 + struct vm_area_struct *vma_m;
70690 +#endif
70691 +
70692 vma = find_vma_prev(mm, start, &prev);
70693 if (!vma || vma->vm_start > start)
70694 return -EFAULT;
70695 @@ -669,6 +673,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70696 err = policy_vma(vma, new_pol);
70697 if (err)
70698 goto out;
70699 +
70700 +#ifdef CONFIG_PAX_SEGMEXEC
70701 + vma_m = pax_find_mirror_vma(vma);
70702 + if (vma_m) {
70703 + err = policy_vma(vma_m, new_pol);
70704 + if (err)
70705 + goto out;
70706 + }
70707 +#endif
70708 +
70709 }
70710
70711 out:
70712 @@ -1102,6 +1116,17 @@ static long do_mbind(unsigned long start, unsigned long len,
70713
70714 if (end < start)
70715 return -EINVAL;
70716 +
70717 +#ifdef CONFIG_PAX_SEGMEXEC
70718 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
70719 + if (end > SEGMEXEC_TASK_SIZE)
70720 + return -EINVAL;
70721 + } else
70722 +#endif
70723 +
70724 + if (end > TASK_SIZE)
70725 + return -EINVAL;
70726 +
70727 if (end == start)
70728 return 0;
70729
70730 @@ -1320,6 +1345,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70731 if (!mm)
70732 goto out;
70733
70734 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70735 + if (mm != current->mm &&
70736 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70737 + err = -EPERM;
70738 + goto out;
70739 + }
70740 +#endif
70741 +
70742 /*
70743 * Check if this process has the right to modify the specified
70744 * process. The right exists if the process has administrative
70745 @@ -1329,8 +1362,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
70746 rcu_read_lock();
70747 tcred = __task_cred(task);
70748 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70749 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70750 - !capable(CAP_SYS_NICE)) {
70751 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70752 rcu_read_unlock();
70753 err = -EPERM;
70754 goto out;
70755 diff --git a/mm/migrate.c b/mm/migrate.c
70756 index 14d0a6a..81ffe69 100644
70757 --- a/mm/migrate.c
70758 +++ b/mm/migrate.c
70759 @@ -1124,6 +1124,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
70760 unsigned long chunk_start;
70761 int err;
70762
70763 + pax_track_stack();
70764 +
70765 task_nodes = cpuset_mems_allowed(task);
70766
70767 err = -ENOMEM;
70768 @@ -1308,6 +1310,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70769 if (!mm)
70770 return -EINVAL;
70771
70772 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
70773 + if (mm != current->mm &&
70774 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
70775 + err = -EPERM;
70776 + goto out;
70777 + }
70778 +#endif
70779 +
70780 /*
70781 * Check if this process has the right to modify the specified
70782 * process. The right exists if the process has administrative
70783 @@ -1317,8 +1327,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
70784 rcu_read_lock();
70785 tcred = __task_cred(task);
70786 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
70787 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
70788 - !capable(CAP_SYS_NICE)) {
70789 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
70790 rcu_read_unlock();
70791 err = -EPERM;
70792 goto out;
70793 diff --git a/mm/mlock.c b/mm/mlock.c
70794 index 048260c..57f4a4e 100644
70795 --- a/mm/mlock.c
70796 +++ b/mm/mlock.c
70797 @@ -13,6 +13,7 @@
70798 #include <linux/pagemap.h>
70799 #include <linux/mempolicy.h>
70800 #include <linux/syscalls.h>
70801 +#include <linux/security.h>
70802 #include <linux/sched.h>
70803 #include <linux/module.h>
70804 #include <linux/rmap.h>
70805 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
70806 return -EINVAL;
70807 if (end == start)
70808 return 0;
70809 + if (end > TASK_SIZE)
70810 + return -EINVAL;
70811 +
70812 vma = find_vma_prev(current->mm, start, &prev);
70813 if (!vma || vma->vm_start > start)
70814 return -ENOMEM;
70815 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
70816 for (nstart = start ; ; ) {
70817 vm_flags_t newflags;
70818
70819 +#ifdef CONFIG_PAX_SEGMEXEC
70820 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70821 + break;
70822 +#endif
70823 +
70824 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
70825
70826 newflags = vma->vm_flags | VM_LOCKED;
70827 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
70828 lock_limit >>= PAGE_SHIFT;
70829
70830 /* check against resource limits */
70831 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
70832 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
70833 error = do_mlock(start, len, 1);
70834 up_write(&current->mm->mmap_sem);
70835 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
70836 static int do_mlockall(int flags)
70837 {
70838 struct vm_area_struct * vma, * prev = NULL;
70839 - unsigned int def_flags = 0;
70840
70841 if (flags & MCL_FUTURE)
70842 - def_flags = VM_LOCKED;
70843 - current->mm->def_flags = def_flags;
70844 + current->mm->def_flags |= VM_LOCKED;
70845 + else
70846 + current->mm->def_flags &= ~VM_LOCKED;
70847 if (flags == MCL_FUTURE)
70848 goto out;
70849
70850 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
70851 vm_flags_t newflags;
70852
70853 +#ifdef CONFIG_PAX_SEGMEXEC
70854 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
70855 + break;
70856 +#endif
70857 +
70858 + BUG_ON(vma->vm_end > TASK_SIZE);
70859 newflags = vma->vm_flags | VM_LOCKED;
70860 if (!(flags & MCL_CURRENT))
70861 newflags &= ~VM_LOCKED;
70862 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
70863 lock_limit >>= PAGE_SHIFT;
70864
70865 ret = -ENOMEM;
70866 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
70867 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
70868 capable(CAP_IPC_LOCK))
70869 ret = do_mlockall(flags);
70870 diff --git a/mm/mmap.c b/mm/mmap.c
70871 index a65efd4..17d61ff 100644
70872 --- a/mm/mmap.c
70873 +++ b/mm/mmap.c
70874 @@ -46,6 +46,16 @@
70875 #define arch_rebalance_pgtables(addr, len) (addr)
70876 #endif
70877
70878 +static inline void verify_mm_writelocked(struct mm_struct *mm)
70879 +{
70880 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
70881 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
70882 + up_read(&mm->mmap_sem);
70883 + BUG();
70884 + }
70885 +#endif
70886 +}
70887 +
70888 static void unmap_region(struct mm_struct *mm,
70889 struct vm_area_struct *vma, struct vm_area_struct *prev,
70890 unsigned long start, unsigned long end);
70891 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
70892 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
70893 *
70894 */
70895 -pgprot_t protection_map[16] = {
70896 +pgprot_t protection_map[16] __read_only = {
70897 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
70898 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
70899 };
70900
70901 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
70902 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70903 {
70904 - return __pgprot(pgprot_val(protection_map[vm_flags &
70905 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
70906 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
70907 pgprot_val(arch_vm_get_page_prot(vm_flags)));
70908 +
70909 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70910 + if (!(__supported_pte_mask & _PAGE_NX) &&
70911 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
70912 + (vm_flags & (VM_READ | VM_WRITE)))
70913 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
70914 +#endif
70915 +
70916 + return prot;
70917 }
70918 EXPORT_SYMBOL(vm_get_page_prot);
70919
70920 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
70921 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
70922 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
70923 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
70924 /*
70925 * Make sure vm_committed_as in one cacheline and not cacheline shared with
70926 * other variables. It can be updated by several CPUs frequently.
70927 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
70928 struct vm_area_struct *next = vma->vm_next;
70929
70930 might_sleep();
70931 + BUG_ON(vma->vm_mirror);
70932 if (vma->vm_ops && vma->vm_ops->close)
70933 vma->vm_ops->close(vma);
70934 if (vma->vm_file) {
70935 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
70936 * not page aligned -Ram Gupta
70937 */
70938 rlim = rlimit(RLIMIT_DATA);
70939 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
70940 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
70941 (mm->end_data - mm->start_data) > rlim)
70942 goto out;
70943 @@ -689,6 +711,12 @@ static int
70944 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
70945 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70946 {
70947 +
70948 +#ifdef CONFIG_PAX_SEGMEXEC
70949 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
70950 + return 0;
70951 +#endif
70952 +
70953 if (is_mergeable_vma(vma, file, vm_flags) &&
70954 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70955 if (vma->vm_pgoff == vm_pgoff)
70956 @@ -708,6 +736,12 @@ static int
70957 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70958 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
70959 {
70960 +
70961 +#ifdef CONFIG_PAX_SEGMEXEC
70962 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
70963 + return 0;
70964 +#endif
70965 +
70966 if (is_mergeable_vma(vma, file, vm_flags) &&
70967 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
70968 pgoff_t vm_pglen;
70969 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
70970 struct vm_area_struct *vma_merge(struct mm_struct *mm,
70971 struct vm_area_struct *prev, unsigned long addr,
70972 unsigned long end, unsigned long vm_flags,
70973 - struct anon_vma *anon_vma, struct file *file,
70974 + struct anon_vma *anon_vma, struct file *file,
70975 pgoff_t pgoff, struct mempolicy *policy)
70976 {
70977 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
70978 struct vm_area_struct *area, *next;
70979 int err;
70980
70981 +#ifdef CONFIG_PAX_SEGMEXEC
70982 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
70983 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
70984 +
70985 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
70986 +#endif
70987 +
70988 /*
70989 * We later require that vma->vm_flags == vm_flags,
70990 * so this tests vma->vm_flags & VM_SPECIAL, too.
70991 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
70992 if (next && next->vm_end == end) /* cases 6, 7, 8 */
70993 next = next->vm_next;
70994
70995 +#ifdef CONFIG_PAX_SEGMEXEC
70996 + if (prev)
70997 + prev_m = pax_find_mirror_vma(prev);
70998 + if (area)
70999 + area_m = pax_find_mirror_vma(area);
71000 + if (next)
71001 + next_m = pax_find_mirror_vma(next);
71002 +#endif
71003 +
71004 /*
71005 * Can it merge with the predecessor?
71006 */
71007 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71008 /* cases 1, 6 */
71009 err = vma_adjust(prev, prev->vm_start,
71010 next->vm_end, prev->vm_pgoff, NULL);
71011 - } else /* cases 2, 5, 7 */
71012 +
71013 +#ifdef CONFIG_PAX_SEGMEXEC
71014 + if (!err && prev_m)
71015 + err = vma_adjust(prev_m, prev_m->vm_start,
71016 + next_m->vm_end, prev_m->vm_pgoff, NULL);
71017 +#endif
71018 +
71019 + } else { /* cases 2, 5, 7 */
71020 err = vma_adjust(prev, prev->vm_start,
71021 end, prev->vm_pgoff, NULL);
71022 +
71023 +#ifdef CONFIG_PAX_SEGMEXEC
71024 + if (!err && prev_m)
71025 + err = vma_adjust(prev_m, prev_m->vm_start,
71026 + end_m, prev_m->vm_pgoff, NULL);
71027 +#endif
71028 +
71029 + }
71030 if (err)
71031 return NULL;
71032 khugepaged_enter_vma_merge(prev);
71033 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71034 mpol_equal(policy, vma_policy(next)) &&
71035 can_vma_merge_before(next, vm_flags,
71036 anon_vma, file, pgoff+pglen)) {
71037 - if (prev && addr < prev->vm_end) /* case 4 */
71038 + if (prev && addr < prev->vm_end) { /* case 4 */
71039 err = vma_adjust(prev, prev->vm_start,
71040 addr, prev->vm_pgoff, NULL);
71041 - else /* cases 3, 8 */
71042 +
71043 +#ifdef CONFIG_PAX_SEGMEXEC
71044 + if (!err && prev_m)
71045 + err = vma_adjust(prev_m, prev_m->vm_start,
71046 + addr_m, prev_m->vm_pgoff, NULL);
71047 +#endif
71048 +
71049 + } else { /* cases 3, 8 */
71050 err = vma_adjust(area, addr, next->vm_end,
71051 next->vm_pgoff - pglen, NULL);
71052 +
71053 +#ifdef CONFIG_PAX_SEGMEXEC
71054 + if (!err && area_m)
71055 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
71056 + next_m->vm_pgoff - pglen, NULL);
71057 +#endif
71058 +
71059 + }
71060 if (err)
71061 return NULL;
71062 khugepaged_enter_vma_merge(area);
71063 @@ -921,14 +1001,11 @@ none:
71064 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71065 struct file *file, long pages)
71066 {
71067 - const unsigned long stack_flags
71068 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71069 -
71070 if (file) {
71071 mm->shared_vm += pages;
71072 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71073 mm->exec_vm += pages;
71074 - } else if (flags & stack_flags)
71075 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71076 mm->stack_vm += pages;
71077 if (flags & (VM_RESERVED|VM_IO))
71078 mm->reserved_vm += pages;
71079 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71080 * (the exception is when the underlying filesystem is noexec
71081 * mounted, in which case we dont add PROT_EXEC.)
71082 */
71083 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71084 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71085 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71086 prot |= PROT_EXEC;
71087
71088 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71089 /* Obtain the address to map to. we verify (or select) it and ensure
71090 * that it represents a valid section of the address space.
71091 */
71092 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
71093 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71094 if (addr & ~PAGE_MASK)
71095 return addr;
71096
71097 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71098 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71099 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71100
71101 +#ifdef CONFIG_PAX_MPROTECT
71102 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71103 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71104 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71105 + gr_log_rwxmmap(file);
71106 +
71107 +#ifdef CONFIG_PAX_EMUPLT
71108 + vm_flags &= ~VM_EXEC;
71109 +#else
71110 + return -EPERM;
71111 +#endif
71112 +
71113 + }
71114 +
71115 + if (!(vm_flags & VM_EXEC))
71116 + vm_flags &= ~VM_MAYEXEC;
71117 +#else
71118 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71119 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71120 +#endif
71121 + else
71122 + vm_flags &= ~VM_MAYWRITE;
71123 + }
71124 +#endif
71125 +
71126 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71127 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71128 + vm_flags &= ~VM_PAGEEXEC;
71129 +#endif
71130 +
71131 if (flags & MAP_LOCKED)
71132 if (!can_do_mlock())
71133 return -EPERM;
71134 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71135 locked += mm->locked_vm;
71136 lock_limit = rlimit(RLIMIT_MEMLOCK);
71137 lock_limit >>= PAGE_SHIFT;
71138 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71139 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71140 return -EAGAIN;
71141 }
71142 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71143 if (error)
71144 return error;
71145
71146 + if (!gr_acl_handle_mmap(file, prot))
71147 + return -EACCES;
71148 +
71149 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71150 }
71151 EXPORT_SYMBOL(do_mmap_pgoff);
71152 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71153 vm_flags_t vm_flags = vma->vm_flags;
71154
71155 /* If it was private or non-writable, the write bit is already clear */
71156 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71157 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71158 return 0;
71159
71160 /* The backer wishes to know when pages are first written to? */
71161 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71162 unsigned long charged = 0;
71163 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71164
71165 +#ifdef CONFIG_PAX_SEGMEXEC
71166 + struct vm_area_struct *vma_m = NULL;
71167 +#endif
71168 +
71169 + /*
71170 + * mm->mmap_sem is required to protect against another thread
71171 + * changing the mappings in case we sleep.
71172 + */
71173 + verify_mm_writelocked(mm);
71174 +
71175 /* Clear old maps */
71176 error = -ENOMEM;
71177 -munmap_back:
71178 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71179 if (vma && vma->vm_start < addr + len) {
71180 if (do_munmap(mm, addr, len))
71181 return -ENOMEM;
71182 - goto munmap_back;
71183 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71184 + BUG_ON(vma && vma->vm_start < addr + len);
71185 }
71186
71187 /* Check against address space limit. */
71188 @@ -1258,6 +1379,16 @@ munmap_back:
71189 goto unacct_error;
71190 }
71191
71192 +#ifdef CONFIG_PAX_SEGMEXEC
71193 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71194 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71195 + if (!vma_m) {
71196 + error = -ENOMEM;
71197 + goto free_vma;
71198 + }
71199 + }
71200 +#endif
71201 +
71202 vma->vm_mm = mm;
71203 vma->vm_start = addr;
71204 vma->vm_end = addr + len;
71205 @@ -1281,6 +1412,19 @@ munmap_back:
71206 error = file->f_op->mmap(file, vma);
71207 if (error)
71208 goto unmap_and_free_vma;
71209 +
71210 +#ifdef CONFIG_PAX_SEGMEXEC
71211 + if (vma_m && (vm_flags & VM_EXECUTABLE))
71212 + added_exe_file_vma(mm);
71213 +#endif
71214 +
71215 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71216 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71217 + vma->vm_flags |= VM_PAGEEXEC;
71218 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71219 + }
71220 +#endif
71221 +
71222 if (vm_flags & VM_EXECUTABLE)
71223 added_exe_file_vma(mm);
71224
71225 @@ -1316,6 +1460,11 @@ munmap_back:
71226 vma_link(mm, vma, prev, rb_link, rb_parent);
71227 file = vma->vm_file;
71228
71229 +#ifdef CONFIG_PAX_SEGMEXEC
71230 + if (vma_m)
71231 + BUG_ON(pax_mirror_vma(vma_m, vma));
71232 +#endif
71233 +
71234 /* Once vma denies write, undo our temporary denial count */
71235 if (correct_wcount)
71236 atomic_inc(&inode->i_writecount);
71237 @@ -1324,6 +1473,7 @@ out:
71238
71239 mm->total_vm += len >> PAGE_SHIFT;
71240 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71241 + track_exec_limit(mm, addr, addr + len, vm_flags);
71242 if (vm_flags & VM_LOCKED) {
71243 if (!mlock_vma_pages_range(vma, addr, addr + len))
71244 mm->locked_vm += (len >> PAGE_SHIFT);
71245 @@ -1341,6 +1491,12 @@ unmap_and_free_vma:
71246 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71247 charged = 0;
71248 free_vma:
71249 +
71250 +#ifdef CONFIG_PAX_SEGMEXEC
71251 + if (vma_m)
71252 + kmem_cache_free(vm_area_cachep, vma_m);
71253 +#endif
71254 +
71255 kmem_cache_free(vm_area_cachep, vma);
71256 unacct_error:
71257 if (charged)
71258 @@ -1348,6 +1504,44 @@ unacct_error:
71259 return error;
71260 }
71261
71262 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71263 +{
71264 + if (!vma) {
71265 +#ifdef CONFIG_STACK_GROWSUP
71266 + if (addr > sysctl_heap_stack_gap)
71267 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71268 + else
71269 + vma = find_vma(current->mm, 0);
71270 + if (vma && (vma->vm_flags & VM_GROWSUP))
71271 + return false;
71272 +#endif
71273 + return true;
71274 + }
71275 +
71276 + if (addr + len > vma->vm_start)
71277 + return false;
71278 +
71279 + if (vma->vm_flags & VM_GROWSDOWN)
71280 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71281 +#ifdef CONFIG_STACK_GROWSUP
71282 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71283 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71284 +#endif
71285 +
71286 + return true;
71287 +}
71288 +
71289 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71290 +{
71291 + if (vma->vm_start < len)
71292 + return -ENOMEM;
71293 + if (!(vma->vm_flags & VM_GROWSDOWN))
71294 + return vma->vm_start - len;
71295 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
71296 + return vma->vm_start - len - sysctl_heap_stack_gap;
71297 + return -ENOMEM;
71298 +}
71299 +
71300 /* Get an address range which is currently unmapped.
71301 * For shmat() with addr=0.
71302 *
71303 @@ -1374,18 +1568,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71304 if (flags & MAP_FIXED)
71305 return addr;
71306
71307 +#ifdef CONFIG_PAX_RANDMMAP
71308 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71309 +#endif
71310 +
71311 if (addr) {
71312 addr = PAGE_ALIGN(addr);
71313 - vma = find_vma(mm, addr);
71314 - if (TASK_SIZE - len >= addr &&
71315 - (!vma || addr + len <= vma->vm_start))
71316 - return addr;
71317 + if (TASK_SIZE - len >= addr) {
71318 + vma = find_vma(mm, addr);
71319 + if (check_heap_stack_gap(vma, addr, len))
71320 + return addr;
71321 + }
71322 }
71323 if (len > mm->cached_hole_size) {
71324 - start_addr = addr = mm->free_area_cache;
71325 + start_addr = addr = mm->free_area_cache;
71326 } else {
71327 - start_addr = addr = TASK_UNMAPPED_BASE;
71328 - mm->cached_hole_size = 0;
71329 + start_addr = addr = mm->mmap_base;
71330 + mm->cached_hole_size = 0;
71331 }
71332
71333 full_search:
71334 @@ -1396,34 +1595,40 @@ full_search:
71335 * Start a new search - just in case we missed
71336 * some holes.
71337 */
71338 - if (start_addr != TASK_UNMAPPED_BASE) {
71339 - addr = TASK_UNMAPPED_BASE;
71340 - start_addr = addr;
71341 + if (start_addr != mm->mmap_base) {
71342 + start_addr = addr = mm->mmap_base;
71343 mm->cached_hole_size = 0;
71344 goto full_search;
71345 }
71346 return -ENOMEM;
71347 }
71348 - if (!vma || addr + len <= vma->vm_start) {
71349 - /*
71350 - * Remember the place where we stopped the search:
71351 - */
71352 - mm->free_area_cache = addr + len;
71353 - return addr;
71354 - }
71355 + if (check_heap_stack_gap(vma, addr, len))
71356 + break;
71357 if (addr + mm->cached_hole_size < vma->vm_start)
71358 mm->cached_hole_size = vma->vm_start - addr;
71359 addr = vma->vm_end;
71360 }
71361 +
71362 + /*
71363 + * Remember the place where we stopped the search:
71364 + */
71365 + mm->free_area_cache = addr + len;
71366 + return addr;
71367 }
71368 #endif
71369
71370 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71371 {
71372 +
71373 +#ifdef CONFIG_PAX_SEGMEXEC
71374 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71375 + return;
71376 +#endif
71377 +
71378 /*
71379 * Is this a new hole at the lowest possible address?
71380 */
71381 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71382 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71383 mm->free_area_cache = addr;
71384 mm->cached_hole_size = ~0UL;
71385 }
71386 @@ -1441,7 +1646,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71387 {
71388 struct vm_area_struct *vma;
71389 struct mm_struct *mm = current->mm;
71390 - unsigned long addr = addr0;
71391 + unsigned long base = mm->mmap_base, addr = addr0;
71392
71393 /* requested length too big for entire address space */
71394 if (len > TASK_SIZE)
71395 @@ -1450,13 +1655,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71396 if (flags & MAP_FIXED)
71397 return addr;
71398
71399 +#ifdef CONFIG_PAX_RANDMMAP
71400 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71401 +#endif
71402 +
71403 /* requesting a specific address */
71404 if (addr) {
71405 addr = PAGE_ALIGN(addr);
71406 - vma = find_vma(mm, addr);
71407 - if (TASK_SIZE - len >= addr &&
71408 - (!vma || addr + len <= vma->vm_start))
71409 - return addr;
71410 + if (TASK_SIZE - len >= addr) {
71411 + vma = find_vma(mm, addr);
71412 + if (check_heap_stack_gap(vma, addr, len))
71413 + return addr;
71414 + }
71415 }
71416
71417 /* check if free_area_cache is useful for us */
71418 @@ -1471,7 +1681,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71419 /* make sure it can fit in the remaining address space */
71420 if (addr > len) {
71421 vma = find_vma(mm, addr-len);
71422 - if (!vma || addr <= vma->vm_start)
71423 + if (check_heap_stack_gap(vma, addr - len, len))
71424 /* remember the address as a hint for next time */
71425 return (mm->free_area_cache = addr-len);
71426 }
71427 @@ -1488,7 +1698,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71428 * return with success:
71429 */
71430 vma = find_vma(mm, addr);
71431 - if (!vma || addr+len <= vma->vm_start)
71432 + if (check_heap_stack_gap(vma, addr, len))
71433 /* remember the address as a hint for next time */
71434 return (mm->free_area_cache = addr);
71435
71436 @@ -1497,8 +1707,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71437 mm->cached_hole_size = vma->vm_start - addr;
71438
71439 /* try just below the current vma->vm_start */
71440 - addr = vma->vm_start-len;
71441 - } while (len < vma->vm_start);
71442 + addr = skip_heap_stack_gap(vma, len);
71443 + } while (!IS_ERR_VALUE(addr));
71444
71445 bottomup:
71446 /*
71447 @@ -1507,13 +1717,21 @@ bottomup:
71448 * can happen with large stack limits and large mmap()
71449 * allocations.
71450 */
71451 + mm->mmap_base = TASK_UNMAPPED_BASE;
71452 +
71453 +#ifdef CONFIG_PAX_RANDMMAP
71454 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71455 + mm->mmap_base += mm->delta_mmap;
71456 +#endif
71457 +
71458 + mm->free_area_cache = mm->mmap_base;
71459 mm->cached_hole_size = ~0UL;
71460 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71461 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71462 /*
71463 * Restore the topdown base:
71464 */
71465 - mm->free_area_cache = mm->mmap_base;
71466 + mm->mmap_base = base;
71467 + mm->free_area_cache = base;
71468 mm->cached_hole_size = ~0UL;
71469
71470 return addr;
71471 @@ -1522,6 +1740,12 @@ bottomup:
71472
71473 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71474 {
71475 +
71476 +#ifdef CONFIG_PAX_SEGMEXEC
71477 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71478 + return;
71479 +#endif
71480 +
71481 /*
71482 * Is this a new hole at the highest possible address?
71483 */
71484 @@ -1529,8 +1753,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71485 mm->free_area_cache = addr;
71486
71487 /* dont allow allocations above current base */
71488 - if (mm->free_area_cache > mm->mmap_base)
71489 + if (mm->free_area_cache > mm->mmap_base) {
71490 mm->free_area_cache = mm->mmap_base;
71491 + mm->cached_hole_size = ~0UL;
71492 + }
71493 }
71494
71495 unsigned long
71496 @@ -1638,6 +1864,28 @@ out:
71497 return prev ? prev->vm_next : vma;
71498 }
71499
71500 +#ifdef CONFIG_PAX_SEGMEXEC
71501 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71502 +{
71503 + struct vm_area_struct *vma_m;
71504 +
71505 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71506 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71507 + BUG_ON(vma->vm_mirror);
71508 + return NULL;
71509 + }
71510 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71511 + vma_m = vma->vm_mirror;
71512 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71513 + BUG_ON(vma->vm_file != vma_m->vm_file);
71514 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71515 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71516 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71517 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71518 + return vma_m;
71519 +}
71520 +#endif
71521 +
71522 /*
71523 * Verify that the stack growth is acceptable and
71524 * update accounting. This is shared with both the
71525 @@ -1654,6 +1902,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71526 return -ENOMEM;
71527
71528 /* Stack limit test */
71529 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71530 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71531 return -ENOMEM;
71532
71533 @@ -1664,6 +1913,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71534 locked = mm->locked_vm + grow;
71535 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71536 limit >>= PAGE_SHIFT;
71537 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71538 if (locked > limit && !capable(CAP_IPC_LOCK))
71539 return -ENOMEM;
71540 }
71541 @@ -1694,37 +1944,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71542 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71543 * vma is the last one with address > vma->vm_end. Have to extend vma.
71544 */
71545 +#ifndef CONFIG_IA64
71546 +static
71547 +#endif
71548 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71549 {
71550 int error;
71551 + bool locknext;
71552
71553 if (!(vma->vm_flags & VM_GROWSUP))
71554 return -EFAULT;
71555
71556 + /* Also guard against wrapping around to address 0. */
71557 + if (address < PAGE_ALIGN(address+1))
71558 + address = PAGE_ALIGN(address+1);
71559 + else
71560 + return -ENOMEM;
71561 +
71562 /*
71563 * We must make sure the anon_vma is allocated
71564 * so that the anon_vma locking is not a noop.
71565 */
71566 if (unlikely(anon_vma_prepare(vma)))
71567 return -ENOMEM;
71568 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71569 + if (locknext && anon_vma_prepare(vma->vm_next))
71570 + return -ENOMEM;
71571 vma_lock_anon_vma(vma);
71572 + if (locknext)
71573 + vma_lock_anon_vma(vma->vm_next);
71574
71575 /*
71576 * vma->vm_start/vm_end cannot change under us because the caller
71577 * is required to hold the mmap_sem in read mode. We need the
71578 - * anon_vma lock to serialize against concurrent expand_stacks.
71579 - * Also guard against wrapping around to address 0.
71580 + * anon_vma locks to serialize against concurrent expand_stacks
71581 + * and expand_upwards.
71582 */
71583 - if (address < PAGE_ALIGN(address+4))
71584 - address = PAGE_ALIGN(address+4);
71585 - else {
71586 - vma_unlock_anon_vma(vma);
71587 - return -ENOMEM;
71588 - }
71589 error = 0;
71590
71591 /* Somebody else might have raced and expanded it already */
71592 - if (address > vma->vm_end) {
71593 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71594 + error = -ENOMEM;
71595 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71596 unsigned long size, grow;
71597
71598 size = address - vma->vm_start;
71599 @@ -1739,6 +2000,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71600 }
71601 }
71602 }
71603 + if (locknext)
71604 + vma_unlock_anon_vma(vma->vm_next);
71605 vma_unlock_anon_vma(vma);
71606 khugepaged_enter_vma_merge(vma);
71607 return error;
71608 @@ -1752,6 +2015,8 @@ int expand_downwards(struct vm_area_struct *vma,
71609 unsigned long address)
71610 {
71611 int error;
71612 + bool lockprev = false;
71613 + struct vm_area_struct *prev;
71614
71615 /*
71616 * We must make sure the anon_vma is allocated
71617 @@ -1765,6 +2030,15 @@ int expand_downwards(struct vm_area_struct *vma,
71618 if (error)
71619 return error;
71620
71621 + prev = vma->vm_prev;
71622 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71623 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71624 +#endif
71625 + if (lockprev && anon_vma_prepare(prev))
71626 + return -ENOMEM;
71627 + if (lockprev)
71628 + vma_lock_anon_vma(prev);
71629 +
71630 vma_lock_anon_vma(vma);
71631
71632 /*
71633 @@ -1774,9 +2048,17 @@ int expand_downwards(struct vm_area_struct *vma,
71634 */
71635
71636 /* Somebody else might have raced and expanded it already */
71637 - if (address < vma->vm_start) {
71638 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71639 + error = -ENOMEM;
71640 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71641 unsigned long size, grow;
71642
71643 +#ifdef CONFIG_PAX_SEGMEXEC
71644 + struct vm_area_struct *vma_m;
71645 +
71646 + vma_m = pax_find_mirror_vma(vma);
71647 +#endif
71648 +
71649 size = vma->vm_end - address;
71650 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71651
71652 @@ -1786,11 +2068,22 @@ int expand_downwards(struct vm_area_struct *vma,
71653 if (!error) {
71654 vma->vm_start = address;
71655 vma->vm_pgoff -= grow;
71656 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71657 +
71658 +#ifdef CONFIG_PAX_SEGMEXEC
71659 + if (vma_m) {
71660 + vma_m->vm_start -= grow << PAGE_SHIFT;
71661 + vma_m->vm_pgoff -= grow;
71662 + }
71663 +#endif
71664 +
71665 perf_event_mmap(vma);
71666 }
71667 }
71668 }
71669 vma_unlock_anon_vma(vma);
71670 + if (lockprev)
71671 + vma_unlock_anon_vma(prev);
71672 khugepaged_enter_vma_merge(vma);
71673 return error;
71674 }
71675 @@ -1860,6 +2153,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71676 do {
71677 long nrpages = vma_pages(vma);
71678
71679 +#ifdef CONFIG_PAX_SEGMEXEC
71680 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71681 + vma = remove_vma(vma);
71682 + continue;
71683 + }
71684 +#endif
71685 +
71686 mm->total_vm -= nrpages;
71687 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71688 vma = remove_vma(vma);
71689 @@ -1905,6 +2205,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71690 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71691 vma->vm_prev = NULL;
71692 do {
71693 +
71694 +#ifdef CONFIG_PAX_SEGMEXEC
71695 + if (vma->vm_mirror) {
71696 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71697 + vma->vm_mirror->vm_mirror = NULL;
71698 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71699 + vma->vm_mirror = NULL;
71700 + }
71701 +#endif
71702 +
71703 rb_erase(&vma->vm_rb, &mm->mm_rb);
71704 mm->map_count--;
71705 tail_vma = vma;
71706 @@ -1933,14 +2243,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71707 struct vm_area_struct *new;
71708 int err = -ENOMEM;
71709
71710 +#ifdef CONFIG_PAX_SEGMEXEC
71711 + struct vm_area_struct *vma_m, *new_m = NULL;
71712 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71713 +#endif
71714 +
71715 if (is_vm_hugetlb_page(vma) && (addr &
71716 ~(huge_page_mask(hstate_vma(vma)))))
71717 return -EINVAL;
71718
71719 +#ifdef CONFIG_PAX_SEGMEXEC
71720 + vma_m = pax_find_mirror_vma(vma);
71721 +#endif
71722 +
71723 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71724 if (!new)
71725 goto out_err;
71726
71727 +#ifdef CONFIG_PAX_SEGMEXEC
71728 + if (vma_m) {
71729 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71730 + if (!new_m) {
71731 + kmem_cache_free(vm_area_cachep, new);
71732 + goto out_err;
71733 + }
71734 + }
71735 +#endif
71736 +
71737 /* most fields are the same, copy all, and then fixup */
71738 *new = *vma;
71739
71740 @@ -1953,6 +2282,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71741 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71742 }
71743
71744 +#ifdef CONFIG_PAX_SEGMEXEC
71745 + if (vma_m) {
71746 + *new_m = *vma_m;
71747 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71748 + new_m->vm_mirror = new;
71749 + new->vm_mirror = new_m;
71750 +
71751 + if (new_below)
71752 + new_m->vm_end = addr_m;
71753 + else {
71754 + new_m->vm_start = addr_m;
71755 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
71756 + }
71757 + }
71758 +#endif
71759 +
71760 pol = mpol_dup(vma_policy(vma));
71761 if (IS_ERR(pol)) {
71762 err = PTR_ERR(pol);
71763 @@ -1978,6 +2323,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71764 else
71765 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
71766
71767 +#ifdef CONFIG_PAX_SEGMEXEC
71768 + if (!err && vma_m) {
71769 + if (anon_vma_clone(new_m, vma_m))
71770 + goto out_free_mpol;
71771 +
71772 + mpol_get(pol);
71773 + vma_set_policy(new_m, pol);
71774 +
71775 + if (new_m->vm_file) {
71776 + get_file(new_m->vm_file);
71777 + if (vma_m->vm_flags & VM_EXECUTABLE)
71778 + added_exe_file_vma(mm);
71779 + }
71780 +
71781 + if (new_m->vm_ops && new_m->vm_ops->open)
71782 + new_m->vm_ops->open(new_m);
71783 +
71784 + if (new_below)
71785 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
71786 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
71787 + else
71788 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
71789 +
71790 + if (err) {
71791 + if (new_m->vm_ops && new_m->vm_ops->close)
71792 + new_m->vm_ops->close(new_m);
71793 + if (new_m->vm_file) {
71794 + if (vma_m->vm_flags & VM_EXECUTABLE)
71795 + removed_exe_file_vma(mm);
71796 + fput(new_m->vm_file);
71797 + }
71798 + mpol_put(pol);
71799 + }
71800 + }
71801 +#endif
71802 +
71803 /* Success. */
71804 if (!err)
71805 return 0;
71806 @@ -1990,10 +2371,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71807 removed_exe_file_vma(mm);
71808 fput(new->vm_file);
71809 }
71810 - unlink_anon_vmas(new);
71811 out_free_mpol:
71812 mpol_put(pol);
71813 out_free_vma:
71814 +
71815 +#ifdef CONFIG_PAX_SEGMEXEC
71816 + if (new_m) {
71817 + unlink_anon_vmas(new_m);
71818 + kmem_cache_free(vm_area_cachep, new_m);
71819 + }
71820 +#endif
71821 +
71822 + unlink_anon_vmas(new);
71823 kmem_cache_free(vm_area_cachep, new);
71824 out_err:
71825 return err;
71826 @@ -2006,6 +2395,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71827 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71828 unsigned long addr, int new_below)
71829 {
71830 +
71831 +#ifdef CONFIG_PAX_SEGMEXEC
71832 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71833 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
71834 + if (mm->map_count >= sysctl_max_map_count-1)
71835 + return -ENOMEM;
71836 + } else
71837 +#endif
71838 +
71839 if (mm->map_count >= sysctl_max_map_count)
71840 return -ENOMEM;
71841
71842 @@ -2017,11 +2415,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
71843 * work. This now handles partial unmappings.
71844 * Jeremy Fitzhardinge <jeremy@goop.org>
71845 */
71846 +#ifdef CONFIG_PAX_SEGMEXEC
71847 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71848 {
71849 + int ret = __do_munmap(mm, start, len);
71850 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
71851 + return ret;
71852 +
71853 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
71854 +}
71855 +
71856 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71857 +#else
71858 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71859 +#endif
71860 +{
71861 unsigned long end;
71862 struct vm_area_struct *vma, *prev, *last;
71863
71864 + /*
71865 + * mm->mmap_sem is required to protect against another thread
71866 + * changing the mappings in case we sleep.
71867 + */
71868 + verify_mm_writelocked(mm);
71869 +
71870 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
71871 return -EINVAL;
71872
71873 @@ -2096,6 +2513,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
71874 /* Fix up all other VM information */
71875 remove_vma_list(mm, vma);
71876
71877 + track_exec_limit(mm, start, end, 0UL);
71878 +
71879 return 0;
71880 }
71881
71882 @@ -2108,22 +2527,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
71883
71884 profile_munmap(addr);
71885
71886 +#ifdef CONFIG_PAX_SEGMEXEC
71887 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
71888 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
71889 + return -EINVAL;
71890 +#endif
71891 +
71892 down_write(&mm->mmap_sem);
71893 ret = do_munmap(mm, addr, len);
71894 up_write(&mm->mmap_sem);
71895 return ret;
71896 }
71897
71898 -static inline void verify_mm_writelocked(struct mm_struct *mm)
71899 -{
71900 -#ifdef CONFIG_DEBUG_VM
71901 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71902 - WARN_ON(1);
71903 - up_read(&mm->mmap_sem);
71904 - }
71905 -#endif
71906 -}
71907 -
71908 /*
71909 * this is really a simplified "do_mmap". it only handles
71910 * anonymous maps. eventually we may be able to do some
71911 @@ -2137,6 +2552,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71912 struct rb_node ** rb_link, * rb_parent;
71913 pgoff_t pgoff = addr >> PAGE_SHIFT;
71914 int error;
71915 + unsigned long charged;
71916
71917 len = PAGE_ALIGN(len);
71918 if (!len)
71919 @@ -2148,16 +2564,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71920
71921 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
71922
71923 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
71924 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
71925 + flags &= ~VM_EXEC;
71926 +
71927 +#ifdef CONFIG_PAX_MPROTECT
71928 + if (mm->pax_flags & MF_PAX_MPROTECT)
71929 + flags &= ~VM_MAYEXEC;
71930 +#endif
71931 +
71932 + }
71933 +#endif
71934 +
71935 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
71936 if (error & ~PAGE_MASK)
71937 return error;
71938
71939 + charged = len >> PAGE_SHIFT;
71940 +
71941 /*
71942 * mlock MCL_FUTURE?
71943 */
71944 if (mm->def_flags & VM_LOCKED) {
71945 unsigned long locked, lock_limit;
71946 - locked = len >> PAGE_SHIFT;
71947 + locked = charged;
71948 locked += mm->locked_vm;
71949 lock_limit = rlimit(RLIMIT_MEMLOCK);
71950 lock_limit >>= PAGE_SHIFT;
71951 @@ -2174,22 +2604,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71952 /*
71953 * Clear old maps. this also does some error checking for us
71954 */
71955 - munmap_back:
71956 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71957 if (vma && vma->vm_start < addr + len) {
71958 if (do_munmap(mm, addr, len))
71959 return -ENOMEM;
71960 - goto munmap_back;
71961 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71962 + BUG_ON(vma && vma->vm_start < addr + len);
71963 }
71964
71965 /* Check against address space limits *after* clearing old maps... */
71966 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
71967 + if (!may_expand_vm(mm, charged))
71968 return -ENOMEM;
71969
71970 if (mm->map_count > sysctl_max_map_count)
71971 return -ENOMEM;
71972
71973 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
71974 + if (security_vm_enough_memory(charged))
71975 return -ENOMEM;
71976
71977 /* Can we just expand an old private anonymous mapping? */
71978 @@ -2203,7 +2633,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71979 */
71980 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71981 if (!vma) {
71982 - vm_unacct_memory(len >> PAGE_SHIFT);
71983 + vm_unacct_memory(charged);
71984 return -ENOMEM;
71985 }
71986
71987 @@ -2217,11 +2647,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
71988 vma_link(mm, vma, prev, rb_link, rb_parent);
71989 out:
71990 perf_event_mmap(vma);
71991 - mm->total_vm += len >> PAGE_SHIFT;
71992 + mm->total_vm += charged;
71993 if (flags & VM_LOCKED) {
71994 if (!mlock_vma_pages_range(vma, addr, addr + len))
71995 - mm->locked_vm += (len >> PAGE_SHIFT);
71996 + mm->locked_vm += charged;
71997 }
71998 + track_exec_limit(mm, addr, addr + len, flags);
71999 return addr;
72000 }
72001
72002 @@ -2268,8 +2699,10 @@ void exit_mmap(struct mm_struct *mm)
72003 * Walk the list again, actually closing and freeing it,
72004 * with preemption enabled, without holding any MM locks.
72005 */
72006 - while (vma)
72007 + while (vma) {
72008 + vma->vm_mirror = NULL;
72009 vma = remove_vma(vma);
72010 + }
72011
72012 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72013 }
72014 @@ -2283,6 +2716,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72015 struct vm_area_struct * __vma, * prev;
72016 struct rb_node ** rb_link, * rb_parent;
72017
72018 +#ifdef CONFIG_PAX_SEGMEXEC
72019 + struct vm_area_struct *vma_m = NULL;
72020 +#endif
72021 +
72022 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72023 + return -EPERM;
72024 +
72025 /*
72026 * The vm_pgoff of a purely anonymous vma should be irrelevant
72027 * until its first write fault, when page's anon_vma and index
72028 @@ -2305,7 +2745,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72029 if ((vma->vm_flags & VM_ACCOUNT) &&
72030 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72031 return -ENOMEM;
72032 +
72033 +#ifdef CONFIG_PAX_SEGMEXEC
72034 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72035 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72036 + if (!vma_m)
72037 + return -ENOMEM;
72038 + }
72039 +#endif
72040 +
72041 vma_link(mm, vma, prev, rb_link, rb_parent);
72042 +
72043 +#ifdef CONFIG_PAX_SEGMEXEC
72044 + if (vma_m)
72045 + BUG_ON(pax_mirror_vma(vma_m, vma));
72046 +#endif
72047 +
72048 return 0;
72049 }
72050
72051 @@ -2323,6 +2778,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72052 struct rb_node **rb_link, *rb_parent;
72053 struct mempolicy *pol;
72054
72055 + BUG_ON(vma->vm_mirror);
72056 +
72057 /*
72058 * If anonymous vma has not yet been faulted, update new pgoff
72059 * to match new location, to increase its chance of merging.
72060 @@ -2373,6 +2830,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72061 return NULL;
72062 }
72063
72064 +#ifdef CONFIG_PAX_SEGMEXEC
72065 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72066 +{
72067 + struct vm_area_struct *prev_m;
72068 + struct rb_node **rb_link_m, *rb_parent_m;
72069 + struct mempolicy *pol_m;
72070 +
72071 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72072 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72073 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72074 + *vma_m = *vma;
72075 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72076 + if (anon_vma_clone(vma_m, vma))
72077 + return -ENOMEM;
72078 + pol_m = vma_policy(vma_m);
72079 + mpol_get(pol_m);
72080 + vma_set_policy(vma_m, pol_m);
72081 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72082 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72083 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72084 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72085 + if (vma_m->vm_file)
72086 + get_file(vma_m->vm_file);
72087 + if (vma_m->vm_ops && vma_m->vm_ops->open)
72088 + vma_m->vm_ops->open(vma_m);
72089 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72090 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72091 + vma_m->vm_mirror = vma;
72092 + vma->vm_mirror = vma_m;
72093 + return 0;
72094 +}
72095 +#endif
72096 +
72097 /*
72098 * Return true if the calling process may expand its vm space by the passed
72099 * number of pages
72100 @@ -2383,7 +2873,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72101 unsigned long lim;
72102
72103 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72104 -
72105 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72106 if (cur + npages > lim)
72107 return 0;
72108 return 1;
72109 @@ -2454,6 +2944,22 @@ int install_special_mapping(struct mm_struct *mm,
72110 vma->vm_start = addr;
72111 vma->vm_end = addr + len;
72112
72113 +#ifdef CONFIG_PAX_MPROTECT
72114 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72115 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72116 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72117 + return -EPERM;
72118 + if (!(vm_flags & VM_EXEC))
72119 + vm_flags &= ~VM_MAYEXEC;
72120 +#else
72121 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72122 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72123 +#endif
72124 + else
72125 + vm_flags &= ~VM_MAYWRITE;
72126 + }
72127 +#endif
72128 +
72129 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72130 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72131
72132 diff --git a/mm/mprotect.c b/mm/mprotect.c
72133 index 5a688a2..27e031c 100644
72134 --- a/mm/mprotect.c
72135 +++ b/mm/mprotect.c
72136 @@ -23,10 +23,16 @@
72137 #include <linux/mmu_notifier.h>
72138 #include <linux/migrate.h>
72139 #include <linux/perf_event.h>
72140 +
72141 +#ifdef CONFIG_PAX_MPROTECT
72142 +#include <linux/elf.h>
72143 +#endif
72144 +
72145 #include <asm/uaccess.h>
72146 #include <asm/pgtable.h>
72147 #include <asm/cacheflush.h>
72148 #include <asm/tlbflush.h>
72149 +#include <asm/mmu_context.h>
72150
72151 #ifndef pgprot_modify
72152 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72153 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
72154 flush_tlb_range(vma, start, end);
72155 }
72156
72157 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72158 +/* called while holding the mmap semaphor for writing except stack expansion */
72159 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72160 +{
72161 + unsigned long oldlimit, newlimit = 0UL;
72162 +
72163 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72164 + return;
72165 +
72166 + spin_lock(&mm->page_table_lock);
72167 + oldlimit = mm->context.user_cs_limit;
72168 + if ((prot & VM_EXEC) && oldlimit < end)
72169 + /* USER_CS limit moved up */
72170 + newlimit = end;
72171 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72172 + /* USER_CS limit moved down */
72173 + newlimit = start;
72174 +
72175 + if (newlimit) {
72176 + mm->context.user_cs_limit = newlimit;
72177 +
72178 +#ifdef CONFIG_SMP
72179 + wmb();
72180 + cpus_clear(mm->context.cpu_user_cs_mask);
72181 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72182 +#endif
72183 +
72184 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72185 + }
72186 + spin_unlock(&mm->page_table_lock);
72187 + if (newlimit == end) {
72188 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
72189 +
72190 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
72191 + if (is_vm_hugetlb_page(vma))
72192 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72193 + else
72194 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72195 + }
72196 +}
72197 +#endif
72198 +
72199 int
72200 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72201 unsigned long start, unsigned long end, unsigned long newflags)
72202 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72203 int error;
72204 int dirty_accountable = 0;
72205
72206 +#ifdef CONFIG_PAX_SEGMEXEC
72207 + struct vm_area_struct *vma_m = NULL;
72208 + unsigned long start_m, end_m;
72209 +
72210 + start_m = start + SEGMEXEC_TASK_SIZE;
72211 + end_m = end + SEGMEXEC_TASK_SIZE;
72212 +#endif
72213 +
72214 if (newflags == oldflags) {
72215 *pprev = vma;
72216 return 0;
72217 }
72218
72219 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72220 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72221 +
72222 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72223 + return -ENOMEM;
72224 +
72225 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72226 + return -ENOMEM;
72227 + }
72228 +
72229 /*
72230 * If we make a private mapping writable we increase our commit;
72231 * but (without finer accounting) cannot reduce our commit if we
72232 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72233 }
72234 }
72235
72236 +#ifdef CONFIG_PAX_SEGMEXEC
72237 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72238 + if (start != vma->vm_start) {
72239 + error = split_vma(mm, vma, start, 1);
72240 + if (error)
72241 + goto fail;
72242 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72243 + *pprev = (*pprev)->vm_next;
72244 + }
72245 +
72246 + if (end != vma->vm_end) {
72247 + error = split_vma(mm, vma, end, 0);
72248 + if (error)
72249 + goto fail;
72250 + }
72251 +
72252 + if (pax_find_mirror_vma(vma)) {
72253 + error = __do_munmap(mm, start_m, end_m - start_m);
72254 + if (error)
72255 + goto fail;
72256 + } else {
72257 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72258 + if (!vma_m) {
72259 + error = -ENOMEM;
72260 + goto fail;
72261 + }
72262 + vma->vm_flags = newflags;
72263 + error = pax_mirror_vma(vma_m, vma);
72264 + if (error) {
72265 + vma->vm_flags = oldflags;
72266 + goto fail;
72267 + }
72268 + }
72269 + }
72270 +#endif
72271 +
72272 /*
72273 * First try to merge with previous and/or next vma.
72274 */
72275 @@ -204,9 +306,21 @@ success:
72276 * vm_flags and vm_page_prot are protected by the mmap_sem
72277 * held in write mode.
72278 */
72279 +
72280 +#ifdef CONFIG_PAX_SEGMEXEC
72281 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72282 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72283 +#endif
72284 +
72285 vma->vm_flags = newflags;
72286 +
72287 +#ifdef CONFIG_PAX_MPROTECT
72288 + if (mm->binfmt && mm->binfmt->handle_mprotect)
72289 + mm->binfmt->handle_mprotect(vma, newflags);
72290 +#endif
72291 +
72292 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72293 - vm_get_page_prot(newflags));
72294 + vm_get_page_prot(vma->vm_flags));
72295
72296 if (vma_wants_writenotify(vma)) {
72297 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72298 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72299 end = start + len;
72300 if (end <= start)
72301 return -ENOMEM;
72302 +
72303 +#ifdef CONFIG_PAX_SEGMEXEC
72304 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72305 + if (end > SEGMEXEC_TASK_SIZE)
72306 + return -EINVAL;
72307 + } else
72308 +#endif
72309 +
72310 + if (end > TASK_SIZE)
72311 + return -EINVAL;
72312 +
72313 if (!arch_validate_prot(prot))
72314 return -EINVAL;
72315
72316 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72317 /*
72318 * Does the application expect PROT_READ to imply PROT_EXEC:
72319 */
72320 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72321 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72322 prot |= PROT_EXEC;
72323
72324 vm_flags = calc_vm_prot_bits(prot);
72325 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72326 if (start > vma->vm_start)
72327 prev = vma;
72328
72329 +#ifdef CONFIG_PAX_MPROTECT
72330 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72331 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72332 +#endif
72333 +
72334 for (nstart = start ; ; ) {
72335 unsigned long newflags;
72336
72337 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72338
72339 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72340 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72341 + if (prot & (PROT_WRITE | PROT_EXEC))
72342 + gr_log_rwxmprotect(vma->vm_file);
72343 +
72344 + error = -EACCES;
72345 + goto out;
72346 + }
72347 +
72348 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72349 error = -EACCES;
72350 goto out;
72351 }
72352 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72353 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72354 if (error)
72355 goto out;
72356 +
72357 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72358 +
72359 nstart = tmp;
72360
72361 if (nstart < prev->vm_end)
72362 diff --git a/mm/mremap.c b/mm/mremap.c
72363 index 506fa44..ccc0ba9 100644
72364 --- a/mm/mremap.c
72365 +++ b/mm/mremap.c
72366 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72367 continue;
72368 pte = ptep_clear_flush(vma, old_addr, old_pte);
72369 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72370 +
72371 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72372 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72373 + pte = pte_exprotect(pte);
72374 +#endif
72375 +
72376 set_pte_at(mm, new_addr, new_pte, pte);
72377 }
72378
72379 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72380 if (is_vm_hugetlb_page(vma))
72381 goto Einval;
72382
72383 +#ifdef CONFIG_PAX_SEGMEXEC
72384 + if (pax_find_mirror_vma(vma))
72385 + goto Einval;
72386 +#endif
72387 +
72388 /* We can't remap across vm area boundaries */
72389 if (old_len > vma->vm_end - addr)
72390 goto Efault;
72391 @@ -328,20 +339,25 @@ static unsigned long mremap_to(unsigned long addr,
72392 unsigned long ret = -EINVAL;
72393 unsigned long charged = 0;
72394 unsigned long map_flags;
72395 + unsigned long pax_task_size = TASK_SIZE;
72396
72397 if (new_addr & ~PAGE_MASK)
72398 goto out;
72399
72400 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72401 +#ifdef CONFIG_PAX_SEGMEXEC
72402 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72403 + pax_task_size = SEGMEXEC_TASK_SIZE;
72404 +#endif
72405 +
72406 + pax_task_size -= PAGE_SIZE;
72407 +
72408 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72409 goto out;
72410
72411 /* Check if the location we're moving into overlaps the
72412 * old location at all, and fail if it does.
72413 */
72414 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72415 - goto out;
72416 -
72417 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72418 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72419 goto out;
72420
72421 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72422 @@ -413,6 +429,7 @@ unsigned long do_mremap(unsigned long addr,
72423 struct vm_area_struct *vma;
72424 unsigned long ret = -EINVAL;
72425 unsigned long charged = 0;
72426 + unsigned long pax_task_size = TASK_SIZE;
72427
72428 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72429 goto out;
72430 @@ -431,6 +448,17 @@ unsigned long do_mremap(unsigned long addr,
72431 if (!new_len)
72432 goto out;
72433
72434 +#ifdef CONFIG_PAX_SEGMEXEC
72435 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72436 + pax_task_size = SEGMEXEC_TASK_SIZE;
72437 +#endif
72438 +
72439 + pax_task_size -= PAGE_SIZE;
72440 +
72441 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72442 + old_len > pax_task_size || addr > pax_task_size-old_len)
72443 + goto out;
72444 +
72445 if (flags & MREMAP_FIXED) {
72446 if (flags & MREMAP_MAYMOVE)
72447 ret = mremap_to(addr, old_len, new_addr, new_len);
72448 @@ -480,6 +508,7 @@ unsigned long do_mremap(unsigned long addr,
72449 addr + new_len);
72450 }
72451 ret = addr;
72452 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72453 goto out;
72454 }
72455 }
72456 @@ -506,7 +535,13 @@ unsigned long do_mremap(unsigned long addr,
72457 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72458 if (ret)
72459 goto out;
72460 +
72461 + map_flags = vma->vm_flags;
72462 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72463 + if (!(ret & ~PAGE_MASK)) {
72464 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72465 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72466 + }
72467 }
72468 out:
72469 if (ret & ~PAGE_MASK)
72470 diff --git a/mm/nobootmem.c b/mm/nobootmem.c
72471 index 6e93dc7..c98df0c 100644
72472 --- a/mm/nobootmem.c
72473 +++ b/mm/nobootmem.c
72474 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
72475 unsigned long __init free_all_memory_core_early(int nodeid)
72476 {
72477 int i;
72478 - u64 start, end;
72479 + u64 start, end, startrange, endrange;
72480 unsigned long count = 0;
72481 - struct range *range = NULL;
72482 + struct range *range = NULL, rangerange = { 0, 0 };
72483 int nr_range;
72484
72485 nr_range = get_free_all_memory_range(&range, nodeid);
72486 + startrange = __pa(range) >> PAGE_SHIFT;
72487 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
72488
72489 for (i = 0; i < nr_range; i++) {
72490 start = range[i].start;
72491 end = range[i].end;
72492 + if (start <= endrange && startrange < end) {
72493 + BUG_ON(rangerange.start | rangerange.end);
72494 + rangerange = range[i];
72495 + continue;
72496 + }
72497 count += end - start;
72498 __free_pages_memory(start, end);
72499 }
72500 + start = rangerange.start;
72501 + end = rangerange.end;
72502 + count += end - start;
72503 + __free_pages_memory(start, end);
72504
72505 return count;
72506 }
72507 diff --git a/mm/nommu.c b/mm/nommu.c
72508 index 4358032..e79b99f 100644
72509 --- a/mm/nommu.c
72510 +++ b/mm/nommu.c
72511 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72512 int sysctl_overcommit_ratio = 50; /* default is 50% */
72513 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72514 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72515 -int heap_stack_gap = 0;
72516
72517 atomic_long_t mmap_pages_allocated;
72518
72519 @@ -825,15 +824,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72520 EXPORT_SYMBOL(find_vma);
72521
72522 /*
72523 - * find a VMA
72524 - * - we don't extend stack VMAs under NOMMU conditions
72525 - */
72526 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72527 -{
72528 - return find_vma(mm, addr);
72529 -}
72530 -
72531 -/*
72532 * expand a stack to a given address
72533 * - not supported under NOMMU conditions
72534 */
72535 @@ -1553,6 +1543,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72536
72537 /* most fields are the same, copy all, and then fixup */
72538 *new = *vma;
72539 + INIT_LIST_HEAD(&new->anon_vma_chain);
72540 *region = *vma->vm_region;
72541 new->vm_region = region;
72542
72543 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
72544 index 626303b..e9a1785 100644
72545 --- a/mm/oom_kill.c
72546 +++ b/mm/oom_kill.c
72547 @@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p,
72548 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
72549 const nodemask_t *nodemask, unsigned long totalpages)
72550 {
72551 - int points;
72552 + long points;
72553
72554 if (oom_unkillable_task(p, mem, nodemask))
72555 return 0;
72556 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72557 index 6e8ecb6..50b8879 100644
72558 --- a/mm/page_alloc.c
72559 +++ b/mm/page_alloc.c
72560 @@ -340,7 +340,7 @@ out:
72561 * This usage means that zero-order pages may not be compound.
72562 */
72563
72564 -static void free_compound_page(struct page *page)
72565 +void free_compound_page(struct page *page)
72566 {
72567 __free_pages_ok(page, compound_order(page));
72568 }
72569 @@ -653,6 +653,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72570 int i;
72571 int bad = 0;
72572
72573 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72574 + unsigned long index = 1UL << order;
72575 +#endif
72576 +
72577 trace_mm_page_free_direct(page, order);
72578 kmemcheck_free_shadow(page, order);
72579
72580 @@ -668,6 +672,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72581 debug_check_no_obj_freed(page_address(page),
72582 PAGE_SIZE << order);
72583 }
72584 +
72585 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72586 + for (; index; --index)
72587 + sanitize_highpage(page + index - 1);
72588 +#endif
72589 +
72590 arch_free_page(page, order);
72591 kernel_map_pages(page, 1 << order, 0);
72592
72593 @@ -783,8 +793,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72594 arch_alloc_page(page, order);
72595 kernel_map_pages(page, 1 << order, 1);
72596
72597 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72598 if (gfp_flags & __GFP_ZERO)
72599 prep_zero_page(page, order, gfp_flags);
72600 +#endif
72601
72602 if (order && (gfp_flags & __GFP_COMP))
72603 prep_compound_page(page, order);
72604 @@ -2539,6 +2551,8 @@ void show_free_areas(unsigned int filter)
72605 int cpu;
72606 struct zone *zone;
72607
72608 + pax_track_stack();
72609 +
72610 for_each_populated_zone(zone) {
72611 if (skip_free_areas_node(filter, zone_to_nid(zone)))
72612 continue;
72613 @@ -3350,7 +3364,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72614 unsigned long pfn;
72615
72616 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72617 +#ifdef CONFIG_X86_32
72618 + /* boot failures in VMware 8 on 32bit vanilla since
72619 + this change */
72620 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72621 +#else
72622 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72623 +#endif
72624 return 1;
72625 }
72626 return 0;
72627 diff --git a/mm/percpu.c b/mm/percpu.c
72628 index bf80e55..c7c3f9a 100644
72629 --- a/mm/percpu.c
72630 +++ b/mm/percpu.c
72631 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu __read_mostly;
72632 static unsigned int pcpu_last_unit_cpu __read_mostly;
72633
72634 /* the address of the first chunk which starts with the kernel static area */
72635 -void *pcpu_base_addr __read_mostly;
72636 +void *pcpu_base_addr __read_only;
72637 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72638
72639 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72640 diff --git a/mm/rmap.c b/mm/rmap.c
72641 index 8005080..198c2cd 100644
72642 --- a/mm/rmap.c
72643 +++ b/mm/rmap.c
72644 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72645 struct anon_vma *anon_vma = vma->anon_vma;
72646 struct anon_vma_chain *avc;
72647
72648 +#ifdef CONFIG_PAX_SEGMEXEC
72649 + struct anon_vma_chain *avc_m = NULL;
72650 +#endif
72651 +
72652 might_sleep();
72653 if (unlikely(!anon_vma)) {
72654 struct mm_struct *mm = vma->vm_mm;
72655 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72656 if (!avc)
72657 goto out_enomem;
72658
72659 +#ifdef CONFIG_PAX_SEGMEXEC
72660 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72661 + if (!avc_m)
72662 + goto out_enomem_free_avc;
72663 +#endif
72664 +
72665 anon_vma = find_mergeable_anon_vma(vma);
72666 allocated = NULL;
72667 if (!anon_vma) {
72668 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72669 /* page_table_lock to protect against threads */
72670 spin_lock(&mm->page_table_lock);
72671 if (likely(!vma->anon_vma)) {
72672 +
72673 +#ifdef CONFIG_PAX_SEGMEXEC
72674 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72675 +
72676 + if (vma_m) {
72677 + BUG_ON(vma_m->anon_vma);
72678 + vma_m->anon_vma = anon_vma;
72679 + avc_m->anon_vma = anon_vma;
72680 + avc_m->vma = vma;
72681 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
72682 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
72683 + avc_m = NULL;
72684 + }
72685 +#endif
72686 +
72687 vma->anon_vma = anon_vma;
72688 avc->anon_vma = anon_vma;
72689 avc->vma = vma;
72690 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72691
72692 if (unlikely(allocated))
72693 put_anon_vma(allocated);
72694 +
72695 +#ifdef CONFIG_PAX_SEGMEXEC
72696 + if (unlikely(avc_m))
72697 + anon_vma_chain_free(avc_m);
72698 +#endif
72699 +
72700 if (unlikely(avc))
72701 anon_vma_chain_free(avc);
72702 }
72703 return 0;
72704
72705 out_enomem_free_avc:
72706 +
72707 +#ifdef CONFIG_PAX_SEGMEXEC
72708 + if (avc_m)
72709 + anon_vma_chain_free(avc_m);
72710 +#endif
72711 +
72712 anon_vma_chain_free(avc);
72713 out_enomem:
72714 return -ENOMEM;
72715 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
72716 * Attach the anon_vmas from src to dst.
72717 * Returns 0 on success, -ENOMEM on failure.
72718 */
72719 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72720 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72721 {
72722 struct anon_vma_chain *avc, *pavc;
72723 struct anon_vma *root = NULL;
72724 @@ -276,7 +313,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72725 * the corresponding VMA in the parent process is attached to.
72726 * Returns 0 on success, non-zero on failure.
72727 */
72728 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72729 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72730 {
72731 struct anon_vma_chain *avc;
72732 struct anon_vma *anon_vma;
72733 diff --git a/mm/shmem.c b/mm/shmem.c
72734 index 32f6763..431c405 100644
72735 --- a/mm/shmem.c
72736 +++ b/mm/shmem.c
72737 @@ -31,7 +31,7 @@
72738 #include <linux/module.h>
72739 #include <linux/swap.h>
72740
72741 -static struct vfsmount *shm_mnt;
72742 +struct vfsmount *shm_mnt;
72743
72744 #ifdef CONFIG_SHMEM
72745 /*
72746 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72747 #define BOGO_DIRENT_SIZE 20
72748
72749 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72750 -#define SHORT_SYMLINK_LEN 128
72751 +#define SHORT_SYMLINK_LEN 64
72752
72753 struct shmem_xattr {
72754 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72755 @@ -769,6 +769,8 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
72756 struct mempolicy mpol, *spol;
72757 struct vm_area_struct pvma;
72758
72759 + pax_track_stack();
72760 +
72761 spol = mpol_cond_copy(&mpol,
72762 mpol_shared_policy_lookup(&info->policy, index));
72763
72764 @@ -2149,8 +2151,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72765 int err = -ENOMEM;
72766
72767 /* Round up to L1_CACHE_BYTES to resist false sharing */
72768 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
72769 - L1_CACHE_BYTES), GFP_KERNEL);
72770 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
72771 if (!sbinfo)
72772 return -ENOMEM;
72773
72774 diff --git a/mm/slab.c b/mm/slab.c
72775 index 6d90a09..3cab423 100644
72776 --- a/mm/slab.c
72777 +++ b/mm/slab.c
72778 @@ -151,7 +151,7 @@
72779
72780 /* Legal flag mask for kmem_cache_create(). */
72781 #if DEBUG
72782 -# define CREATE_MASK (SLAB_RED_ZONE | \
72783 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
72784 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
72785 SLAB_CACHE_DMA | \
72786 SLAB_STORE_USER | \
72787 @@ -159,7 +159,7 @@
72788 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72789 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
72790 #else
72791 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
72792 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
72793 SLAB_CACHE_DMA | \
72794 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
72795 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
72796 @@ -288,7 +288,7 @@ struct kmem_list3 {
72797 * Need this for bootstrapping a per node allocator.
72798 */
72799 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
72800 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
72801 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
72802 #define CACHE_CACHE 0
72803 #define SIZE_AC MAX_NUMNODES
72804 #define SIZE_L3 (2 * MAX_NUMNODES)
72805 @@ -389,10 +389,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
72806 if ((x)->max_freeable < i) \
72807 (x)->max_freeable = i; \
72808 } while (0)
72809 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
72810 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
72811 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
72812 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
72813 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
72814 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
72815 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
72816 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
72817 #else
72818 #define STATS_INC_ACTIVE(x) do { } while (0)
72819 #define STATS_DEC_ACTIVE(x) do { } while (0)
72820 @@ -538,7 +538,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
72821 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
72822 */
72823 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
72824 - const struct slab *slab, void *obj)
72825 + const struct slab *slab, const void *obj)
72826 {
72827 u32 offset = (obj - slab->s_mem);
72828 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
72829 @@ -564,7 +564,7 @@ struct cache_names {
72830 static struct cache_names __initdata cache_names[] = {
72831 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
72832 #include <linux/kmalloc_sizes.h>
72833 - {NULL,}
72834 + {NULL}
72835 #undef CACHE
72836 };
72837
72838 @@ -1571,7 +1571,7 @@ void __init kmem_cache_init(void)
72839 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
72840 sizes[INDEX_AC].cs_size,
72841 ARCH_KMALLOC_MINALIGN,
72842 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72843 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72844 NULL);
72845
72846 if (INDEX_AC != INDEX_L3) {
72847 @@ -1579,7 +1579,7 @@ void __init kmem_cache_init(void)
72848 kmem_cache_create(names[INDEX_L3].name,
72849 sizes[INDEX_L3].cs_size,
72850 ARCH_KMALLOC_MINALIGN,
72851 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72852 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72853 NULL);
72854 }
72855
72856 @@ -1597,7 +1597,7 @@ void __init kmem_cache_init(void)
72857 sizes->cs_cachep = kmem_cache_create(names->name,
72858 sizes->cs_size,
72859 ARCH_KMALLOC_MINALIGN,
72860 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
72861 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
72862 NULL);
72863 }
72864 #ifdef CONFIG_ZONE_DMA
72865 @@ -4324,10 +4324,10 @@ static int s_show(struct seq_file *m, void *p)
72866 }
72867 /* cpu stats */
72868 {
72869 - unsigned long allochit = atomic_read(&cachep->allochit);
72870 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
72871 - unsigned long freehit = atomic_read(&cachep->freehit);
72872 - unsigned long freemiss = atomic_read(&cachep->freemiss);
72873 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
72874 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
72875 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
72876 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
72877
72878 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
72879 allochit, allocmiss, freehit, freemiss);
72880 @@ -4584,15 +4584,70 @@ static const struct file_operations proc_slabstats_operations = {
72881
72882 static int __init slab_proc_init(void)
72883 {
72884 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
72885 + mode_t gr_mode = S_IRUGO;
72886 +
72887 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
72888 + gr_mode = S_IRUSR;
72889 +#endif
72890 +
72891 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
72892 #ifdef CONFIG_DEBUG_SLAB_LEAK
72893 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
72894 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
72895 #endif
72896 return 0;
72897 }
72898 module_init(slab_proc_init);
72899 #endif
72900
72901 +void check_object_size(const void *ptr, unsigned long n, bool to)
72902 +{
72903 +
72904 +#ifdef CONFIG_PAX_USERCOPY
72905 + struct page *page;
72906 + struct kmem_cache *cachep = NULL;
72907 + struct slab *slabp;
72908 + unsigned int objnr;
72909 + unsigned long offset;
72910 + const char *type;
72911 +
72912 + if (!n)
72913 + return;
72914 +
72915 + type = "<null>";
72916 + if (ZERO_OR_NULL_PTR(ptr))
72917 + goto report;
72918 +
72919 + if (!virt_addr_valid(ptr))
72920 + return;
72921 +
72922 + page = virt_to_head_page(ptr);
72923 +
72924 + type = "<process stack>";
72925 + if (!PageSlab(page)) {
72926 + if (object_is_on_stack(ptr, n) == -1)
72927 + goto report;
72928 + return;
72929 + }
72930 +
72931 + cachep = page_get_cache(page);
72932 + type = cachep->name;
72933 + if (!(cachep->flags & SLAB_USERCOPY))
72934 + goto report;
72935 +
72936 + slabp = page_get_slab(page);
72937 + objnr = obj_to_index(cachep, slabp, ptr);
72938 + BUG_ON(objnr >= cachep->num);
72939 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
72940 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
72941 + return;
72942 +
72943 +report:
72944 + pax_report_usercopy(ptr, n, to, type);
72945 +#endif
72946 +
72947 +}
72948 +EXPORT_SYMBOL(check_object_size);
72949 +
72950 /**
72951 * ksize - get the actual amount of memory allocated for a given object
72952 * @objp: Pointer to the object
72953 diff --git a/mm/slob.c b/mm/slob.c
72954 index bf39181..727f7a3 100644
72955 --- a/mm/slob.c
72956 +++ b/mm/slob.c
72957 @@ -29,7 +29,7 @@
72958 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
72959 * alloc_pages() directly, allocating compound pages so the page order
72960 * does not have to be separately tracked, and also stores the exact
72961 - * allocation size in page->private so that it can be used to accurately
72962 + * allocation size in slob_page->size so that it can be used to accurately
72963 * provide ksize(). These objects are detected in kfree() because slob_page()
72964 * is false for them.
72965 *
72966 @@ -58,6 +58,7 @@
72967 */
72968
72969 #include <linux/kernel.h>
72970 +#include <linux/sched.h>
72971 #include <linux/slab.h>
72972 #include <linux/mm.h>
72973 #include <linux/swap.h> /* struct reclaim_state */
72974 @@ -102,7 +103,8 @@ struct slob_page {
72975 unsigned long flags; /* mandatory */
72976 atomic_t _count; /* mandatory */
72977 slobidx_t units; /* free units left in page */
72978 - unsigned long pad[2];
72979 + unsigned long pad[1];
72980 + unsigned long size; /* size when >=PAGE_SIZE */
72981 slob_t *free; /* first free slob_t in page */
72982 struct list_head list; /* linked list of free pages */
72983 };
72984 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
72985 */
72986 static inline int is_slob_page(struct slob_page *sp)
72987 {
72988 - return PageSlab((struct page *)sp);
72989 + return PageSlab((struct page *)sp) && !sp->size;
72990 }
72991
72992 static inline void set_slob_page(struct slob_page *sp)
72993 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
72994
72995 static inline struct slob_page *slob_page(const void *addr)
72996 {
72997 - return (struct slob_page *)virt_to_page(addr);
72998 + return (struct slob_page *)virt_to_head_page(addr);
72999 }
73000
73001 /*
73002 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73003 /*
73004 * Return the size of a slob block.
73005 */
73006 -static slobidx_t slob_units(slob_t *s)
73007 +static slobidx_t slob_units(const slob_t *s)
73008 {
73009 if (s->units > 0)
73010 return s->units;
73011 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73012 /*
73013 * Return the next free slob block pointer after this one.
73014 */
73015 -static slob_t *slob_next(slob_t *s)
73016 +static slob_t *slob_next(const slob_t *s)
73017 {
73018 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73019 slobidx_t next;
73020 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73021 /*
73022 * Returns true if s is the last free block in its page.
73023 */
73024 -static int slob_last(slob_t *s)
73025 +static int slob_last(const slob_t *s)
73026 {
73027 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73028 }
73029 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73030 if (!page)
73031 return NULL;
73032
73033 + set_slob_page(page);
73034 return page_address(page);
73035 }
73036
73037 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73038 if (!b)
73039 return NULL;
73040 sp = slob_page(b);
73041 - set_slob_page(sp);
73042
73043 spin_lock_irqsave(&slob_lock, flags);
73044 sp->units = SLOB_UNITS(PAGE_SIZE);
73045 sp->free = b;
73046 + sp->size = 0;
73047 INIT_LIST_HEAD(&sp->list);
73048 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73049 set_slob_page_free(sp, slob_list);
73050 @@ -476,10 +479,9 @@ out:
73051 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73052 */
73053
73054 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73055 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73056 {
73057 - unsigned int *m;
73058 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73059 + slob_t *m;
73060 void *ret;
73061
73062 gfp &= gfp_allowed_mask;
73063 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73064
73065 if (!m)
73066 return NULL;
73067 - *m = size;
73068 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73069 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73070 + m[0].units = size;
73071 + m[1].units = align;
73072 ret = (void *)m + align;
73073
73074 trace_kmalloc_node(_RET_IP_, ret,
73075 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73076 gfp |= __GFP_COMP;
73077 ret = slob_new_pages(gfp, order, node);
73078 if (ret) {
73079 - struct page *page;
73080 - page = virt_to_page(ret);
73081 - page->private = size;
73082 + struct slob_page *sp;
73083 + sp = slob_page(ret);
73084 + sp->size = size;
73085 }
73086
73087 trace_kmalloc_node(_RET_IP_, ret,
73088 size, PAGE_SIZE << order, gfp, node);
73089 }
73090
73091 - kmemleak_alloc(ret, size, 1, gfp);
73092 + return ret;
73093 +}
73094 +
73095 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73096 +{
73097 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73098 + void *ret = __kmalloc_node_align(size, gfp, node, align);
73099 +
73100 + if (!ZERO_OR_NULL_PTR(ret))
73101 + kmemleak_alloc(ret, size, 1, gfp);
73102 return ret;
73103 }
73104 EXPORT_SYMBOL(__kmalloc_node);
73105 @@ -533,13 +547,92 @@ void kfree(const void *block)
73106 sp = slob_page(block);
73107 if (is_slob_page(sp)) {
73108 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73109 - unsigned int *m = (unsigned int *)(block - align);
73110 - slob_free(m, *m + align);
73111 - } else
73112 + slob_t *m = (slob_t *)(block - align);
73113 + slob_free(m, m[0].units + align);
73114 + } else {
73115 + clear_slob_page(sp);
73116 + free_slob_page(sp);
73117 + sp->size = 0;
73118 put_page(&sp->page);
73119 + }
73120 }
73121 EXPORT_SYMBOL(kfree);
73122
73123 +void check_object_size(const void *ptr, unsigned long n, bool to)
73124 +{
73125 +
73126 +#ifdef CONFIG_PAX_USERCOPY
73127 + struct slob_page *sp;
73128 + const slob_t *free;
73129 + const void *base;
73130 + unsigned long flags;
73131 + const char *type;
73132 +
73133 + if (!n)
73134 + return;
73135 +
73136 + type = "<null>";
73137 + if (ZERO_OR_NULL_PTR(ptr))
73138 + goto report;
73139 +
73140 + if (!virt_addr_valid(ptr))
73141 + return;
73142 +
73143 + type = "<process stack>";
73144 + sp = slob_page(ptr);
73145 + if (!PageSlab((struct page*)sp)) {
73146 + if (object_is_on_stack(ptr, n) == -1)
73147 + goto report;
73148 + return;
73149 + }
73150 +
73151 + type = "<slob>";
73152 + if (sp->size) {
73153 + base = page_address(&sp->page);
73154 + if (base <= ptr && n <= sp->size - (ptr - base))
73155 + return;
73156 + goto report;
73157 + }
73158 +
73159 + /* some tricky double walking to find the chunk */
73160 + spin_lock_irqsave(&slob_lock, flags);
73161 + base = (void *)((unsigned long)ptr & PAGE_MASK);
73162 + free = sp->free;
73163 +
73164 + while (!slob_last(free) && (void *)free <= ptr) {
73165 + base = free + slob_units(free);
73166 + free = slob_next(free);
73167 + }
73168 +
73169 + while (base < (void *)free) {
73170 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73171 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
73172 + int offset;
73173 +
73174 + if (ptr < base + align)
73175 + break;
73176 +
73177 + offset = ptr - base - align;
73178 + if (offset >= m) {
73179 + base += size;
73180 + continue;
73181 + }
73182 +
73183 + if (n > m - offset)
73184 + break;
73185 +
73186 + spin_unlock_irqrestore(&slob_lock, flags);
73187 + return;
73188 + }
73189 +
73190 + spin_unlock_irqrestore(&slob_lock, flags);
73191 +report:
73192 + pax_report_usercopy(ptr, n, to, type);
73193 +#endif
73194 +
73195 +}
73196 +EXPORT_SYMBOL(check_object_size);
73197 +
73198 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73199 size_t ksize(const void *block)
73200 {
73201 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
73202 sp = slob_page(block);
73203 if (is_slob_page(sp)) {
73204 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73205 - unsigned int *m = (unsigned int *)(block - align);
73206 - return SLOB_UNITS(*m) * SLOB_UNIT;
73207 + slob_t *m = (slob_t *)(block - align);
73208 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73209 } else
73210 - return sp->page.private;
73211 + return sp->size;
73212 }
73213 EXPORT_SYMBOL(ksize);
73214
73215 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73216 {
73217 struct kmem_cache *c;
73218
73219 +#ifdef CONFIG_PAX_USERCOPY
73220 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
73221 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73222 +#else
73223 c = slob_alloc(sizeof(struct kmem_cache),
73224 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73225 +#endif
73226
73227 if (c) {
73228 c->name = name;
73229 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73230
73231 lockdep_trace_alloc(flags);
73232
73233 +#ifdef CONFIG_PAX_USERCOPY
73234 + b = __kmalloc_node_align(c->size, flags, node, c->align);
73235 +#else
73236 if (c->size < PAGE_SIZE) {
73237 b = slob_alloc(c->size, flags, c->align, node);
73238 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73239 SLOB_UNITS(c->size) * SLOB_UNIT,
73240 flags, node);
73241 } else {
73242 + struct slob_page *sp;
73243 +
73244 b = slob_new_pages(flags, get_order(c->size), node);
73245 + sp = slob_page(b);
73246 + sp->size = c->size;
73247 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73248 PAGE_SIZE << get_order(c->size),
73249 flags, node);
73250 }
73251 +#endif
73252
73253 if (c->ctor)
73254 c->ctor(b);
73255 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73256
73257 static void __kmem_cache_free(void *b, int size)
73258 {
73259 - if (size < PAGE_SIZE)
73260 + struct slob_page *sp = slob_page(b);
73261 +
73262 + if (is_slob_page(sp))
73263 slob_free(b, size);
73264 - else
73265 + else {
73266 + clear_slob_page(sp);
73267 + free_slob_page(sp);
73268 + sp->size = 0;
73269 slob_free_pages(b, get_order(size));
73270 + }
73271 }
73272
73273 static void kmem_rcu_free(struct rcu_head *head)
73274 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73275
73276 void kmem_cache_free(struct kmem_cache *c, void *b)
73277 {
73278 + int size = c->size;
73279 +
73280 +#ifdef CONFIG_PAX_USERCOPY
73281 + if (size + c->align < PAGE_SIZE) {
73282 + size += c->align;
73283 + b -= c->align;
73284 + }
73285 +#endif
73286 +
73287 kmemleak_free_recursive(b, c->flags);
73288 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73289 struct slob_rcu *slob_rcu;
73290 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73291 - slob_rcu->size = c->size;
73292 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73293 + slob_rcu->size = size;
73294 call_rcu(&slob_rcu->head, kmem_rcu_free);
73295 } else {
73296 - __kmem_cache_free(b, c->size);
73297 + __kmem_cache_free(b, size);
73298 }
73299
73300 +#ifdef CONFIG_PAX_USERCOPY
73301 + trace_kfree(_RET_IP_, b);
73302 +#else
73303 trace_kmem_cache_free(_RET_IP_, b);
73304 +#endif
73305 +
73306 }
73307 EXPORT_SYMBOL(kmem_cache_free);
73308
73309 diff --git a/mm/slub.c b/mm/slub.c
73310 index 7c54fe8..0bb4ac5 100644
73311 --- a/mm/slub.c
73312 +++ b/mm/slub.c
73313 @@ -208,7 +208,7 @@ struct track {
73314
73315 enum track_item { TRACK_ALLOC, TRACK_FREE };
73316
73317 -#ifdef CONFIG_SYSFS
73318 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73319 static int sysfs_slab_add(struct kmem_cache *);
73320 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73321 static void sysfs_slab_remove(struct kmem_cache *);
73322 @@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
73323 if (!t->addr)
73324 return;
73325
73326 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73327 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73328 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73329 #ifdef CONFIG_STACKTRACE
73330 {
73331 @@ -2456,6 +2456,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73332
73333 page = virt_to_head_page(x);
73334
73335 + BUG_ON(!PageSlab(page));
73336 +
73337 slab_free(s, page, x, _RET_IP_);
73338
73339 trace_kmem_cache_free(_RET_IP_, x);
73340 @@ -2489,7 +2491,7 @@ static int slub_min_objects;
73341 * Merge control. If this is set then no merging of slab caches will occur.
73342 * (Could be removed. This was introduced to pacify the merge skeptics.)
73343 */
73344 -static int slub_nomerge;
73345 +static int slub_nomerge = 1;
73346
73347 /*
73348 * Calculate the order of allocation given an slab object size.
73349 @@ -2912,7 +2914,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73350 * list to avoid pounding the page allocator excessively.
73351 */
73352 set_min_partial(s, ilog2(s->size));
73353 - s->refcount = 1;
73354 + atomic_set(&s->refcount, 1);
73355 #ifdef CONFIG_NUMA
73356 s->remote_node_defrag_ratio = 1000;
73357 #endif
73358 @@ -3017,8 +3019,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73359 void kmem_cache_destroy(struct kmem_cache *s)
73360 {
73361 down_write(&slub_lock);
73362 - s->refcount--;
73363 - if (!s->refcount) {
73364 + if (atomic_dec_and_test(&s->refcount)) {
73365 list_del(&s->list);
73366 if (kmem_cache_close(s)) {
73367 printk(KERN_ERR "SLUB %s: %s called for cache that "
73368 @@ -3228,6 +3229,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73369 EXPORT_SYMBOL(__kmalloc_node);
73370 #endif
73371
73372 +void check_object_size(const void *ptr, unsigned long n, bool to)
73373 +{
73374 +
73375 +#ifdef CONFIG_PAX_USERCOPY
73376 + struct page *page;
73377 + struct kmem_cache *s = NULL;
73378 + unsigned long offset;
73379 + const char *type;
73380 +
73381 + if (!n)
73382 + return;
73383 +
73384 + type = "<null>";
73385 + if (ZERO_OR_NULL_PTR(ptr))
73386 + goto report;
73387 +
73388 + if (!virt_addr_valid(ptr))
73389 + return;
73390 +
73391 + page = virt_to_head_page(ptr);
73392 +
73393 + type = "<process stack>";
73394 + if (!PageSlab(page)) {
73395 + if (object_is_on_stack(ptr, n) == -1)
73396 + goto report;
73397 + return;
73398 + }
73399 +
73400 + s = page->slab;
73401 + type = s->name;
73402 + if (!(s->flags & SLAB_USERCOPY))
73403 + goto report;
73404 +
73405 + offset = (ptr - page_address(page)) % s->size;
73406 + if (offset <= s->objsize && n <= s->objsize - offset)
73407 + return;
73408 +
73409 +report:
73410 + pax_report_usercopy(ptr, n, to, type);
73411 +#endif
73412 +
73413 +}
73414 +EXPORT_SYMBOL(check_object_size);
73415 +
73416 size_t ksize(const void *object)
73417 {
73418 struct page *page;
73419 @@ -3502,7 +3547,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73420 int node;
73421
73422 list_add(&s->list, &slab_caches);
73423 - s->refcount = -1;
73424 + atomic_set(&s->refcount, -1);
73425
73426 for_each_node_state(node, N_NORMAL_MEMORY) {
73427 struct kmem_cache_node *n = get_node(s, node);
73428 @@ -3619,17 +3664,17 @@ void __init kmem_cache_init(void)
73429
73430 /* Caches that are not of the two-to-the-power-of size */
73431 if (KMALLOC_MIN_SIZE <= 32) {
73432 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73433 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73434 caches++;
73435 }
73436
73437 if (KMALLOC_MIN_SIZE <= 64) {
73438 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73439 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73440 caches++;
73441 }
73442
73443 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73444 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73445 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73446 caches++;
73447 }
73448
73449 @@ -3697,7 +3742,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73450 /*
73451 * We may have set a slab to be unmergeable during bootstrap.
73452 */
73453 - if (s->refcount < 0)
73454 + if (atomic_read(&s->refcount) < 0)
73455 return 1;
73456
73457 return 0;
73458 @@ -3756,7 +3801,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73459 down_write(&slub_lock);
73460 s = find_mergeable(size, align, flags, name, ctor);
73461 if (s) {
73462 - s->refcount++;
73463 + atomic_inc(&s->refcount);
73464 /*
73465 * Adjust the object sizes so that we clear
73466 * the complete object on kzalloc.
73467 @@ -3765,7 +3810,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73468 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73469
73470 if (sysfs_slab_alias(s, name)) {
73471 - s->refcount--;
73472 + atomic_dec(&s->refcount);
73473 goto err;
73474 }
73475 up_write(&slub_lock);
73476 @@ -3893,7 +3938,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73477 }
73478 #endif
73479
73480 -#ifdef CONFIG_SYSFS
73481 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73482 static int count_inuse(struct page *page)
73483 {
73484 return page->inuse;
73485 @@ -4280,12 +4325,12 @@ static void resiliency_test(void)
73486 validate_slab_cache(kmalloc_caches[9]);
73487 }
73488 #else
73489 -#ifdef CONFIG_SYSFS
73490 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73491 static void resiliency_test(void) {};
73492 #endif
73493 #endif
73494
73495 -#ifdef CONFIG_SYSFS
73496 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73497 enum slab_stat_type {
73498 SL_ALL, /* All slabs */
73499 SL_PARTIAL, /* Only partially allocated slabs */
73500 @@ -4495,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
73501
73502 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73503 {
73504 - return sprintf(buf, "%d\n", s->refcount - 1);
73505 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73506 }
73507 SLAB_ATTR_RO(aliases);
73508
73509 @@ -5025,6 +5070,7 @@ static char *create_unique_id(struct kmem_cache *s)
73510 return name;
73511 }
73512
73513 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73514 static int sysfs_slab_add(struct kmem_cache *s)
73515 {
73516 int err;
73517 @@ -5087,6 +5133,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73518 kobject_del(&s->kobj);
73519 kobject_put(&s->kobj);
73520 }
73521 +#endif
73522
73523 /*
73524 * Need to buffer aliases during bootup until sysfs becomes
73525 @@ -5100,6 +5147,7 @@ struct saved_alias {
73526
73527 static struct saved_alias *alias_list;
73528
73529 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73530 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73531 {
73532 struct saved_alias *al;
73533 @@ -5122,6 +5170,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73534 alias_list = al;
73535 return 0;
73536 }
73537 +#endif
73538
73539 static int __init slab_sysfs_init(void)
73540 {
73541 @@ -5257,7 +5306,13 @@ static const struct file_operations proc_slabinfo_operations = {
73542
73543 static int __init slab_proc_init(void)
73544 {
73545 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
73546 + mode_t gr_mode = S_IRUGO;
73547 +
73548 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73549 + gr_mode = S_IRUSR;
73550 +#endif
73551 +
73552 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
73553 return 0;
73554 }
73555 module_init(slab_proc_init);
73556 diff --git a/mm/swap.c b/mm/swap.c
73557 index 87627f1..8a9eb34 100644
73558 --- a/mm/swap.c
73559 +++ b/mm/swap.c
73560 @@ -31,6 +31,7 @@
73561 #include <linux/backing-dev.h>
73562 #include <linux/memcontrol.h>
73563 #include <linux/gfp.h>
73564 +#include <linux/hugetlb.h>
73565
73566 #include "internal.h"
73567
73568 @@ -71,6 +72,8 @@ static void __put_compound_page(struct page *page)
73569
73570 __page_cache_release(page);
73571 dtor = get_compound_page_dtor(page);
73572 + if (!PageHuge(page))
73573 + BUG_ON(dtor != free_compound_page);
73574 (*dtor)(page);
73575 }
73576
73577 diff --git a/mm/swapfile.c b/mm/swapfile.c
73578 index 17bc224..1677059 100644
73579 --- a/mm/swapfile.c
73580 +++ b/mm/swapfile.c
73581 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(swapon_mutex);
73582
73583 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73584 /* Activity counter to indicate that a swapon or swapoff has occurred */
73585 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73586 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73587
73588 static inline unsigned char swap_count(unsigned char ent)
73589 {
73590 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73591 }
73592 filp_close(swap_file, NULL);
73593 err = 0;
73594 - atomic_inc(&proc_poll_event);
73595 + atomic_inc_unchecked(&proc_poll_event);
73596 wake_up_interruptible(&proc_poll_wait);
73597
73598 out_dput:
73599 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73600
73601 poll_wait(file, &proc_poll_wait, wait);
73602
73603 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73604 - seq->poll_event = atomic_read(&proc_poll_event);
73605 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73606 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73607 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73608 }
73609
73610 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73611 return ret;
73612
73613 seq = file->private_data;
73614 - seq->poll_event = atomic_read(&proc_poll_event);
73615 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73616 return 0;
73617 }
73618
73619 @@ -2124,7 +2124,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73620 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73621
73622 mutex_unlock(&swapon_mutex);
73623 - atomic_inc(&proc_poll_event);
73624 + atomic_inc_unchecked(&proc_poll_event);
73625 wake_up_interruptible(&proc_poll_wait);
73626
73627 if (S_ISREG(inode->i_mode))
73628 diff --git a/mm/util.c b/mm/util.c
73629 index 88ea1bd..0f1dfdb 100644
73630 --- a/mm/util.c
73631 +++ b/mm/util.c
73632 @@ -114,6 +114,7 @@ EXPORT_SYMBOL(memdup_user);
73633 * allocated buffer. Use this if you don't want to free the buffer immediately
73634 * like, for example, with RCU.
73635 */
73636 +#undef __krealloc
73637 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
73638 {
73639 void *ret;
73640 @@ -147,6 +148,7 @@ EXPORT_SYMBOL(__krealloc);
73641 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
73642 * %NULL pointer, the object pointed to is freed.
73643 */
73644 +#undef krealloc
73645 void *krealloc(const void *p, size_t new_size, gfp_t flags)
73646 {
73647 void *ret;
73648 @@ -243,6 +245,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
73649 void arch_pick_mmap_layout(struct mm_struct *mm)
73650 {
73651 mm->mmap_base = TASK_UNMAPPED_BASE;
73652 +
73653 +#ifdef CONFIG_PAX_RANDMMAP
73654 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73655 + mm->mmap_base += mm->delta_mmap;
73656 +#endif
73657 +
73658 mm->get_unmapped_area = arch_get_unmapped_area;
73659 mm->unmap_area = arch_unmap_area;
73660 }
73661 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
73662 index 56faf31..75c1a4c 100644
73663 --- a/mm/vmalloc.c
73664 +++ b/mm/vmalloc.c
73665 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
73666
73667 pte = pte_offset_kernel(pmd, addr);
73668 do {
73669 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73670 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73671 +
73672 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73673 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
73674 + BUG_ON(!pte_exec(*pte));
73675 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
73676 + continue;
73677 + }
73678 +#endif
73679 +
73680 + {
73681 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
73682 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
73683 + }
73684 } while (pte++, addr += PAGE_SIZE, addr != end);
73685 }
73686
73687 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73688 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
73689 {
73690 pte_t *pte;
73691 + int ret = -ENOMEM;
73692
73693 /*
73694 * nr is a running index into the array which helps higher level
73695 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
73696 pte = pte_alloc_kernel(pmd, addr);
73697 if (!pte)
73698 return -ENOMEM;
73699 +
73700 + pax_open_kernel();
73701 do {
73702 struct page *page = pages[*nr];
73703
73704 - if (WARN_ON(!pte_none(*pte)))
73705 - return -EBUSY;
73706 - if (WARN_ON(!page))
73707 - return -ENOMEM;
73708 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73709 + if (pgprot_val(prot) & _PAGE_NX)
73710 +#endif
73711 +
73712 + if (WARN_ON(!pte_none(*pte))) {
73713 + ret = -EBUSY;
73714 + goto out;
73715 + }
73716 + if (WARN_ON(!page)) {
73717 + ret = -ENOMEM;
73718 + goto out;
73719 + }
73720 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
73721 (*nr)++;
73722 } while (pte++, addr += PAGE_SIZE, addr != end);
73723 - return 0;
73724 + ret = 0;
73725 +out:
73726 + pax_close_kernel();
73727 + return ret;
73728 }
73729
73730 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
73731 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
73732 * and fall back on vmalloc() if that fails. Others
73733 * just put it in the vmalloc space.
73734 */
73735 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
73736 +#ifdef CONFIG_MODULES
73737 +#ifdef MODULES_VADDR
73738 unsigned long addr = (unsigned long)x;
73739 if (addr >= MODULES_VADDR && addr < MODULES_END)
73740 return 1;
73741 #endif
73742 +
73743 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73744 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
73745 + return 1;
73746 +#endif
73747 +
73748 +#endif
73749 +
73750 return is_vmalloc_addr(x);
73751 }
73752
73753 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
73754
73755 if (!pgd_none(*pgd)) {
73756 pud_t *pud = pud_offset(pgd, addr);
73757 +#ifdef CONFIG_X86
73758 + if (!pud_large(*pud))
73759 +#endif
73760 if (!pud_none(*pud)) {
73761 pmd_t *pmd = pmd_offset(pud, addr);
73762 +#ifdef CONFIG_X86
73763 + if (!pmd_large(*pmd))
73764 +#endif
73765 if (!pmd_none(*pmd)) {
73766 pte_t *ptep, pte;
73767
73768 @@ -1294,6 +1334,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
73769 struct vm_struct *area;
73770
73771 BUG_ON(in_interrupt());
73772 +
73773 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73774 + if (flags & VM_KERNEXEC) {
73775 + if (start != VMALLOC_START || end != VMALLOC_END)
73776 + return NULL;
73777 + start = (unsigned long)MODULES_EXEC_VADDR;
73778 + end = (unsigned long)MODULES_EXEC_END;
73779 + }
73780 +#endif
73781 +
73782 if (flags & VM_IOREMAP) {
73783 int bit = fls(size);
73784
73785 @@ -1526,6 +1576,11 @@ void *vmap(struct page **pages, unsigned int count,
73786 if (count > totalram_pages)
73787 return NULL;
73788
73789 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73790 + if (!(pgprot_val(prot) & _PAGE_NX))
73791 + flags |= VM_KERNEXEC;
73792 +#endif
73793 +
73794 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
73795 __builtin_return_address(0));
73796 if (!area)
73797 @@ -1627,6 +1682,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
73798 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
73799 return NULL;
73800
73801 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
73802 + if (!(pgprot_val(prot) & _PAGE_NX))
73803 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
73804 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
73805 + else
73806 +#endif
73807 +
73808 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
73809 start, end, node, gfp_mask, caller);
73810
73811 @@ -1672,6 +1734,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
73812 gfp_mask, prot, node, caller);
73813 }
73814
73815 +#undef __vmalloc
73816 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
73817 {
73818 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
73819 @@ -1695,6 +1758,7 @@ static inline void *__vmalloc_node_flags(unsigned long size,
73820 * For tight control over page level allocator and protection flags
73821 * use __vmalloc() instead.
73822 */
73823 +#undef vmalloc
73824 void *vmalloc(unsigned long size)
73825 {
73826 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
73827 @@ -1711,6 +1775,7 @@ EXPORT_SYMBOL(vmalloc);
73828 * For tight control over page level allocator and protection flags
73829 * use __vmalloc() instead.
73830 */
73831 +#undef vzalloc
73832 void *vzalloc(unsigned long size)
73833 {
73834 return __vmalloc_node_flags(size, -1,
73835 @@ -1725,6 +1790,7 @@ EXPORT_SYMBOL(vzalloc);
73836 * The resulting memory area is zeroed so it can be mapped to userspace
73837 * without leaking data.
73838 */
73839 +#undef vmalloc_user
73840 void *vmalloc_user(unsigned long size)
73841 {
73842 struct vm_struct *area;
73843 @@ -1752,6 +1818,7 @@ EXPORT_SYMBOL(vmalloc_user);
73844 * For tight control over page level allocator and protection flags
73845 * use __vmalloc() instead.
73846 */
73847 +#undef vmalloc_node
73848 void *vmalloc_node(unsigned long size, int node)
73849 {
73850 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
73851 @@ -1771,6 +1838,7 @@ EXPORT_SYMBOL(vmalloc_node);
73852 * For tight control over page level allocator and protection flags
73853 * use __vmalloc_node() instead.
73854 */
73855 +#undef vzalloc_node
73856 void *vzalloc_node(unsigned long size, int node)
73857 {
73858 return __vmalloc_node_flags(size, node,
73859 @@ -1793,10 +1861,10 @@ EXPORT_SYMBOL(vzalloc_node);
73860 * For tight control over page level allocator and protection flags
73861 * use __vmalloc() instead.
73862 */
73863 -
73864 +#undef vmalloc_exec
73865 void *vmalloc_exec(unsigned long size)
73866 {
73867 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
73868 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
73869 -1, __builtin_return_address(0));
73870 }
73871
73872 @@ -1815,6 +1883,7 @@ void *vmalloc_exec(unsigned long size)
73873 * Allocate enough 32bit PA addressable pages to cover @size from the
73874 * page level allocator and map them into contiguous kernel virtual space.
73875 */
73876 +#undef vmalloc_32
73877 void *vmalloc_32(unsigned long size)
73878 {
73879 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
73880 @@ -1829,6 +1898,7 @@ EXPORT_SYMBOL(vmalloc_32);
73881 * The resulting memory area is 32bit addressable and zeroed so it can be
73882 * mapped to userspace without leaking data.
73883 */
73884 +#undef vmalloc_32_user
73885 void *vmalloc_32_user(unsigned long size)
73886 {
73887 struct vm_struct *area;
73888 @@ -2091,6 +2161,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
73889 unsigned long uaddr = vma->vm_start;
73890 unsigned long usize = vma->vm_end - vma->vm_start;
73891
73892 + BUG_ON(vma->vm_mirror);
73893 +
73894 if ((PAGE_SIZE-1) & (unsigned long)addr)
73895 return -EINVAL;
73896
73897 diff --git a/mm/vmstat.c b/mm/vmstat.c
73898 index d52b13d..381d1ac 100644
73899 --- a/mm/vmstat.c
73900 +++ b/mm/vmstat.c
73901 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
73902 *
73903 * vm_stat contains the global counters
73904 */
73905 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73906 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
73907 EXPORT_SYMBOL(vm_stat);
73908
73909 #ifdef CONFIG_SMP
73910 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
73911 v = p->vm_stat_diff[i];
73912 p->vm_stat_diff[i] = 0;
73913 local_irq_restore(flags);
73914 - atomic_long_add(v, &zone->vm_stat[i]);
73915 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
73916 global_diff[i] += v;
73917 #ifdef CONFIG_NUMA
73918 /* 3 seconds idle till flush */
73919 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
73920
73921 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
73922 if (global_diff[i])
73923 - atomic_long_add(global_diff[i], &vm_stat[i]);
73924 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
73925 }
73926
73927 #endif
73928 @@ -1207,10 +1207,20 @@ static int __init setup_vmstat(void)
73929 start_cpu_timer(cpu);
73930 #endif
73931 #ifdef CONFIG_PROC_FS
73932 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
73933 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
73934 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
73935 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
73936 + {
73937 + mode_t gr_mode = S_IRUGO;
73938 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
73939 + gr_mode = S_IRUSR;
73940 +#endif
73941 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
73942 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
73943 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73944 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
73945 +#else
73946 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
73947 +#endif
73948 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
73949 + }
73950 #endif
73951 return 0;
73952 }
73953 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
73954 index 8970ba1..e3361fe 100644
73955 --- a/net/8021q/vlan.c
73956 +++ b/net/8021q/vlan.c
73957 @@ -588,8 +588,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
73958 err = -EPERM;
73959 if (!capable(CAP_NET_ADMIN))
73960 break;
73961 - if ((args.u.name_type >= 0) &&
73962 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
73963 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
73964 struct vlan_net *vn;
73965
73966 vn = net_generic(net, vlan_net_id);
73967 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
73968 index fdfdb57..38d368c 100644
73969 --- a/net/9p/trans_fd.c
73970 +++ b/net/9p/trans_fd.c
73971 @@ -423,7 +423,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
73972 oldfs = get_fs();
73973 set_fs(get_ds());
73974 /* The cast to a user pointer is valid due to the set_fs() */
73975 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
73976 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
73977 set_fs(oldfs);
73978
73979 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
73980 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
73981 index e317583..3c8aeaf 100644
73982 --- a/net/9p/trans_virtio.c
73983 +++ b/net/9p/trans_virtio.c
73984 @@ -327,7 +327,7 @@ req_retry_pinned:
73985 } else {
73986 char *pbuf;
73987 if (req->tc->pubuf)
73988 - pbuf = (__force char *) req->tc->pubuf;
73989 + pbuf = (char __force_kernel *) req->tc->pubuf;
73990 else
73991 pbuf = req->tc->pkbuf;
73992 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
73993 @@ -357,7 +357,7 @@ req_retry_pinned:
73994 } else {
73995 char *pbuf;
73996 if (req->tc->pubuf)
73997 - pbuf = (__force char *) req->tc->pubuf;
73998 + pbuf = (char __force_kernel *) req->tc->pubuf;
73999 else
74000 pbuf = req->tc->pkbuf;
74001
74002 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74003 index f41f026..fe76ea8 100644
74004 --- a/net/atm/atm_misc.c
74005 +++ b/net/atm/atm_misc.c
74006 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74007 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74008 return 1;
74009 atm_return(vcc, truesize);
74010 - atomic_inc(&vcc->stats->rx_drop);
74011 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74012 return 0;
74013 }
74014 EXPORT_SYMBOL(atm_charge);
74015 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74016 }
74017 }
74018 atm_return(vcc, guess);
74019 - atomic_inc(&vcc->stats->rx_drop);
74020 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74021 return NULL;
74022 }
74023 EXPORT_SYMBOL(atm_alloc_charge);
74024 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74025
74026 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74027 {
74028 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74029 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74030 __SONET_ITEMS
74031 #undef __HANDLE_ITEM
74032 }
74033 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74034
74035 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74036 {
74037 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74038 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74039 __SONET_ITEMS
74040 #undef __HANDLE_ITEM
74041 }
74042 diff --git a/net/atm/lec.h b/net/atm/lec.h
74043 index dfc0719..47c5322 100644
74044 --- a/net/atm/lec.h
74045 +++ b/net/atm/lec.h
74046 @@ -48,7 +48,7 @@ struct lane2_ops {
74047 const u8 *tlvs, u32 sizeoftlvs);
74048 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74049 const u8 *tlvs, u32 sizeoftlvs);
74050 -};
74051 +} __no_const;
74052
74053 /*
74054 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74055 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74056 index 0919a88..a23d54e 100644
74057 --- a/net/atm/mpc.h
74058 +++ b/net/atm/mpc.h
74059 @@ -33,7 +33,7 @@ struct mpoa_client {
74060 struct mpc_parameters parameters; /* parameters for this client */
74061
74062 const struct net_device_ops *old_ops;
74063 - struct net_device_ops new_ops;
74064 + net_device_ops_no_const new_ops;
74065 };
74066
74067
74068 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
74069 index d1b2d9a..7cc2219 100644
74070 --- a/net/atm/mpoa_caches.c
74071 +++ b/net/atm/mpoa_caches.c
74072 @@ -255,6 +255,8 @@ static void check_resolving_entries(struct mpoa_client *client)
74073 struct timeval now;
74074 struct k_message msg;
74075
74076 + pax_track_stack();
74077 +
74078 do_gettimeofday(&now);
74079
74080 read_lock_bh(&client->ingress_lock);
74081 diff --git a/net/atm/proc.c b/net/atm/proc.c
74082 index 0d020de..011c7bb 100644
74083 --- a/net/atm/proc.c
74084 +++ b/net/atm/proc.c
74085 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74086 const struct k_atm_aal_stats *stats)
74087 {
74088 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74089 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74090 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74091 - atomic_read(&stats->rx_drop));
74092 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74093 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74094 + atomic_read_unchecked(&stats->rx_drop));
74095 }
74096
74097 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74098 diff --git a/net/atm/resources.c b/net/atm/resources.c
74099 index 23f45ce..c748f1a 100644
74100 --- a/net/atm/resources.c
74101 +++ b/net/atm/resources.c
74102 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74103 static void copy_aal_stats(struct k_atm_aal_stats *from,
74104 struct atm_aal_stats *to)
74105 {
74106 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74107 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74108 __AAL_STAT_ITEMS
74109 #undef __HANDLE_ITEM
74110 }
74111 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74112 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74113 struct atm_aal_stats *to)
74114 {
74115 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74116 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74117 __AAL_STAT_ITEMS
74118 #undef __HANDLE_ITEM
74119 }
74120 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74121 index db7aacf..991e539 100644
74122 --- a/net/batman-adv/hard-interface.c
74123 +++ b/net/batman-adv/hard-interface.c
74124 @@ -347,8 +347,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74125 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74126 dev_add_pack(&hard_iface->batman_adv_ptype);
74127
74128 - atomic_set(&hard_iface->seqno, 1);
74129 - atomic_set(&hard_iface->frag_seqno, 1);
74130 + atomic_set_unchecked(&hard_iface->seqno, 1);
74131 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74132 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74133 hard_iface->net_dev->name);
74134
74135 diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
74136 index 0f32c81..82d1895 100644
74137 --- a/net/batman-adv/routing.c
74138 +++ b/net/batman-adv/routing.c
74139 @@ -656,7 +656,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
74140 return;
74141
74142 /* could be changed by schedule_own_packet() */
74143 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
74144 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74145
74146 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
74147
74148 diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
74149 index 58d1447..2a66c8c 100644
74150 --- a/net/batman-adv/send.c
74151 +++ b/net/batman-adv/send.c
74152 @@ -326,7 +326,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
74153
74154 /* change sequence number to network order */
74155 batman_packet->seqno =
74156 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
74157 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74158
74159 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
74160 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
74161 @@ -343,7 +343,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
74162 else
74163 batman_packet->gw_flags = NO_FLAGS;
74164
74165 - atomic_inc(&hard_iface->seqno);
74166 + atomic_inc_unchecked(&hard_iface->seqno);
74167
74168 slide_own_bcast_window(hard_iface);
74169 send_time = own_send_time(bat_priv);
74170 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74171 index 05dd351..2ecd19b 100644
74172 --- a/net/batman-adv/soft-interface.c
74173 +++ b/net/batman-adv/soft-interface.c
74174 @@ -632,7 +632,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74175
74176 /* set broadcast sequence number */
74177 bcast_packet->seqno =
74178 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74179 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74180
74181 add_bcast_packet_to_list(bat_priv, skb, 1);
74182
74183 @@ -824,7 +824,7 @@ struct net_device *softif_create(const char *name)
74184 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74185
74186 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74187 - atomic_set(&bat_priv->bcast_seqno, 1);
74188 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74189 atomic_set(&bat_priv->ttvn, 0);
74190 atomic_set(&bat_priv->tt_local_changes, 0);
74191 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74192 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74193 index 51a0db7..b8a62be 100644
74194 --- a/net/batman-adv/types.h
74195 +++ b/net/batman-adv/types.h
74196 @@ -38,8 +38,8 @@ struct hard_iface {
74197 int16_t if_num;
74198 char if_status;
74199 struct net_device *net_dev;
74200 - atomic_t seqno;
74201 - atomic_t frag_seqno;
74202 + atomic_unchecked_t seqno;
74203 + atomic_unchecked_t frag_seqno;
74204 unsigned char *packet_buff;
74205 int packet_len;
74206 struct kobject *hardif_obj;
74207 @@ -153,7 +153,7 @@ struct bat_priv {
74208 atomic_t orig_interval; /* uint */
74209 atomic_t hop_penalty; /* uint */
74210 atomic_t log_level; /* uint */
74211 - atomic_t bcast_seqno;
74212 + atomic_unchecked_t bcast_seqno;
74213 atomic_t bcast_queue_left;
74214 atomic_t batman_queue_left;
74215 atomic_t ttvn; /* tranlation table version number */
74216 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74217 index 32b125f..f1447e0 100644
74218 --- a/net/batman-adv/unicast.c
74219 +++ b/net/batman-adv/unicast.c
74220 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74221 frag1->flags = UNI_FRAG_HEAD | large_tail;
74222 frag2->flags = large_tail;
74223
74224 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74225 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74226 frag1->seqno = htons(seqno - 1);
74227 frag2->seqno = htons(seqno);
74228
74229 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74230 index ea7f031..0615edc 100644
74231 --- a/net/bluetooth/hci_conn.c
74232 +++ b/net/bluetooth/hci_conn.c
74233 @@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
74234 cp.handle = cpu_to_le16(conn->handle);
74235 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74236 cp.ediv = ediv;
74237 - memcpy(cp.rand, rand, sizeof(rand));
74238 + memcpy(cp.rand, rand, sizeof(cp.rand));
74239
74240 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
74241 }
74242 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74243 memset(&cp, 0, sizeof(cp));
74244
74245 cp.handle = cpu_to_le16(conn->handle);
74246 - memcpy(cp.ltk, ltk, sizeof(ltk));
74247 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74248
74249 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74250 }
74251 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
74252 index 995cbe0..c056d6c 100644
74253 --- a/net/bridge/br_multicast.c
74254 +++ b/net/bridge/br_multicast.c
74255 @@ -1485,7 +1485,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
74256 nexthdr = ip6h->nexthdr;
74257 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
74258
74259 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
74260 + if (nexthdr != IPPROTO_ICMPV6)
74261 return 0;
74262
74263 /* Okay, we found ICMPv6 header */
74264 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74265 index 5864cc4..94cab18 100644
74266 --- a/net/bridge/netfilter/ebtables.c
74267 +++ b/net/bridge/netfilter/ebtables.c
74268 @@ -1513,7 +1513,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74269 tmp.valid_hooks = t->table->valid_hooks;
74270 }
74271 mutex_unlock(&ebt_mutex);
74272 - if (copy_to_user(user, &tmp, *len) != 0){
74273 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74274 BUGPRINT("c2u Didn't work\n");
74275 ret = -EFAULT;
74276 break;
74277 @@ -1781,6 +1781,8 @@ static int compat_copy_everything_to_user(struct ebt_table *t,
74278 int ret;
74279 void __user *pos;
74280
74281 + pax_track_stack();
74282 +
74283 memset(&tinfo, 0, sizeof(tinfo));
74284
74285 if (cmd == EBT_SO_GET_ENTRIES) {
74286 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
74287 index a986280..13444a1 100644
74288 --- a/net/caif/caif_socket.c
74289 +++ b/net/caif/caif_socket.c
74290 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
74291 #ifdef CONFIG_DEBUG_FS
74292 struct debug_fs_counter {
74293 atomic_t caif_nr_socks;
74294 - atomic_t caif_sock_create;
74295 - atomic_t num_connect_req;
74296 - atomic_t num_connect_resp;
74297 - atomic_t num_connect_fail_resp;
74298 - atomic_t num_disconnect;
74299 - atomic_t num_remote_shutdown_ind;
74300 - atomic_t num_tx_flow_off_ind;
74301 - atomic_t num_tx_flow_on_ind;
74302 - atomic_t num_rx_flow_off;
74303 - atomic_t num_rx_flow_on;
74304 + atomic_unchecked_t caif_sock_create;
74305 + atomic_unchecked_t num_connect_req;
74306 + atomic_unchecked_t num_connect_resp;
74307 + atomic_unchecked_t num_connect_fail_resp;
74308 + atomic_unchecked_t num_disconnect;
74309 + atomic_unchecked_t num_remote_shutdown_ind;
74310 + atomic_unchecked_t num_tx_flow_off_ind;
74311 + atomic_unchecked_t num_tx_flow_on_ind;
74312 + atomic_unchecked_t num_rx_flow_off;
74313 + atomic_unchecked_t num_rx_flow_on;
74314 };
74315 static struct debug_fs_counter cnt;
74316 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74317 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74318 #define dbfs_atomic_dec(v) atomic_dec_return(v)
74319 #else
74320 #define dbfs_atomic_inc(v) 0
74321 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74322 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74323 sk_rcvbuf_lowwater(cf_sk));
74324 set_rx_flow_off(cf_sk);
74325 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74326 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74327 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74328 }
74329
74330 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74331 set_rx_flow_off(cf_sk);
74332 if (net_ratelimit())
74333 pr_debug("sending flow OFF due to rmem_schedule\n");
74334 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
74335 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74336 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74337 }
74338 skb->dev = NULL;
74339 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
74340 switch (flow) {
74341 case CAIF_CTRLCMD_FLOW_ON_IND:
74342 /* OK from modem to start sending again */
74343 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74344 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74345 set_tx_flow_on(cf_sk);
74346 cf_sk->sk.sk_state_change(&cf_sk->sk);
74347 break;
74348
74349 case CAIF_CTRLCMD_FLOW_OFF_IND:
74350 /* Modem asks us to shut up */
74351 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74352 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74353 set_tx_flow_off(cf_sk);
74354 cf_sk->sk.sk_state_change(&cf_sk->sk);
74355 break;
74356 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74357 /* We're now connected */
74358 caif_client_register_refcnt(&cf_sk->layer,
74359 cfsk_hold, cfsk_put);
74360 - dbfs_atomic_inc(&cnt.num_connect_resp);
74361 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74362 cf_sk->sk.sk_state = CAIF_CONNECTED;
74363 set_tx_flow_on(cf_sk);
74364 cf_sk->sk.sk_state_change(&cf_sk->sk);
74365 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74366
74367 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74368 /* Connect request failed */
74369 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74370 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74371 cf_sk->sk.sk_err = ECONNREFUSED;
74372 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74373 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74374 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74375
74376 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74377 /* Modem has closed this connection, or device is down. */
74378 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74379 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74380 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74381 cf_sk->sk.sk_err = ECONNRESET;
74382 set_rx_flow_on(cf_sk);
74383 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
74384 return;
74385
74386 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74387 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
74388 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74389 set_rx_flow_on(cf_sk);
74390 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74391 }
74392 @@ -854,7 +855,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
74393 /*ifindex = id of the interface.*/
74394 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74395
74396 - dbfs_atomic_inc(&cnt.num_connect_req);
74397 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74398 cf_sk->layer.receive = caif_sktrecv_cb;
74399
74400 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
74401 @@ -943,7 +944,7 @@ static int caif_release(struct socket *sock)
74402 spin_unlock_bh(&sk->sk_receive_queue.lock);
74403 sock->sk = NULL;
74404
74405 - dbfs_atomic_inc(&cnt.num_disconnect);
74406 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74407
74408 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
74409 if (cf_sk->debugfs_socket_dir != NULL)
74410 @@ -1122,7 +1123,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
74411 cf_sk->conn_req.protocol = protocol;
74412 /* Increase the number of sockets created. */
74413 dbfs_atomic_inc(&cnt.caif_nr_socks);
74414 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
74415 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74416 #ifdef CONFIG_DEBUG_FS
74417 if (!IS_ERR(debugfsdir)) {
74418
74419 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74420 index e22671b..6598ea0 100644
74421 --- a/net/caif/cfctrl.c
74422 +++ b/net/caif/cfctrl.c
74423 @@ -9,6 +9,7 @@
74424 #include <linux/stddef.h>
74425 #include <linux/spinlock.h>
74426 #include <linux/slab.h>
74427 +#include <linux/sched.h>
74428 #include <net/caif/caif_layer.h>
74429 #include <net/caif/cfpkt.h>
74430 #include <net/caif/cfctrl.h>
74431 @@ -45,8 +46,8 @@ struct cflayer *cfctrl_create(void)
74432 dev_info.id = 0xff;
74433 memset(this, 0, sizeof(*this));
74434 cfsrvl_init(&this->serv, 0, &dev_info, false);
74435 - atomic_set(&this->req_seq_no, 1);
74436 - atomic_set(&this->rsp_seq_no, 1);
74437 + atomic_set_unchecked(&this->req_seq_no, 1);
74438 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74439 this->serv.layer.receive = cfctrl_recv;
74440 sprintf(this->serv.layer.name, "ctrl");
74441 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74442 @@ -132,8 +133,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74443 struct cfctrl_request_info *req)
74444 {
74445 spin_lock_bh(&ctrl->info_list_lock);
74446 - atomic_inc(&ctrl->req_seq_no);
74447 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74448 + atomic_inc_unchecked(&ctrl->req_seq_no);
74449 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74450 list_add_tail(&req->list, &ctrl->list);
74451 spin_unlock_bh(&ctrl->info_list_lock);
74452 }
74453 @@ -151,7 +152,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74454 if (p != first)
74455 pr_warn("Requests are not received in order\n");
74456
74457 - atomic_set(&ctrl->rsp_seq_no,
74458 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74459 p->sequence_no);
74460 list_del(&p->list);
74461 goto out;
74462 @@ -364,6 +365,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
74463 struct cfctrl *cfctrl = container_obj(layer);
74464 struct cfctrl_request_info rsp, *req;
74465
74466 + pax_track_stack();
74467
74468 cfpkt_extr_head(pkt, &cmdrsp, 1);
74469 cmd = cmdrsp & CFCTRL_CMD_MASK;
74470 diff --git a/net/compat.c b/net/compat.c
74471 index c578d93..257fab7 100644
74472 --- a/net/compat.c
74473 +++ b/net/compat.c
74474 @@ -70,9 +70,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74475 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74476 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74477 return -EFAULT;
74478 - kmsg->msg_name = compat_ptr(tmp1);
74479 - kmsg->msg_iov = compat_ptr(tmp2);
74480 - kmsg->msg_control = compat_ptr(tmp3);
74481 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74482 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74483 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74484 return 0;
74485 }
74486
74487 @@ -84,7 +84,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74488
74489 if (kern_msg->msg_namelen) {
74490 if (mode == VERIFY_READ) {
74491 - int err = move_addr_to_kernel(kern_msg->msg_name,
74492 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74493 kern_msg->msg_namelen,
74494 kern_address);
74495 if (err < 0)
74496 @@ -95,7 +95,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74497 kern_msg->msg_name = NULL;
74498
74499 tot_len = iov_from_user_compat_to_kern(kern_iov,
74500 - (struct compat_iovec __user *)kern_msg->msg_iov,
74501 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74502 kern_msg->msg_iovlen);
74503 if (tot_len >= 0)
74504 kern_msg->msg_iov = kern_iov;
74505 @@ -115,20 +115,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74506
74507 #define CMSG_COMPAT_FIRSTHDR(msg) \
74508 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74509 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74510 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74511 (struct compat_cmsghdr __user *)NULL)
74512
74513 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74514 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74515 (ucmlen) <= (unsigned long) \
74516 ((mhdr)->msg_controllen - \
74517 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74518 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74519
74520 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74521 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74522 {
74523 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74524 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74525 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74526 msg->msg_controllen)
74527 return NULL;
74528 return (struct compat_cmsghdr __user *)ptr;
74529 @@ -220,7 +220,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74530 {
74531 struct compat_timeval ctv;
74532 struct compat_timespec cts[3];
74533 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74534 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74535 struct compat_cmsghdr cmhdr;
74536 int cmlen;
74537
74538 @@ -272,7 +272,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74539
74540 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74541 {
74542 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74543 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74544 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74545 int fdnum = scm->fp->count;
74546 struct file **fp = scm->fp->fp;
74547 @@ -369,7 +369,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74548 return -EFAULT;
74549 old_fs = get_fs();
74550 set_fs(KERNEL_DS);
74551 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74552 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74553 set_fs(old_fs);
74554
74555 return err;
74556 @@ -430,7 +430,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74557 len = sizeof(ktime);
74558 old_fs = get_fs();
74559 set_fs(KERNEL_DS);
74560 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74561 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74562 set_fs(old_fs);
74563
74564 if (!err) {
74565 @@ -565,7 +565,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74566 case MCAST_JOIN_GROUP:
74567 case MCAST_LEAVE_GROUP:
74568 {
74569 - struct compat_group_req __user *gr32 = (void *)optval;
74570 + struct compat_group_req __user *gr32 = (void __user *)optval;
74571 struct group_req __user *kgr =
74572 compat_alloc_user_space(sizeof(struct group_req));
74573 u32 interface;
74574 @@ -586,7 +586,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74575 case MCAST_BLOCK_SOURCE:
74576 case MCAST_UNBLOCK_SOURCE:
74577 {
74578 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74579 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74580 struct group_source_req __user *kgsr = compat_alloc_user_space(
74581 sizeof(struct group_source_req));
74582 u32 interface;
74583 @@ -607,7 +607,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74584 }
74585 case MCAST_MSFILTER:
74586 {
74587 - struct compat_group_filter __user *gf32 = (void *)optval;
74588 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74589 struct group_filter __user *kgf;
74590 u32 interface, fmode, numsrc;
74591
74592 @@ -645,7 +645,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74593 char __user *optval, int __user *optlen,
74594 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74595 {
74596 - struct compat_group_filter __user *gf32 = (void *)optval;
74597 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74598 struct group_filter __user *kgf;
74599 int __user *koptlen;
74600 u32 interface, fmode, numsrc;
74601 diff --git a/net/core/datagram.c b/net/core/datagram.c
74602 index 18ac112..fe95ed9 100644
74603 --- a/net/core/datagram.c
74604 +++ b/net/core/datagram.c
74605 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74606 }
74607
74608 kfree_skb(skb);
74609 - atomic_inc(&sk->sk_drops);
74610 + atomic_inc_unchecked(&sk->sk_drops);
74611 sk_mem_reclaim_partial(sk);
74612
74613 return err;
74614 diff --git a/net/core/dev.c b/net/core/dev.c
74615 index ae5cf2d..2c950a1 100644
74616 --- a/net/core/dev.c
74617 +++ b/net/core/dev.c
74618 @@ -1135,10 +1135,14 @@ void dev_load(struct net *net, const char *name)
74619 if (no_module && capable(CAP_NET_ADMIN))
74620 no_module = request_module("netdev-%s", name);
74621 if (no_module && capable(CAP_SYS_MODULE)) {
74622 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74623 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74624 +#else
74625 if (!request_module("%s", name))
74626 pr_err("Loading kernel module for a network device "
74627 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74628 "instead\n", name);
74629 +#endif
74630 }
74631 }
74632 EXPORT_SYMBOL(dev_load);
74633 @@ -1977,7 +1981,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74634
74635 struct dev_gso_cb {
74636 void (*destructor)(struct sk_buff *skb);
74637 -};
74638 +} __no_const;
74639
74640 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74641
74642 @@ -2930,7 +2934,7 @@ int netif_rx_ni(struct sk_buff *skb)
74643 }
74644 EXPORT_SYMBOL(netif_rx_ni);
74645
74646 -static void net_tx_action(struct softirq_action *h)
74647 +static void net_tx_action(void)
74648 {
74649 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74650
74651 @@ -3779,7 +3783,7 @@ void netif_napi_del(struct napi_struct *napi)
74652 }
74653 EXPORT_SYMBOL(netif_napi_del);
74654
74655 -static void net_rx_action(struct softirq_action *h)
74656 +static void net_rx_action(void)
74657 {
74658 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74659 unsigned long time_limit = jiffies + 2;
74660 diff --git a/net/core/flow.c b/net/core/flow.c
74661 index 555a456..de48421 100644
74662 --- a/net/core/flow.c
74663 +++ b/net/core/flow.c
74664 @@ -61,7 +61,7 @@ struct flow_cache {
74665 struct timer_list rnd_timer;
74666 };
74667
74668 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74669 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74670 EXPORT_SYMBOL(flow_cache_genid);
74671 static struct flow_cache flow_cache_global;
74672 static struct kmem_cache *flow_cachep __read_mostly;
74673 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74674
74675 static int flow_entry_valid(struct flow_cache_entry *fle)
74676 {
74677 - if (atomic_read(&flow_cache_genid) != fle->genid)
74678 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74679 return 0;
74680 if (fle->object && !fle->object->ops->check(fle->object))
74681 return 0;
74682 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74683 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74684 fcp->hash_count++;
74685 }
74686 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74687 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74688 flo = fle->object;
74689 if (!flo)
74690 goto ret_object;
74691 @@ -280,7 +280,7 @@ nocache:
74692 }
74693 flo = resolver(net, key, family, dir, flo, ctx);
74694 if (fle) {
74695 - fle->genid = atomic_read(&flow_cache_genid);
74696 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74697 if (!IS_ERR(flo))
74698 fle->object = flo;
74699 else
74700 diff --git a/net/core/iovec.c b/net/core/iovec.c
74701 index c40f27e..7f49254 100644
74702 --- a/net/core/iovec.c
74703 +++ b/net/core/iovec.c
74704 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74705 if (m->msg_namelen) {
74706 if (mode == VERIFY_READ) {
74707 void __user *namep;
74708 - namep = (void __user __force *) m->msg_name;
74709 + namep = (void __force_user *) m->msg_name;
74710 err = move_addr_to_kernel(namep, m->msg_namelen,
74711 address);
74712 if (err < 0)
74713 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
74714 }
74715
74716 size = m->msg_iovlen * sizeof(struct iovec);
74717 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74718 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74719 return -EFAULT;
74720
74721 m->msg_iov = iov;
74722 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74723 index 99d9e95..209bae2 100644
74724 --- a/net/core/rtnetlink.c
74725 +++ b/net/core/rtnetlink.c
74726 @@ -57,7 +57,7 @@ struct rtnl_link {
74727 rtnl_doit_func doit;
74728 rtnl_dumpit_func dumpit;
74729 rtnl_calcit_func calcit;
74730 -};
74731 +} __no_const;
74732
74733 static DEFINE_MUTEX(rtnl_mutex);
74734 static u16 min_ifinfo_dump_size;
74735 diff --git a/net/core/scm.c b/net/core/scm.c
74736 index 811b53f..5d6c343 100644
74737 --- a/net/core/scm.c
74738 +++ b/net/core/scm.c
74739 @@ -218,7 +218,7 @@ EXPORT_SYMBOL(__scm_send);
74740 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74741 {
74742 struct cmsghdr __user *cm
74743 - = (__force struct cmsghdr __user *)msg->msg_control;
74744 + = (struct cmsghdr __force_user *)msg->msg_control;
74745 struct cmsghdr cmhdr;
74746 int cmlen = CMSG_LEN(len);
74747 int err;
74748 @@ -241,7 +241,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74749 err = -EFAULT;
74750 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
74751 goto out;
74752 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
74753 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
74754 goto out;
74755 cmlen = CMSG_SPACE(len);
74756 if (msg->msg_controllen < cmlen)
74757 @@ -257,7 +257,7 @@ EXPORT_SYMBOL(put_cmsg);
74758 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74759 {
74760 struct cmsghdr __user *cm
74761 - = (__force struct cmsghdr __user*)msg->msg_control;
74762 + = (struct cmsghdr __force_user *)msg->msg_control;
74763
74764 int fdmax = 0;
74765 int fdnum = scm->fp->count;
74766 @@ -277,7 +277,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
74767 if (fdnum < fdmax)
74768 fdmax = fdnum;
74769
74770 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
74771 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
74772 i++, cmfptr++)
74773 {
74774 int new_fd;
74775 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
74776 index 387703f..035abcf 100644
74777 --- a/net/core/skbuff.c
74778 +++ b/net/core/skbuff.c
74779 @@ -1650,6 +1650,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
74780 struct sock *sk = skb->sk;
74781 int ret = 0;
74782
74783 + pax_track_stack();
74784 +
74785 if (splice_grow_spd(pipe, &spd))
74786 return -ENOMEM;
74787
74788 diff --git a/net/core/sock.c b/net/core/sock.c
74789 index 11d67b3..df26d4b 100644
74790 --- a/net/core/sock.c
74791 +++ b/net/core/sock.c
74792 @@ -293,7 +293,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74793 */
74794 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
74795 (unsigned)sk->sk_rcvbuf) {
74796 - atomic_inc(&sk->sk_drops);
74797 + atomic_inc_unchecked(&sk->sk_drops);
74798 trace_sock_rcvqueue_full(sk, skb);
74799 return -ENOMEM;
74800 }
74801 @@ -303,7 +303,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74802 return err;
74803
74804 if (!sk_rmem_schedule(sk, skb->truesize)) {
74805 - atomic_inc(&sk->sk_drops);
74806 + atomic_inc_unchecked(&sk->sk_drops);
74807 return -ENOBUFS;
74808 }
74809
74810 @@ -323,7 +323,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74811 skb_dst_force(skb);
74812
74813 spin_lock_irqsave(&list->lock, flags);
74814 - skb->dropcount = atomic_read(&sk->sk_drops);
74815 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
74816 __skb_queue_tail(list, skb);
74817 spin_unlock_irqrestore(&list->lock, flags);
74818
74819 @@ -343,7 +343,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74820 skb->dev = NULL;
74821
74822 if (sk_rcvqueues_full(sk, skb)) {
74823 - atomic_inc(&sk->sk_drops);
74824 + atomic_inc_unchecked(&sk->sk_drops);
74825 goto discard_and_relse;
74826 }
74827 if (nested)
74828 @@ -361,7 +361,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
74829 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
74830 } else if (sk_add_backlog(sk, skb)) {
74831 bh_unlock_sock(sk);
74832 - atomic_inc(&sk->sk_drops);
74833 + atomic_inc_unchecked(&sk->sk_drops);
74834 goto discard_and_relse;
74835 }
74836
74837 @@ -924,7 +924,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74838 if (len > sizeof(peercred))
74839 len = sizeof(peercred);
74840 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
74841 - if (copy_to_user(optval, &peercred, len))
74842 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
74843 return -EFAULT;
74844 goto lenout;
74845 }
74846 @@ -937,7 +937,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74847 return -ENOTCONN;
74848 if (lv < len)
74849 return -EINVAL;
74850 - if (copy_to_user(optval, address, len))
74851 + if (len > sizeof(address) || copy_to_user(optval, address, len))
74852 return -EFAULT;
74853 goto lenout;
74854 }
74855 @@ -970,7 +970,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
74856
74857 if (len > lv)
74858 len = lv;
74859 - if (copy_to_user(optval, &v, len))
74860 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
74861 return -EFAULT;
74862 lenout:
74863 if (put_user(len, optlen))
74864 @@ -2029,7 +2029,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
74865 */
74866 smp_wmb();
74867 atomic_set(&sk->sk_refcnt, 1);
74868 - atomic_set(&sk->sk_drops, 0);
74869 + atomic_set_unchecked(&sk->sk_drops, 0);
74870 }
74871 EXPORT_SYMBOL(sock_init_data);
74872
74873 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
74874 index 02e75d1..9a57a7c 100644
74875 --- a/net/decnet/sysctl_net_decnet.c
74876 +++ b/net/decnet/sysctl_net_decnet.c
74877 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
74878
74879 if (len > *lenp) len = *lenp;
74880
74881 - if (copy_to_user(buffer, addr, len))
74882 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
74883 return -EFAULT;
74884
74885 *lenp = len;
74886 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
74887
74888 if (len > *lenp) len = *lenp;
74889
74890 - if (copy_to_user(buffer, devname, len))
74891 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
74892 return -EFAULT;
74893
74894 *lenp = len;
74895 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
74896 index 39a2d29..f39c0fe 100644
74897 --- a/net/econet/Kconfig
74898 +++ b/net/econet/Kconfig
74899 @@ -4,7 +4,7 @@
74900
74901 config ECONET
74902 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
74903 - depends on EXPERIMENTAL && INET
74904 + depends on EXPERIMENTAL && INET && BROKEN
74905 ---help---
74906 Econet is a fairly old and slow networking protocol mainly used by
74907 Acorn computers to access file and print servers. It uses native
74908 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
74909 index 92fc5f6..b790d91 100644
74910 --- a/net/ipv4/fib_frontend.c
74911 +++ b/net/ipv4/fib_frontend.c
74912 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
74913 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74914 fib_sync_up(dev);
74915 #endif
74916 - atomic_inc(&net->ipv4.dev_addr_genid);
74917 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74918 rt_cache_flush(dev_net(dev), -1);
74919 break;
74920 case NETDEV_DOWN:
74921 fib_del_ifaddr(ifa, NULL);
74922 - atomic_inc(&net->ipv4.dev_addr_genid);
74923 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74924 if (ifa->ifa_dev->ifa_list == NULL) {
74925 /* Last address was deleted from this interface.
74926 * Disable IP.
74927 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
74928 #ifdef CONFIG_IP_ROUTE_MULTIPATH
74929 fib_sync_up(dev);
74930 #endif
74931 - atomic_inc(&net->ipv4.dev_addr_genid);
74932 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
74933 rt_cache_flush(dev_net(dev), -1);
74934 break;
74935 case NETDEV_DOWN:
74936 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
74937 index 80106d8..232e898 100644
74938 --- a/net/ipv4/fib_semantics.c
74939 +++ b/net/ipv4/fib_semantics.c
74940 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
74941 nh->nh_saddr = inet_select_addr(nh->nh_dev,
74942 nh->nh_gw,
74943 nh->nh_parent->fib_scope);
74944 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
74945 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
74946
74947 return nh->nh_saddr;
74948 }
74949 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
74950 index 389a2e6..ac1c1de 100644
74951 --- a/net/ipv4/inet_diag.c
74952 +++ b/net/ipv4/inet_diag.c
74953 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct sock *sk,
74954 r->idiag_retrans = 0;
74955
74956 r->id.idiag_if = sk->sk_bound_dev_if;
74957 +
74958 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74959 + r->id.idiag_cookie[0] = 0;
74960 + r->id.idiag_cookie[1] = 0;
74961 +#else
74962 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
74963 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
74964 +#endif
74965
74966 r->id.idiag_sport = inet->inet_sport;
74967 r->id.idiag_dport = inet->inet_dport;
74968 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
74969 r->idiag_family = tw->tw_family;
74970 r->idiag_retrans = 0;
74971 r->id.idiag_if = tw->tw_bound_dev_if;
74972 +
74973 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74974 + r->id.idiag_cookie[0] = 0;
74975 + r->id.idiag_cookie[1] = 0;
74976 +#else
74977 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
74978 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
74979 +#endif
74980 +
74981 r->id.idiag_sport = tw->tw_sport;
74982 r->id.idiag_dport = tw->tw_dport;
74983 r->id.idiag_src[0] = tw->tw_rcv_saddr;
74984 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
74985 if (sk == NULL)
74986 goto unlock;
74987
74988 +#ifndef CONFIG_GRKERNSEC_HIDESYM
74989 err = -ESTALE;
74990 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
74991 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
74992 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
74993 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
74994 goto out;
74995 +#endif
74996
74997 err = -ENOMEM;
74998 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
74999 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
75000 r->idiag_retrans = req->retrans;
75001
75002 r->id.idiag_if = sk->sk_bound_dev_if;
75003 +
75004 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75005 + r->id.idiag_cookie[0] = 0;
75006 + r->id.idiag_cookie[1] = 0;
75007 +#else
75008 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
75009 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
75010 +#endif
75011
75012 tmo = req->expires - jiffies;
75013 if (tmo < 0)
75014 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75015 index 984ec65..97ac518 100644
75016 --- a/net/ipv4/inet_hashtables.c
75017 +++ b/net/ipv4/inet_hashtables.c
75018 @@ -18,12 +18,15 @@
75019 #include <linux/sched.h>
75020 #include <linux/slab.h>
75021 #include <linux/wait.h>
75022 +#include <linux/security.h>
75023
75024 #include <net/inet_connection_sock.h>
75025 #include <net/inet_hashtables.h>
75026 #include <net/secure_seq.h>
75027 #include <net/ip.h>
75028
75029 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75030 +
75031 /*
75032 * Allocate and initialize a new local port bind bucket.
75033 * The bindhash mutex for snum's hash chain must be held here.
75034 @@ -530,6 +533,8 @@ ok:
75035 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75036 spin_unlock(&head->lock);
75037
75038 + gr_update_task_in_ip_table(current, inet_sk(sk));
75039 +
75040 if (tw) {
75041 inet_twsk_deschedule(tw, death_row);
75042 while (twrefcnt) {
75043 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75044 index 86f13c67..0bce60f 100644
75045 --- a/net/ipv4/inetpeer.c
75046 +++ b/net/ipv4/inetpeer.c
75047 @@ -400,6 +400,8 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
75048 unsigned int sequence;
75049 int invalidated, gccnt = 0;
75050
75051 + pax_track_stack();
75052 +
75053 /* Attempt a lockless lookup first.
75054 * Because of a concurrent writer, we might not find an existing entry.
75055 */
75056 @@ -436,8 +438,8 @@ relookup:
75057 if (p) {
75058 p->daddr = *daddr;
75059 atomic_set(&p->refcnt, 1);
75060 - atomic_set(&p->rid, 0);
75061 - atomic_set(&p->ip_id_count,
75062 + atomic_set_unchecked(&p->rid, 0);
75063 + atomic_set_unchecked(&p->ip_id_count,
75064 (daddr->family == AF_INET) ?
75065 secure_ip_id(daddr->addr.a4) :
75066 secure_ipv6_id(daddr->addr.a6));
75067 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75068 index 0e0ab98..2ed7dd5 100644
75069 --- a/net/ipv4/ip_fragment.c
75070 +++ b/net/ipv4/ip_fragment.c
75071 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75072 return 0;
75073
75074 start = qp->rid;
75075 - end = atomic_inc_return(&peer->rid);
75076 + end = atomic_inc_return_unchecked(&peer->rid);
75077 qp->rid = end;
75078
75079 rc = qp->q.fragments && (end - start) > max;
75080 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75081 index 8905e92..0b179fb 100644
75082 --- a/net/ipv4/ip_sockglue.c
75083 +++ b/net/ipv4/ip_sockglue.c
75084 @@ -1073,6 +1073,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75085 int val;
75086 int len;
75087
75088 + pax_track_stack();
75089 +
75090 if (level != SOL_IP)
75091 return -EOPNOTSUPP;
75092
75093 @@ -1110,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75094 len = min_t(unsigned int, len, opt->optlen);
75095 if (put_user(len, optlen))
75096 return -EFAULT;
75097 - if (copy_to_user(optval, opt->__data, len))
75098 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75099 + copy_to_user(optval, opt->__data, len))
75100 return -EFAULT;
75101 return 0;
75102 }
75103 @@ -1238,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75104 if (sk->sk_type != SOCK_STREAM)
75105 return -ENOPROTOOPT;
75106
75107 - msg.msg_control = optval;
75108 + msg.msg_control = (void __force_kernel *)optval;
75109 msg.msg_controllen = len;
75110 msg.msg_flags = flags;
75111
75112 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75113 index 472a8c4..6507cd4 100644
75114 --- a/net/ipv4/ipconfig.c
75115 +++ b/net/ipv4/ipconfig.c
75116 @@ -313,7 +313,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75117
75118 mm_segment_t oldfs = get_fs();
75119 set_fs(get_ds());
75120 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75121 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75122 set_fs(oldfs);
75123 return res;
75124 }
75125 @@ -324,7 +324,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75126
75127 mm_segment_t oldfs = get_fs();
75128 set_fs(get_ds());
75129 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75130 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75131 set_fs(oldfs);
75132 return res;
75133 }
75134 @@ -335,7 +335,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75135
75136 mm_segment_t oldfs = get_fs();
75137 set_fs(get_ds());
75138 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75139 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75140 set_fs(oldfs);
75141 return res;
75142 }
75143 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75144 index 076b7c8..9c8d038 100644
75145 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
75146 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75147 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
75148
75149 *len = 0;
75150
75151 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
75152 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
75153 if (*octets == NULL) {
75154 if (net_ratelimit())
75155 pr_notice("OOM in bsalg (%d)\n", __LINE__);
75156 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75157 index 39b403f..8e6a0a8 100644
75158 --- a/net/ipv4/ping.c
75159 +++ b/net/ipv4/ping.c
75160 @@ -837,7 +837,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75161 sk_rmem_alloc_get(sp),
75162 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75163 atomic_read(&sp->sk_refcnt), sp,
75164 - atomic_read(&sp->sk_drops), len);
75165 + atomic_read_unchecked(&sp->sk_drops), len);
75166 }
75167
75168 static int ping_seq_show(struct seq_file *seq, void *v)
75169 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75170 index 61714bd..c9cee6d 100644
75171 --- a/net/ipv4/raw.c
75172 +++ b/net/ipv4/raw.c
75173 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75174 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75175 {
75176 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75177 - atomic_inc(&sk->sk_drops);
75178 + atomic_inc_unchecked(&sk->sk_drops);
75179 kfree_skb(skb);
75180 return NET_RX_DROP;
75181 }
75182 @@ -737,16 +737,20 @@ static int raw_init(struct sock *sk)
75183
75184 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75185 {
75186 + struct icmp_filter filter;
75187 +
75188 if (optlen > sizeof(struct icmp_filter))
75189 optlen = sizeof(struct icmp_filter);
75190 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75191 + if (copy_from_user(&filter, optval, optlen))
75192 return -EFAULT;
75193 + raw_sk(sk)->filter = filter;
75194 return 0;
75195 }
75196
75197 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75198 {
75199 int len, ret = -EFAULT;
75200 + struct icmp_filter filter;
75201
75202 if (get_user(len, optlen))
75203 goto out;
75204 @@ -756,8 +760,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75205 if (len > sizeof(struct icmp_filter))
75206 len = sizeof(struct icmp_filter);
75207 ret = -EFAULT;
75208 - if (put_user(len, optlen) ||
75209 - copy_to_user(optval, &raw_sk(sk)->filter, len))
75210 + filter = raw_sk(sk)->filter;
75211 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75212 goto out;
75213 ret = 0;
75214 out: return ret;
75215 @@ -985,7 +989,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75216 sk_wmem_alloc_get(sp),
75217 sk_rmem_alloc_get(sp),
75218 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75219 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75220 + atomic_read(&sp->sk_refcnt),
75221 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75222 + NULL,
75223 +#else
75224 + sp,
75225 +#endif
75226 + atomic_read_unchecked(&sp->sk_drops));
75227 }
75228
75229 static int raw_seq_show(struct seq_file *seq, void *v)
75230 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75231 index 075212e..8713a00 100644
75232 --- a/net/ipv4/route.c
75233 +++ b/net/ipv4/route.c
75234 @@ -308,7 +308,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75235
75236 static inline int rt_genid(struct net *net)
75237 {
75238 - return atomic_read(&net->ipv4.rt_genid);
75239 + return atomic_read_unchecked(&net->ipv4.rt_genid);
75240 }
75241
75242 #ifdef CONFIG_PROC_FS
75243 @@ -837,7 +837,7 @@ static void rt_cache_invalidate(struct net *net)
75244 unsigned char shuffle;
75245
75246 get_random_bytes(&shuffle, sizeof(shuffle));
75247 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75248 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75249 }
75250
75251 /*
75252 @@ -2872,7 +2872,7 @@ static int rt_fill_info(struct net *net,
75253 error = rt->dst.error;
75254 if (peer) {
75255 inet_peer_refcheck(rt->peer);
75256 - id = atomic_read(&peer->ip_id_count) & 0xffff;
75257 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75258 if (peer->tcp_ts_stamp) {
75259 ts = peer->tcp_ts;
75260 tsage = get_seconds() - peer->tcp_ts_stamp;
75261 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
75262 index 46febca..98b73a4 100644
75263 --- a/net/ipv4/tcp.c
75264 +++ b/net/ipv4/tcp.c
75265 @@ -2122,6 +2122,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
75266 int val;
75267 int err = 0;
75268
75269 + pax_track_stack();
75270 +
75271 /* These are data/string values, all the others are ints */
75272 switch (optname) {
75273 case TCP_CONGESTION: {
75274 @@ -2501,6 +2503,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
75275 struct tcp_sock *tp = tcp_sk(sk);
75276 int val, len;
75277
75278 + pax_track_stack();
75279 +
75280 if (get_user(len, optlen))
75281 return -EFAULT;
75282
75283 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75284 index 7963e03..c44f5d0 100644
75285 --- a/net/ipv4/tcp_ipv4.c
75286 +++ b/net/ipv4/tcp_ipv4.c
75287 @@ -87,6 +87,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
75288 int sysctl_tcp_low_latency __read_mostly;
75289 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75290
75291 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75292 +extern int grsec_enable_blackhole;
75293 +#endif
75294
75295 #ifdef CONFIG_TCP_MD5SIG
75296 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
75297 @@ -1622,6 +1625,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75298 return 0;
75299
75300 reset:
75301 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75302 + if (!grsec_enable_blackhole)
75303 +#endif
75304 tcp_v4_send_reset(rsk, skb);
75305 discard:
75306 kfree_skb(skb);
75307 @@ -1684,12 +1690,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75308 TCP_SKB_CB(skb)->sacked = 0;
75309
75310 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75311 - if (!sk)
75312 + if (!sk) {
75313 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75314 + ret = 1;
75315 +#endif
75316 goto no_tcp_socket;
75317 -
75318 + }
75319 process:
75320 - if (sk->sk_state == TCP_TIME_WAIT)
75321 + if (sk->sk_state == TCP_TIME_WAIT) {
75322 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75323 + ret = 2;
75324 +#endif
75325 goto do_time_wait;
75326 + }
75327
75328 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75329 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75330 @@ -1739,6 +1752,10 @@ no_tcp_socket:
75331 bad_packet:
75332 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75333 } else {
75334 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75335 + if (!grsec_enable_blackhole || (ret == 1 &&
75336 + (skb->dev->flags & IFF_LOOPBACK)))
75337 +#endif
75338 tcp_v4_send_reset(NULL, skb);
75339 }
75340
75341 @@ -2403,7 +2420,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
75342 0, /* non standard timer */
75343 0, /* open_requests have no inode */
75344 atomic_read(&sk->sk_refcnt),
75345 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75346 + NULL,
75347 +#else
75348 req,
75349 +#endif
75350 len);
75351 }
75352
75353 @@ -2453,7 +2474,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75354 sock_i_uid(sk),
75355 icsk->icsk_probes_out,
75356 sock_i_ino(sk),
75357 - atomic_read(&sk->sk_refcnt), sk,
75358 + atomic_read(&sk->sk_refcnt),
75359 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75360 + NULL,
75361 +#else
75362 + sk,
75363 +#endif
75364 jiffies_to_clock_t(icsk->icsk_rto),
75365 jiffies_to_clock_t(icsk->icsk_ack.ato),
75366 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75367 @@ -2481,7 +2507,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
75368 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75369 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75370 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75371 - atomic_read(&tw->tw_refcnt), tw, len);
75372 + atomic_read(&tw->tw_refcnt),
75373 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75374 + NULL,
75375 +#else
75376 + tw,
75377 +#endif
75378 + len);
75379 }
75380
75381 #define TMPSZ 150
75382 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75383 index 0ce3d06..e182e59 100644
75384 --- a/net/ipv4/tcp_minisocks.c
75385 +++ b/net/ipv4/tcp_minisocks.c
75386 @@ -27,6 +27,10 @@
75387 #include <net/inet_common.h>
75388 #include <net/xfrm.h>
75389
75390 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75391 +extern int grsec_enable_blackhole;
75392 +#endif
75393 +
75394 int sysctl_tcp_syncookies __read_mostly = 1;
75395 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75396
75397 @@ -750,6 +754,10 @@ listen_overflow:
75398
75399 embryonic_reset:
75400 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75401 +
75402 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75403 + if (!grsec_enable_blackhole)
75404 +#endif
75405 if (!(flg & TCP_FLAG_RST))
75406 req->rsk_ops->send_reset(sk, skb);
75407
75408 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
75409 index 882e0b0..2eba47f 100644
75410 --- a/net/ipv4/tcp_output.c
75411 +++ b/net/ipv4/tcp_output.c
75412 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
75413 int mss;
75414 int s_data_desired = 0;
75415
75416 + pax_track_stack();
75417 +
75418 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
75419 s_data_desired = cvp->s_data_desired;
75420 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
75421 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75422 index 85ee7eb..53277ab 100644
75423 --- a/net/ipv4/tcp_probe.c
75424 +++ b/net/ipv4/tcp_probe.c
75425 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75426 if (cnt + width >= len)
75427 break;
75428
75429 - if (copy_to_user(buf + cnt, tbuf, width))
75430 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75431 return -EFAULT;
75432 cnt += width;
75433 }
75434 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75435 index ecd44b0..b32fba6 100644
75436 --- a/net/ipv4/tcp_timer.c
75437 +++ b/net/ipv4/tcp_timer.c
75438 @@ -22,6 +22,10 @@
75439 #include <linux/gfp.h>
75440 #include <net/tcp.h>
75441
75442 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75443 +extern int grsec_lastack_retries;
75444 +#endif
75445 +
75446 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75447 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75448 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75449 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock *sk)
75450 }
75451 }
75452
75453 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75454 + if ((sk->sk_state == TCP_LAST_ACK) &&
75455 + (grsec_lastack_retries > 0) &&
75456 + (grsec_lastack_retries < retry_until))
75457 + retry_until = grsec_lastack_retries;
75458 +#endif
75459 +
75460 if (retransmits_timed_out(sk, retry_until,
75461 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75462 /* Has it gone just too far? */
75463 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75464 index 1b5a193..bd354b0 100644
75465 --- a/net/ipv4/udp.c
75466 +++ b/net/ipv4/udp.c
75467 @@ -86,6 +86,7 @@
75468 #include <linux/types.h>
75469 #include <linux/fcntl.h>
75470 #include <linux/module.h>
75471 +#include <linux/security.h>
75472 #include <linux/socket.h>
75473 #include <linux/sockios.h>
75474 #include <linux/igmp.h>
75475 @@ -108,6 +109,10 @@
75476 #include <trace/events/udp.h>
75477 #include "udp_impl.h"
75478
75479 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75480 +extern int grsec_enable_blackhole;
75481 +#endif
75482 +
75483 struct udp_table udp_table __read_mostly;
75484 EXPORT_SYMBOL(udp_table);
75485
75486 @@ -565,6 +570,9 @@ found:
75487 return s;
75488 }
75489
75490 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75491 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75492 +
75493 /*
75494 * This routine is called by the ICMP module when it gets some
75495 * sort of error condition. If err < 0 then the socket should
75496 @@ -856,9 +864,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75497 dport = usin->sin_port;
75498 if (dport == 0)
75499 return -EINVAL;
75500 +
75501 + err = gr_search_udp_sendmsg(sk, usin);
75502 + if (err)
75503 + return err;
75504 } else {
75505 if (sk->sk_state != TCP_ESTABLISHED)
75506 return -EDESTADDRREQ;
75507 +
75508 + err = gr_search_udp_sendmsg(sk, NULL);
75509 + if (err)
75510 + return err;
75511 +
75512 daddr = inet->inet_daddr;
75513 dport = inet->inet_dport;
75514 /* Open fast path for connected socket.
75515 @@ -1099,7 +1116,7 @@ static unsigned int first_packet_length(struct sock *sk)
75516 udp_lib_checksum_complete(skb)) {
75517 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75518 IS_UDPLITE(sk));
75519 - atomic_inc(&sk->sk_drops);
75520 + atomic_inc_unchecked(&sk->sk_drops);
75521 __skb_unlink(skb, rcvq);
75522 __skb_queue_tail(&list_kill, skb);
75523 }
75524 @@ -1185,6 +1202,10 @@ try_again:
75525 if (!skb)
75526 goto out;
75527
75528 + err = gr_search_udp_recvmsg(sk, skb);
75529 + if (err)
75530 + goto out_free;
75531 +
75532 ulen = skb->len - sizeof(struct udphdr);
75533 if (len > ulen)
75534 len = ulen;
75535 @@ -1485,7 +1506,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75536
75537 drop:
75538 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75539 - atomic_inc(&sk->sk_drops);
75540 + atomic_inc_unchecked(&sk->sk_drops);
75541 kfree_skb(skb);
75542 return -1;
75543 }
75544 @@ -1504,7 +1525,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75545 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75546
75547 if (!skb1) {
75548 - atomic_inc(&sk->sk_drops);
75549 + atomic_inc_unchecked(&sk->sk_drops);
75550 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75551 IS_UDPLITE(sk));
75552 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75553 @@ -1673,6 +1694,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75554 goto csum_error;
75555
75556 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75557 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75558 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75559 +#endif
75560 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75561
75562 /*
75563 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75564 sk_wmem_alloc_get(sp),
75565 sk_rmem_alloc_get(sp),
75566 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75567 - atomic_read(&sp->sk_refcnt), sp,
75568 - atomic_read(&sp->sk_drops), len);
75569 + atomic_read(&sp->sk_refcnt),
75570 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75571 + NULL,
75572 +#else
75573 + sp,
75574 +#endif
75575 + atomic_read_unchecked(&sp->sk_drops), len);
75576 }
75577
75578 int udp4_seq_show(struct seq_file *seq, void *v)
75579 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75580 index 12368c5..fbf899f 100644
75581 --- a/net/ipv6/addrconf.c
75582 +++ b/net/ipv6/addrconf.c
75583 @@ -2083,7 +2083,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75584 p.iph.ihl = 5;
75585 p.iph.protocol = IPPROTO_IPV6;
75586 p.iph.ttl = 64;
75587 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75588 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75589
75590 if (ops->ndo_do_ioctl) {
75591 mm_segment_t oldfs = get_fs();
75592 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75593 index 8a58e8c..8b5e631 100644
75594 --- a/net/ipv6/inet6_connection_sock.c
75595 +++ b/net/ipv6/inet6_connection_sock.c
75596 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75597 #ifdef CONFIG_XFRM
75598 {
75599 struct rt6_info *rt = (struct rt6_info *)dst;
75600 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75601 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75602 }
75603 #endif
75604 }
75605 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75606 #ifdef CONFIG_XFRM
75607 if (dst) {
75608 struct rt6_info *rt = (struct rt6_info *)dst;
75609 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75610 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75611 __sk_dst_reset(sk);
75612 dst = NULL;
75613 }
75614 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75615 index 2fbda5f..26ed683 100644
75616 --- a/net/ipv6/ipv6_sockglue.c
75617 +++ b/net/ipv6/ipv6_sockglue.c
75618 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
75619 int val, valbool;
75620 int retv = -ENOPROTOOPT;
75621
75622 + pax_track_stack();
75623 +
75624 if (optval == NULL)
75625 val=0;
75626 else {
75627 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75628 int len;
75629 int val;
75630
75631 + pax_track_stack();
75632 +
75633 if (ip6_mroute_opt(optname))
75634 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
75635
75636 @@ -960,7 +964,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75637 if (sk->sk_type != SOCK_STREAM)
75638 return -ENOPROTOOPT;
75639
75640 - msg.msg_control = optval;
75641 + msg.msg_control = (void __force_kernel *)optval;
75642 msg.msg_controllen = len;
75643 msg.msg_flags = flags;
75644
75645 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75646 index 343852e..c92bd15 100644
75647 --- a/net/ipv6/raw.c
75648 +++ b/net/ipv6/raw.c
75649 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
75650 {
75651 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
75652 skb_checksum_complete(skb)) {
75653 - atomic_inc(&sk->sk_drops);
75654 + atomic_inc_unchecked(&sk->sk_drops);
75655 kfree_skb(skb);
75656 return NET_RX_DROP;
75657 }
75658 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75659 struct raw6_sock *rp = raw6_sk(sk);
75660
75661 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75662 - atomic_inc(&sk->sk_drops);
75663 + atomic_inc_unchecked(&sk->sk_drops);
75664 kfree_skb(skb);
75665 return NET_RX_DROP;
75666 }
75667 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75668
75669 if (inet->hdrincl) {
75670 if (skb_checksum_complete(skb)) {
75671 - atomic_inc(&sk->sk_drops);
75672 + atomic_inc_unchecked(&sk->sk_drops);
75673 kfree_skb(skb);
75674 return NET_RX_DROP;
75675 }
75676 @@ -601,7 +601,7 @@ out:
75677 return err;
75678 }
75679
75680 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75681 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75682 struct flowi6 *fl6, struct dst_entry **dstp,
75683 unsigned int flags)
75684 {
75685 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
75686 u16 proto;
75687 int err;
75688
75689 + pax_track_stack();
75690 +
75691 /* Rough check on arithmetic overflow,
75692 better check is made in ip6_append_data().
75693 */
75694 @@ -909,12 +911,15 @@ do_confirm:
75695 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75696 char __user *optval, int optlen)
75697 {
75698 + struct icmp6_filter filter;
75699 +
75700 switch (optname) {
75701 case ICMPV6_FILTER:
75702 if (optlen > sizeof(struct icmp6_filter))
75703 optlen = sizeof(struct icmp6_filter);
75704 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75705 + if (copy_from_user(&filter, optval, optlen))
75706 return -EFAULT;
75707 + raw6_sk(sk)->filter = filter;
75708 return 0;
75709 default:
75710 return -ENOPROTOOPT;
75711 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75712 char __user *optval, int __user *optlen)
75713 {
75714 int len;
75715 + struct icmp6_filter filter;
75716
75717 switch (optname) {
75718 case ICMPV6_FILTER:
75719 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75720 len = sizeof(struct icmp6_filter);
75721 if (put_user(len, optlen))
75722 return -EFAULT;
75723 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75724 + filter = raw6_sk(sk)->filter;
75725 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75726 return -EFAULT;
75727 return 0;
75728 default:
75729 @@ -1245,7 +1252,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75730 0, 0L, 0,
75731 sock_i_uid(sp), 0,
75732 sock_i_ino(sp),
75733 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75734 + atomic_read(&sp->sk_refcnt),
75735 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75736 + NULL,
75737 +#else
75738 + sp,
75739 +#endif
75740 + atomic_read_unchecked(&sp->sk_drops));
75741 }
75742
75743 static int raw6_seq_show(struct seq_file *seq, void *v)
75744 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75745 index 7b8fc57..c6185da 100644
75746 --- a/net/ipv6/tcp_ipv6.c
75747 +++ b/net/ipv6/tcp_ipv6.c
75748 @@ -93,6 +93,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75749 }
75750 #endif
75751
75752 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75753 +extern int grsec_enable_blackhole;
75754 +#endif
75755 +
75756 static void tcp_v6_hash(struct sock *sk)
75757 {
75758 if (sk->sk_state != TCP_CLOSE) {
75759 @@ -1647,6 +1651,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75760 return 0;
75761
75762 reset:
75763 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75764 + if (!grsec_enable_blackhole)
75765 +#endif
75766 tcp_v6_send_reset(sk, skb);
75767 discard:
75768 if (opt_skb)
75769 @@ -1726,12 +1733,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75770 TCP_SKB_CB(skb)->sacked = 0;
75771
75772 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75773 - if (!sk)
75774 + if (!sk) {
75775 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75776 + ret = 1;
75777 +#endif
75778 goto no_tcp_socket;
75779 + }
75780
75781 process:
75782 - if (sk->sk_state == TCP_TIME_WAIT)
75783 + if (sk->sk_state == TCP_TIME_WAIT) {
75784 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75785 + ret = 2;
75786 +#endif
75787 goto do_time_wait;
75788 + }
75789
75790 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75791 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75792 @@ -1779,6 +1794,10 @@ no_tcp_socket:
75793 bad_packet:
75794 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75795 } else {
75796 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75797 + if (!grsec_enable_blackhole || (ret == 1 &&
75798 + (skb->dev->flags & IFF_LOOPBACK)))
75799 +#endif
75800 tcp_v6_send_reset(NULL, skb);
75801 }
75802
75803 @@ -2039,7 +2058,13 @@ static void get_openreq6(struct seq_file *seq,
75804 uid,
75805 0, /* non standard timer */
75806 0, /* open_requests have no inode */
75807 - 0, req);
75808 + 0,
75809 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75810 + NULL
75811 +#else
75812 + req
75813 +#endif
75814 + );
75815 }
75816
75817 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75818 @@ -2089,7 +2114,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75819 sock_i_uid(sp),
75820 icsk->icsk_probes_out,
75821 sock_i_ino(sp),
75822 - atomic_read(&sp->sk_refcnt), sp,
75823 + atomic_read(&sp->sk_refcnt),
75824 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75825 + NULL,
75826 +#else
75827 + sp,
75828 +#endif
75829 jiffies_to_clock_t(icsk->icsk_rto),
75830 jiffies_to_clock_t(icsk->icsk_ack.ato),
75831 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75832 @@ -2124,7 +2154,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75833 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75834 tw->tw_substate, 0, 0,
75835 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75836 - atomic_read(&tw->tw_refcnt), tw);
75837 + atomic_read(&tw->tw_refcnt),
75838 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75839 + NULL
75840 +#else
75841 + tw
75842 +#endif
75843 + );
75844 }
75845
75846 static int tcp6_seq_show(struct seq_file *seq, void *v)
75847 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75848 index bb95e8e..ae0ee80 100644
75849 --- a/net/ipv6/udp.c
75850 +++ b/net/ipv6/udp.c
75851 @@ -50,6 +50,10 @@
75852 #include <linux/seq_file.h>
75853 #include "udp_impl.h"
75854
75855 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75856 +extern int grsec_enable_blackhole;
75857 +#endif
75858 +
75859 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75860 {
75861 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75862 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75863
75864 return 0;
75865 drop:
75866 - atomic_inc(&sk->sk_drops);
75867 + atomic_inc_unchecked(&sk->sk_drops);
75868 drop_no_sk_drops_inc:
75869 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75870 kfree_skb(skb);
75871 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75872 continue;
75873 }
75874 drop:
75875 - atomic_inc(&sk->sk_drops);
75876 + atomic_inc_unchecked(&sk->sk_drops);
75877 UDP6_INC_STATS_BH(sock_net(sk),
75878 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
75879 UDP6_INC_STATS_BH(sock_net(sk),
75880 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75881 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
75882 proto == IPPROTO_UDPLITE);
75883
75884 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75885 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75886 +#endif
75887 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
75888
75889 kfree_skb(skb);
75890 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75891 if (!sock_owned_by_user(sk))
75892 udpv6_queue_rcv_skb(sk, skb);
75893 else if (sk_add_backlog(sk, skb)) {
75894 - atomic_inc(&sk->sk_drops);
75895 + atomic_inc_unchecked(&sk->sk_drops);
75896 bh_unlock_sock(sk);
75897 sock_put(sk);
75898 goto discard;
75899 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
75900 0, 0L, 0,
75901 sock_i_uid(sp), 0,
75902 sock_i_ino(sp),
75903 - atomic_read(&sp->sk_refcnt), sp,
75904 - atomic_read(&sp->sk_drops));
75905 + atomic_read(&sp->sk_refcnt),
75906 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75907 + NULL,
75908 +#else
75909 + sp,
75910 +#endif
75911 + atomic_read_unchecked(&sp->sk_drops));
75912 }
75913
75914 int udp6_seq_show(struct seq_file *seq, void *v)
75915 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
75916 index b3cc8b3..baa02d0 100644
75917 --- a/net/irda/ircomm/ircomm_tty.c
75918 +++ b/net/irda/ircomm/ircomm_tty.c
75919 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75920 add_wait_queue(&self->open_wait, &wait);
75921
75922 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
75923 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75924 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75925
75926 /* As far as I can see, we protect open_count - Jean II */
75927 spin_lock_irqsave(&self->spinlock, flags);
75928 if (!tty_hung_up_p(filp)) {
75929 extra_count = 1;
75930 - self->open_count--;
75931 + local_dec(&self->open_count);
75932 }
75933 spin_unlock_irqrestore(&self->spinlock, flags);
75934 - self->blocked_open++;
75935 + local_inc(&self->blocked_open);
75936
75937 while (1) {
75938 if (tty->termios->c_cflag & CBAUD) {
75939 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75940 }
75941
75942 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
75943 - __FILE__,__LINE__, tty->driver->name, self->open_count );
75944 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
75945
75946 schedule();
75947 }
75948 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
75949 if (extra_count) {
75950 /* ++ is not atomic, so this should be protected - Jean II */
75951 spin_lock_irqsave(&self->spinlock, flags);
75952 - self->open_count++;
75953 + local_inc(&self->open_count);
75954 spin_unlock_irqrestore(&self->spinlock, flags);
75955 }
75956 - self->blocked_open--;
75957 + local_dec(&self->blocked_open);
75958
75959 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
75960 - __FILE__,__LINE__, tty->driver->name, self->open_count);
75961 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
75962
75963 if (!retval)
75964 self->flags |= ASYNC_NORMAL_ACTIVE;
75965 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
75966 }
75967 /* ++ is not atomic, so this should be protected - Jean II */
75968 spin_lock_irqsave(&self->spinlock, flags);
75969 - self->open_count++;
75970 + local_inc(&self->open_count);
75971
75972 tty->driver_data = self;
75973 self->tty = tty;
75974 spin_unlock_irqrestore(&self->spinlock, flags);
75975
75976 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
75977 - self->line, self->open_count);
75978 + self->line, local_read(&self->open_count));
75979
75980 /* Not really used by us, but lets do it anyway */
75981 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
75982 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75983 return;
75984 }
75985
75986 - if ((tty->count == 1) && (self->open_count != 1)) {
75987 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
75988 /*
75989 * Uh, oh. tty->count is 1, which means that the tty
75990 * structure will be freed. state->count should always
75991 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
75992 */
75993 IRDA_DEBUG(0, "%s(), bad serial port count; "
75994 "tty->count is 1, state->count is %d\n", __func__ ,
75995 - self->open_count);
75996 - self->open_count = 1;
75997 + local_read(&self->open_count));
75998 + local_set(&self->open_count, 1);
75999 }
76000
76001 - if (--self->open_count < 0) {
76002 + if (local_dec_return(&self->open_count) < 0) {
76003 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76004 - __func__, self->line, self->open_count);
76005 - self->open_count = 0;
76006 + __func__, self->line, local_read(&self->open_count));
76007 + local_set(&self->open_count, 0);
76008 }
76009 - if (self->open_count) {
76010 + if (local_read(&self->open_count)) {
76011 spin_unlock_irqrestore(&self->spinlock, flags);
76012
76013 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76014 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76015 tty->closing = 0;
76016 self->tty = NULL;
76017
76018 - if (self->blocked_open) {
76019 + if (local_read(&self->blocked_open)) {
76020 if (self->close_delay)
76021 schedule_timeout_interruptible(self->close_delay);
76022 wake_up_interruptible(&self->open_wait);
76023 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76024 spin_lock_irqsave(&self->spinlock, flags);
76025 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76026 self->tty = NULL;
76027 - self->open_count = 0;
76028 + local_set(&self->open_count, 0);
76029 spin_unlock_irqrestore(&self->spinlock, flags);
76030
76031 wake_up_interruptible(&self->open_wait);
76032 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76033 seq_putc(m, '\n');
76034
76035 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76036 - seq_printf(m, "Open count: %d\n", self->open_count);
76037 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76038 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76039 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76040
76041 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76042 index e2013e4..edfc1e3 100644
76043 --- a/net/iucv/af_iucv.c
76044 +++ b/net/iucv/af_iucv.c
76045 @@ -648,10 +648,10 @@ static int iucv_sock_autobind(struct sock *sk)
76046
76047 write_lock_bh(&iucv_sk_list.lock);
76048
76049 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76050 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76051 while (__iucv_get_sock_by_name(name)) {
76052 sprintf(name, "%08x",
76053 - atomic_inc_return(&iucv_sk_list.autobind_name));
76054 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76055 }
76056
76057 write_unlock_bh(&iucv_sk_list.lock);
76058 diff --git a/net/key/af_key.c b/net/key/af_key.c
76059 index 1e733e9..c84de2f 100644
76060 --- a/net/key/af_key.c
76061 +++ b/net/key/af_key.c
76062 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
76063 struct xfrm_migrate m[XFRM_MAX_DEPTH];
76064 struct xfrm_kmaddress k;
76065
76066 + pax_track_stack();
76067 +
76068 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
76069 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
76070 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
76071 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76072 static u32 get_acqseq(void)
76073 {
76074 u32 res;
76075 - static atomic_t acqseq;
76076 + static atomic_unchecked_t acqseq;
76077
76078 do {
76079 - res = atomic_inc_return(&acqseq);
76080 + res = atomic_inc_return_unchecked(&acqseq);
76081 } while (!res);
76082 return res;
76083 }
76084 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
76085 index 956b7e4..f01d328 100644
76086 --- a/net/lapb/lapb_iface.c
76087 +++ b/net/lapb/lapb_iface.c
76088 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
76089 goto out;
76090
76091 lapb->dev = dev;
76092 - lapb->callbacks = *callbacks;
76093 + lapb->callbacks = callbacks;
76094
76095 __lapb_insert_cb(lapb);
76096
76097 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
76098
76099 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
76100 {
76101 - if (lapb->callbacks.connect_confirmation)
76102 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
76103 + if (lapb->callbacks->connect_confirmation)
76104 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
76105 }
76106
76107 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
76108 {
76109 - if (lapb->callbacks.connect_indication)
76110 - lapb->callbacks.connect_indication(lapb->dev, reason);
76111 + if (lapb->callbacks->connect_indication)
76112 + lapb->callbacks->connect_indication(lapb->dev, reason);
76113 }
76114
76115 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
76116 {
76117 - if (lapb->callbacks.disconnect_confirmation)
76118 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
76119 + if (lapb->callbacks->disconnect_confirmation)
76120 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
76121 }
76122
76123 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
76124 {
76125 - if (lapb->callbacks.disconnect_indication)
76126 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
76127 + if (lapb->callbacks->disconnect_indication)
76128 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
76129 }
76130
76131 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
76132 {
76133 - if (lapb->callbacks.data_indication)
76134 - return lapb->callbacks.data_indication(lapb->dev, skb);
76135 + if (lapb->callbacks->data_indication)
76136 + return lapb->callbacks->data_indication(lapb->dev, skb);
76137
76138 kfree_skb(skb);
76139 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
76140 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
76141 {
76142 int used = 0;
76143
76144 - if (lapb->callbacks.data_transmit) {
76145 - lapb->callbacks.data_transmit(lapb->dev, skb);
76146 + if (lapb->callbacks->data_transmit) {
76147 + lapb->callbacks->data_transmit(lapb->dev, skb);
76148 used = 1;
76149 }
76150
76151 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
76152 index a01d213..6a1f1ab 100644
76153 --- a/net/mac80211/debugfs_sta.c
76154 +++ b/net/mac80211/debugfs_sta.c
76155 @@ -140,6 +140,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
76156 struct tid_ampdu_rx *tid_rx;
76157 struct tid_ampdu_tx *tid_tx;
76158
76159 + pax_track_stack();
76160 +
76161 rcu_read_lock();
76162
76163 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
76164 @@ -240,6 +242,8 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
76165 struct sta_info *sta = file->private_data;
76166 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
76167
76168 + pax_track_stack();
76169 +
76170 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
76171 htc->ht_supported ? "" : "not ");
76172 if (htc->ht_supported) {
76173 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76174 index 9fab144..7f0fc14 100644
76175 --- a/net/mac80211/ieee80211_i.h
76176 +++ b/net/mac80211/ieee80211_i.h
76177 @@ -27,6 +27,7 @@
76178 #include <net/ieee80211_radiotap.h>
76179 #include <net/cfg80211.h>
76180 #include <net/mac80211.h>
76181 +#include <asm/local.h>
76182 #include "key.h"
76183 #include "sta_info.h"
76184
76185 @@ -754,7 +755,7 @@ struct ieee80211_local {
76186 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76187 spinlock_t queue_stop_reason_lock;
76188
76189 - int open_count;
76190 + local_t open_count;
76191 int monitors, cooked_mntrs;
76192 /* number of interfaces with corresponding FIF_ flags */
76193 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76194 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76195 index 556e7e6..120dcaf 100644
76196 --- a/net/mac80211/iface.c
76197 +++ b/net/mac80211/iface.c
76198 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76199 break;
76200 }
76201
76202 - if (local->open_count == 0) {
76203 + if (local_read(&local->open_count) == 0) {
76204 res = drv_start(local);
76205 if (res)
76206 goto err_del_bss;
76207 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76208 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76209
76210 if (!is_valid_ether_addr(dev->dev_addr)) {
76211 - if (!local->open_count)
76212 + if (!local_read(&local->open_count))
76213 drv_stop(local);
76214 return -EADDRNOTAVAIL;
76215 }
76216 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76217 mutex_unlock(&local->mtx);
76218
76219 if (coming_up)
76220 - local->open_count++;
76221 + local_inc(&local->open_count);
76222
76223 if (hw_reconf_flags) {
76224 ieee80211_hw_config(local, hw_reconf_flags);
76225 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76226 err_del_interface:
76227 drv_remove_interface(local, &sdata->vif);
76228 err_stop:
76229 - if (!local->open_count)
76230 + if (!local_read(&local->open_count))
76231 drv_stop(local);
76232 err_del_bss:
76233 sdata->bss = NULL;
76234 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76235 }
76236
76237 if (going_down)
76238 - local->open_count--;
76239 + local_dec(&local->open_count);
76240
76241 switch (sdata->vif.type) {
76242 case NL80211_IFTYPE_AP_VLAN:
76243 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76244
76245 ieee80211_recalc_ps(local, -1);
76246
76247 - if (local->open_count == 0) {
76248 + if (local_read(&local->open_count) == 0) {
76249 if (local->ops->napi_poll)
76250 napi_disable(&local->napi);
76251 ieee80211_clear_tx_pending(local);
76252 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76253 index acb4423..278c8e5 100644
76254 --- a/net/mac80211/main.c
76255 +++ b/net/mac80211/main.c
76256 @@ -209,7 +209,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76257 local->hw.conf.power_level = power;
76258 }
76259
76260 - if (changed && local->open_count) {
76261 + if (changed && local_read(&local->open_count)) {
76262 ret = drv_config(local, changed);
76263 /*
76264 * Goal:
76265 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
76266 index 0f48368..d48e688 100644
76267 --- a/net/mac80211/mlme.c
76268 +++ b/net/mac80211/mlme.c
76269 @@ -1464,6 +1464,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
76270 bool have_higher_than_11mbit = false;
76271 u16 ap_ht_cap_flags;
76272
76273 + pax_track_stack();
76274 +
76275 /* AssocResp and ReassocResp have identical structure */
76276
76277 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
76278 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76279 index 6326d34..7225f61 100644
76280 --- a/net/mac80211/pm.c
76281 +++ b/net/mac80211/pm.c
76282 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76283 struct ieee80211_sub_if_data *sdata;
76284 struct sta_info *sta;
76285
76286 - if (!local->open_count)
76287 + if (!local_read(&local->open_count))
76288 goto suspend;
76289
76290 ieee80211_scan_cancel(local);
76291 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76292 cancel_work_sync(&local->dynamic_ps_enable_work);
76293 del_timer_sync(&local->dynamic_ps_timer);
76294
76295 - local->wowlan = wowlan && local->open_count;
76296 + local->wowlan = wowlan && local_read(&local->open_count);
76297 if (local->wowlan) {
76298 int err = drv_suspend(local, wowlan);
76299 if (err < 0) {
76300 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76301 }
76302
76303 /* stop hardware - this must stop RX */
76304 - if (local->open_count)
76305 + if (local_read(&local->open_count))
76306 ieee80211_stop_device(local);
76307
76308 suspend:
76309 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76310 index 3d5a2cb..b17ad48 100644
76311 --- a/net/mac80211/rate.c
76312 +++ b/net/mac80211/rate.c
76313 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76314
76315 ASSERT_RTNL();
76316
76317 - if (local->open_count)
76318 + if (local_read(&local->open_count))
76319 return -EBUSY;
76320
76321 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76322 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76323 index 4851e9e..d860e05 100644
76324 --- a/net/mac80211/rc80211_pid_debugfs.c
76325 +++ b/net/mac80211/rc80211_pid_debugfs.c
76326 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76327
76328 spin_unlock_irqrestore(&events->lock, status);
76329
76330 - if (copy_to_user(buf, pb, p))
76331 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76332 return -EFAULT;
76333
76334 return p;
76335 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76336 index fd031e8..84fbfcf 100644
76337 --- a/net/mac80211/util.c
76338 +++ b/net/mac80211/util.c
76339 @@ -1170,7 +1170,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76340 drv_set_coverage_class(local, hw->wiphy->coverage_class);
76341
76342 /* everything else happens only if HW was up & running */
76343 - if (!local->open_count)
76344 + if (!local_read(&local->open_count))
76345 goto wake_up;
76346
76347 /*
76348 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76349 index 32bff6d..d0cf986 100644
76350 --- a/net/netfilter/Kconfig
76351 +++ b/net/netfilter/Kconfig
76352 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
76353
76354 To compile it as a module, choose M here. If unsure, say N.
76355
76356 +config NETFILTER_XT_MATCH_GRADM
76357 + tristate '"gradm" match support'
76358 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76359 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76360 + ---help---
76361 + The gradm match allows to match on grsecurity RBAC being enabled.
76362 + It is useful when iptables rules are applied early on bootup to
76363 + prevent connections to the machine (except from a trusted host)
76364 + while the RBAC system is disabled.
76365 +
76366 config NETFILTER_XT_MATCH_HASHLIMIT
76367 tristate '"hashlimit" match support'
76368 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76369 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76370 index 1a02853..5d8c22e 100644
76371 --- a/net/netfilter/Makefile
76372 +++ b/net/netfilter/Makefile
76373 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
76374 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76375 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76376 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76377 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76378 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76379 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76380 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76381 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76382 index 12571fb..fb73976 100644
76383 --- a/net/netfilter/ipvs/ip_vs_conn.c
76384 +++ b/net/netfilter/ipvs/ip_vs_conn.c
76385 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76386 /* Increase the refcnt counter of the dest */
76387 atomic_inc(&dest->refcnt);
76388
76389 - conn_flags = atomic_read(&dest->conn_flags);
76390 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
76391 if (cp->protocol != IPPROTO_UDP)
76392 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76393 /* Bind with the destination and its corresponding transmitter */
76394 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76395 atomic_set(&cp->refcnt, 1);
76396
76397 atomic_set(&cp->n_control, 0);
76398 - atomic_set(&cp->in_pkts, 0);
76399 + atomic_set_unchecked(&cp->in_pkts, 0);
76400
76401 atomic_inc(&ipvs->conn_count);
76402 if (flags & IP_VS_CONN_F_NO_CPORT)
76403 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76404
76405 /* Don't drop the entry if its number of incoming packets is not
76406 located in [0, 8] */
76407 - i = atomic_read(&cp->in_pkts);
76408 + i = atomic_read_unchecked(&cp->in_pkts);
76409 if (i > 8 || i < 0) return 0;
76410
76411 if (!todrop_rate[i]) return 0;
76412 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76413 index 4f77bb1..5d0bc26 100644
76414 --- a/net/netfilter/ipvs/ip_vs_core.c
76415 +++ b/net/netfilter/ipvs/ip_vs_core.c
76416 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76417 ret = cp->packet_xmit(skb, cp, pd->pp);
76418 /* do not touch skb anymore */
76419
76420 - atomic_inc(&cp->in_pkts);
76421 + atomic_inc_unchecked(&cp->in_pkts);
76422 ip_vs_conn_put(cp);
76423 return ret;
76424 }
76425 @@ -1612,7 +1612,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76426 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76427 pkts = sysctl_sync_threshold(ipvs);
76428 else
76429 - pkts = atomic_add_return(1, &cp->in_pkts);
76430 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76431
76432 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76433 cp->protocol == IPPROTO_SCTP) {
76434 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76435 index e3be48b..d658c8c 100644
76436 --- a/net/netfilter/ipvs/ip_vs_ctl.c
76437 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
76438 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76439 ip_vs_rs_hash(ipvs, dest);
76440 write_unlock_bh(&ipvs->rs_lock);
76441 }
76442 - atomic_set(&dest->conn_flags, conn_flags);
76443 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
76444
76445 /* bind the service */
76446 if (!dest->svc) {
76447 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76448 " %-7s %-6d %-10d %-10d\n",
76449 &dest->addr.in6,
76450 ntohs(dest->port),
76451 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76452 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76453 atomic_read(&dest->weight),
76454 atomic_read(&dest->activeconns),
76455 atomic_read(&dest->inactconns));
76456 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76457 "%-7s %-6d %-10d %-10d\n",
76458 ntohl(dest->addr.ip),
76459 ntohs(dest->port),
76460 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76461 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76462 atomic_read(&dest->weight),
76463 atomic_read(&dest->activeconns),
76464 atomic_read(&dest->inactconns));
76465 @@ -2285,6 +2285,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
76466 struct ip_vs_dest_user_kern udest;
76467 struct netns_ipvs *ipvs = net_ipvs(net);
76468
76469 + pax_track_stack();
76470 +
76471 if (!capable(CAP_NET_ADMIN))
76472 return -EPERM;
76473
76474 @@ -2508,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76475
76476 entry.addr = dest->addr.ip;
76477 entry.port = dest->port;
76478 - entry.conn_flags = atomic_read(&dest->conn_flags);
76479 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76480 entry.weight = atomic_read(&dest->weight);
76481 entry.u_threshold = dest->u_threshold;
76482 entry.l_threshold = dest->l_threshold;
76483 @@ -3041,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76484 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76485
76486 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76487 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76488 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76489 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76490 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76491 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76492 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76493 index 3cdd479..116afa8 100644
76494 --- a/net/netfilter/ipvs/ip_vs_sync.c
76495 +++ b/net/netfilter/ipvs/ip_vs_sync.c
76496 @@ -649,7 +649,7 @@ control:
76497 * i.e only increment in_pkts for Templates.
76498 */
76499 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76500 - int pkts = atomic_add_return(1, &cp->in_pkts);
76501 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76502
76503 if (pkts % sysctl_sync_period(ipvs) != 1)
76504 return;
76505 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76506
76507 if (opt)
76508 memcpy(&cp->in_seq, opt, sizeof(*opt));
76509 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76510 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76511 cp->state = state;
76512 cp->old_state = cp->state;
76513 /*
76514 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76515 index ee319a4..8a285ee 100644
76516 --- a/net/netfilter/ipvs/ip_vs_xmit.c
76517 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
76518 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76519 else
76520 rc = NF_ACCEPT;
76521 /* do not touch skb anymore */
76522 - atomic_inc(&cp->in_pkts);
76523 + atomic_inc_unchecked(&cp->in_pkts);
76524 goto out;
76525 }
76526
76527 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76528 else
76529 rc = NF_ACCEPT;
76530 /* do not touch skb anymore */
76531 - atomic_inc(&cp->in_pkts);
76532 + atomic_inc_unchecked(&cp->in_pkts);
76533 goto out;
76534 }
76535
76536 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76537 index 2d8158a..5dca296 100644
76538 --- a/net/netfilter/nfnetlink_log.c
76539 +++ b/net/netfilter/nfnetlink_log.c
76540 @@ -70,7 +70,7 @@ struct nfulnl_instance {
76541 };
76542
76543 static DEFINE_SPINLOCK(instances_lock);
76544 -static atomic_t global_seq;
76545 +static atomic_unchecked_t global_seq;
76546
76547 #define INSTANCE_BUCKETS 16
76548 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76549 @@ -505,7 +505,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76550 /* global sequence number */
76551 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76552 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76553 - htonl(atomic_inc_return(&global_seq)));
76554 + htonl(atomic_inc_return_unchecked(&global_seq)));
76555
76556 if (data_len) {
76557 struct nlattr *nla;
76558 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76559 new file mode 100644
76560 index 0000000..6905327
76561 --- /dev/null
76562 +++ b/net/netfilter/xt_gradm.c
76563 @@ -0,0 +1,51 @@
76564 +/*
76565 + * gradm match for netfilter
76566 + * Copyright © Zbigniew Krzystolik, 2010
76567 + *
76568 + * This program is free software; you can redistribute it and/or modify
76569 + * it under the terms of the GNU General Public License; either version
76570 + * 2 or 3 as published by the Free Software Foundation.
76571 + */
76572 +#include <linux/module.h>
76573 +#include <linux/moduleparam.h>
76574 +#include <linux/skbuff.h>
76575 +#include <linux/netfilter/x_tables.h>
76576 +#include <linux/grsecurity.h>
76577 +#include <linux/netfilter/xt_gradm.h>
76578 +
76579 +static bool
76580 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
76581 +{
76582 + const struct xt_gradm_mtinfo *info = par->matchinfo;
76583 + bool retval = false;
76584 + if (gr_acl_is_enabled())
76585 + retval = true;
76586 + return retval ^ info->invflags;
76587 +}
76588 +
76589 +static struct xt_match gradm_mt_reg __read_mostly = {
76590 + .name = "gradm",
76591 + .revision = 0,
76592 + .family = NFPROTO_UNSPEC,
76593 + .match = gradm_mt,
76594 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
76595 + .me = THIS_MODULE,
76596 +};
76597 +
76598 +static int __init gradm_mt_init(void)
76599 +{
76600 + return xt_register_match(&gradm_mt_reg);
76601 +}
76602 +
76603 +static void __exit gradm_mt_exit(void)
76604 +{
76605 + xt_unregister_match(&gradm_mt_reg);
76606 +}
76607 +
76608 +module_init(gradm_mt_init);
76609 +module_exit(gradm_mt_exit);
76610 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76611 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76612 +MODULE_LICENSE("GPL");
76613 +MODULE_ALIAS("ipt_gradm");
76614 +MODULE_ALIAS("ip6t_gradm");
76615 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76616 index 42ecb71..8d687c0 100644
76617 --- a/net/netfilter/xt_statistic.c
76618 +++ b/net/netfilter/xt_statistic.c
76619 @@ -18,7 +18,7 @@
76620 #include <linux/netfilter/x_tables.h>
76621
76622 struct xt_statistic_priv {
76623 - atomic_t count;
76624 + atomic_unchecked_t count;
76625 } ____cacheline_aligned_in_smp;
76626
76627 MODULE_LICENSE("GPL");
76628 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76629 break;
76630 case XT_STATISTIC_MODE_NTH:
76631 do {
76632 - oval = atomic_read(&info->master->count);
76633 + oval = atomic_read_unchecked(&info->master->count);
76634 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76635 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76636 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76637 if (nval == 0)
76638 ret = !ret;
76639 break;
76640 @@ -63,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76641 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76642 if (info->master == NULL)
76643 return -ENOMEM;
76644 - atomic_set(&info->master->count, info->u.nth.count);
76645 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76646
76647 return 0;
76648 }
76649 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76650 index 0a4db02..604f748 100644
76651 --- a/net/netlink/af_netlink.c
76652 +++ b/net/netlink/af_netlink.c
76653 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock *sk)
76654 sk->sk_error_report(sk);
76655 }
76656 }
76657 - atomic_inc(&sk->sk_drops);
76658 + atomic_inc_unchecked(&sk->sk_drops);
76659 }
76660
76661 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76662 @@ -2000,7 +2000,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
76663 sk_wmem_alloc_get(s),
76664 nlk->cb,
76665 atomic_read(&s->sk_refcnt),
76666 - atomic_read(&s->sk_drops),
76667 + atomic_read_unchecked(&s->sk_drops),
76668 sock_i_ino(s)
76669 );
76670
76671 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
76672 index 732152f..60bb09e 100644
76673 --- a/net/netrom/af_netrom.c
76674 +++ b/net/netrom/af_netrom.c
76675 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76676 struct sock *sk = sock->sk;
76677 struct nr_sock *nr = nr_sk(sk);
76678
76679 + memset(sax, 0, sizeof(*sax));
76680 lock_sock(sk);
76681 if (peer != 0) {
76682 if (sk->sk_state != TCP_ESTABLISHED) {
76683 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76684 *uaddr_len = sizeof(struct full_sockaddr_ax25);
76685 } else {
76686 sax->fsa_ax25.sax25_family = AF_NETROM;
76687 - sax->fsa_ax25.sax25_ndigis = 0;
76688 sax->fsa_ax25.sax25_call = nr->source_addr;
76689 *uaddr_len = sizeof(struct sockaddr_ax25);
76690 }
76691 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
76692 index fabb4fa..e146b73 100644
76693 --- a/net/packet/af_packet.c
76694 +++ b/net/packet/af_packet.c
76695 @@ -954,7 +954,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76696
76697 spin_lock(&sk->sk_receive_queue.lock);
76698 po->stats.tp_packets++;
76699 - skb->dropcount = atomic_read(&sk->sk_drops);
76700 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76701 __skb_queue_tail(&sk->sk_receive_queue, skb);
76702 spin_unlock(&sk->sk_receive_queue.lock);
76703 sk->sk_data_ready(sk, skb->len);
76704 @@ -963,7 +963,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76705 drop_n_acct:
76706 spin_lock(&sk->sk_receive_queue.lock);
76707 po->stats.tp_drops++;
76708 - atomic_inc(&sk->sk_drops);
76709 + atomic_inc_unchecked(&sk->sk_drops);
76710 spin_unlock(&sk->sk_receive_queue.lock);
76711
76712 drop_n_restore:
76713 @@ -2479,7 +2479,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76714 case PACKET_HDRLEN:
76715 if (len > sizeof(int))
76716 len = sizeof(int);
76717 - if (copy_from_user(&val, optval, len))
76718 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
76719 return -EFAULT;
76720 switch (val) {
76721 case TPACKET_V1:
76722 @@ -2526,7 +2526,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76723
76724 if (put_user(len, optlen))
76725 return -EFAULT;
76726 - if (copy_to_user(optval, data, len))
76727 + if (len > sizeof(st) || copy_to_user(optval, data, len))
76728 return -EFAULT;
76729 return 0;
76730 }
76731 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
76732 index c6fffd9..a7ffa0c 100644
76733 --- a/net/phonet/af_phonet.c
76734 +++ b/net/phonet/af_phonet.c
76735 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
76736 {
76737 struct phonet_protocol *pp;
76738
76739 - if (protocol >= PHONET_NPROTO)
76740 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76741 return NULL;
76742
76743 rcu_read_lock();
76744 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76745 {
76746 int err = 0;
76747
76748 - if (protocol >= PHONET_NPROTO)
76749 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76750 return -EINVAL;
76751
76752 err = proto_register(pp->prot, 1);
76753 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
76754 index f17fd84..edffce8 100644
76755 --- a/net/phonet/pep.c
76756 +++ b/net/phonet/pep.c
76757 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76758
76759 case PNS_PEP_CTRL_REQ:
76760 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
76761 - atomic_inc(&sk->sk_drops);
76762 + atomic_inc_unchecked(&sk->sk_drops);
76763 break;
76764 }
76765 __skb_pull(skb, 4);
76766 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76767 }
76768
76769 if (pn->rx_credits == 0) {
76770 - atomic_inc(&sk->sk_drops);
76771 + atomic_inc_unchecked(&sk->sk_drops);
76772 err = -ENOBUFS;
76773 break;
76774 }
76775 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76776 }
76777
76778 if (pn->rx_credits == 0) {
76779 - atomic_inc(&sk->sk_drops);
76780 + atomic_inc_unchecked(&sk->sk_drops);
76781 err = NET_RX_DROP;
76782 break;
76783 }
76784 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76785 index ab07711..9d4ac5d 100644
76786 --- a/net/phonet/socket.c
76787 +++ b/net/phonet/socket.c
76788 @@ -612,8 +612,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76789 pn->resource, sk->sk_state,
76790 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76791 sock_i_uid(sk), sock_i_ino(sk),
76792 - atomic_read(&sk->sk_refcnt), sk,
76793 - atomic_read(&sk->sk_drops), &len);
76794 + atomic_read(&sk->sk_refcnt),
76795 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76796 + NULL,
76797 +#else
76798 + sk,
76799 +#endif
76800 + atomic_read_unchecked(&sk->sk_drops), &len);
76801 }
76802 seq_printf(seq, "%*s\n", 127 - len, "");
76803 return 0;
76804 diff --git a/net/rds/cong.c b/net/rds/cong.c
76805 index 6daaa49..fbf6af5 100644
76806 --- a/net/rds/cong.c
76807 +++ b/net/rds/cong.c
76808 @@ -77,7 +77,7 @@
76809 * finds that the saved generation number is smaller than the global generation
76810 * number, it wakes up the process.
76811 */
76812 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76813 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76814
76815 /*
76816 * Congestion monitoring
76817 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76818 rdsdebug("waking map %p for %pI4\n",
76819 map, &map->m_addr);
76820 rds_stats_inc(s_cong_update_received);
76821 - atomic_inc(&rds_cong_generation);
76822 + atomic_inc_unchecked(&rds_cong_generation);
76823 if (waitqueue_active(&map->m_waitq))
76824 wake_up(&map->m_waitq);
76825 if (waitqueue_active(&rds_poll_waitq))
76826 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76827
76828 int rds_cong_updated_since(unsigned long *recent)
76829 {
76830 - unsigned long gen = atomic_read(&rds_cong_generation);
76831 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76832
76833 if (likely(*recent == gen))
76834 return 0;
76835 diff --git a/net/rds/ib.h b/net/rds/ib.h
76836 index edfaaaf..8c89879 100644
76837 --- a/net/rds/ib.h
76838 +++ b/net/rds/ib.h
76839 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76840 /* sending acks */
76841 unsigned long i_ack_flags;
76842 #ifdef KERNEL_HAS_ATOMIC64
76843 - atomic64_t i_ack_next; /* next ACK to send */
76844 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76845 #else
76846 spinlock_t i_ack_lock; /* protect i_ack_next */
76847 u64 i_ack_next; /* next ACK to send */
76848 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76849 index cd67026..0b9a54a 100644
76850 --- a/net/rds/ib_cm.c
76851 +++ b/net/rds/ib_cm.c
76852 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76853 /* Clear the ACK state */
76854 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76855 #ifdef KERNEL_HAS_ATOMIC64
76856 - atomic64_set(&ic->i_ack_next, 0);
76857 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76858 #else
76859 ic->i_ack_next = 0;
76860 #endif
76861 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76862 index e29e0ca..fa3a6a3 100644
76863 --- a/net/rds/ib_recv.c
76864 +++ b/net/rds/ib_recv.c
76865 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76866 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76867 int ack_required)
76868 {
76869 - atomic64_set(&ic->i_ack_next, seq);
76870 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76871 if (ack_required) {
76872 smp_mb__before_clear_bit();
76873 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76874 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76875 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76876 smp_mb__after_clear_bit();
76877
76878 - return atomic64_read(&ic->i_ack_next);
76879 + return atomic64_read_unchecked(&ic->i_ack_next);
76880 }
76881 #endif
76882
76883 diff --git a/net/rds/iw.h b/net/rds/iw.h
76884 index 04ce3b1..48119a6 100644
76885 --- a/net/rds/iw.h
76886 +++ b/net/rds/iw.h
76887 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76888 /* sending acks */
76889 unsigned long i_ack_flags;
76890 #ifdef KERNEL_HAS_ATOMIC64
76891 - atomic64_t i_ack_next; /* next ACK to send */
76892 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76893 #else
76894 spinlock_t i_ack_lock; /* protect i_ack_next */
76895 u64 i_ack_next; /* next ACK to send */
76896 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76897 index 9556d28..f046d0e 100644
76898 --- a/net/rds/iw_cm.c
76899 +++ b/net/rds/iw_cm.c
76900 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76901 /* Clear the ACK state */
76902 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76903 #ifdef KERNEL_HAS_ATOMIC64
76904 - atomic64_set(&ic->i_ack_next, 0);
76905 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76906 #else
76907 ic->i_ack_next = 0;
76908 #endif
76909 diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
76910 index 4e1de17..d121708 100644
76911 --- a/net/rds/iw_rdma.c
76912 +++ b/net/rds/iw_rdma.c
76913 @@ -184,6 +184,8 @@ int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_i
76914 struct rdma_cm_id *pcm_id;
76915 int rc;
76916
76917 + pax_track_stack();
76918 +
76919 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
76920 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
76921
76922 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76923 index 5e57347..3916042 100644
76924 --- a/net/rds/iw_recv.c
76925 +++ b/net/rds/iw_recv.c
76926 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76927 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
76928 int ack_required)
76929 {
76930 - atomic64_set(&ic->i_ack_next, seq);
76931 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76932 if (ack_required) {
76933 smp_mb__before_clear_bit();
76934 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76935 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76936 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76937 smp_mb__after_clear_bit();
76938
76939 - return atomic64_read(&ic->i_ack_next);
76940 + return atomic64_read_unchecked(&ic->i_ack_next);
76941 }
76942 #endif
76943
76944 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
76945 index 8e0a320..ee8e38f 100644
76946 --- a/net/rds/tcp.c
76947 +++ b/net/rds/tcp.c
76948 @@ -58,7 +58,7 @@ void rds_tcp_nonagle(struct socket *sock)
76949 int val = 1;
76950
76951 set_fs(KERNEL_DS);
76952 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
76953 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
76954 sizeof(val));
76955 set_fs(oldfs);
76956 }
76957 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
76958 index 1b4fd68..2234175 100644
76959 --- a/net/rds/tcp_send.c
76960 +++ b/net/rds/tcp_send.c
76961 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
76962
76963 oldfs = get_fs();
76964 set_fs(KERNEL_DS);
76965 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
76966 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
76967 sizeof(val));
76968 set_fs(oldfs);
76969 }
76970 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
76971 index 74c064c..fdec26f 100644
76972 --- a/net/rxrpc/af_rxrpc.c
76973 +++ b/net/rxrpc/af_rxrpc.c
76974 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
76975 __be32 rxrpc_epoch;
76976
76977 /* current debugging ID */
76978 -atomic_t rxrpc_debug_id;
76979 +atomic_unchecked_t rxrpc_debug_id;
76980
76981 /* count of skbs currently in use */
76982 atomic_t rxrpc_n_skbs;
76983 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
76984 index f99cfce..3682692 100644
76985 --- a/net/rxrpc/ar-ack.c
76986 +++ b/net/rxrpc/ar-ack.c
76987 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76988
76989 _enter("{%d,%d,%d,%d},",
76990 call->acks_hard, call->acks_unacked,
76991 - atomic_read(&call->sequence),
76992 + atomic_read_unchecked(&call->sequence),
76993 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
76994
76995 stop = 0;
76996 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
76997
76998 /* each Tx packet has a new serial number */
76999 sp->hdr.serial =
77000 - htonl(atomic_inc_return(&call->conn->serial));
77001 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77002
77003 hdr = (struct rxrpc_header *) txb->head;
77004 hdr->serial = sp->hdr.serial;
77005 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77006 */
77007 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77008 {
77009 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77010 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77011 }
77012
77013 /*
77014 @@ -629,7 +629,7 @@ process_further:
77015
77016 latest = ntohl(sp->hdr.serial);
77017 hard = ntohl(ack.firstPacket);
77018 - tx = atomic_read(&call->sequence);
77019 + tx = atomic_read_unchecked(&call->sequence);
77020
77021 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77022 latest,
77023 @@ -842,6 +842,8 @@ void rxrpc_process_call(struct work_struct *work)
77024 u32 abort_code = RX_PROTOCOL_ERROR;
77025 u8 *acks = NULL;
77026
77027 + pax_track_stack();
77028 +
77029 //printk("\n--------------------\n");
77030 _enter("{%d,%s,%lx} [%lu]",
77031 call->debug_id, rxrpc_call_states[call->state], call->events,
77032 @@ -1161,7 +1163,7 @@ void rxrpc_process_call(struct work_struct *work)
77033 goto maybe_reschedule;
77034
77035 send_ACK_with_skew:
77036 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77037 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77038 ntohl(ack.serial));
77039 send_ACK:
77040 mtu = call->conn->trans->peer->if_mtu;
77041 @@ -1173,7 +1175,7 @@ send_ACK:
77042 ackinfo.rxMTU = htonl(5692);
77043 ackinfo.jumbo_max = htonl(4);
77044
77045 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77046 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77047 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77048 ntohl(hdr.serial),
77049 ntohs(ack.maxSkew),
77050 @@ -1191,7 +1193,7 @@ send_ACK:
77051 send_message:
77052 _debug("send message");
77053
77054 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77055 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77056 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77057 send_message_2:
77058
77059 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77060 index bf656c2..48f9d27 100644
77061 --- a/net/rxrpc/ar-call.c
77062 +++ b/net/rxrpc/ar-call.c
77063 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77064 spin_lock_init(&call->lock);
77065 rwlock_init(&call->state_lock);
77066 atomic_set(&call->usage, 1);
77067 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77068 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77069 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77070
77071 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77072 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77073 index 4106ca9..a338d7a 100644
77074 --- a/net/rxrpc/ar-connection.c
77075 +++ b/net/rxrpc/ar-connection.c
77076 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77077 rwlock_init(&conn->lock);
77078 spin_lock_init(&conn->state_lock);
77079 atomic_set(&conn->usage, 1);
77080 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77081 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77082 conn->avail_calls = RXRPC_MAXCALLS;
77083 conn->size_align = 4;
77084 conn->header_size = sizeof(struct rxrpc_header);
77085 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77086 index e7ed43a..6afa140 100644
77087 --- a/net/rxrpc/ar-connevent.c
77088 +++ b/net/rxrpc/ar-connevent.c
77089 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77090
77091 len = iov[0].iov_len + iov[1].iov_len;
77092
77093 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77094 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77095 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77096
77097 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77098 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77099 index 1a2b0633..e8d1382 100644
77100 --- a/net/rxrpc/ar-input.c
77101 +++ b/net/rxrpc/ar-input.c
77102 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77103 /* track the latest serial number on this connection for ACK packet
77104 * information */
77105 serial = ntohl(sp->hdr.serial);
77106 - hi_serial = atomic_read(&call->conn->hi_serial);
77107 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77108 while (serial > hi_serial)
77109 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77110 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77111 serial);
77112
77113 /* request ACK generation for any ACK or DATA packet that requests
77114 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77115 index 8e22bd3..f66d1c0 100644
77116 --- a/net/rxrpc/ar-internal.h
77117 +++ b/net/rxrpc/ar-internal.h
77118 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77119 int error; /* error code for local abort */
77120 int debug_id; /* debug ID for printks */
77121 unsigned call_counter; /* call ID counter */
77122 - atomic_t serial; /* packet serial number counter */
77123 - atomic_t hi_serial; /* highest serial number received */
77124 + atomic_unchecked_t serial; /* packet serial number counter */
77125 + atomic_unchecked_t hi_serial; /* highest serial number received */
77126 u8 avail_calls; /* number of calls available */
77127 u8 size_align; /* data size alignment (for security) */
77128 u8 header_size; /* rxrpc + security header size */
77129 @@ -346,7 +346,7 @@ struct rxrpc_call {
77130 spinlock_t lock;
77131 rwlock_t state_lock; /* lock for state transition */
77132 atomic_t usage;
77133 - atomic_t sequence; /* Tx data packet sequence counter */
77134 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77135 u32 abort_code; /* local/remote abort code */
77136 enum { /* current state of call */
77137 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77138 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77139 */
77140 extern atomic_t rxrpc_n_skbs;
77141 extern __be32 rxrpc_epoch;
77142 -extern atomic_t rxrpc_debug_id;
77143 +extern atomic_unchecked_t rxrpc_debug_id;
77144 extern struct workqueue_struct *rxrpc_workqueue;
77145
77146 /*
77147 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77148 index 87f7135..74d3703 100644
77149 --- a/net/rxrpc/ar-local.c
77150 +++ b/net/rxrpc/ar-local.c
77151 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77152 spin_lock_init(&local->lock);
77153 rwlock_init(&local->services_lock);
77154 atomic_set(&local->usage, 1);
77155 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77156 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77157 memcpy(&local->srx, srx, sizeof(*srx));
77158 }
77159
77160 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
77161 index 5f22e26..e5bd20f 100644
77162 --- a/net/rxrpc/ar-output.c
77163 +++ b/net/rxrpc/ar-output.c
77164 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
77165 sp->hdr.cid = call->cid;
77166 sp->hdr.callNumber = call->call_id;
77167 sp->hdr.seq =
77168 - htonl(atomic_inc_return(&call->sequence));
77169 + htonl(atomic_inc_return_unchecked(&call->sequence));
77170 sp->hdr.serial =
77171 - htonl(atomic_inc_return(&conn->serial));
77172 + htonl(atomic_inc_return_unchecked(&conn->serial));
77173 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
77174 sp->hdr.userStatus = 0;
77175 sp->hdr.securityIndex = conn->security_ix;
77176 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
77177 index 2754f09..b20e38f 100644
77178 --- a/net/rxrpc/ar-peer.c
77179 +++ b/net/rxrpc/ar-peer.c
77180 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
77181 INIT_LIST_HEAD(&peer->error_targets);
77182 spin_lock_init(&peer->lock);
77183 atomic_set(&peer->usage, 1);
77184 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
77185 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77186 memcpy(&peer->srx, srx, sizeof(*srx));
77187
77188 rxrpc_assess_MTU_size(peer);
77189 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
77190 index 38047f7..9f48511 100644
77191 --- a/net/rxrpc/ar-proc.c
77192 +++ b/net/rxrpc/ar-proc.c
77193 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
77194 atomic_read(&conn->usage),
77195 rxrpc_conn_states[conn->state],
77196 key_serial(conn->key),
77197 - atomic_read(&conn->serial),
77198 - atomic_read(&conn->hi_serial));
77199 + atomic_read_unchecked(&conn->serial),
77200 + atomic_read_unchecked(&conn->hi_serial));
77201
77202 return 0;
77203 }
77204 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
77205 index 92df566..87ec1bf 100644
77206 --- a/net/rxrpc/ar-transport.c
77207 +++ b/net/rxrpc/ar-transport.c
77208 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
77209 spin_lock_init(&trans->client_lock);
77210 rwlock_init(&trans->conn_lock);
77211 atomic_set(&trans->usage, 1);
77212 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
77213 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77214
77215 if (peer->srx.transport.family == AF_INET) {
77216 switch (peer->srx.transport_type) {
77217 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
77218 index 7635107..5000b71 100644
77219 --- a/net/rxrpc/rxkad.c
77220 +++ b/net/rxrpc/rxkad.c
77221 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
77222 u16 check;
77223 int nsg;
77224
77225 + pax_track_stack();
77226 +
77227 sp = rxrpc_skb(skb);
77228
77229 _enter("");
77230 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
77231 u16 check;
77232 int nsg;
77233
77234 + pax_track_stack();
77235 +
77236 _enter("");
77237
77238 sp = rxrpc_skb(skb);
77239 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
77240
77241 len = iov[0].iov_len + iov[1].iov_len;
77242
77243 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77244 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77245 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
77246
77247 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77248 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
77249
77250 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
77251
77252 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
77253 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77254 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
77255
77256 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
77257 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
77258 index 865e68f..bf81204 100644
77259 --- a/net/sctp/auth.c
77260 +++ b/net/sctp/auth.c
77261 @@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
77262 struct sctp_auth_bytes *key;
77263
77264 /* Verify that we are not going to overflow INT_MAX */
77265 - if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
77266 + if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
77267 return NULL;
77268
77269 /* Allocate the shared key */
77270 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
77271 index 05a6ce2..c8bf836 100644
77272 --- a/net/sctp/proc.c
77273 +++ b/net/sctp/proc.c
77274 @@ -318,7 +318,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
77275 seq_printf(seq,
77276 "%8pK %8pK %-3d %-3d %-2d %-4d "
77277 "%4d %8d %8d %7d %5lu %-5d %5d ",
77278 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
77279 + assoc, sk,
77280 + sctp_sk(sk)->type, sk->sk_state,
77281 assoc->state, hash,
77282 assoc->assoc_id,
77283 assoc->sndbuf_used,
77284 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
77285 index 836aa63..d779d7b 100644
77286 --- a/net/sctp/socket.c
77287 +++ b/net/sctp/socket.c
77288 @@ -4575,7 +4575,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
77289 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
77290 if (space_left < addrlen)
77291 return -ENOMEM;
77292 - if (copy_to_user(to, &temp, addrlen))
77293 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
77294 return -EFAULT;
77295 to += addrlen;
77296 cnt++;
77297 diff --git a/net/socket.c b/net/socket.c
77298 index ffe92ca..8057b85 100644
77299 --- a/net/socket.c
77300 +++ b/net/socket.c
77301 @@ -88,6 +88,7 @@
77302 #include <linux/nsproxy.h>
77303 #include <linux/magic.h>
77304 #include <linux/slab.h>
77305 +#include <linux/in.h>
77306
77307 #include <asm/uaccess.h>
77308 #include <asm/unistd.h>
77309 @@ -105,6 +106,8 @@
77310 #include <linux/sockios.h>
77311 #include <linux/atalk.h>
77312
77313 +#include <linux/grsock.h>
77314 +
77315 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
77316 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
77317 unsigned long nr_segs, loff_t pos);
77318 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
77319 &sockfs_dentry_operations, SOCKFS_MAGIC);
77320 }
77321
77322 -static struct vfsmount *sock_mnt __read_mostly;
77323 +struct vfsmount *sock_mnt __read_mostly;
77324
77325 static struct file_system_type sock_fs_type = {
77326 .name = "sockfs",
77327 @@ -1187,6 +1190,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
77328 return -EAFNOSUPPORT;
77329 if (type < 0 || type >= SOCK_MAX)
77330 return -EINVAL;
77331 + if (protocol < 0)
77332 + return -EINVAL;
77333
77334 /* Compatibility.
77335
77336 @@ -1319,6 +1324,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
77337 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
77338 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
77339
77340 + if(!gr_search_socket(family, type, protocol)) {
77341 + retval = -EACCES;
77342 + goto out;
77343 + }
77344 +
77345 + if (gr_handle_sock_all(family, type, protocol)) {
77346 + retval = -EACCES;
77347 + goto out;
77348 + }
77349 +
77350 retval = sock_create(family, type, protocol, &sock);
77351 if (retval < 0)
77352 goto out;
77353 @@ -1431,6 +1446,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77354 if (sock) {
77355 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
77356 if (err >= 0) {
77357 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
77358 + err = -EACCES;
77359 + goto error;
77360 + }
77361 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
77362 + if (err)
77363 + goto error;
77364 +
77365 err = security_socket_bind(sock,
77366 (struct sockaddr *)&address,
77367 addrlen);
77368 @@ -1439,6 +1462,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77369 (struct sockaddr *)
77370 &address, addrlen);
77371 }
77372 +error:
77373 fput_light(sock->file, fput_needed);
77374 }
77375 return err;
77376 @@ -1462,10 +1486,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
77377 if ((unsigned)backlog > somaxconn)
77378 backlog = somaxconn;
77379
77380 + if (gr_handle_sock_server_other(sock->sk)) {
77381 + err = -EPERM;
77382 + goto error;
77383 + }
77384 +
77385 + err = gr_search_listen(sock);
77386 + if (err)
77387 + goto error;
77388 +
77389 err = security_socket_listen(sock, backlog);
77390 if (!err)
77391 err = sock->ops->listen(sock, backlog);
77392
77393 +error:
77394 fput_light(sock->file, fput_needed);
77395 }
77396 return err;
77397 @@ -1509,6 +1543,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77398 newsock->type = sock->type;
77399 newsock->ops = sock->ops;
77400
77401 + if (gr_handle_sock_server_other(sock->sk)) {
77402 + err = -EPERM;
77403 + sock_release(newsock);
77404 + goto out_put;
77405 + }
77406 +
77407 + err = gr_search_accept(sock);
77408 + if (err) {
77409 + sock_release(newsock);
77410 + goto out_put;
77411 + }
77412 +
77413 /*
77414 * We don't need try_module_get here, as the listening socket (sock)
77415 * has the protocol module (sock->ops->owner) held.
77416 @@ -1547,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77417 fd_install(newfd, newfile);
77418 err = newfd;
77419
77420 + gr_attach_curr_ip(newsock->sk);
77421 +
77422 out_put:
77423 fput_light(sock->file, fput_needed);
77424 out:
77425 @@ -1579,6 +1627,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77426 int, addrlen)
77427 {
77428 struct socket *sock;
77429 + struct sockaddr *sck;
77430 struct sockaddr_storage address;
77431 int err, fput_needed;
77432
77433 @@ -1589,6 +1638,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77434 if (err < 0)
77435 goto out_put;
77436
77437 + sck = (struct sockaddr *)&address;
77438 +
77439 + if (gr_handle_sock_client(sck)) {
77440 + err = -EACCES;
77441 + goto out_put;
77442 + }
77443 +
77444 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
77445 + if (err)
77446 + goto out_put;
77447 +
77448 err =
77449 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
77450 if (err)
77451 @@ -1890,6 +1950,8 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77452 unsigned char *ctl_buf = ctl;
77453 int err, ctl_len, iov_size, total_len;
77454
77455 + pax_track_stack();
77456 +
77457 err = -EFAULT;
77458 if (MSG_CMSG_COMPAT & flags) {
77459 if (get_compat_msghdr(msg_sys, msg_compat))
77460 @@ -1950,7 +2012,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77461 * checking falls down on this.
77462 */
77463 if (copy_from_user(ctl_buf,
77464 - (void __user __force *)msg_sys->msg_control,
77465 + (void __force_user *)msg_sys->msg_control,
77466 ctl_len))
77467 goto out_freectl;
77468 msg_sys->msg_control = ctl_buf;
77469 @@ -2120,7 +2182,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
77470 * kernel msghdr to use the kernel address space)
77471 */
77472
77473 - uaddr = (__force void __user *)msg_sys->msg_name;
77474 + uaddr = (void __force_user *)msg_sys->msg_name;
77475 uaddr_len = COMPAT_NAMELEN(msg);
77476 if (MSG_CMSG_COMPAT & flags) {
77477 err = verify_compat_iovec(msg_sys, iov,
77478 @@ -2748,7 +2810,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77479 }
77480
77481 ifr = compat_alloc_user_space(buf_size);
77482 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
77483 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
77484
77485 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
77486 return -EFAULT;
77487 @@ -2772,12 +2834,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77488 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
77489
77490 if (copy_in_user(rxnfc, compat_rxnfc,
77491 - (void *)(&rxnfc->fs.m_ext + 1) -
77492 - (void *)rxnfc) ||
77493 + (void __user *)(&rxnfc->fs.m_ext + 1) -
77494 + (void __user *)rxnfc) ||
77495 copy_in_user(&rxnfc->fs.ring_cookie,
77496 &compat_rxnfc->fs.ring_cookie,
77497 - (void *)(&rxnfc->fs.location + 1) -
77498 - (void *)&rxnfc->fs.ring_cookie) ||
77499 + (void __user *)(&rxnfc->fs.location + 1) -
77500 + (void __user *)&rxnfc->fs.ring_cookie) ||
77501 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
77502 sizeof(rxnfc->rule_cnt)))
77503 return -EFAULT;
77504 @@ -2789,12 +2851,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77505
77506 if (convert_out) {
77507 if (copy_in_user(compat_rxnfc, rxnfc,
77508 - (const void *)(&rxnfc->fs.m_ext + 1) -
77509 - (const void *)rxnfc) ||
77510 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
77511 + (const void __user *)rxnfc) ||
77512 copy_in_user(&compat_rxnfc->fs.ring_cookie,
77513 &rxnfc->fs.ring_cookie,
77514 - (const void *)(&rxnfc->fs.location + 1) -
77515 - (const void *)&rxnfc->fs.ring_cookie) ||
77516 + (const void __user *)(&rxnfc->fs.location + 1) -
77517 + (const void __user *)&rxnfc->fs.ring_cookie) ||
77518 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
77519 sizeof(rxnfc->rule_cnt)))
77520 return -EFAULT;
77521 @@ -2864,7 +2926,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
77522 old_fs = get_fs();
77523 set_fs(KERNEL_DS);
77524 err = dev_ioctl(net, cmd,
77525 - (struct ifreq __user __force *) &kifr);
77526 + (struct ifreq __force_user *) &kifr);
77527 set_fs(old_fs);
77528
77529 return err;
77530 @@ -2973,7 +3035,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
77531
77532 old_fs = get_fs();
77533 set_fs(KERNEL_DS);
77534 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
77535 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
77536 set_fs(old_fs);
77537
77538 if (cmd == SIOCGIFMAP && !err) {
77539 @@ -3078,7 +3140,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
77540 ret |= __get_user(rtdev, &(ur4->rt_dev));
77541 if (rtdev) {
77542 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
77543 - r4.rt_dev = (char __user __force *)devname;
77544 + r4.rt_dev = (char __force_user *)devname;
77545 devname[15] = 0;
77546 } else
77547 r4.rt_dev = NULL;
77548 @@ -3318,8 +3380,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
77549 int __user *uoptlen;
77550 int err;
77551
77552 - uoptval = (char __user __force *) optval;
77553 - uoptlen = (int __user __force *) optlen;
77554 + uoptval = (char __force_user *) optval;
77555 + uoptlen = (int __force_user *) optlen;
77556
77557 set_fs(KERNEL_DS);
77558 if (level == SOL_SOCKET)
77559 @@ -3339,7 +3401,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
77560 char __user *uoptval;
77561 int err;
77562
77563 - uoptval = (char __user __force *) optval;
77564 + uoptval = (char __force_user *) optval;
77565
77566 set_fs(KERNEL_DS);
77567 if (level == SOL_SOCKET)
77568 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
77569 index d12ffa5..0b5a6e2 100644
77570 --- a/net/sunrpc/sched.c
77571 +++ b/net/sunrpc/sched.c
77572 @@ -238,9 +238,9 @@ static int rpc_wait_bit_killable(void *word)
77573 #ifdef RPC_DEBUG
77574 static void rpc_task_set_debuginfo(struct rpc_task *task)
77575 {
77576 - static atomic_t rpc_pid;
77577 + static atomic_unchecked_t rpc_pid;
77578
77579 - task->tk_pid = atomic_inc_return(&rpc_pid);
77580 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
77581 }
77582 #else
77583 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
77584 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
77585 index 767d494..fe17e9d 100644
77586 --- a/net/sunrpc/svcsock.c
77587 +++ b/net/sunrpc/svcsock.c
77588 @@ -394,7 +394,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
77589 int buflen, unsigned int base)
77590 {
77591 size_t save_iovlen;
77592 - void __user *save_iovbase;
77593 + void *save_iovbase;
77594 unsigned int i;
77595 int ret;
77596
77597 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
77598 index 09af4fa..77110a9 100644
77599 --- a/net/sunrpc/xprtrdma/svc_rdma.c
77600 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
77601 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
77602 static unsigned int min_max_inline = 4096;
77603 static unsigned int max_max_inline = 65536;
77604
77605 -atomic_t rdma_stat_recv;
77606 -atomic_t rdma_stat_read;
77607 -atomic_t rdma_stat_write;
77608 -atomic_t rdma_stat_sq_starve;
77609 -atomic_t rdma_stat_rq_starve;
77610 -atomic_t rdma_stat_rq_poll;
77611 -atomic_t rdma_stat_rq_prod;
77612 -atomic_t rdma_stat_sq_poll;
77613 -atomic_t rdma_stat_sq_prod;
77614 +atomic_unchecked_t rdma_stat_recv;
77615 +atomic_unchecked_t rdma_stat_read;
77616 +atomic_unchecked_t rdma_stat_write;
77617 +atomic_unchecked_t rdma_stat_sq_starve;
77618 +atomic_unchecked_t rdma_stat_rq_starve;
77619 +atomic_unchecked_t rdma_stat_rq_poll;
77620 +atomic_unchecked_t rdma_stat_rq_prod;
77621 +atomic_unchecked_t rdma_stat_sq_poll;
77622 +atomic_unchecked_t rdma_stat_sq_prod;
77623
77624 /* Temporary NFS request map and context caches */
77625 struct kmem_cache *svc_rdma_map_cachep;
77626 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
77627 len -= *ppos;
77628 if (len > *lenp)
77629 len = *lenp;
77630 - if (len && copy_to_user(buffer, str_buf, len))
77631 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
77632 return -EFAULT;
77633 *lenp = len;
77634 *ppos += len;
77635 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
77636 {
77637 .procname = "rdma_stat_read",
77638 .data = &rdma_stat_read,
77639 - .maxlen = sizeof(atomic_t),
77640 + .maxlen = sizeof(atomic_unchecked_t),
77641 .mode = 0644,
77642 .proc_handler = read_reset_stat,
77643 },
77644 {
77645 .procname = "rdma_stat_recv",
77646 .data = &rdma_stat_recv,
77647 - .maxlen = sizeof(atomic_t),
77648 + .maxlen = sizeof(atomic_unchecked_t),
77649 .mode = 0644,
77650 .proc_handler = read_reset_stat,
77651 },
77652 {
77653 .procname = "rdma_stat_write",
77654 .data = &rdma_stat_write,
77655 - .maxlen = sizeof(atomic_t),
77656 + .maxlen = sizeof(atomic_unchecked_t),
77657 .mode = 0644,
77658 .proc_handler = read_reset_stat,
77659 },
77660 {
77661 .procname = "rdma_stat_sq_starve",
77662 .data = &rdma_stat_sq_starve,
77663 - .maxlen = sizeof(atomic_t),
77664 + .maxlen = sizeof(atomic_unchecked_t),
77665 .mode = 0644,
77666 .proc_handler = read_reset_stat,
77667 },
77668 {
77669 .procname = "rdma_stat_rq_starve",
77670 .data = &rdma_stat_rq_starve,
77671 - .maxlen = sizeof(atomic_t),
77672 + .maxlen = sizeof(atomic_unchecked_t),
77673 .mode = 0644,
77674 .proc_handler = read_reset_stat,
77675 },
77676 {
77677 .procname = "rdma_stat_rq_poll",
77678 .data = &rdma_stat_rq_poll,
77679 - .maxlen = sizeof(atomic_t),
77680 + .maxlen = sizeof(atomic_unchecked_t),
77681 .mode = 0644,
77682 .proc_handler = read_reset_stat,
77683 },
77684 {
77685 .procname = "rdma_stat_rq_prod",
77686 .data = &rdma_stat_rq_prod,
77687 - .maxlen = sizeof(atomic_t),
77688 + .maxlen = sizeof(atomic_unchecked_t),
77689 .mode = 0644,
77690 .proc_handler = read_reset_stat,
77691 },
77692 {
77693 .procname = "rdma_stat_sq_poll",
77694 .data = &rdma_stat_sq_poll,
77695 - .maxlen = sizeof(atomic_t),
77696 + .maxlen = sizeof(atomic_unchecked_t),
77697 .mode = 0644,
77698 .proc_handler = read_reset_stat,
77699 },
77700 {
77701 .procname = "rdma_stat_sq_prod",
77702 .data = &rdma_stat_sq_prod,
77703 - .maxlen = sizeof(atomic_t),
77704 + .maxlen = sizeof(atomic_unchecked_t),
77705 .mode = 0644,
77706 .proc_handler = read_reset_stat,
77707 },
77708 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77709 index df67211..c354b13 100644
77710 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77711 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77712 @@ -499,7 +499,7 @@ next_sge:
77713 svc_rdma_put_context(ctxt, 0);
77714 goto out;
77715 }
77716 - atomic_inc(&rdma_stat_read);
77717 + atomic_inc_unchecked(&rdma_stat_read);
77718
77719 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
77720 chl_map->ch[ch_no].count -= read_wr.num_sge;
77721 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77722 dto_q);
77723 list_del_init(&ctxt->dto_q);
77724 } else {
77725 - atomic_inc(&rdma_stat_rq_starve);
77726 + atomic_inc_unchecked(&rdma_stat_rq_starve);
77727 clear_bit(XPT_DATA, &xprt->xpt_flags);
77728 ctxt = NULL;
77729 }
77730 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77731 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
77732 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
77733 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
77734 - atomic_inc(&rdma_stat_recv);
77735 + atomic_inc_unchecked(&rdma_stat_recv);
77736
77737 /* Build up the XDR from the receive buffers. */
77738 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
77739 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77740 index 249a835..fb2794b 100644
77741 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77742 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77743 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
77744 write_wr.wr.rdma.remote_addr = to;
77745
77746 /* Post It */
77747 - atomic_inc(&rdma_stat_write);
77748 + atomic_inc_unchecked(&rdma_stat_write);
77749 if (svc_rdma_send(xprt, &write_wr))
77750 goto err;
77751 return 0;
77752 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77753 index a385430..32254ea 100644
77754 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
77755 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77756 @@ -299,7 +299,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77757 return;
77758
77759 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
77760 - atomic_inc(&rdma_stat_rq_poll);
77761 + atomic_inc_unchecked(&rdma_stat_rq_poll);
77762
77763 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
77764 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
77765 @@ -321,7 +321,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77766 }
77767
77768 if (ctxt)
77769 - atomic_inc(&rdma_stat_rq_prod);
77770 + atomic_inc_unchecked(&rdma_stat_rq_prod);
77771
77772 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
77773 /*
77774 @@ -393,7 +393,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77775 return;
77776
77777 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
77778 - atomic_inc(&rdma_stat_sq_poll);
77779 + atomic_inc_unchecked(&rdma_stat_sq_poll);
77780 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
77781 if (wc.status != IB_WC_SUCCESS)
77782 /* Close the transport */
77783 @@ -411,7 +411,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77784 }
77785
77786 if (ctxt)
77787 - atomic_inc(&rdma_stat_sq_prod);
77788 + atomic_inc_unchecked(&rdma_stat_sq_prod);
77789 }
77790
77791 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
77792 @@ -1273,7 +1273,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
77793 spin_lock_bh(&xprt->sc_lock);
77794 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
77795 spin_unlock_bh(&xprt->sc_lock);
77796 - atomic_inc(&rdma_stat_sq_starve);
77797 + atomic_inc_unchecked(&rdma_stat_sq_starve);
77798
77799 /* See if we can opportunistically reap SQ WR to make room */
77800 sq_cq_reap(xprt);
77801 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
77802 index ca84212..3aa338f 100644
77803 --- a/net/sysctl_net.c
77804 +++ b/net/sysctl_net.c
77805 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
77806 struct ctl_table *table)
77807 {
77808 /* Allow network administrator to have same access as root. */
77809 - if (capable(CAP_NET_ADMIN)) {
77810 + if (capable_nolog(CAP_NET_ADMIN)) {
77811 int mode = (table->mode >> 6) & 7;
77812 return (mode << 6) | (mode << 3) | mode;
77813 }
77814 diff --git a/net/tipc/link.c b/net/tipc/link.c
77815 index f89570c..016cf63 100644
77816 --- a/net/tipc/link.c
77817 +++ b/net/tipc/link.c
77818 @@ -1170,7 +1170,7 @@ static int link_send_sections_long(struct tipc_port *sender,
77819 struct tipc_msg fragm_hdr;
77820 struct sk_buff *buf, *buf_chain, *prev;
77821 u32 fragm_crs, fragm_rest, hsz, sect_rest;
77822 - const unchar *sect_crs;
77823 + const unchar __user *sect_crs;
77824 int curr_sect;
77825 u32 fragm_no;
77826
77827 @@ -1214,7 +1214,7 @@ again:
77828
77829 if (!sect_rest) {
77830 sect_rest = msg_sect[++curr_sect].iov_len;
77831 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
77832 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
77833 }
77834
77835 if (sect_rest < fragm_rest)
77836 @@ -1233,7 +1233,7 @@ error:
77837 }
77838 } else
77839 skb_copy_to_linear_data_offset(buf, fragm_crs,
77840 - sect_crs, sz);
77841 + (const void __force_kernel *)sect_crs, sz);
77842 sect_crs += sz;
77843 sect_rest -= sz;
77844 fragm_crs += sz;
77845 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
77846 index 83d5096..dcba497 100644
77847 --- a/net/tipc/msg.c
77848 +++ b/net/tipc/msg.c
77849 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77850 msg_sect[cnt].iov_len);
77851 else
77852 skb_copy_to_linear_data_offset(*buf, pos,
77853 - msg_sect[cnt].iov_base,
77854 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77855 msg_sect[cnt].iov_len);
77856 pos += msg_sect[cnt].iov_len;
77857 }
77858 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77859 index 6cf7268..7a488ce 100644
77860 --- a/net/tipc/subscr.c
77861 +++ b/net/tipc/subscr.c
77862 @@ -101,7 +101,7 @@ static void subscr_send_event(struct subscription *sub,
77863 {
77864 struct iovec msg_sect;
77865
77866 - msg_sect.iov_base = (void *)&sub->evt;
77867 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77868 msg_sect.iov_len = sizeof(struct tipc_event);
77869
77870 sub->evt.event = htohl(event, sub->swap);
77871 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77872 index ec68e1c..fdd792f 100644
77873 --- a/net/unix/af_unix.c
77874 +++ b/net/unix/af_unix.c
77875 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(struct net *net,
77876 err = -ECONNREFUSED;
77877 if (!S_ISSOCK(inode->i_mode))
77878 goto put_fail;
77879 +
77880 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77881 + err = -EACCES;
77882 + goto put_fail;
77883 + }
77884 +
77885 u = unix_find_socket_byinode(inode);
77886 if (!u)
77887 goto put_fail;
77888 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(struct net *net,
77889 if (u) {
77890 struct dentry *dentry;
77891 dentry = unix_sk(u)->dentry;
77892 +
77893 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77894 + err = -EPERM;
77895 + sock_put(u);
77896 + goto fail;
77897 + }
77898 +
77899 if (dentry)
77900 touch_atime(unix_sk(u)->mnt, dentry);
77901 } else
77902 @@ -869,11 +882,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77903 err = security_path_mknod(&path, dentry, mode, 0);
77904 if (err)
77905 goto out_mknod_drop_write;
77906 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77907 + err = -EACCES;
77908 + goto out_mknod_drop_write;
77909 + }
77910 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77911 out_mknod_drop_write:
77912 mnt_drop_write(path.mnt);
77913 if (err)
77914 goto out_mknod_dput;
77915 +
77916 + gr_handle_create(dentry, path.mnt);
77917 +
77918 mutex_unlock(&path.dentry->d_inode->i_mutex);
77919 dput(path.dentry);
77920 path.dentry = dentry;
77921 diff --git a/net/wireless/core.h b/net/wireless/core.h
77922 index 8672e02..48782dd 100644
77923 --- a/net/wireless/core.h
77924 +++ b/net/wireless/core.h
77925 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77926 struct mutex mtx;
77927
77928 /* rfkill support */
77929 - struct rfkill_ops rfkill_ops;
77930 + rfkill_ops_no_const rfkill_ops;
77931 struct rfkill *rfkill;
77932 struct work_struct rfkill_sync;
77933
77934 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77935 index fdbc23c..212d53e 100644
77936 --- a/net/wireless/wext-core.c
77937 +++ b/net/wireless/wext-core.c
77938 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77939 */
77940
77941 /* Support for very large requests */
77942 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77943 - (user_length > descr->max_tokens)) {
77944 + if (user_length > descr->max_tokens) {
77945 /* Allow userspace to GET more than max so
77946 * we can support any size GET requests.
77947 * There is still a limit : -ENOMEM.
77948 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77949 }
77950 }
77951
77952 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77953 - /*
77954 - * If this is a GET, but not NOMAX, it means that the extra
77955 - * data is not bounded by userspace, but by max_tokens. Thus
77956 - * set the length to max_tokens. This matches the extra data
77957 - * allocation.
77958 - * The driver should fill it with the number of tokens it
77959 - * provided, and it may check iwp->length rather than having
77960 - * knowledge of max_tokens. If the driver doesn't change the
77961 - * iwp->length, this ioctl just copies back max_token tokens
77962 - * filled with zeroes. Hopefully the driver isn't claiming
77963 - * them to be valid data.
77964 - */
77965 - iwp->length = descr->max_tokens;
77966 - }
77967 -
77968 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77969
77970 iwp->length += essid_compat;
77971 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77972 index 552df27..8e7f238 100644
77973 --- a/net/xfrm/xfrm_policy.c
77974 +++ b/net/xfrm/xfrm_policy.c
77975 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77976 {
77977 policy->walk.dead = 1;
77978
77979 - atomic_inc(&policy->genid);
77980 + atomic_inc_unchecked(&policy->genid);
77981
77982 if (del_timer(&policy->timer))
77983 xfrm_pol_put(policy);
77984 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
77985 hlist_add_head(&policy->bydst, chain);
77986 xfrm_pol_hold(policy);
77987 net->xfrm.policy_count[dir]++;
77988 - atomic_inc(&flow_cache_genid);
77989 + atomic_inc_unchecked(&flow_cache_genid);
77990 if (delpol)
77991 __xfrm_policy_unlink(delpol, dir);
77992 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
77993 @@ -1530,7 +1530,7 @@ free_dst:
77994 goto out;
77995 }
77996
77997 -static int inline
77998 +static inline int
77999 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78000 {
78001 if (!*target) {
78002 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78003 return 0;
78004 }
78005
78006 -static int inline
78007 +static inline int
78008 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78009 {
78010 #ifdef CONFIG_XFRM_SUB_POLICY
78011 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78012 #endif
78013 }
78014
78015 -static int inline
78016 +static inline int
78017 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78018 {
78019 #ifdef CONFIG_XFRM_SUB_POLICY
78020 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78021
78022 xdst->num_pols = num_pols;
78023 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78024 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78025 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78026
78027 return xdst;
78028 }
78029 @@ -2335,7 +2335,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78030 if (xdst->xfrm_genid != dst->xfrm->genid)
78031 return 0;
78032 if (xdst->num_pols > 0 &&
78033 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78034 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78035 return 0;
78036
78037 mtu = dst_mtu(dst->child);
78038 @@ -2870,7 +2870,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78039 sizeof(pol->xfrm_vec[i].saddr));
78040 pol->xfrm_vec[i].encap_family = mp->new_family;
78041 /* flush bundles */
78042 - atomic_inc(&pol->genid);
78043 + atomic_inc_unchecked(&pol->genid);
78044 }
78045 }
78046
78047 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
78048 index 0256b8a..9341ef6 100644
78049 --- a/net/xfrm/xfrm_user.c
78050 +++ b/net/xfrm/xfrm_user.c
78051 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
78052 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
78053 int i;
78054
78055 + pax_track_stack();
78056 +
78057 if (xp->xfrm_nr == 0)
78058 return 0;
78059
78060 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
78061 int err;
78062 int n = 0;
78063
78064 + pax_track_stack();
78065 +
78066 if (attrs[XFRMA_MIGRATE] == NULL)
78067 return -EINVAL;
78068
78069 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78070 index a0fd502..a8e6e83 100644
78071 --- a/scripts/Makefile.build
78072 +++ b/scripts/Makefile.build
78073 @@ -109,7 +109,7 @@ endif
78074 endif
78075
78076 # Do not include host rules unless needed
78077 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78078 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
78079 include scripts/Makefile.host
78080 endif
78081
78082 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78083 index 686cb0d..9d653bf 100644
78084 --- a/scripts/Makefile.clean
78085 +++ b/scripts/Makefile.clean
78086 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78087 __clean-files := $(extra-y) $(always) \
78088 $(targets) $(clean-files) \
78089 $(host-progs) \
78090 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78091 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78092 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78093
78094 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78095
78096 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78097 index 1ac414f..a1c1451 100644
78098 --- a/scripts/Makefile.host
78099 +++ b/scripts/Makefile.host
78100 @@ -31,6 +31,7 @@
78101 # Note: Shared libraries consisting of C++ files are not supported
78102
78103 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78104 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78105
78106 # C code
78107 # Executables compiled from a single .c file
78108 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78109 # Shared libaries (only .c supported)
78110 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78111 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78112 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78113 # Remove .so files from "xxx-objs"
78114 host-cobjs := $(filter-out %.so,$(host-cobjs))
78115
78116 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78117 index 291228e..6c55203 100644
78118 --- a/scripts/basic/fixdep.c
78119 +++ b/scripts/basic/fixdep.c
78120 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78121 /*
78122 * Lookup a value in the configuration string.
78123 */
78124 -static int is_defined_config(const char *name, int len, unsigned int hash)
78125 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78126 {
78127 struct item *aux;
78128
78129 @@ -211,10 +211,10 @@ static void clear_config(void)
78130 /*
78131 * Record the use of a CONFIG_* word.
78132 */
78133 -static void use_config(const char *m, int slen)
78134 +static void use_config(const char *m, unsigned int slen)
78135 {
78136 unsigned int hash = strhash(m, slen);
78137 - int c, i;
78138 + unsigned int c, i;
78139
78140 if (is_defined_config(m, slen, hash))
78141 return;
78142 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78143
78144 static void parse_config_file(const char *map, size_t len)
78145 {
78146 - const int *end = (const int *) (map + len);
78147 + const unsigned int *end = (const unsigned int *) (map + len);
78148 /* start at +1, so that p can never be < map */
78149 - const int *m = (const int *) map + 1;
78150 + const unsigned int *m = (const unsigned int *) map + 1;
78151 const char *p, *q;
78152
78153 for (; m < end; m++) {
78154 @@ -405,7 +405,7 @@ static void print_deps(void)
78155 static void traps(void)
78156 {
78157 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78158 - int *p = (int *)test;
78159 + unsigned int *p = (unsigned int *)test;
78160
78161 if (*p != INT_CONF) {
78162 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
78163 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78164 new file mode 100644
78165 index 0000000..8729101
78166 --- /dev/null
78167 +++ b/scripts/gcc-plugin.sh
78168 @@ -0,0 +1,2 @@
78169 +#!/bin/sh
78170 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
78171 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78172 index e26e2fb..f84937b 100644
78173 --- a/scripts/mod/file2alias.c
78174 +++ b/scripts/mod/file2alias.c
78175 @@ -72,7 +72,7 @@ static void device_id_check(const char *modname, const char *device_id,
78176 unsigned long size, unsigned long id_size,
78177 void *symval)
78178 {
78179 - int i;
78180 + unsigned int i;
78181
78182 if (size % id_size || size < id_size) {
78183 if (cross_build != 0)
78184 @@ -102,7 +102,7 @@ static void device_id_check(const char *modname, const char *device_id,
78185 /* USB is special because the bcdDevice can be matched against a numeric range */
78186 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78187 static void do_usb_entry(struct usb_device_id *id,
78188 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78189 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78190 unsigned char range_lo, unsigned char range_hi,
78191 unsigned char max, struct module *mod)
78192 {
78193 @@ -203,7 +203,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78194 {
78195 unsigned int devlo, devhi;
78196 unsigned char chi, clo, max;
78197 - int ndigits;
78198 + unsigned int ndigits;
78199
78200 id->match_flags = TO_NATIVE(id->match_flags);
78201 id->idVendor = TO_NATIVE(id->idVendor);
78202 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78203 for (i = 0; i < count; i++) {
78204 const char *id = (char *)devs[i].id;
78205 char acpi_id[sizeof(devs[0].id)];
78206 - int j;
78207 + unsigned int j;
78208
78209 buf_printf(&mod->dev_table_buf,
78210 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78211 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78212
78213 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78214 const char *id = (char *)card->devs[j].id;
78215 - int i2, j2;
78216 + unsigned int i2, j2;
78217 int dup = 0;
78218
78219 if (!id[0])
78220 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78221 /* add an individual alias for every device entry */
78222 if (!dup) {
78223 char acpi_id[sizeof(card->devs[0].id)];
78224 - int k;
78225 + unsigned int k;
78226
78227 buf_printf(&mod->dev_table_buf,
78228 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78229 @@ -786,7 +786,7 @@ static void dmi_ascii_filter(char *d, const char *s)
78230 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
78231 char *alias)
78232 {
78233 - int i, j;
78234 + unsigned int i, j;
78235
78236 sprintf(alias, "dmi*");
78237
78238 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
78239 index a509ff8..5822633 100644
78240 --- a/scripts/mod/modpost.c
78241 +++ b/scripts/mod/modpost.c
78242 @@ -919,6 +919,7 @@ enum mismatch {
78243 ANY_INIT_TO_ANY_EXIT,
78244 ANY_EXIT_TO_ANY_INIT,
78245 EXPORT_TO_INIT_EXIT,
78246 + DATA_TO_TEXT
78247 };
78248
78249 struct sectioncheck {
78250 @@ -1027,6 +1028,12 @@ const struct sectioncheck sectioncheck[] = {
78251 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
78252 .mismatch = EXPORT_TO_INIT_EXIT,
78253 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
78254 +},
78255 +/* Do not reference code from writable data */
78256 +{
78257 + .fromsec = { DATA_SECTIONS, NULL },
78258 + .tosec = { TEXT_SECTIONS, NULL },
78259 + .mismatch = DATA_TO_TEXT
78260 }
78261 };
78262
78263 @@ -1149,10 +1156,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
78264 continue;
78265 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
78266 continue;
78267 - if (sym->st_value == addr)
78268 - return sym;
78269 /* Find a symbol nearby - addr are maybe negative */
78270 d = sym->st_value - addr;
78271 + if (d == 0)
78272 + return sym;
78273 if (d < 0)
78274 d = addr - sym->st_value;
78275 if (d < distance) {
78276 @@ -1431,6 +1438,14 @@ static void report_sec_mismatch(const char *modname,
78277 tosym, prl_to, prl_to, tosym);
78278 free(prl_to);
78279 break;
78280 + case DATA_TO_TEXT:
78281 +/*
78282 + fprintf(stderr,
78283 + "The variable %s references\n"
78284 + "the %s %s%s%s\n",
78285 + fromsym, to, sec2annotation(tosec), tosym, to_p);
78286 +*/
78287 + break;
78288 }
78289 fprintf(stderr, "\n");
78290 }
78291 @@ -1656,7 +1671,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
78292 static void check_sec_ref(struct module *mod, const char *modname,
78293 struct elf_info *elf)
78294 {
78295 - int i;
78296 + unsigned int i;
78297 Elf_Shdr *sechdrs = elf->sechdrs;
78298
78299 /* Walk through all sections */
78300 @@ -1754,7 +1769,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
78301 va_end(ap);
78302 }
78303
78304 -void buf_write(struct buffer *buf, const char *s, int len)
78305 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
78306 {
78307 if (buf->size - buf->pos < len) {
78308 buf->size += len + SZ;
78309 @@ -1966,7 +1981,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
78310 if (fstat(fileno(file), &st) < 0)
78311 goto close_write;
78312
78313 - if (st.st_size != b->pos)
78314 + if (st.st_size != (off_t)b->pos)
78315 goto close_write;
78316
78317 tmp = NOFAIL(malloc(b->pos));
78318 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
78319 index 2031119..b5433af 100644
78320 --- a/scripts/mod/modpost.h
78321 +++ b/scripts/mod/modpost.h
78322 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
78323
78324 struct buffer {
78325 char *p;
78326 - int pos;
78327 - int size;
78328 + unsigned int pos;
78329 + unsigned int size;
78330 };
78331
78332 void __attribute__((format(printf, 2, 3)))
78333 buf_printf(struct buffer *buf, const char *fmt, ...);
78334
78335 void
78336 -buf_write(struct buffer *buf, const char *s, int len);
78337 +buf_write(struct buffer *buf, const char *s, unsigned int len);
78338
78339 struct module {
78340 struct module *next;
78341 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
78342 index 9dfcd6d..099068e 100644
78343 --- a/scripts/mod/sumversion.c
78344 +++ b/scripts/mod/sumversion.c
78345 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
78346 goto out;
78347 }
78348
78349 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
78350 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
78351 warn("writing sum in %s failed: %s\n",
78352 filename, strerror(errno));
78353 goto out;
78354 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
78355 index 5c11312..72742b5 100644
78356 --- a/scripts/pnmtologo.c
78357 +++ b/scripts/pnmtologo.c
78358 @@ -237,14 +237,14 @@ static void write_header(void)
78359 fprintf(out, " * Linux logo %s\n", logoname);
78360 fputs(" */\n\n", out);
78361 fputs("#include <linux/linux_logo.h>\n\n", out);
78362 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
78363 + fprintf(out, "static unsigned char %s_data[] = {\n",
78364 logoname);
78365 }
78366
78367 static void write_footer(void)
78368 {
78369 fputs("\n};\n\n", out);
78370 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
78371 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
78372 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
78373 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
78374 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
78375 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
78376 fputs("\n};\n\n", out);
78377
78378 /* write logo clut */
78379 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
78380 + fprintf(out, "static unsigned char %s_clut[] = {\n",
78381 logoname);
78382 write_hex_cnt = 0;
78383 for (i = 0; i < logo_clutsize; i++) {
78384 diff --git a/security/Kconfig b/security/Kconfig
78385 index e0f08b5..7388edd 100644
78386 --- a/security/Kconfig
78387 +++ b/security/Kconfig
78388 @@ -4,6 +4,586 @@
78389
78390 menu "Security options"
78391
78392 +source grsecurity/Kconfig
78393 +
78394 +menu "PaX"
78395 +
78396 + config ARCH_TRACK_EXEC_LIMIT
78397 + bool
78398 +
78399 + config PAX_KERNEXEC_PLUGIN
78400 + bool
78401 +
78402 + config PAX_PER_CPU_PGD
78403 + bool
78404 +
78405 + config TASK_SIZE_MAX_SHIFT
78406 + int
78407 + depends on X86_64
78408 + default 47 if !PAX_PER_CPU_PGD
78409 + default 42 if PAX_PER_CPU_PGD
78410 +
78411 + config PAX_ENABLE_PAE
78412 + bool
78413 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
78414 +
78415 +config PAX
78416 + bool "Enable various PaX features"
78417 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
78418 + help
78419 + This allows you to enable various PaX features. PaX adds
78420 + intrusion prevention mechanisms to the kernel that reduce
78421 + the risks posed by exploitable memory corruption bugs.
78422 +
78423 +menu "PaX Control"
78424 + depends on PAX
78425 +
78426 +config PAX_SOFTMODE
78427 + bool 'Support soft mode'
78428 + select PAX_PT_PAX_FLAGS
78429 + help
78430 + Enabling this option will allow you to run PaX in soft mode, that
78431 + is, PaX features will not be enforced by default, only on executables
78432 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
78433 + is the only way to mark executables for soft mode use.
78434 +
78435 + Soft mode can be activated by using the "pax_softmode=1" kernel command
78436 + line option on boot. Furthermore you can control various PaX features
78437 + at runtime via the entries in /proc/sys/kernel/pax.
78438 +
78439 +config PAX_EI_PAX
78440 + bool 'Use legacy ELF header marking'
78441 + help
78442 + Enabling this option will allow you to control PaX features on
78443 + a per executable basis via the 'chpax' utility available at
78444 + http://pax.grsecurity.net/. The control flags will be read from
78445 + an otherwise reserved part of the ELF header. This marking has
78446 + numerous drawbacks (no support for soft-mode, toolchain does not
78447 + know about the non-standard use of the ELF header) therefore it
78448 + has been deprecated in favour of PT_PAX_FLAGS support.
78449 +
78450 + Note that if you enable PT_PAX_FLAGS marking support as well,
78451 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
78452 +
78453 +config PAX_PT_PAX_FLAGS
78454 + bool 'Use ELF program header marking'
78455 + help
78456 + Enabling this option will allow you to control PaX features on
78457 + a per executable basis via the 'paxctl' utility available at
78458 + http://pax.grsecurity.net/. The control flags will be read from
78459 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
78460 + has the benefits of supporting both soft mode and being fully
78461 + integrated into the toolchain (the binutils patch is available
78462 + from http://pax.grsecurity.net).
78463 +
78464 + If your toolchain does not support PT_PAX_FLAGS markings,
78465 + you can create one in most cases with 'paxctl -C'.
78466 +
78467 + Note that if you enable the legacy EI_PAX marking support as well,
78468 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
78469 +
78470 +choice
78471 + prompt 'MAC system integration'
78472 + default PAX_HAVE_ACL_FLAGS
78473 + help
78474 + Mandatory Access Control systems have the option of controlling
78475 + PaX flags on a per executable basis, choose the method supported
78476 + by your particular system.
78477 +
78478 + - "none": if your MAC system does not interact with PaX,
78479 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
78480 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
78481 +
78482 + NOTE: this option is for developers/integrators only.
78483 +
78484 + config PAX_NO_ACL_FLAGS
78485 + bool 'none'
78486 +
78487 + config PAX_HAVE_ACL_FLAGS
78488 + bool 'direct'
78489 +
78490 + config PAX_HOOK_ACL_FLAGS
78491 + bool 'hook'
78492 +endchoice
78493 +
78494 +endmenu
78495 +
78496 +menu "Non-executable pages"
78497 + depends on PAX
78498 +
78499 +config PAX_NOEXEC
78500 + bool "Enforce non-executable pages"
78501 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
78502 + help
78503 + By design some architectures do not allow for protecting memory
78504 + pages against execution or even if they do, Linux does not make
78505 + use of this feature. In practice this means that if a page is
78506 + readable (such as the stack or heap) it is also executable.
78507 +
78508 + There is a well known exploit technique that makes use of this
78509 + fact and a common programming mistake where an attacker can
78510 + introduce code of his choice somewhere in the attacked program's
78511 + memory (typically the stack or the heap) and then execute it.
78512 +
78513 + If the attacked program was running with different (typically
78514 + higher) privileges than that of the attacker, then he can elevate
78515 + his own privilege level (e.g. get a root shell, write to files for
78516 + which he does not have write access to, etc).
78517 +
78518 + Enabling this option will let you choose from various features
78519 + that prevent the injection and execution of 'foreign' code in
78520 + a program.
78521 +
78522 + This will also break programs that rely on the old behaviour and
78523 + expect that dynamically allocated memory via the malloc() family
78524 + of functions is executable (which it is not). Notable examples
78525 + are the XFree86 4.x server, the java runtime and wine.
78526 +
78527 +config PAX_PAGEEXEC
78528 + bool "Paging based non-executable pages"
78529 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
78530 + select S390_SWITCH_AMODE if S390
78531 + select S390_EXEC_PROTECT if S390
78532 + select ARCH_TRACK_EXEC_LIMIT if X86_32
78533 + help
78534 + This implementation is based on the paging feature of the CPU.
78535 + On i386 without hardware non-executable bit support there is a
78536 + variable but usually low performance impact, however on Intel's
78537 + P4 core based CPUs it is very high so you should not enable this
78538 + for kernels meant to be used on such CPUs.
78539 +
78540 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
78541 + with hardware non-executable bit support there is no performance
78542 + impact, on ppc the impact is negligible.
78543 +
78544 + Note that several architectures require various emulations due to
78545 + badly designed userland ABIs, this will cause a performance impact
78546 + but will disappear as soon as userland is fixed. For example, ppc
78547 + userland MUST have been built with secure-plt by a recent toolchain.
78548 +
78549 +config PAX_SEGMEXEC
78550 + bool "Segmentation based non-executable pages"
78551 + depends on PAX_NOEXEC && X86_32
78552 + help
78553 + This implementation is based on the segmentation feature of the
78554 + CPU and has a very small performance impact, however applications
78555 + will be limited to a 1.5 GB address space instead of the normal
78556 + 3 GB.
78557 +
78558 +config PAX_EMUTRAMP
78559 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
78560 + default y if PARISC
78561 + help
78562 + There are some programs and libraries that for one reason or
78563 + another attempt to execute special small code snippets from
78564 + non-executable memory pages. Most notable examples are the
78565 + signal handler return code generated by the kernel itself and
78566 + the GCC trampolines.
78567 +
78568 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
78569 + such programs will no longer work under your kernel.
78570 +
78571 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
78572 + utilities to enable trampoline emulation for the affected programs
78573 + yet still have the protection provided by the non-executable pages.
78574 +
78575 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
78576 + your system will not even boot.
78577 +
78578 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
78579 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
78580 + for the affected files.
78581 +
78582 + NOTE: enabling this feature *may* open up a loophole in the
78583 + protection provided by non-executable pages that an attacker
78584 + could abuse. Therefore the best solution is to not have any
78585 + files on your system that would require this option. This can
78586 + be achieved by not using libc5 (which relies on the kernel
78587 + signal handler return code) and not using or rewriting programs
78588 + that make use of the nested function implementation of GCC.
78589 + Skilled users can just fix GCC itself so that it implements
78590 + nested function calls in a way that does not interfere with PaX.
78591 +
78592 +config PAX_EMUSIGRT
78593 + bool "Automatically emulate sigreturn trampolines"
78594 + depends on PAX_EMUTRAMP && PARISC
78595 + default y
78596 + help
78597 + Enabling this option will have the kernel automatically detect
78598 + and emulate signal return trampolines executing on the stack
78599 + that would otherwise lead to task termination.
78600 +
78601 + This solution is intended as a temporary one for users with
78602 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
78603 + Modula-3 runtime, etc) or executables linked to such, basically
78604 + everything that does not specify its own SA_RESTORER function in
78605 + normal executable memory like glibc 2.1+ does.
78606 +
78607 + On parisc you MUST enable this option, otherwise your system will
78608 + not even boot.
78609 +
78610 + NOTE: this feature cannot be disabled on a per executable basis
78611 + and since it *does* open up a loophole in the protection provided
78612 + by non-executable pages, the best solution is to not have any
78613 + files on your system that would require this option.
78614 +
78615 +config PAX_MPROTECT
78616 + bool "Restrict mprotect()"
78617 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
78618 + help
78619 + Enabling this option will prevent programs from
78620 + - changing the executable status of memory pages that were
78621 + not originally created as executable,
78622 + - making read-only executable pages writable again,
78623 + - creating executable pages from anonymous memory,
78624 + - making read-only-after-relocations (RELRO) data pages writable again.
78625 +
78626 + You should say Y here to complete the protection provided by
78627 + the enforcement of non-executable pages.
78628 +
78629 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78630 + this feature on a per file basis.
78631 +
78632 +config PAX_MPROTECT_COMPAT
78633 + bool "Use legacy/compat protection demoting (read help)"
78634 + depends on PAX_MPROTECT
78635 + default n
78636 + help
78637 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
78638 + by sending the proper error code to the application. For some broken
78639 + userland, this can cause problems with Python or other applications. The
78640 + current implementation however allows for applications like clamav to
78641 + detect if JIT compilation/execution is allowed and to fall back gracefully
78642 + to an interpreter-based mode if it does not. While we encourage everyone
78643 + to use the current implementation as-is and push upstream to fix broken
78644 + userland (note that the RWX logging option can assist with this), in some
78645 + environments this may not be possible. Having to disable MPROTECT
78646 + completely on certain binaries reduces the security benefit of PaX,
78647 + so this option is provided for those environments to revert to the old
78648 + behavior.
78649 +
78650 +config PAX_ELFRELOCS
78651 + bool "Allow ELF text relocations (read help)"
78652 + depends on PAX_MPROTECT
78653 + default n
78654 + help
78655 + Non-executable pages and mprotect() restrictions are effective
78656 + in preventing the introduction of new executable code into an
78657 + attacked task's address space. There remain only two venues
78658 + for this kind of attack: if the attacker can execute already
78659 + existing code in the attacked task then he can either have it
78660 + create and mmap() a file containing his code or have it mmap()
78661 + an already existing ELF library that does not have position
78662 + independent code in it and use mprotect() on it to make it
78663 + writable and copy his code there. While protecting against
78664 + the former approach is beyond PaX, the latter can be prevented
78665 + by having only PIC ELF libraries on one's system (which do not
78666 + need to relocate their code). If you are sure this is your case,
78667 + as is the case with all modern Linux distributions, then leave
78668 + this option disabled. You should say 'n' here.
78669 +
78670 +config PAX_ETEXECRELOCS
78671 + bool "Allow ELF ET_EXEC text relocations"
78672 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
78673 + select PAX_ELFRELOCS
78674 + default y
78675 + help
78676 + On some architectures there are incorrectly created applications
78677 + that require text relocations and would not work without enabling
78678 + this option. If you are an alpha, ia64 or parisc user, you should
78679 + enable this option and disable it once you have made sure that
78680 + none of your applications need it.
78681 +
78682 +config PAX_EMUPLT
78683 + bool "Automatically emulate ELF PLT"
78684 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
78685 + default y
78686 + help
78687 + Enabling this option will have the kernel automatically detect
78688 + and emulate the Procedure Linkage Table entries in ELF files.
78689 + On some architectures such entries are in writable memory, and
78690 + become non-executable leading to task termination. Therefore
78691 + it is mandatory that you enable this option on alpha, parisc,
78692 + sparc and sparc64, otherwise your system would not even boot.
78693 +
78694 + NOTE: this feature *does* open up a loophole in the protection
78695 + provided by the non-executable pages, therefore the proper
78696 + solution is to modify the toolchain to produce a PLT that does
78697 + not need to be writable.
78698 +
78699 +config PAX_DLRESOLVE
78700 + bool 'Emulate old glibc resolver stub'
78701 + depends on PAX_EMUPLT && SPARC
78702 + default n
78703 + help
78704 + This option is needed if userland has an old glibc (before 2.4)
78705 + that puts a 'save' instruction into the runtime generated resolver
78706 + stub that needs special emulation.
78707 +
78708 +config PAX_KERNEXEC
78709 + bool "Enforce non-executable kernel pages"
78710 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
78711 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
78712 + select PAX_KERNEXEC_PLUGIN if X86_64
78713 + help
78714 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
78715 + that is, enabling this option will make it harder to inject
78716 + and execute 'foreign' code in kernel memory itself.
78717 +
78718 + Note that on x86_64 kernels there is a known regression when
78719 + this feature and KVM/VMX are both enabled in the host kernel.
78720 +
78721 +choice
78722 + prompt "Return Address Instrumentation Method"
78723 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
78724 + depends on PAX_KERNEXEC_PLUGIN
78725 + help
78726 + Select the method used to instrument function pointer dereferences.
78727 + Note that binary modules cannot be instrumented by this approach.
78728 +
78729 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
78730 + bool "bts"
78731 + help
78732 + This method is compatible with binary only modules but has
78733 + a higher runtime overhead.
78734 +
78735 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
78736 + bool "or"
78737 + depends on !PARAVIRT
78738 + help
78739 + This method is incompatible with binary only modules but has
78740 + a lower runtime overhead.
78741 +endchoice
78742 +
78743 +config PAX_KERNEXEC_PLUGIN_METHOD
78744 + string
78745 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
78746 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
78747 + default ""
78748 +
78749 +config PAX_KERNEXEC_MODULE_TEXT
78750 + int "Minimum amount of memory reserved for module code"
78751 + default "4"
78752 + depends on PAX_KERNEXEC && X86_32 && MODULES
78753 + help
78754 + Due to implementation details the kernel must reserve a fixed
78755 + amount of memory for module code at compile time that cannot be
78756 + changed at runtime. Here you can specify the minimum amount
78757 + in MB that will be reserved. Due to the same implementation
78758 + details this size will always be rounded up to the next 2/4 MB
78759 + boundary (depends on PAE) so the actually available memory for
78760 + module code will usually be more than this minimum.
78761 +
78762 + The default 4 MB should be enough for most users but if you have
78763 + an excessive number of modules (e.g., most distribution configs
78764 + compile many drivers as modules) or use huge modules such as
78765 + nvidia's kernel driver, you will need to adjust this amount.
78766 + A good rule of thumb is to look at your currently loaded kernel
78767 + modules and add up their sizes.
78768 +
78769 +endmenu
78770 +
78771 +menu "Address Space Layout Randomization"
78772 + depends on PAX
78773 +
78774 +config PAX_ASLR
78775 + bool "Address Space Layout Randomization"
78776 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
78777 + help
78778 + Many if not most exploit techniques rely on the knowledge of
78779 + certain addresses in the attacked program. The following options
78780 + will allow the kernel to apply a certain amount of randomization
78781 + to specific parts of the program thereby forcing an attacker to
78782 + guess them in most cases. Any failed guess will most likely crash
78783 + the attacked program which allows the kernel to detect such attempts
78784 + and react on them. PaX itself provides no reaction mechanisms,
78785 + instead it is strongly encouraged that you make use of Nergal's
78786 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
78787 + (http://www.grsecurity.net/) built-in crash detection features or
78788 + develop one yourself.
78789 +
78790 + By saying Y here you can choose to randomize the following areas:
78791 + - top of the task's kernel stack
78792 + - top of the task's userland stack
78793 + - base address for mmap() requests that do not specify one
78794 + (this includes all libraries)
78795 + - base address of the main executable
78796 +
78797 + It is strongly recommended to say Y here as address space layout
78798 + randomization has negligible impact on performance yet it provides
78799 + a very effective protection.
78800 +
78801 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78802 + this feature on a per file basis.
78803 +
78804 +config PAX_RANDKSTACK
78805 + bool "Randomize kernel stack base"
78806 + depends on X86_TSC && X86
78807 + help
78808 + By saying Y here the kernel will randomize every task's kernel
78809 + stack on every system call. This will not only force an attacker
78810 + to guess it but also prevent him from making use of possible
78811 + leaked information about it.
78812 +
78813 + Since the kernel stack is a rather scarce resource, randomization
78814 + may cause unexpected stack overflows, therefore you should very
78815 + carefully test your system. Note that once enabled in the kernel
78816 + configuration, this feature cannot be disabled on a per file basis.
78817 +
78818 +config PAX_RANDUSTACK
78819 + bool "Randomize user stack base"
78820 + depends on PAX_ASLR
78821 + help
78822 + By saying Y here the kernel will randomize every task's userland
78823 + stack. The randomization is done in two steps where the second
78824 + one may apply a big amount of shift to the top of the stack and
78825 + cause problems for programs that want to use lots of memory (more
78826 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
78827 + For this reason the second step can be controlled by 'chpax' or
78828 + 'paxctl' on a per file basis.
78829 +
78830 +config PAX_RANDMMAP
78831 + bool "Randomize mmap() base"
78832 + depends on PAX_ASLR
78833 + help
78834 + By saying Y here the kernel will use a randomized base address for
78835 + mmap() requests that do not specify one themselves. As a result
78836 + all dynamically loaded libraries will appear at random addresses
78837 + and therefore be harder to exploit by a technique where an attacker
78838 + attempts to execute library code for his purposes (e.g. spawn a
78839 + shell from an exploited program that is running at an elevated
78840 + privilege level).
78841 +
78842 + Furthermore, if a program is relinked as a dynamic ELF file, its
78843 + base address will be randomized as well, completing the full
78844 + randomization of the address space layout. Attacking such programs
78845 + becomes a guess game. You can find an example of doing this at
78846 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
78847 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
78848 +
78849 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
78850 + feature on a per file basis.
78851 +
78852 +endmenu
78853 +
78854 +menu "Miscellaneous hardening features"
78855 +
78856 +config PAX_MEMORY_SANITIZE
78857 + bool "Sanitize all freed memory"
78858 + help
78859 + By saying Y here the kernel will erase memory pages as soon as they
78860 + are freed. This in turn reduces the lifetime of data stored in the
78861 + pages, making it less likely that sensitive information such as
78862 + passwords, cryptographic secrets, etc stay in memory for too long.
78863 +
78864 + This is especially useful for programs whose runtime is short, long
78865 + lived processes and the kernel itself benefit from this as long as
78866 + they operate on whole memory pages and ensure timely freeing of pages
78867 + that may hold sensitive information.
78868 +
78869 + The tradeoff is performance impact, on a single CPU system kernel
78870 + compilation sees a 3% slowdown, other systems and workloads may vary
78871 + and you are advised to test this feature on your expected workload
78872 + before deploying it.
78873 +
78874 + Note that this feature does not protect data stored in live pages,
78875 + e.g., process memory swapped to disk may stay there for a long time.
78876 +
78877 +config PAX_MEMORY_STACKLEAK
78878 + bool "Sanitize kernel stack"
78879 + depends on X86
78880 + help
78881 + By saying Y here the kernel will erase the kernel stack before it
78882 + returns from a system call. This in turn reduces the information
78883 + that a kernel stack leak bug can reveal.
78884 +
78885 + Note that such a bug can still leak information that was put on
78886 + the stack by the current system call (the one eventually triggering
78887 + the bug) but traces of earlier system calls on the kernel stack
78888 + cannot leak anymore.
78889 +
78890 + The tradeoff is performance impact: on a single CPU system kernel
78891 + compilation sees a 1% slowdown, other systems and workloads may vary
78892 + and you are advised to test this feature on your expected workload
78893 + before deploying it.
78894 +
78895 + Note: full support for this feature requires gcc with plugin support
78896 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
78897 + is not supported). Using older gcc versions means that functions
78898 + with large enough stack frames may leave uninitialized memory behind
78899 + that may be exposed to a later syscall leaking the stack.
78900 +
78901 +config PAX_MEMORY_UDEREF
78902 + bool "Prevent invalid userland pointer dereference"
78903 + depends on X86 && !UML_X86 && !XEN
78904 + select PAX_PER_CPU_PGD if X86_64
78905 + help
78906 + By saying Y here the kernel will be prevented from dereferencing
78907 + userland pointers in contexts where the kernel expects only kernel
78908 + pointers. This is both a useful runtime debugging feature and a
78909 + security measure that prevents exploiting a class of kernel bugs.
78910 +
78911 + The tradeoff is that some virtualization solutions may experience
78912 + a huge slowdown and therefore you should not enable this feature
78913 + for kernels meant to run in such environments. Whether a given VM
78914 + solution is affected or not is best determined by simply trying it
78915 + out, the performance impact will be obvious right on boot as this
78916 + mechanism engages from very early on. A good rule of thumb is that
78917 + VMs running on CPUs without hardware virtualization support (i.e.,
78918 + the majority of IA-32 CPUs) will likely experience the slowdown.
78919 +
78920 +config PAX_REFCOUNT
78921 + bool "Prevent various kernel object reference counter overflows"
78922 + depends on GRKERNSEC && (X86 || SPARC64)
78923 + help
78924 + By saying Y here the kernel will detect and prevent overflowing
78925 + various (but not all) kinds of object reference counters. Such
78926 + overflows can normally occur due to bugs only and are often, if
78927 + not always, exploitable.
78928 +
78929 + The tradeoff is that data structures protected by an overflowed
78930 + refcount will never be freed and therefore will leak memory. Note
78931 + that this leak also happens even without this protection but in
78932 + that case the overflow can eventually trigger the freeing of the
78933 + data structure while it is still being used elsewhere, resulting
78934 + in the exploitable situation that this feature prevents.
78935 +
78936 + Since this has a negligible performance impact, you should enable
78937 + this feature.
78938 +
78939 +config PAX_USERCOPY
78940 + bool "Harden heap object copies between kernel and userland"
78941 + depends on X86 || PPC || SPARC || ARM
78942 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
78943 + help
78944 + By saying Y here the kernel will enforce the size of heap objects
78945 + when they are copied in either direction between the kernel and
78946 + userland, even if only a part of the heap object is copied.
78947 +
78948 + Specifically, this checking prevents information leaking from the
78949 + kernel heap during kernel to userland copies (if the kernel heap
78950 + object is otherwise fully initialized) and prevents kernel heap
78951 + overflows during userland to kernel copies.
78952 +
78953 + Note that the current implementation provides the strictest bounds
78954 + checks for the SLUB allocator.
78955 +
78956 + Enabling this option also enables per-slab cache protection against
78957 + data in a given cache being copied into/out of via userland
78958 + accessors. Though the whitelist of regions will be reduced over
78959 + time, it notably protects important data structures like task structs.
78960 +
78961 + If frame pointers are enabled on x86, this option will also restrict
78962 + copies into and out of the kernel stack to local variables within a
78963 + single frame.
78964 +
78965 + Since this has a negligible performance impact, you should enable
78966 + this feature.
78967 +
78968 +endmenu
78969 +
78970 +endmenu
78971 +
78972 config KEYS
78973 bool "Enable access key retention support"
78974 help
78975 @@ -167,7 +747,7 @@ config INTEL_TXT
78976 config LSM_MMAP_MIN_ADDR
78977 int "Low address space for LSM to protect from user allocation"
78978 depends on SECURITY && SECURITY_SELINUX
78979 - default 32768 if ARM
78980 + default 32768 if ALPHA || ARM || PARISC || SPARC32
78981 default 65536
78982 help
78983 This is the portion of low virtual memory which should be protected
78984 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
78985 index 3783202..1852837 100644
78986 --- a/security/apparmor/lsm.c
78987 +++ b/security/apparmor/lsm.c
78988 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
78989 return error;
78990 }
78991
78992 -static struct security_operations apparmor_ops = {
78993 +static struct security_operations apparmor_ops __read_only = {
78994 .name = "apparmor",
78995
78996 .ptrace_access_check = apparmor_ptrace_access_check,
78997 diff --git a/security/commoncap.c b/security/commoncap.c
78998 index a93b3b7..4410df9 100644
78999 --- a/security/commoncap.c
79000 +++ b/security/commoncap.c
79001 @@ -28,6 +28,7 @@
79002 #include <linux/prctl.h>
79003 #include <linux/securebits.h>
79004 #include <linux/user_namespace.h>
79005 +#include <net/sock.h>
79006
79007 /*
79008 * If a non-root user executes a setuid-root binary in
79009 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
79010
79011 int cap_netlink_recv(struct sk_buff *skb, int cap)
79012 {
79013 - if (!cap_raised(current_cap(), cap))
79014 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
79015 return -EPERM;
79016 return 0;
79017 }
79018 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79019 {
79020 const struct cred *cred = current_cred();
79021
79022 + if (gr_acl_enable_at_secure())
79023 + return 1;
79024 +
79025 if (cred->uid != 0) {
79026 if (bprm->cap_effective)
79027 return 1;
79028 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79029 index 08408bd..67e6e78 100644
79030 --- a/security/integrity/ima/ima.h
79031 +++ b/security/integrity/ima/ima.h
79032 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79033 extern spinlock_t ima_queue_lock;
79034
79035 struct ima_h_table {
79036 - atomic_long_t len; /* number of stored measurements in the list */
79037 - atomic_long_t violations;
79038 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79039 + atomic_long_unchecked_t violations;
79040 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79041 };
79042 extern struct ima_h_table ima_htable;
79043 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79044 index da36d2c..e1e1965 100644
79045 --- a/security/integrity/ima/ima_api.c
79046 +++ b/security/integrity/ima/ima_api.c
79047 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79048 int result;
79049
79050 /* can overflow, only indicator */
79051 - atomic_long_inc(&ima_htable.violations);
79052 + atomic_long_inc_unchecked(&ima_htable.violations);
79053
79054 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79055 if (!entry) {
79056 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79057 index ef21b96..d53e674 100644
79058 --- a/security/integrity/ima/ima_fs.c
79059 +++ b/security/integrity/ima/ima_fs.c
79060 @@ -28,12 +28,12 @@
79061 static int valid_policy = 1;
79062 #define TMPBUFLEN 12
79063 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79064 - loff_t *ppos, atomic_long_t *val)
79065 + loff_t *ppos, atomic_long_unchecked_t *val)
79066 {
79067 char tmpbuf[TMPBUFLEN];
79068 ssize_t len;
79069
79070 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79071 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79072 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79073 }
79074
79075 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79076 index 8e28f04..d5951b1 100644
79077 --- a/security/integrity/ima/ima_queue.c
79078 +++ b/security/integrity/ima/ima_queue.c
79079 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79080 INIT_LIST_HEAD(&qe->later);
79081 list_add_tail_rcu(&qe->later, &ima_measurements);
79082
79083 - atomic_long_inc(&ima_htable.len);
79084 + atomic_long_inc_unchecked(&ima_htable.len);
79085 key = ima_hash_key(entry->digest);
79086 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79087 return 0;
79088 diff --git a/security/keys/compat.c b/security/keys/compat.c
79089 index 338b510..a235861 100644
79090 --- a/security/keys/compat.c
79091 +++ b/security/keys/compat.c
79092 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79093 if (ret == 0)
79094 goto no_payload_free;
79095
79096 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79097 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79098
79099 if (iov != iovstack)
79100 kfree(iov);
79101 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79102 index eca5191..da9c7f0 100644
79103 --- a/security/keys/keyctl.c
79104 +++ b/security/keys/keyctl.c
79105 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79106 /*
79107 * Copy the iovec data from userspace
79108 */
79109 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79110 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79111 unsigned ioc)
79112 {
79113 for (; ioc > 0; ioc--) {
79114 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79115 * If successful, 0 will be returned.
79116 */
79117 long keyctl_instantiate_key_common(key_serial_t id,
79118 - const struct iovec *payload_iov,
79119 + const struct iovec __user *payload_iov,
79120 unsigned ioc,
79121 size_t plen,
79122 key_serial_t ringid)
79123 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
79124 [0].iov_len = plen
79125 };
79126
79127 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79128 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79129 }
79130
79131 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79132 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79133 if (ret == 0)
79134 goto no_payload_free;
79135
79136 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79137 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79138
79139 if (iov != iovstack)
79140 kfree(iov);
79141 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79142 index 30e242f..ec111ab 100644
79143 --- a/security/keys/keyring.c
79144 +++ b/security/keys/keyring.c
79145 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79146 ret = -EFAULT;
79147
79148 for (loop = 0; loop < klist->nkeys; loop++) {
79149 + key_serial_t serial;
79150 key = klist->keys[loop];
79151 + serial = key->serial;
79152
79153 tmp = sizeof(key_serial_t);
79154 if (tmp > buflen)
79155 tmp = buflen;
79156
79157 - if (copy_to_user(buffer,
79158 - &key->serial,
79159 - tmp) != 0)
79160 + if (copy_to_user(buffer, &serial, tmp))
79161 goto error;
79162
79163 buflen -= tmp;
79164 diff --git a/security/min_addr.c b/security/min_addr.c
79165 index f728728..6457a0c 100644
79166 --- a/security/min_addr.c
79167 +++ b/security/min_addr.c
79168 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
79169 */
79170 static void update_mmap_min_addr(void)
79171 {
79172 +#ifndef SPARC
79173 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
79174 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
79175 mmap_min_addr = dac_mmap_min_addr;
79176 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
79177 #else
79178 mmap_min_addr = dac_mmap_min_addr;
79179 #endif
79180 +#endif
79181 }
79182
79183 /*
79184 diff --git a/security/security.c b/security/security.c
79185 index d9e1533..91427f2 100644
79186 --- a/security/security.c
79187 +++ b/security/security.c
79188 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
79189 /* things that live in capability.c */
79190 extern void __init security_fixup_ops(struct security_operations *ops);
79191
79192 -static struct security_operations *security_ops;
79193 -static struct security_operations default_security_ops = {
79194 +static struct security_operations *security_ops __read_only;
79195 +static struct security_operations default_security_ops __read_only = {
79196 .name = "default",
79197 };
79198
79199 @@ -67,7 +67,9 @@ int __init security_init(void)
79200
79201 void reset_security_ops(void)
79202 {
79203 + pax_open_kernel();
79204 security_ops = &default_security_ops;
79205 + pax_close_kernel();
79206 }
79207
79208 /* Save user chosen LSM */
79209 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
79210 index 266a229..61bd553 100644
79211 --- a/security/selinux/hooks.c
79212 +++ b/security/selinux/hooks.c
79213 @@ -93,7 +93,6 @@
79214 #define NUM_SEL_MNT_OPTS 5
79215
79216 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
79217 -extern struct security_operations *security_ops;
79218
79219 /* SECMARK reference count */
79220 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
79221 @@ -5455,7 +5454,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
79222
79223 #endif
79224
79225 -static struct security_operations selinux_ops = {
79226 +static struct security_operations selinux_ops __read_only = {
79227 .name = "selinux",
79228
79229 .ptrace_access_check = selinux_ptrace_access_check,
79230 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
79231 index b43813c..74be837 100644
79232 --- a/security/selinux/include/xfrm.h
79233 +++ b/security/selinux/include/xfrm.h
79234 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
79235
79236 static inline void selinux_xfrm_notify_policyload(void)
79237 {
79238 - atomic_inc(&flow_cache_genid);
79239 + atomic_inc_unchecked(&flow_cache_genid);
79240 }
79241 #else
79242 static inline int selinux_xfrm_enabled(void)
79243 diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
79244 index f6917bc..8e8713e 100644
79245 --- a/security/selinux/ss/services.c
79246 +++ b/security/selinux/ss/services.c
79247 @@ -1814,6 +1814,8 @@ int security_load_policy(void *data, size_t len)
79248 int rc = 0;
79249 struct policy_file file = { data, len }, *fp = &file;
79250
79251 + pax_track_stack();
79252 +
79253 if (!ss_initialized) {
79254 avtab_cache_init();
79255 rc = policydb_read(&policydb, fp);
79256 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
79257 index b9c5e14..20ab779 100644
79258 --- a/security/smack/smack_lsm.c
79259 +++ b/security/smack/smack_lsm.c
79260 @@ -3393,7 +3393,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
79261 return 0;
79262 }
79263
79264 -struct security_operations smack_ops = {
79265 +struct security_operations smack_ops __read_only = {
79266 .name = "smack",
79267
79268 .ptrace_access_check = smack_ptrace_access_check,
79269 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
79270 index f776400..f95b158c 100644
79271 --- a/security/tomoyo/tomoyo.c
79272 +++ b/security/tomoyo/tomoyo.c
79273 @@ -446,7 +446,7 @@ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
79274 * tomoyo_security_ops is a "struct security_operations" which is used for
79275 * registering TOMOYO.
79276 */
79277 -static struct security_operations tomoyo_security_ops = {
79278 +static struct security_operations tomoyo_security_ops __read_only = {
79279 .name = "tomoyo",
79280 .cred_alloc_blank = tomoyo_cred_alloc_blank,
79281 .cred_prepare = tomoyo_cred_prepare,
79282 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
79283 index 3687a6c..652565e 100644
79284 --- a/sound/aoa/codecs/onyx.c
79285 +++ b/sound/aoa/codecs/onyx.c
79286 @@ -54,7 +54,7 @@ struct onyx {
79287 spdif_locked:1,
79288 analog_locked:1,
79289 original_mute:2;
79290 - int open_count;
79291 + local_t open_count;
79292 struct codec_info *codec_info;
79293
79294 /* mutex serializes concurrent access to the device
79295 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
79296 struct onyx *onyx = cii->codec_data;
79297
79298 mutex_lock(&onyx->mutex);
79299 - onyx->open_count++;
79300 + local_inc(&onyx->open_count);
79301 mutex_unlock(&onyx->mutex);
79302
79303 return 0;
79304 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
79305 struct onyx *onyx = cii->codec_data;
79306
79307 mutex_lock(&onyx->mutex);
79308 - onyx->open_count--;
79309 - if (!onyx->open_count)
79310 + if (local_dec_and_test(&onyx->open_count))
79311 onyx->spdif_locked = onyx->analog_locked = 0;
79312 mutex_unlock(&onyx->mutex);
79313
79314 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
79315 index ffd2025..df062c9 100644
79316 --- a/sound/aoa/codecs/onyx.h
79317 +++ b/sound/aoa/codecs/onyx.h
79318 @@ -11,6 +11,7 @@
79319 #include <linux/i2c.h>
79320 #include <asm/pmac_low_i2c.h>
79321 #include <asm/prom.h>
79322 +#include <asm/local.h>
79323
79324 /* PCM3052 register definitions */
79325
79326 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
79327 index 23c34a0..a2673a5 100644
79328 --- a/sound/core/oss/pcm_oss.c
79329 +++ b/sound/core/oss/pcm_oss.c
79330 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
79331 if (in_kernel) {
79332 mm_segment_t fs;
79333 fs = snd_enter_user();
79334 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79335 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79336 snd_leave_user(fs);
79337 } else {
79338 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79339 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79340 }
79341 if (ret != -EPIPE && ret != -ESTRPIPE)
79342 break;
79343 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
79344 if (in_kernel) {
79345 mm_segment_t fs;
79346 fs = snd_enter_user();
79347 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79348 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79349 snd_leave_user(fs);
79350 } else {
79351 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79352 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79353 }
79354 if (ret == -EPIPE) {
79355 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
79356 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
79357 struct snd_pcm_plugin_channel *channels;
79358 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
79359 if (!in_kernel) {
79360 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
79361 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
79362 return -EFAULT;
79363 buf = runtime->oss.buffer;
79364 }
79365 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
79366 }
79367 } else {
79368 tmp = snd_pcm_oss_write2(substream,
79369 - (const char __force *)buf,
79370 + (const char __force_kernel *)buf,
79371 runtime->oss.period_bytes, 0);
79372 if (tmp <= 0)
79373 goto err;
79374 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
79375 struct snd_pcm_runtime *runtime = substream->runtime;
79376 snd_pcm_sframes_t frames, frames1;
79377 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
79378 - char __user *final_dst = (char __force __user *)buf;
79379 + char __user *final_dst = (char __force_user *)buf;
79380 if (runtime->oss.plugin_first) {
79381 struct snd_pcm_plugin_channel *channels;
79382 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
79383 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
79384 xfer += tmp;
79385 runtime->oss.buffer_used -= tmp;
79386 } else {
79387 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
79388 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
79389 runtime->oss.period_bytes, 0);
79390 if (tmp <= 0)
79391 goto err;
79392 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
79393 size1);
79394 size1 /= runtime->channels; /* frames */
79395 fs = snd_enter_user();
79396 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
79397 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
79398 snd_leave_user(fs);
79399 }
79400 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
79401 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
79402 index 91cdf94..4085161 100644
79403 --- a/sound/core/pcm_compat.c
79404 +++ b/sound/core/pcm_compat.c
79405 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
79406 int err;
79407
79408 fs = snd_enter_user();
79409 - err = snd_pcm_delay(substream, &delay);
79410 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
79411 snd_leave_user(fs);
79412 if (err < 0)
79413 return err;
79414 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
79415 index 1c6be91..c761a59 100644
79416 --- a/sound/core/pcm_native.c
79417 +++ b/sound/core/pcm_native.c
79418 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
79419 switch (substream->stream) {
79420 case SNDRV_PCM_STREAM_PLAYBACK:
79421 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
79422 - (void __user *)arg);
79423 + (void __force_user *)arg);
79424 break;
79425 case SNDRV_PCM_STREAM_CAPTURE:
79426 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
79427 - (void __user *)arg);
79428 + (void __force_user *)arg);
79429 break;
79430 default:
79431 result = -EINVAL;
79432 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
79433 index 1f99767..14636533 100644
79434 --- a/sound/core/seq/seq_device.c
79435 +++ b/sound/core/seq/seq_device.c
79436 @@ -63,7 +63,7 @@ struct ops_list {
79437 int argsize; /* argument size */
79438
79439 /* operators */
79440 - struct snd_seq_dev_ops ops;
79441 + struct snd_seq_dev_ops *ops;
79442
79443 /* registred devices */
79444 struct list_head dev_list; /* list of devices */
79445 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
79446
79447 mutex_lock(&ops->reg_mutex);
79448 /* copy driver operators */
79449 - ops->ops = *entry;
79450 + ops->ops = entry;
79451 ops->driver |= DRIVER_LOADED;
79452 ops->argsize = argsize;
79453
79454 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
79455 dev->name, ops->id, ops->argsize, dev->argsize);
79456 return -EINVAL;
79457 }
79458 - if (ops->ops.init_device(dev) >= 0) {
79459 + if (ops->ops->init_device(dev) >= 0) {
79460 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
79461 ops->num_init_devices++;
79462 } else {
79463 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
79464 dev->name, ops->id, ops->argsize, dev->argsize);
79465 return -EINVAL;
79466 }
79467 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
79468 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
79469 dev->status = SNDRV_SEQ_DEVICE_FREE;
79470 dev->driver_data = NULL;
79471 ops->num_init_devices--;
79472 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
79473 index 8539ab0..be8a121 100644
79474 --- a/sound/drivers/mts64.c
79475 +++ b/sound/drivers/mts64.c
79476 @@ -28,6 +28,7 @@
79477 #include <sound/initval.h>
79478 #include <sound/rawmidi.h>
79479 #include <sound/control.h>
79480 +#include <asm/local.h>
79481
79482 #define CARD_NAME "Miditerminal 4140"
79483 #define DRIVER_NAME "MTS64"
79484 @@ -66,7 +67,7 @@ struct mts64 {
79485 struct pardevice *pardev;
79486 int pardev_claimed;
79487
79488 - int open_count;
79489 + local_t open_count;
79490 int current_midi_output_port;
79491 int current_midi_input_port;
79492 u8 mode[MTS64_NUM_INPUT_PORTS];
79493 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79494 {
79495 struct mts64 *mts = substream->rmidi->private_data;
79496
79497 - if (mts->open_count == 0) {
79498 + if (local_read(&mts->open_count) == 0) {
79499 /* We don't need a spinlock here, because this is just called
79500 if the device has not been opened before.
79501 So there aren't any IRQs from the device */
79502 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79503
79504 msleep(50);
79505 }
79506 - ++(mts->open_count);
79507 + local_inc(&mts->open_count);
79508
79509 return 0;
79510 }
79511 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79512 struct mts64 *mts = substream->rmidi->private_data;
79513 unsigned long flags;
79514
79515 - --(mts->open_count);
79516 - if (mts->open_count == 0) {
79517 + if (local_dec_return(&mts->open_count) == 0) {
79518 /* We need the spinlock_irqsave here because we can still
79519 have IRQs at this point */
79520 spin_lock_irqsave(&mts->lock, flags);
79521 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79522
79523 msleep(500);
79524
79525 - } else if (mts->open_count < 0)
79526 - mts->open_count = 0;
79527 + } else if (local_read(&mts->open_count) < 0)
79528 + local_set(&mts->open_count, 0);
79529
79530 return 0;
79531 }
79532 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
79533 index f07e38d..7aae69a 100644
79534 --- a/sound/drivers/opl4/opl4_lib.c
79535 +++ b/sound/drivers/opl4/opl4_lib.c
79536 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
79537 MODULE_DESCRIPTION("OPL4 driver");
79538 MODULE_LICENSE("GPL");
79539
79540 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
79541 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
79542 {
79543 int timeout = 10;
79544 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
79545 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
79546 index f2b0ba2..429efc5 100644
79547 --- a/sound/drivers/portman2x4.c
79548 +++ b/sound/drivers/portman2x4.c
79549 @@ -47,6 +47,7 @@
79550 #include <sound/initval.h>
79551 #include <sound/rawmidi.h>
79552 #include <sound/control.h>
79553 +#include <asm/local.h>
79554
79555 #define CARD_NAME "Portman 2x4"
79556 #define DRIVER_NAME "portman"
79557 @@ -84,7 +85,7 @@ struct portman {
79558 struct pardevice *pardev;
79559 int pardev_claimed;
79560
79561 - int open_count;
79562 + local_t open_count;
79563 int mode[PORTMAN_NUM_INPUT_PORTS];
79564 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
79565 };
79566 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
79567 index 87657dd..a8268d4 100644
79568 --- a/sound/firewire/amdtp.c
79569 +++ b/sound/firewire/amdtp.c
79570 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
79571 ptr = s->pcm_buffer_pointer + data_blocks;
79572 if (ptr >= pcm->runtime->buffer_size)
79573 ptr -= pcm->runtime->buffer_size;
79574 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
79575 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
79576
79577 s->pcm_period_pointer += data_blocks;
79578 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
79579 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
79580 */
79581 void amdtp_out_stream_update(struct amdtp_out_stream *s)
79582 {
79583 - ACCESS_ONCE(s->source_node_id_field) =
79584 + ACCESS_ONCE_RW(s->source_node_id_field) =
79585 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
79586 }
79587 EXPORT_SYMBOL(amdtp_out_stream_update);
79588 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
79589 index 537a9cb..8e8c8e9 100644
79590 --- a/sound/firewire/amdtp.h
79591 +++ b/sound/firewire/amdtp.h
79592 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
79593 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
79594 struct snd_pcm_substream *pcm)
79595 {
79596 - ACCESS_ONCE(s->pcm) = pcm;
79597 + ACCESS_ONCE_RW(s->pcm) = pcm;
79598 }
79599
79600 /**
79601 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
79602 index 4400308..261e9f3 100644
79603 --- a/sound/firewire/isight.c
79604 +++ b/sound/firewire/isight.c
79605 @@ -97,7 +97,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
79606 ptr += count;
79607 if (ptr >= runtime->buffer_size)
79608 ptr -= runtime->buffer_size;
79609 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
79610 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
79611
79612 isight->period_counter += count;
79613 if (isight->period_counter >= runtime->period_size) {
79614 @@ -308,7 +308,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
79615 if (err < 0)
79616 return err;
79617
79618 - ACCESS_ONCE(isight->pcm_active) = true;
79619 + ACCESS_ONCE_RW(isight->pcm_active) = true;
79620
79621 return 0;
79622 }
79623 @@ -341,7 +341,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
79624 {
79625 struct isight *isight = substream->private_data;
79626
79627 - ACCESS_ONCE(isight->pcm_active) = false;
79628 + ACCESS_ONCE_RW(isight->pcm_active) = false;
79629
79630 mutex_lock(&isight->mutex);
79631 isight_stop_streaming(isight);
79632 @@ -434,10 +434,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
79633
79634 switch (cmd) {
79635 case SNDRV_PCM_TRIGGER_START:
79636 - ACCESS_ONCE(isight->pcm_running) = true;
79637 + ACCESS_ONCE_RW(isight->pcm_running) = true;
79638 break;
79639 case SNDRV_PCM_TRIGGER_STOP:
79640 - ACCESS_ONCE(isight->pcm_running) = false;
79641 + ACCESS_ONCE_RW(isight->pcm_running) = false;
79642 break;
79643 default:
79644 return -EINVAL;
79645 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
79646 index fe79a16..4d9714e 100644
79647 --- a/sound/isa/cmi8330.c
79648 +++ b/sound/isa/cmi8330.c
79649 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
79650
79651 struct snd_pcm *pcm;
79652 struct snd_cmi8330_stream {
79653 - struct snd_pcm_ops ops;
79654 + snd_pcm_ops_no_const ops;
79655 snd_pcm_open_callback_t open;
79656 void *private_data; /* sb or wss */
79657 } streams[2];
79658 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
79659 index 733b014..56ce96f 100644
79660 --- a/sound/oss/sb_audio.c
79661 +++ b/sound/oss/sb_audio.c
79662 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
79663 buf16 = (signed short *)(localbuf + localoffs);
79664 while (c)
79665 {
79666 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79667 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
79668 if (copy_from_user(lbuf8,
79669 userbuf+useroffs + p,
79670 locallen))
79671 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
79672 index 09d4648..cf234c7 100644
79673 --- a/sound/oss/swarm_cs4297a.c
79674 +++ b/sound/oss/swarm_cs4297a.c
79675 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
79676 {
79677 struct cs4297a_state *s;
79678 u32 pwr, id;
79679 - mm_segment_t fs;
79680 int rval;
79681 #ifndef CONFIG_BCM_CS4297A_CSWARM
79682 u64 cfg;
79683 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
79684 if (!rval) {
79685 char *sb1250_duart_present;
79686
79687 +#if 0
79688 + mm_segment_t fs;
79689 fs = get_fs();
79690 set_fs(KERNEL_DS);
79691 -#if 0
79692 val = SOUND_MASK_LINE;
79693 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
79694 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
79695 val = initvol[i].vol;
79696 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
79697 }
79698 + set_fs(fs);
79699 // cs4297a_write_ac97(s, 0x18, 0x0808);
79700 #else
79701 // cs4297a_write_ac97(s, 0x5e, 0x180);
79702 cs4297a_write_ac97(s, 0x02, 0x0808);
79703 cs4297a_write_ac97(s, 0x18, 0x0808);
79704 #endif
79705 - set_fs(fs);
79706
79707 list_add(&s->list, &cs4297a_devs);
79708
79709 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
79710 index 755f2b0..5c12361 100644
79711 --- a/sound/pci/hda/hda_codec.h
79712 +++ b/sound/pci/hda/hda_codec.h
79713 @@ -611,7 +611,7 @@ struct hda_bus_ops {
79714 /* notify power-up/down from codec to controller */
79715 void (*pm_notify)(struct hda_bus *bus);
79716 #endif
79717 -};
79718 +} __no_const;
79719
79720 /* template to pass to the bus constructor */
79721 struct hda_bus_template {
79722 @@ -713,6 +713,7 @@ struct hda_codec_ops {
79723 #endif
79724 void (*reboot_notify)(struct hda_codec *codec);
79725 };
79726 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
79727
79728 /* record for amp information cache */
79729 struct hda_cache_head {
79730 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
79731 struct snd_pcm_substream *substream);
79732 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
79733 struct snd_pcm_substream *substream);
79734 -};
79735 +} __no_const;
79736
79737 /* PCM information for each substream */
79738 struct hda_pcm_stream {
79739 @@ -801,7 +802,7 @@ struct hda_codec {
79740 const char *modelname; /* model name for preset */
79741
79742 /* set by patch */
79743 - struct hda_codec_ops patch_ops;
79744 + hda_codec_ops_no_const patch_ops;
79745
79746 /* PCM to create, set by patch_ops.build_pcms callback */
79747 unsigned int num_pcms;
79748 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
79749 index 0da778a..bc38b84 100644
79750 --- a/sound/pci/ice1712/ice1712.h
79751 +++ b/sound/pci/ice1712/ice1712.h
79752 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
79753 unsigned int mask_flags; /* total mask bits */
79754 struct snd_akm4xxx_ops {
79755 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
79756 - } ops;
79757 + } __no_const ops;
79758 };
79759
79760 struct snd_ice1712_spdif {
79761 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
79762 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79763 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79764 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
79765 - } ops;
79766 + } __no_const ops;
79767 };
79768
79769
79770 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
79771 index f3260e6..4a285d8 100644
79772 --- a/sound/pci/ymfpci/ymfpci_main.c
79773 +++ b/sound/pci/ymfpci/ymfpci_main.c
79774 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
79775 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
79776 break;
79777 }
79778 - if (atomic_read(&chip->interrupt_sleep_count)) {
79779 - atomic_set(&chip->interrupt_sleep_count, 0);
79780 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79781 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79782 wake_up(&chip->interrupt_sleep);
79783 }
79784 __end:
79785 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
79786 continue;
79787 init_waitqueue_entry(&wait, current);
79788 add_wait_queue(&chip->interrupt_sleep, &wait);
79789 - atomic_inc(&chip->interrupt_sleep_count);
79790 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
79791 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
79792 remove_wait_queue(&chip->interrupt_sleep, &wait);
79793 }
79794 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
79795 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
79796 spin_unlock(&chip->reg_lock);
79797
79798 - if (atomic_read(&chip->interrupt_sleep_count)) {
79799 - atomic_set(&chip->interrupt_sleep_count, 0);
79800 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
79801 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79802 wake_up(&chip->interrupt_sleep);
79803 }
79804 }
79805 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
79806 spin_lock_init(&chip->reg_lock);
79807 spin_lock_init(&chip->voice_lock);
79808 init_waitqueue_head(&chip->interrupt_sleep);
79809 - atomic_set(&chip->interrupt_sleep_count, 0);
79810 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
79811 chip->card = card;
79812 chip->pci = pci;
79813 chip->irq = -1;
79814 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
79815 index 2879c88..224159e 100644
79816 --- a/sound/soc/soc-pcm.c
79817 +++ b/sound/soc/soc-pcm.c
79818 @@ -568,7 +568,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
79819 }
79820
79821 /* ASoC PCM operations */
79822 -static struct snd_pcm_ops soc_pcm_ops = {
79823 +static snd_pcm_ops_no_const soc_pcm_ops = {
79824 .open = soc_pcm_open,
79825 .close = soc_pcm_close,
79826 .hw_params = soc_pcm_hw_params,
79827 diff --git a/sound/usb/card.h b/sound/usb/card.h
79828 index ae4251d..0961361 100644
79829 --- a/sound/usb/card.h
79830 +++ b/sound/usb/card.h
79831 @@ -44,6 +44,7 @@ struct snd_urb_ops {
79832 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79833 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
79834 };
79835 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
79836
79837 struct snd_usb_substream {
79838 struct snd_usb_stream *stream;
79839 @@ -93,7 +94,7 @@ struct snd_usb_substream {
79840 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
79841 spinlock_t lock;
79842
79843 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
79844 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
79845 };
79846
79847 struct snd_usb_stream {
79848 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
79849 new file mode 100644
79850 index 0000000..b044b80
79851 --- /dev/null
79852 +++ b/tools/gcc/Makefile
79853 @@ -0,0 +1,21 @@
79854 +#CC := gcc
79855 +#PLUGIN_SOURCE_FILES := pax_plugin.c
79856 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
79857 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
79858 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
79859 +
79860 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
79861 +
79862 +hostlibs-y := constify_plugin.so
79863 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
79864 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
79865 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
79866 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
79867 +
79868 +always := $(hostlibs-y)
79869 +
79870 +constify_plugin-objs := constify_plugin.o
79871 +stackleak_plugin-objs := stackleak_plugin.o
79872 +kallocstat_plugin-objs := kallocstat_plugin.o
79873 +kernexec_plugin-objs := kernexec_plugin.o
79874 +checker_plugin-objs := checker_plugin.o
79875 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
79876 new file mode 100644
79877 index 0000000..d41b5af
79878 --- /dev/null
79879 +++ b/tools/gcc/checker_plugin.c
79880 @@ -0,0 +1,171 @@
79881 +/*
79882 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
79883 + * Licensed under the GPL v2
79884 + *
79885 + * Note: the choice of the license means that the compilation process is
79886 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
79887 + * but for the kernel it doesn't matter since it doesn't link against
79888 + * any of the gcc libraries
79889 + *
79890 + * gcc plugin to implement various sparse (source code checker) features
79891 + *
79892 + * TODO:
79893 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
79894 + *
79895 + * BUGS:
79896 + * - none known
79897 + */
79898 +#include "gcc-plugin.h"
79899 +#include "config.h"
79900 +#include "system.h"
79901 +#include "coretypes.h"
79902 +#include "tree.h"
79903 +#include "tree-pass.h"
79904 +#include "flags.h"
79905 +#include "intl.h"
79906 +#include "toplev.h"
79907 +#include "plugin.h"
79908 +//#include "expr.h" where are you...
79909 +#include "diagnostic.h"
79910 +#include "plugin-version.h"
79911 +#include "tm.h"
79912 +#include "function.h"
79913 +#include "basic-block.h"
79914 +#include "gimple.h"
79915 +#include "rtl.h"
79916 +#include "emit-rtl.h"
79917 +#include "tree-flow.h"
79918 +#include "target.h"
79919 +
79920 +extern void c_register_addr_space (const char *str, addr_space_t as);
79921 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
79922 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
79923 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
79924 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
79925 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
79926 +
79927 +extern void print_gimple_stmt(FILE *, gimple, int, int);
79928 +extern rtx emit_move_insn(rtx x, rtx y);
79929 +
79930 +int plugin_is_GPL_compatible;
79931 +
79932 +static struct plugin_info checker_plugin_info = {
79933 + .version = "201111150100",
79934 +};
79935 +
79936 +#define ADDR_SPACE_KERNEL 0
79937 +#define ADDR_SPACE_FORCE_KERNEL 1
79938 +#define ADDR_SPACE_USER 2
79939 +#define ADDR_SPACE_FORCE_USER 3
79940 +#define ADDR_SPACE_IOMEM 0
79941 +#define ADDR_SPACE_FORCE_IOMEM 0
79942 +#define ADDR_SPACE_PERCPU 0
79943 +#define ADDR_SPACE_FORCE_PERCPU 0
79944 +#define ADDR_SPACE_RCU 0
79945 +#define ADDR_SPACE_FORCE_RCU 0
79946 +
79947 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
79948 +{
79949 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
79950 +}
79951 +
79952 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
79953 +{
79954 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
79955 +}
79956 +
79957 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
79958 +{
79959 + return default_addr_space_valid_pointer_mode(mode, as);
79960 +}
79961 +
79962 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
79963 +{
79964 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
79965 +}
79966 +
79967 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
79968 +{
79969 + return default_addr_space_legitimize_address(x, oldx, mode, as);
79970 +}
79971 +
79972 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
79973 +{
79974 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
79975 + return true;
79976 +
79977 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
79978 + return true;
79979 +
79980 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
79981 + return true;
79982 +
79983 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
79984 + return true;
79985 +
79986 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
79987 + return true;
79988 +
79989 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
79990 + return true;
79991 +
79992 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
79993 + return true;
79994 +
79995 + return subset == superset;
79996 +}
79997 +
79998 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
79999 +{
80000 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80001 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80002 +
80003 + return op;
80004 +}
80005 +
80006 +static void register_checker_address_spaces(void *event_data, void *data)
80007 +{
80008 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80009 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80010 + c_register_addr_space("__user", ADDR_SPACE_USER);
80011 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80012 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80013 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80014 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80015 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80016 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80017 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80018 +
80019 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80020 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80021 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80022 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80023 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80024 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80025 + targetm.addr_space.convert = checker_addr_space_convert;
80026 +}
80027 +
80028 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80029 +{
80030 + const char * const plugin_name = plugin_info->base_name;
80031 + const int argc = plugin_info->argc;
80032 + const struct plugin_argument * const argv = plugin_info->argv;
80033 + int i;
80034 +
80035 + if (!plugin_default_version_check(version, &gcc_version)) {
80036 + error(G_("incompatible gcc/plugin versions"));
80037 + return 1;
80038 + }
80039 +
80040 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80041 +
80042 + for (i = 0; i < argc; ++i)
80043 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80044 +
80045 + if (TARGET_64BIT == 0)
80046 + return 0;
80047 +
80048 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80049 +
80050 + return 0;
80051 +}
80052 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
80053 new file mode 100644
80054 index 0000000..5b07edd
80055 --- /dev/null
80056 +++ b/tools/gcc/constify_plugin.c
80057 @@ -0,0 +1,303 @@
80058 +/*
80059 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
80060 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
80061 + * Licensed under the GPL v2, or (at your option) v3
80062 + *
80063 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
80064 + *
80065 + * Homepage:
80066 + * http://www.grsecurity.net/~ephox/const_plugin/
80067 + *
80068 + * Usage:
80069 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
80070 + * $ gcc -fplugin=constify_plugin.so test.c -O2
80071 + */
80072 +
80073 +#include "gcc-plugin.h"
80074 +#include "config.h"
80075 +#include "system.h"
80076 +#include "coretypes.h"
80077 +#include "tree.h"
80078 +#include "tree-pass.h"
80079 +#include "flags.h"
80080 +#include "intl.h"
80081 +#include "toplev.h"
80082 +#include "plugin.h"
80083 +#include "diagnostic.h"
80084 +#include "plugin-version.h"
80085 +#include "tm.h"
80086 +#include "function.h"
80087 +#include "basic-block.h"
80088 +#include "gimple.h"
80089 +#include "rtl.h"
80090 +#include "emit-rtl.h"
80091 +#include "tree-flow.h"
80092 +
80093 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
80094 +
80095 +int plugin_is_GPL_compatible;
80096 +
80097 +static struct plugin_info const_plugin_info = {
80098 + .version = "201111150100",
80099 + .help = "no-constify\tturn off constification\n",
80100 +};
80101 +
80102 +static void constify_type(tree type);
80103 +static bool walk_struct(tree node);
80104 +
80105 +static tree deconstify_type(tree old_type)
80106 +{
80107 + tree new_type, field;
80108 +
80109 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
80110 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
80111 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
80112 + DECL_FIELD_CONTEXT(field) = new_type;
80113 + TYPE_READONLY(new_type) = 0;
80114 + C_TYPE_FIELDS_READONLY(new_type) = 0;
80115 + return new_type;
80116 +}
80117 +
80118 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80119 +{
80120 + tree type;
80121 +
80122 + *no_add_attrs = true;
80123 + if (TREE_CODE(*node) == FUNCTION_DECL) {
80124 + error("%qE attribute does not apply to functions", name);
80125 + return NULL_TREE;
80126 + }
80127 +
80128 + if (TREE_CODE(*node) == VAR_DECL) {
80129 + error("%qE attribute does not apply to variables", name);
80130 + return NULL_TREE;
80131 + }
80132 +
80133 + if (TYPE_P(*node)) {
80134 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
80135 + *no_add_attrs = false;
80136 + else
80137 + error("%qE attribute applies to struct and union types only", name);
80138 + return NULL_TREE;
80139 + }
80140 +
80141 + type = TREE_TYPE(*node);
80142 +
80143 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
80144 + error("%qE attribute applies to struct and union types only", name);
80145 + return NULL_TREE;
80146 + }
80147 +
80148 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
80149 + error("%qE attribute is already applied to the type", name);
80150 + return NULL_TREE;
80151 + }
80152 +
80153 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
80154 + error("%qE attribute used on type that is not constified", name);
80155 + return NULL_TREE;
80156 + }
80157 +
80158 + if (TREE_CODE(*node) == TYPE_DECL) {
80159 + TREE_TYPE(*node) = deconstify_type(type);
80160 + TREE_READONLY(*node) = 0;
80161 + return NULL_TREE;
80162 + }
80163 +
80164 + return NULL_TREE;
80165 +}
80166 +
80167 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80168 +{
80169 + *no_add_attrs = true;
80170 + if (!TYPE_P(*node)) {
80171 + error("%qE attribute applies to types only", name);
80172 + return NULL_TREE;
80173 + }
80174 +
80175 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
80176 + error("%qE attribute applies to struct and union types only", name);
80177 + return NULL_TREE;
80178 + }
80179 +
80180 + *no_add_attrs = false;
80181 + constify_type(*node);
80182 + return NULL_TREE;
80183 +}
80184 +
80185 +static struct attribute_spec no_const_attr = {
80186 + .name = "no_const",
80187 + .min_length = 0,
80188 + .max_length = 0,
80189 + .decl_required = false,
80190 + .type_required = false,
80191 + .function_type_required = false,
80192 + .handler = handle_no_const_attribute,
80193 +#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
80194 + .affects_type_identity = true
80195 +#endif
80196 +};
80197 +
80198 +static struct attribute_spec do_const_attr = {
80199 + .name = "do_const",
80200 + .min_length = 0,
80201 + .max_length = 0,
80202 + .decl_required = false,
80203 + .type_required = false,
80204 + .function_type_required = false,
80205 + .handler = handle_do_const_attribute,
80206 +#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
80207 + .affects_type_identity = true
80208 +#endif
80209 +};
80210 +
80211 +static void register_attributes(void *event_data, void *data)
80212 +{
80213 + register_attribute(&no_const_attr);
80214 + register_attribute(&do_const_attr);
80215 +}
80216 +
80217 +static void constify_type(tree type)
80218 +{
80219 + TYPE_READONLY(type) = 1;
80220 + C_TYPE_FIELDS_READONLY(type) = 1;
80221 +}
80222 +
80223 +static bool is_fptr(tree field)
80224 +{
80225 + tree ptr = TREE_TYPE(field);
80226 +
80227 + if (TREE_CODE(ptr) != POINTER_TYPE)
80228 + return false;
80229 +
80230 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
80231 +}
80232 +
80233 +static bool walk_struct(tree node)
80234 +{
80235 + tree field;
80236 +
80237 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
80238 + return false;
80239 +
80240 + if (TYPE_FIELDS(node) == NULL_TREE)
80241 + return false;
80242 +
80243 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
80244 + tree type = TREE_TYPE(field);
80245 + enum tree_code code = TREE_CODE(type);
80246 + if (code == RECORD_TYPE || code == UNION_TYPE) {
80247 + if (!(walk_struct(type)))
80248 + return false;
80249 + } else if (!is_fptr(field) && !TREE_READONLY(field))
80250 + return false;
80251 + }
80252 + return true;
80253 +}
80254 +
80255 +static void finish_type(void *event_data, void *data)
80256 +{
80257 + tree type = (tree)event_data;
80258 +
80259 + if (type == NULL_TREE)
80260 + return;
80261 +
80262 + if (TYPE_READONLY(type))
80263 + return;
80264 +
80265 + if (walk_struct(type))
80266 + constify_type(type);
80267 +}
80268 +
80269 +static unsigned int check_local_variables(void);
80270 +
80271 +struct gimple_opt_pass pass_local_variable = {
80272 + {
80273 + .type = GIMPLE_PASS,
80274 + .name = "check_local_variables",
80275 + .gate = NULL,
80276 + .execute = check_local_variables,
80277 + .sub = NULL,
80278 + .next = NULL,
80279 + .static_pass_number = 0,
80280 + .tv_id = TV_NONE,
80281 + .properties_required = 0,
80282 + .properties_provided = 0,
80283 + .properties_destroyed = 0,
80284 + .todo_flags_start = 0,
80285 + .todo_flags_finish = 0
80286 + }
80287 +};
80288 +
80289 +static unsigned int check_local_variables(void)
80290 +{
80291 + tree var;
80292 + referenced_var_iterator rvi;
80293 +
80294 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
80295 + FOR_EACH_REFERENCED_VAR(var, rvi) {
80296 +#else
80297 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
80298 +#endif
80299 + tree type = TREE_TYPE(var);
80300 +
80301 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
80302 + continue;
80303 +
80304 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80305 + continue;
80306 +
80307 + if (!TYPE_READONLY(type))
80308 + continue;
80309 +
80310 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
80311 +// continue;
80312 +
80313 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
80314 +// continue;
80315 +
80316 + if (walk_struct(type)) {
80317 + error("constified variable %qE cannot be local", var);
80318 + return 1;
80319 + }
80320 + }
80321 + return 0;
80322 +}
80323 +
80324 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80325 +{
80326 + const char * const plugin_name = plugin_info->base_name;
80327 + const int argc = plugin_info->argc;
80328 + const struct plugin_argument * const argv = plugin_info->argv;
80329 + int i;
80330 + bool constify = true;
80331 +
80332 + struct register_pass_info local_variable_pass_info = {
80333 + .pass = &pass_local_variable.pass,
80334 + .reference_pass_name = "*referenced_vars",
80335 + .ref_pass_instance_number = 0,
80336 + .pos_op = PASS_POS_INSERT_AFTER
80337 + };
80338 +
80339 + if (!plugin_default_version_check(version, &gcc_version)) {
80340 + error(G_("incompatible gcc/plugin versions"));
80341 + return 1;
80342 + }
80343 +
80344 + for (i = 0; i < argc; ++i) {
80345 + if (!(strcmp(argv[i].key, "no-constify"))) {
80346 + constify = false;
80347 + continue;
80348 + }
80349 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80350 + }
80351 +
80352 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
80353 + if (constify) {
80354 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
80355 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
80356 + }
80357 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
80358 +
80359 + return 0;
80360 +}
80361 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
80362 new file mode 100644
80363 index 0000000..a5eabce
80364 --- /dev/null
80365 +++ b/tools/gcc/kallocstat_plugin.c
80366 @@ -0,0 +1,167 @@
80367 +/*
80368 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80369 + * Licensed under the GPL v2
80370 + *
80371 + * Note: the choice of the license means that the compilation process is
80372 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80373 + * but for the kernel it doesn't matter since it doesn't link against
80374 + * any of the gcc libraries
80375 + *
80376 + * gcc plugin to find the distribution of k*alloc sizes
80377 + *
80378 + * TODO:
80379 + *
80380 + * BUGS:
80381 + * - none known
80382 + */
80383 +#include "gcc-plugin.h"
80384 +#include "config.h"
80385 +#include "system.h"
80386 +#include "coretypes.h"
80387 +#include "tree.h"
80388 +#include "tree-pass.h"
80389 +#include "flags.h"
80390 +#include "intl.h"
80391 +#include "toplev.h"
80392 +#include "plugin.h"
80393 +//#include "expr.h" where are you...
80394 +#include "diagnostic.h"
80395 +#include "plugin-version.h"
80396 +#include "tm.h"
80397 +#include "function.h"
80398 +#include "basic-block.h"
80399 +#include "gimple.h"
80400 +#include "rtl.h"
80401 +#include "emit-rtl.h"
80402 +
80403 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80404 +
80405 +int plugin_is_GPL_compatible;
80406 +
80407 +static const char * const kalloc_functions[] = {
80408 + "__kmalloc",
80409 + "kmalloc",
80410 + "kmalloc_large",
80411 + "kmalloc_node",
80412 + "kmalloc_order",
80413 + "kmalloc_order_trace",
80414 + "kmalloc_slab",
80415 + "kzalloc",
80416 + "kzalloc_node",
80417 +};
80418 +
80419 +static struct plugin_info kallocstat_plugin_info = {
80420 + .version = "201111150100",
80421 +};
80422 +
80423 +static unsigned int execute_kallocstat(void);
80424 +
80425 +static struct gimple_opt_pass kallocstat_pass = {
80426 + .pass = {
80427 + .type = GIMPLE_PASS,
80428 + .name = "kallocstat",
80429 + .gate = NULL,
80430 + .execute = execute_kallocstat,
80431 + .sub = NULL,
80432 + .next = NULL,
80433 + .static_pass_number = 0,
80434 + .tv_id = TV_NONE,
80435 + .properties_required = 0,
80436 + .properties_provided = 0,
80437 + .properties_destroyed = 0,
80438 + .todo_flags_start = 0,
80439 + .todo_flags_finish = 0
80440 + }
80441 +};
80442 +
80443 +static bool is_kalloc(const char *fnname)
80444 +{
80445 + size_t i;
80446 +
80447 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
80448 + if (!strcmp(fnname, kalloc_functions[i]))
80449 + return true;
80450 + return false;
80451 +}
80452 +
80453 +static unsigned int execute_kallocstat(void)
80454 +{
80455 + basic_block bb;
80456 +
80457 + // 1. loop through BBs and GIMPLE statements
80458 + FOR_EACH_BB(bb) {
80459 + gimple_stmt_iterator gsi;
80460 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80461 + // gimple match:
80462 + tree fndecl, size;
80463 + gimple call_stmt;
80464 + const char *fnname;
80465 +
80466 + // is it a call
80467 + call_stmt = gsi_stmt(gsi);
80468 + if (!is_gimple_call(call_stmt))
80469 + continue;
80470 + fndecl = gimple_call_fndecl(call_stmt);
80471 + if (fndecl == NULL_TREE)
80472 + continue;
80473 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
80474 + continue;
80475 +
80476 + // is it a call to k*alloc
80477 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
80478 + if (!is_kalloc(fnname))
80479 + continue;
80480 +
80481 + // is the size arg the result of a simple const assignment
80482 + size = gimple_call_arg(call_stmt, 0);
80483 + while (true) {
80484 + gimple def_stmt;
80485 + expanded_location xloc;
80486 + size_t size_val;
80487 +
80488 + if (TREE_CODE(size) != SSA_NAME)
80489 + break;
80490 + def_stmt = SSA_NAME_DEF_STMT(size);
80491 + if (!def_stmt || !is_gimple_assign(def_stmt))
80492 + break;
80493 + if (gimple_num_ops(def_stmt) != 2)
80494 + break;
80495 + size = gimple_assign_rhs1(def_stmt);
80496 + if (!TREE_CONSTANT(size))
80497 + continue;
80498 + xloc = expand_location(gimple_location(def_stmt));
80499 + if (!xloc.file)
80500 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
80501 + size_val = TREE_INT_CST_LOW(size);
80502 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
80503 + break;
80504 + }
80505 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80506 +//debug_tree(gimple_call_fn(call_stmt));
80507 +//print_node(stderr, "pax", fndecl, 4);
80508 + }
80509 + }
80510 +
80511 + return 0;
80512 +}
80513 +
80514 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80515 +{
80516 + const char * const plugin_name = plugin_info->base_name;
80517 + struct register_pass_info kallocstat_pass_info = {
80518 + .pass = &kallocstat_pass.pass,
80519 + .reference_pass_name = "ssa",
80520 + .ref_pass_instance_number = 0,
80521 + .pos_op = PASS_POS_INSERT_AFTER
80522 + };
80523 +
80524 + if (!plugin_default_version_check(version, &gcc_version)) {
80525 + error(G_("incompatible gcc/plugin versions"));
80526 + return 1;
80527 + }
80528 +
80529 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
80530 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
80531 +
80532 + return 0;
80533 +}
80534 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
80535 new file mode 100644
80536 index 0000000..51f747e
80537 --- /dev/null
80538 +++ b/tools/gcc/kernexec_plugin.c
80539 @@ -0,0 +1,348 @@
80540 +/*
80541 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80542 + * Licensed under the GPL v2
80543 + *
80544 + * Note: the choice of the license means that the compilation process is
80545 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80546 + * but for the kernel it doesn't matter since it doesn't link against
80547 + * any of the gcc libraries
80548 + *
80549 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
80550 + *
80551 + * TODO:
80552 + *
80553 + * BUGS:
80554 + * - none known
80555 + */
80556 +#include "gcc-plugin.h"
80557 +#include "config.h"
80558 +#include "system.h"
80559 +#include "coretypes.h"
80560 +#include "tree.h"
80561 +#include "tree-pass.h"
80562 +#include "flags.h"
80563 +#include "intl.h"
80564 +#include "toplev.h"
80565 +#include "plugin.h"
80566 +//#include "expr.h" where are you...
80567 +#include "diagnostic.h"
80568 +#include "plugin-version.h"
80569 +#include "tm.h"
80570 +#include "function.h"
80571 +#include "basic-block.h"
80572 +#include "gimple.h"
80573 +#include "rtl.h"
80574 +#include "emit-rtl.h"
80575 +#include "tree-flow.h"
80576 +
80577 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80578 +extern rtx emit_move_insn(rtx x, rtx y);
80579 +
80580 +int plugin_is_GPL_compatible;
80581 +
80582 +static struct plugin_info kernexec_plugin_info = {
80583 + .version = "201111291120",
80584 + .help = "method=[bts|or]\tinstrumentation method\n"
80585 +};
80586 +
80587 +static unsigned int execute_kernexec_fptr(void);
80588 +static unsigned int execute_kernexec_retaddr(void);
80589 +static bool kernexec_cmodel_check(void);
80590 +
80591 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator);
80592 +static void (*kernexec_instrument_retaddr)(rtx);
80593 +
80594 +static struct gimple_opt_pass kernexec_fptr_pass = {
80595 + .pass = {
80596 + .type = GIMPLE_PASS,
80597 + .name = "kernexec_fptr",
80598 + .gate = kernexec_cmodel_check,
80599 + .execute = execute_kernexec_fptr,
80600 + .sub = NULL,
80601 + .next = NULL,
80602 + .static_pass_number = 0,
80603 + .tv_id = TV_NONE,
80604 + .properties_required = 0,
80605 + .properties_provided = 0,
80606 + .properties_destroyed = 0,
80607 + .todo_flags_start = 0,
80608 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
80609 + }
80610 +};
80611 +
80612 +static struct rtl_opt_pass kernexec_retaddr_pass = {
80613 + .pass = {
80614 + .type = RTL_PASS,
80615 + .name = "kernexec_retaddr",
80616 + .gate = kernexec_cmodel_check,
80617 + .execute = execute_kernexec_retaddr,
80618 + .sub = NULL,
80619 + .next = NULL,
80620 + .static_pass_number = 0,
80621 + .tv_id = TV_NONE,
80622 + .properties_required = 0,
80623 + .properties_provided = 0,
80624 + .properties_destroyed = 0,
80625 + .todo_flags_start = 0,
80626 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
80627 + }
80628 +};
80629 +
80630 +static bool kernexec_cmodel_check(void)
80631 +{
80632 + tree section;
80633 +
80634 + if (ix86_cmodel != CM_KERNEL)
80635 + return false;
80636 +
80637 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
80638 + if (!section || !TREE_VALUE(section))
80639 + return true;
80640 +
80641 + section = TREE_VALUE(TREE_VALUE(section));
80642 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
80643 + return true;
80644 +
80645 + return false;
80646 +}
80647 +
80648 +/*
80649 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
80650 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
80651 + */
80652 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator gsi)
80653 +{
80654 + gimple assign_intptr, assign_new_fptr, call_stmt;
80655 + tree intptr, old_fptr, new_fptr, kernexec_mask;
80656 +
80657 + call_stmt = gsi_stmt(gsi);
80658 + old_fptr = gimple_call_fn(call_stmt);
80659 +
80660 + // create temporary unsigned long variable used for bitops and cast fptr to it
80661 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
80662 + add_referenced_var(intptr);
80663 + mark_sym_for_renaming(intptr);
80664 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
80665 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
80666 + update_stmt(assign_intptr);
80667 +
80668 + // apply logical or to temporary unsigned long and bitmask
80669 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
80670 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
80671 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
80672 + gsi_insert_before(&gsi, assign_intptr, GSI_SAME_STMT);
80673 + update_stmt(assign_intptr);
80674 +
80675 + // cast temporary unsigned long back to a temporary fptr variable
80676 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec");
80677 + add_referenced_var(new_fptr);
80678 + mark_sym_for_renaming(new_fptr);
80679 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
80680 + gsi_insert_before(&gsi, assign_new_fptr, GSI_SAME_STMT);
80681 + update_stmt(assign_new_fptr);
80682 +
80683 + // replace call stmt fn with the new fptr
80684 + gimple_call_set_fn(call_stmt, new_fptr);
80685 + update_stmt(call_stmt);
80686 +}
80687 +
80688 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator gsi)
80689 +{
80690 + gimple asm_or_stmt, call_stmt;
80691 + tree old_fptr, new_fptr, input, output;
80692 + VEC(tree, gc) *inputs = NULL;
80693 + VEC(tree, gc) *outputs = NULL;
80694 +
80695 + call_stmt = gsi_stmt(gsi);
80696 + old_fptr = gimple_call_fn(call_stmt);
80697 +
80698 + // create temporary fptr variable
80699 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
80700 + add_referenced_var(new_fptr);
80701 + mark_sym_for_renaming(new_fptr);
80702 +
80703 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
80704 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
80705 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
80706 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
80707 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
80708 + VEC_safe_push(tree, gc, inputs, input);
80709 + VEC_safe_push(tree, gc, outputs, output);
80710 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
80711 + gimple_asm_set_volatile(asm_or_stmt, true);
80712 + gsi_insert_before(&gsi, asm_or_stmt, GSI_SAME_STMT);
80713 + update_stmt(asm_or_stmt);
80714 +
80715 + // replace call stmt fn with the new fptr
80716 + gimple_call_set_fn(call_stmt, new_fptr);
80717 + update_stmt(call_stmt);
80718 +}
80719 +
80720 +/*
80721 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
80722 + */
80723 +static unsigned int execute_kernexec_fptr(void)
80724 +{
80725 + basic_block bb;
80726 + gimple_stmt_iterator gsi;
80727 +
80728 + // 1. loop through BBs and GIMPLE statements
80729 + FOR_EACH_BB(bb) {
80730 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
80731 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
80732 + tree fn;
80733 + gimple call_stmt;
80734 +
80735 + // is it a call ...
80736 + call_stmt = gsi_stmt(gsi);
80737 + if (!is_gimple_call(call_stmt))
80738 + continue;
80739 + fn = gimple_call_fn(call_stmt);
80740 + if (TREE_CODE(fn) == ADDR_EXPR)
80741 + continue;
80742 + if (TREE_CODE(fn) != SSA_NAME)
80743 + gcc_unreachable();
80744 +
80745 + // ... through a function pointer
80746 + fn = SSA_NAME_VAR(fn);
80747 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
80748 + continue;
80749 + fn = TREE_TYPE(fn);
80750 + if (TREE_CODE(fn) != POINTER_TYPE)
80751 + continue;
80752 + fn = TREE_TYPE(fn);
80753 + if (TREE_CODE(fn) != FUNCTION_TYPE)
80754 + continue;
80755 +
80756 + kernexec_instrument_fptr(gsi);
80757 +
80758 +//debug_tree(gimple_call_fn(call_stmt));
80759 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
80760 + }
80761 + }
80762 +
80763 + return 0;
80764 +}
80765 +
80766 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
80767 +static void kernexec_instrument_retaddr_bts(rtx insn)
80768 +{
80769 + rtx btsq;
80770 + rtvec argvec, constraintvec, labelvec;
80771 + int line;
80772 +
80773 + // create asm volatile("btsq $63,(%%rsp)":::)
80774 + argvec = rtvec_alloc(0);
80775 + constraintvec = rtvec_alloc(0);
80776 + labelvec = rtvec_alloc(0);
80777 + line = expand_location(RTL_LOCATION(insn)).line;
80778 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80779 + MEM_VOLATILE_P(btsq) = 1;
80780 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
80781 + emit_insn_before(btsq, insn);
80782 +}
80783 +
80784 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
80785 +static void kernexec_instrument_retaddr_or(rtx insn)
80786 +{
80787 + rtx orq;
80788 + rtvec argvec, constraintvec, labelvec;
80789 + int line;
80790 +
80791 + // create asm volatile("orq %%r10,(%%rsp)":::)
80792 + argvec = rtvec_alloc(0);
80793 + constraintvec = rtvec_alloc(0);
80794 + labelvec = rtvec_alloc(0);
80795 + line = expand_location(RTL_LOCATION(insn)).line;
80796 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
80797 + MEM_VOLATILE_P(orq) = 1;
80798 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
80799 + emit_insn_before(orq, insn);
80800 +}
80801 +
80802 +/*
80803 + * find all asm level function returns and forcibly set the highest bit of the return address
80804 + */
80805 +static unsigned int execute_kernexec_retaddr(void)
80806 +{
80807 + rtx insn;
80808 +
80809 + // 1. find function returns
80810 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
80811 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
80812 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
80813 + rtx body;
80814 +
80815 + // is it a retn
80816 + if (!JUMP_P(insn))
80817 + continue;
80818 + body = PATTERN(insn);
80819 + if (GET_CODE(body) == PARALLEL)
80820 + body = XVECEXP(body, 0, 0);
80821 + if (GET_CODE(body) != RETURN)
80822 + continue;
80823 + kernexec_instrument_retaddr(insn);
80824 + }
80825 +
80826 +// print_simple_rtl(stderr, get_insns());
80827 +// print_rtl(stderr, get_insns());
80828 +
80829 + return 0;
80830 +}
80831 +
80832 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80833 +{
80834 + const char * const plugin_name = plugin_info->base_name;
80835 + const int argc = plugin_info->argc;
80836 + const struct plugin_argument * const argv = plugin_info->argv;
80837 + int i;
80838 + struct register_pass_info kernexec_fptr_pass_info = {
80839 + .pass = &kernexec_fptr_pass.pass,
80840 + .reference_pass_name = "ssa",
80841 + .ref_pass_instance_number = 0,
80842 + .pos_op = PASS_POS_INSERT_AFTER
80843 + };
80844 + struct register_pass_info kernexec_retaddr_pass_info = {
80845 + .pass = &kernexec_retaddr_pass.pass,
80846 + .reference_pass_name = "pro_and_epilogue",
80847 + .ref_pass_instance_number = 0,
80848 + .pos_op = PASS_POS_INSERT_AFTER
80849 + };
80850 +
80851 + if (!plugin_default_version_check(version, &gcc_version)) {
80852 + error(G_("incompatible gcc/plugin versions"));
80853 + return 1;
80854 + }
80855 +
80856 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
80857 +
80858 + if (TARGET_64BIT == 0)
80859 + return 0;
80860 +
80861 + for (i = 0; i < argc; ++i) {
80862 + if (!strcmp(argv[i].key, "method")) {
80863 + if (!argv[i].value) {
80864 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80865 + continue;
80866 + }
80867 + if (!strcmp(argv[i].value, "bts")) {
80868 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
80869 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
80870 + } else if (!strcmp(argv[i].value, "or")) {
80871 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
80872 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
80873 + fix_register("r10", 1, 1);
80874 + } else
80875 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
80876 + continue;
80877 + }
80878 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80879 + }
80880 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
80881 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
80882 +
80883 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
80884 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
80885 +
80886 + return 0;
80887 +}
80888 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
80889 new file mode 100644
80890 index 0000000..41dd4b1
80891 --- /dev/null
80892 +++ b/tools/gcc/stackleak_plugin.c
80893 @@ -0,0 +1,291 @@
80894 +/*
80895 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80896 + * Licensed under the GPL v2
80897 + *
80898 + * Note: the choice of the license means that the compilation process is
80899 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80900 + * but for the kernel it doesn't matter since it doesn't link against
80901 + * any of the gcc libraries
80902 + *
80903 + * gcc plugin to help implement various PaX features
80904 + *
80905 + * - track lowest stack pointer
80906 + *
80907 + * TODO:
80908 + * - initialize all local variables
80909 + *
80910 + * BUGS:
80911 + * - none known
80912 + */
80913 +#include "gcc-plugin.h"
80914 +#include "config.h"
80915 +#include "system.h"
80916 +#include "coretypes.h"
80917 +#include "tree.h"
80918 +#include "tree-pass.h"
80919 +#include "flags.h"
80920 +#include "intl.h"
80921 +#include "toplev.h"
80922 +#include "plugin.h"
80923 +//#include "expr.h" where are you...
80924 +#include "diagnostic.h"
80925 +#include "plugin-version.h"
80926 +#include "tm.h"
80927 +#include "function.h"
80928 +#include "basic-block.h"
80929 +#include "gimple.h"
80930 +#include "rtl.h"
80931 +#include "emit-rtl.h"
80932 +
80933 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80934 +
80935 +int plugin_is_GPL_compatible;
80936 +
80937 +static int track_frame_size = -1;
80938 +static const char track_function[] = "pax_track_stack";
80939 +static const char check_function[] = "pax_check_alloca";
80940 +static bool init_locals;
80941 +
80942 +static struct plugin_info stackleak_plugin_info = {
80943 + .version = "201111150100",
80944 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
80945 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
80946 +};
80947 +
80948 +static bool gate_stackleak_track_stack(void);
80949 +static unsigned int execute_stackleak_tree_instrument(void);
80950 +static unsigned int execute_stackleak_final(void);
80951 +
80952 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
80953 + .pass = {
80954 + .type = GIMPLE_PASS,
80955 + .name = "stackleak_tree_instrument",
80956 + .gate = gate_stackleak_track_stack,
80957 + .execute = execute_stackleak_tree_instrument,
80958 + .sub = NULL,
80959 + .next = NULL,
80960 + .static_pass_number = 0,
80961 + .tv_id = TV_NONE,
80962 + .properties_required = PROP_gimple_leh | PROP_cfg,
80963 + .properties_provided = 0,
80964 + .properties_destroyed = 0,
80965 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
80966 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
80967 + }
80968 +};
80969 +
80970 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
80971 + .pass = {
80972 + .type = RTL_PASS,
80973 + .name = "stackleak_final",
80974 + .gate = gate_stackleak_track_stack,
80975 + .execute = execute_stackleak_final,
80976 + .sub = NULL,
80977 + .next = NULL,
80978 + .static_pass_number = 0,
80979 + .tv_id = TV_NONE,
80980 + .properties_required = 0,
80981 + .properties_provided = 0,
80982 + .properties_destroyed = 0,
80983 + .todo_flags_start = 0,
80984 + .todo_flags_finish = TODO_dump_func
80985 + }
80986 +};
80987 +
80988 +static bool gate_stackleak_track_stack(void)
80989 +{
80990 + return track_frame_size >= 0;
80991 +}
80992 +
80993 +static void stackleak_check_alloca(gimple_stmt_iterator gsi)
80994 +{
80995 + gimple check_alloca;
80996 + tree fndecl, fntype, alloca_size;
80997 +
80998 + // insert call to void pax_check_alloca(unsigned long size)
80999 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
81000 + fndecl = build_fn_decl(check_function, fntype);
81001 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
81002 + alloca_size = gimple_call_arg(gsi_stmt(gsi), 0);
81003 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
81004 + gsi_insert_before(&gsi, check_alloca, GSI_CONTINUE_LINKING);
81005 +}
81006 +
81007 +static void stackleak_add_instrumentation(gimple_stmt_iterator gsi)
81008 +{
81009 + gimple track_stack;
81010 + tree fndecl, fntype;
81011 +
81012 + // insert call to void pax_track_stack(void)
81013 + fntype = build_function_type_list(void_type_node, NULL_TREE);
81014 + fndecl = build_fn_decl(track_function, fntype);
81015 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
81016 + track_stack = gimple_build_call(fndecl, 0);
81017 + gsi_insert_after(&gsi, track_stack, GSI_CONTINUE_LINKING);
81018 +}
81019 +
81020 +#if __GNUC__ == 4 && __GNUC_MINOR__ == 5
81021 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
81022 +{
81023 + tree fndecl;
81024 +
81025 + if (!is_gimple_call(stmt))
81026 + return false;
81027 + fndecl = gimple_call_fndecl(stmt);
81028 + if (!fndecl)
81029 + return false;
81030 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
81031 + return false;
81032 +// print_node(stderr, "pax", fndecl, 4);
81033 + return DECL_FUNCTION_CODE(fndecl) == code;
81034 +}
81035 +#endif
81036 +
81037 +static bool is_alloca(gimple stmt)
81038 +{
81039 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
81040 + return true;
81041 +
81042 +#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
81043 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
81044 + return true;
81045 +#endif
81046 +
81047 + return false;
81048 +}
81049 +
81050 +static unsigned int execute_stackleak_tree_instrument(void)
81051 +{
81052 + basic_block bb, entry_bb;
81053 + bool prologue_instrumented = false;
81054 +
81055 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
81056 +
81057 + // 1. loop through BBs and GIMPLE statements
81058 + FOR_EACH_BB(bb) {
81059 + gimple_stmt_iterator gsi;
81060 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81061 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
81062 + if (!is_alloca(gsi_stmt(gsi)))
81063 + continue;
81064 +
81065 + // 2. insert stack overflow check before each __builtin_alloca call
81066 + stackleak_check_alloca(gsi);
81067 +
81068 + // 3. insert track call after each __builtin_alloca call
81069 + stackleak_add_instrumentation(gsi);
81070 + if (bb == entry_bb)
81071 + prologue_instrumented = true;
81072 + }
81073 + }
81074 +
81075 + // 4. insert track call at the beginning
81076 + if (!prologue_instrumented) {
81077 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
81078 + if (dom_info_available_p(CDI_DOMINATORS))
81079 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
81080 + stackleak_add_instrumentation(gsi_start_bb(bb));
81081 + }
81082 +
81083 + return 0;
81084 +}
81085 +
81086 +static unsigned int execute_stackleak_final(void)
81087 +{
81088 + rtx insn;
81089 +
81090 + if (cfun->calls_alloca)
81091 + return 0;
81092 +
81093 + // keep calls only if function frame is big enough
81094 + if (get_frame_size() >= track_frame_size)
81095 + return 0;
81096 +
81097 + // 1. find pax_track_stack calls
81098 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81099 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
81100 + rtx body;
81101 +
81102 + if (!CALL_P(insn))
81103 + continue;
81104 + body = PATTERN(insn);
81105 + if (GET_CODE(body) != CALL)
81106 + continue;
81107 + body = XEXP(body, 0);
81108 + if (GET_CODE(body) != MEM)
81109 + continue;
81110 + body = XEXP(body, 0);
81111 + if (GET_CODE(body) != SYMBOL_REF)
81112 + continue;
81113 + if (strcmp(XSTR(body, 0), track_function))
81114 + continue;
81115 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
81116 + // 2. delete call
81117 + insn = delete_insn_and_edges(insn);
81118 +#if __GNUC__ > 4 || __GNUC_MINOR__ >= 7
81119 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
81120 + insn = delete_insn_and_edges(insn);
81121 +#endif
81122 + }
81123 +
81124 +// print_simple_rtl(stderr, get_insns());
81125 +// print_rtl(stderr, get_insns());
81126 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
81127 +
81128 + return 0;
81129 +}
81130 +
81131 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81132 +{
81133 + const char * const plugin_name = plugin_info->base_name;
81134 + const int argc = plugin_info->argc;
81135 + const struct plugin_argument * const argv = plugin_info->argv;
81136 + int i;
81137 + struct register_pass_info stackleak_tree_instrument_pass_info = {
81138 + .pass = &stackleak_tree_instrument_pass.pass,
81139 +// .reference_pass_name = "tree_profile",
81140 + .reference_pass_name = "optimized",
81141 + .ref_pass_instance_number = 0,
81142 + .pos_op = PASS_POS_INSERT_AFTER
81143 + };
81144 + struct register_pass_info stackleak_final_pass_info = {
81145 + .pass = &stackleak_final_rtl_opt_pass.pass,
81146 + .reference_pass_name = "final",
81147 + .ref_pass_instance_number = 0,
81148 + .pos_op = PASS_POS_INSERT_BEFORE
81149 + };
81150 +
81151 + if (!plugin_default_version_check(version, &gcc_version)) {
81152 + error(G_("incompatible gcc/plugin versions"));
81153 + return 1;
81154 + }
81155 +
81156 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
81157 +
81158 + for (i = 0; i < argc; ++i) {
81159 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
81160 + if (!argv[i].value) {
81161 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81162 + continue;
81163 + }
81164 + track_frame_size = atoi(argv[i].value);
81165 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
81166 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81167 + continue;
81168 + }
81169 + if (!strcmp(argv[i].key, "initialize-locals")) {
81170 + if (argv[i].value) {
81171 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81172 + continue;
81173 + }
81174 + init_locals = true;
81175 + continue;
81176 + }
81177 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81178 + }
81179 +
81180 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
81181 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
81182 +
81183 + return 0;
81184 +}
81185 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
81186 index 6789d78..4afd019 100644
81187 --- a/tools/perf/util/include/asm/alternative-asm.h
81188 +++ b/tools/perf/util/include/asm/alternative-asm.h
81189 @@ -5,4 +5,7 @@
81190
81191 #define altinstruction_entry #
81192
81193 + .macro pax_force_retaddr rip=0, reload=0
81194 + .endm
81195 +
81196 #endif
81197 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
81198 index af0f22f..9a7d479 100644
81199 --- a/usr/gen_init_cpio.c
81200 +++ b/usr/gen_init_cpio.c
81201 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
81202 int retval;
81203 int rc = -1;
81204 int namesize;
81205 - int i;
81206 + unsigned int i;
81207
81208 mode |= S_IFREG;
81209
81210 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
81211 *env_var = *expanded = '\0';
81212 strncat(env_var, start + 2, end - start - 2);
81213 strncat(expanded, new_location, start - new_location);
81214 - strncat(expanded, getenv(env_var), PATH_MAX);
81215 - strncat(expanded, end + 1, PATH_MAX);
81216 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
81217 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
81218 strncpy(new_location, expanded, PATH_MAX);
81219 + new_location[PATH_MAX] = 0;
81220 } else
81221 break;
81222 }
81223 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
81224 index aefdda3..8e8fbb9 100644
81225 --- a/virt/kvm/kvm_main.c
81226 +++ b/virt/kvm/kvm_main.c
81227 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
81228
81229 static cpumask_var_t cpus_hardware_enabled;
81230 static int kvm_usage_count = 0;
81231 -static atomic_t hardware_enable_failed;
81232 +static atomic_unchecked_t hardware_enable_failed;
81233
81234 struct kmem_cache *kvm_vcpu_cache;
81235 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
81236 @@ -2266,7 +2266,7 @@ static void hardware_enable_nolock(void *junk)
81237
81238 if (r) {
81239 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
81240 - atomic_inc(&hardware_enable_failed);
81241 + atomic_inc_unchecked(&hardware_enable_failed);
81242 printk(KERN_INFO "kvm: enabling virtualization on "
81243 "CPU%d failed\n", cpu);
81244 }
81245 @@ -2320,10 +2320,10 @@ static int hardware_enable_all(void)
81246
81247 kvm_usage_count++;
81248 if (kvm_usage_count == 1) {
81249 - atomic_set(&hardware_enable_failed, 0);
81250 + atomic_set_unchecked(&hardware_enable_failed, 0);
81251 on_each_cpu(hardware_enable_nolock, NULL, 1);
81252
81253 - if (atomic_read(&hardware_enable_failed)) {
81254 + if (atomic_read_unchecked(&hardware_enable_failed)) {
81255 hardware_disable_all_nolock();
81256 r = -EBUSY;
81257 }
81258 @@ -2588,7 +2588,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
81259 kvm_arch_vcpu_put(vcpu);
81260 }
81261
81262 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81263 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81264 struct module *module)
81265 {
81266 int r;
81267 @@ -2651,7 +2651,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81268 if (!vcpu_align)
81269 vcpu_align = __alignof__(struct kvm_vcpu);
81270 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
81271 - 0, NULL);
81272 + SLAB_USERCOPY, NULL);
81273 if (!kvm_vcpu_cache) {
81274 r = -ENOMEM;
81275 goto out_free_3;
81276 @@ -2661,9 +2661,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
81277 if (r)
81278 goto out_free;
81279
81280 - kvm_chardev_ops.owner = module;
81281 - kvm_vm_fops.owner = module;
81282 - kvm_vcpu_fops.owner = module;
81283 + pax_open_kernel();
81284 + *(void **)&kvm_chardev_ops.owner = module;
81285 + *(void **)&kvm_vm_fops.owner = module;
81286 + *(void **)&kvm_vcpu_fops.owner = module;
81287 + pax_close_kernel();
81288
81289 r = misc_register(&kvm_dev);
81290 if (r) {